author	David S. Miller <davem@davemloft.net>	2015-03-03 21:16:48 -0500
committer	David S. Miller <davem@davemloft.net>	2015-03-03 21:16:48 -0500
commit	71a83a6db6138b9d41d8a0b6b91cb59f6dc4742c (patch)
tree	f74b6e4e48257ec6ce40b95645ecb8533b9cc1f8 /drivers
parent	b97526f3ff95f92b107f0fb52cbb8627e395429b (diff)
parent	a6c5170d1edea97c538c81e377e56c7b5c5b7e63 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/rocker/rocker.c

The rocker commit was two overlapping changes, one to rename the
->vport member to ->pport, and another making the bitmask expression
use '1ULL' instead of plain '1'.

Signed-off-by: David S. Miller <davem@davemloft.net>
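The '1ULL' detail is worth spelling out: rocker's port bitmasks are 64-bit, and in C a plain 1 is an int, so shifting it by a bit position of 32 or more is undefined behavior and loses the high bits. A minimal stand-alone sketch of the hazard (illustrative only; the variable names are hypothetical, not rocker code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int pport = 40;	/* any bit position >= 32 */
		uint64_t bad = 1 << pport;	/* int-width shift: undefined, high bits lost */
		uint64_t good = 1ULL << pport;	/* 64-bit shift: the intended mask */

		printf("bad=%llx good=%llx\n",
		       (unsigned long long)bad, (unsigned long long)good);
		return 0;
	}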
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpi_lpat.c161
-rw-r--r--drivers/acpi/acpi_lpss.c21
-rw-r--r--drivers/acpi/ec.c4
-rw-r--r--drivers/acpi/pmic/intel_pmic.c133
-rw-r--r--drivers/acpi/resource.c2
-rw-r--r--drivers/acpi/video.c9
-rw-r--r--drivers/block/nvme-core.c513
-rw-r--r--drivers/block/nvme-scsi.c96
-rw-r--r--drivers/block/rbd.c193
-rw-r--r--drivers/block/virtio_blk.c12
-rw-r--r--drivers/block/zram/zram_drv.c2
-rw-r--r--drivers/bluetooth/btusb.c1
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c6
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c102
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c121
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c6
-rw-r--r--drivers/char/virtio_console.c5
-rw-r--r--drivers/clk/Kconfig18
-rw-r--r--drivers/clk/Makefile4
-rw-r--r--drivers/clk/at91/clk-programmable.c2
-rw-r--r--drivers/clk/bcm/clk-kona.c2
-rw-r--r--drivers/clk/clk-asm9260.c348
-rw-r--r--drivers/clk/clk-cdce706.c700
-rw-r--r--drivers/clk/clk-composite.c29
-rw-r--r--drivers/clk/clk-divider.c228
-rw-r--r--drivers/clk/clk-gate.c18
-rw-r--r--drivers/clk/clk-mux.c16
-rw-r--r--drivers/clk/clk-qoriq.c (renamed from drivers/clk/clk-ppc-corenet.c)178
-rw-r--r--drivers/clk/clk.c1009
-rw-r--r--drivers/clk/clk.h24
-rw-r--r--drivers/clk/clkdev.c110
-rw-r--r--drivers/clk/hisilicon/clk-hi3620.c2
-rw-r--r--drivers/clk/mmp/clk-mix.c2
-rw-r--r--drivers/clk/pxa/Makefile1
-rw-r--r--drivers/clk/pxa/clk-pxa.c2
-rw-r--r--drivers/clk/pxa/clk-pxa3xx.c364
-rw-r--r--drivers/clk/qcom/Kconfig18
-rw-r--r--drivers/clk/qcom/Makefile4
-rw-r--r--drivers/clk/qcom/clk-pll.c1
-rw-r--r--drivers/clk/qcom/clk-rcg.c10
-rw-r--r--drivers/clk/qcom/clk-rcg2.c6
-rw-r--r--drivers/clk/qcom/clk-regmap-divider.c70
-rw-r--r--drivers/clk/qcom/clk-regmap-divider.h29
-rw-r--r--drivers/clk/qcom/clk-regmap-mux.c59
-rw-r--r--drivers/clk/qcom/clk-regmap-mux.h29
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c12
-rw-r--r--drivers/clk/qcom/lcc-ipq806x.c473
-rw-r--r--drivers/clk/qcom/lcc-msm8960.c585
-rw-r--r--drivers/clk/rockchip/clk-rk3288.c48
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c32
-rw-r--r--drivers/clk/samsung/clk-exynos3250.c217
-rw-r--r--drivers/clk/samsung/clk-exynos4.c10
-rw-r--r--drivers/clk/samsung/clk-exynos4415.c216
-rw-r--r--drivers/clk/samsung/clk-exynos7.c408
-rw-r--r--drivers/clk/samsung/clk.c13
-rw-r--r--drivers/clk/samsung/clk.h3
-rw-r--r--drivers/clk/shmobile/Makefile2
-rw-r--r--drivers/clk/shmobile/clk-div6.c18
-rw-r--r--drivers/clk/shmobile/clk-r8a73a4.c241
-rw-r--r--drivers/clk/shmobile/clk-rcar-gen2.c88
-rw-r--r--drivers/clk/st/clk-flexgen.c39
-rw-r--r--drivers/clk/st/clkgen-mux.c14
-rw-r--r--drivers/clk/sunxi/Makefile1
-rw-r--r--drivers/clk/sunxi/clk-factors.c12
-rw-r--r--drivers/clk/sunxi/clk-factors.h7
-rw-r--r--drivers/clk/sunxi/clk-mod0.c224
-rw-r--r--drivers/clk/sunxi/clk-sun6i-ar100.c2
-rw-r--r--drivers/clk/sunxi/clk-sun8i-mbus.c13
-rw-r--r--drivers/clk/sunxi/clk-sun9i-core.c119
-rw-r--r--drivers/clk/sunxi/clk-sun9i-mmc.c219
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c262
-rw-r--r--drivers/clk/tegra/Makefile1
-rw-r--r--drivers/clk/tegra/clk-id.h2
-rw-r--r--drivers/clk/tegra/clk-periph.c14
-rw-r--r--drivers/clk/tegra/clk-pll.c18
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c18
-rw-r--r--drivers/clk/tegra/clk-tegra114.c10
-rw-r--r--drivers/clk/tegra/clk-tegra124.c168
-rw-r--r--drivers/clk/tegra/clk.c7
-rw-r--r--drivers/clk/ti/Makefile8
-rw-r--r--drivers/clk/ti/clk-3xxx-legacy.c4653
-rw-r--r--drivers/clk/ti/clk-3xxx.c8
-rw-r--r--drivers/clk/ti/clk-44xx.c2
-rw-r--r--drivers/clk/ti/clk-54xx.c2
-rw-r--r--drivers/clk/ti/clk-7xx.c2
-rw-r--r--drivers/clk/ti/clk-816x.c53
-rw-r--r--drivers/clk/ti/clk.c127
-rw-r--r--drivers/clk/ti/clock.h172
-rw-r--r--drivers/clk/ti/composite.c48
-rw-r--r--drivers/clk/ti/divider.c132
-rw-r--r--drivers/clk/ti/dpll.c121
-rw-r--r--drivers/clk/ti/fapll.c410
-rw-r--r--drivers/clk/ti/gate.c163
-rw-r--r--drivers/clk/ti/interface.c98
-rw-r--r--drivers/clk/ti/mux.c70
-rw-r--r--drivers/clk/ux500/clk-prcc.c1
-rw-r--r--drivers/clk/ux500/clk-prcmu.c1
-rw-r--r--drivers/clk/zynq/clkc.c1
-rw-r--r--drivers/clocksource/Kconfig16
-rw-r--r--drivers/clocksource/mtk_timer.c9
-rw-r--r--drivers/clocksource/pxa_timer.c2
-rw-r--r--drivers/connector/Kconfig2
-rw-r--r--drivers/cpufreq/Kconfig.arm44
-rw-r--r--drivers/cpufreq/Kconfig.powerpc2
-rw-r--r--drivers/cpufreq/Makefile9
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c33
-rw-r--r--drivers/cpufreq/s3c2416-cpufreq.c4
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq.c10
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c84
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c4
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c2
-rw-r--r--drivers/dma/Kconfig9
-rw-r--r--drivers/dma/Makefile3
-rw-r--r--drivers/dma/amba-pl08x.c156
-rw-r--r--drivers/dma/at_hdmac.c130
-rw-r--r--drivers/dma/at_hdmac_regs.h3
-rw-r--r--drivers/dma/at_xdmac.c186
-rw-r--r--drivers/dma/bcm2835-dma.c46
-rw-r--r--drivers/dma/coh901318.c153
-rw-r--r--drivers/dma/cppi41.c30
-rw-r--r--drivers/dma/dma-jz4740.c20
-rw-r--r--drivers/dma/dmaengine.c84
-rw-r--r--drivers/dma/dmatest.c35
-rw-r--r--drivers/dma/dw/core.c101
-rw-r--r--drivers/dma/dw/platform.c4
-rw-r--r--drivers/dma/dw/regs.h4
-rw-r--r--drivers/dma/edma.c73
-rw-r--r--drivers/dma/ep93xx_dma.c43
-rw-r--r--drivers/dma/fsl-edma.c123
-rw-r--r--drivers/dma/fsldma.c97
-rw-r--r--drivers/dma/fsldma.h4
-rw-r--r--drivers/dma/img-mdc-dma.c1011
-rw-r--r--drivers/dma/imx-dma.c108
-rw-r--r--drivers/dma/imx-sdma.c150
-rw-r--r--drivers/dma/intel_mid_dma.c25
-rw-r--r--drivers/dma/ioat/dma_v3.c25
-rw-r--r--drivers/dma/ioat/hw.h5
-rw-r--r--drivers/dma/ioat/pci.c5
-rw-r--r--drivers/dma/ipu/ipu_idmac.c96
-rw-r--r--drivers/dma/k3dma.c203
-rw-r--r--drivers/dma/mmp_pdma.c109
-rw-r--r--drivers/dma/mmp_tdma.c85
-rw-r--r--drivers/dma/moxart-dma.c25
-rw-r--r--drivers/dma/mpc512x_dma.c111
-rw-r--r--drivers/dma/mv_xor.c9
-rw-r--r--drivers/dma/mxs-dma.c65
-rw-r--r--drivers/dma/nbpfaxi.c112
-rw-r--r--drivers/dma/of-dma.c4
-rw-r--r--drivers/dma/omap-dma.c69
-rw-r--r--drivers/dma/pch_dma.c8
-rw-r--r--drivers/dma/pl330.c230
-rw-r--r--drivers/dma/qcom_bam_dma.c85
-rw-r--r--drivers/dma/s3c24xx-dma.c73
-rw-r--r--drivers/dma/sa11x0-dma.c157
-rw-r--r--drivers/dma/sh/Kconfig14
-rw-r--r--drivers/dma/sh/Makefile1
-rw-r--r--drivers/dma/sh/rcar-dmac.c1770
-rw-r--r--drivers/dma/sh/rcar-hpbdma.c6
-rw-r--r--drivers/dma/sh/shdma-base.c72
-rw-r--r--drivers/dma/sh/shdmac.c23
-rw-r--r--drivers/dma/sirf-dma.c59
-rw-r--r--drivers/dma/ste_dma40.c63
-rw-r--r--drivers/dma/sun6i-dma.c160
-rw-r--r--drivers/dma/tegra20-apb-dma.c42
-rw-r--r--drivers/dma/timb_dma.c8
-rw-r--r--drivers/dma/txx9dmac.c9
-rw-r--r--drivers/dma/xilinx/xilinx_vdma.c29
-rw-r--r--drivers/edac/amd64_edac.c10
-rw-r--r--drivers/edac/sb_edac.c9
-rw-r--r--drivers/firewire/core-transaction.c4
-rw-r--r--drivers/firewire/ohci.c5
-rw-r--r--drivers/firewire/sbp2.c11
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c16
-rw-r--r--drivers/gpio/gpio-tps65912.c14
-rw-r--r--drivers/gpio/gpiolib-of.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c10
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c3
-rw-r--r--drivers/gpu/drm/drm_crtc.c3
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h15
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c7
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c22
-rw-r--r--drivers/gpu/drm/i915/intel_display.c34
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c8
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c7
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c21
-rw-r--r--drivers/gpu/drm/radeon/cik.c8
-rw-r--r--drivers/gpu/drm/radeon/cikd.h4
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c7
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h4
-rw-r--r--drivers/gpu/drm/radeon/ni.c10
-rw-r--r--drivers/gpu/drm/radeon/nid.h4
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c6
-rw-r--r--drivers/gpu/drm/radeon/si.c22
-rw-r--r--drivers/gpu/drm/radeon/sid.h4
-rw-r--r--drivers/gpu/drm/tegra/dc.c79
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c8
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-ids.h2
-rw-r--r--drivers/hid/hid-microsoft.c2
-rw-r--r--drivers/hid/hid-saitek.c2
-rw-r--r--drivers/hid/hid-sensor-hub.c8
-rw-r--r--drivers/hid/hid-sony.c6
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c7
-rw-r--r--drivers/hid/wacom_wac.c11
-rw-r--r--drivers/hwmon/Kconfig2
-rw-r--r--drivers/hwmon/ads7828.c3
-rw-r--r--drivers/hwmon/pmbus/Kconfig2
-rw-r--r--drivers/i2c/Kconfig4
-rw-r--r--drivers/i2c/busses/Kconfig22
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c461
-rw-r--r--drivers/i2c/busses/i2c-cadence.c189
-rw-r--r--drivers/i2c/busses/i2c-designware-baytrail.c160
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c83
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h12
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c41
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c20
-rw-r--r--drivers/i2c/busses/i2c-imx.c33
-rw-r--r--drivers/i2c/busses/i2c-ocores.c91
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c7
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c99
-rw-r--r--drivers/i2c/busses/i2c-tegra.c2
-rw-r--r--drivers/i2c/i2c-core.c162
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c11
-rw-r--r--drivers/iio/Kconfig4
-rw-r--r--drivers/infiniband/core/ucma.c3
-rw-r--r--drivers/infiniband/core/umem_odp.c3
-rw-r--r--drivers/infiniband/core/uverbs.h1
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c158
-rw-r--r--drivers/infiniband/core/uverbs_main.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c9
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h29
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_kernel.h3
-rw-r--r--drivers/infiniband/hw/ipath/ipath_wc_ppc64.c13
-rw-r--r--drivers/infiniband/hw/ipath/ipath_wc_x86_64.c15
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c7
-rw-r--r--drivers/infiniband/hw/mlx4/main.c11
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c6
-rw-r--r--drivers/infiniband/hw/mlx5/main.c4
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c1
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h38
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c38
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c312
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.h2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c12
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h68
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c241
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.h6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c183
-rw-r--r--drivers/infiniband/hw/qib/qib.h16
-rw-r--r--drivers/infiniband/hw/qib/qib_common.h4
-rw-r--r--drivers/infiniband/hw/qib/qib_debugfs.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_diag.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_eeprom.c198
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c26
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c11
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c15
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c14
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c52
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_keys.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c20
-rw-r--r--drivers/infiniband/hw/qib/qib_mmap.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_mr.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c13
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c28
-rw-r--r--drivers/infiniband/hw/qib/qib_twsi.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_tx.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c15
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs_mcast.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_wc_x86_64.c7
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h4
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c16
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c9
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c27
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c46
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c4
-rw-r--r--drivers/input/joystick/adi.c3
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c4
-rw-r--r--drivers/input/misc/bfin_rotary.c208
-rw-r--r--drivers/input/misc/soc_button_array.c2
-rw-r--r--drivers/input/mouse/alps.c516
-rw-r--r--drivers/input/mouse/alps.h65
-rw-r--r--drivers/input/mouse/cypress_ps2.c5
-rw-r--r--drivers/input/mouse/cypress_ps2.h5
-rw-r--r--drivers/input/mouse/focaltech.c10
-rw-r--r--drivers/input/mouse/focaltech.h1
-rw-r--r--drivers/input/mouse/psmouse-base.c6
-rw-r--r--drivers/input/mouse/synaptics.c10
-rw-r--r--drivers/input/mouse/synaptics.h1
-rw-r--r--drivers/irqchip/irq-mips-gic.c8
-rw-r--r--drivers/isdn/hardware/mISDN/Kconfig2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c2
-rw-r--r--drivers/lguest/Makefile3
-rw-r--r--drivers/lguest/core.c29
-rw-r--r--drivers/lguest/hypercalls.c7
-rw-r--r--drivers/lguest/lg.h26
-rw-r--r--drivers/lguest/lguest_device.c540
-rw-r--r--drivers/lguest/lguest_user.c221
-rw-r--r--drivers/lguest/page_tables.c75
-rw-r--r--drivers/lguest/x86/core.c198
-rw-r--r--drivers/md/Kconfig4
-rw-r--r--drivers/md/dm-crypt.c392
-rw-r--r--drivers/md/dm-io.c6
-rw-r--r--drivers/md/dm-raid1.c9
-rw-r--r--drivers/md/dm-snap.c4
-rw-r--r--drivers/md/dm.c27
-rw-r--r--drivers/md/md.c14
-rw-r--r--drivers/md/persistent-data/Kconfig2
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.c4
-rw-r--r--drivers/md/raid1.c5
-rw-r--r--drivers/md/raid5.c13
-rw-r--r--drivers/mfd/88pm860x-core.c2
-rw-r--r--drivers/mfd/Kconfig39
-rw-r--r--drivers/mfd/Makefile4
-rw-r--r--drivers/mfd/da9063-core.c2
-rw-r--r--drivers/mfd/da9063-i2c.c9
-rw-r--r--drivers/mfd/da9150-core.c413
-rw-r--r--drivers/mfd/davinci_voicecodec.c2
-rw-r--r--drivers/mfd/db8500-prcmu.c9
-rw-r--r--drivers/mfd/dln2.c71
-rw-r--r--drivers/mfd/hi6421-pmic-core.c2
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c3
-rw-r--r--drivers/mfd/intel_soc_pmic_core.h2
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c2
-rw-r--r--drivers/mfd/lm3533-core.c2
-rw-r--r--drivers/mfd/lpc_sch.c1
-rw-r--r--drivers/mfd/max77686.c29
-rw-r--r--drivers/mfd/mc13xxx-i2c.c2
-rw-r--r--drivers/mfd/mc13xxx-spi.c2
-rw-r--r--drivers/mfd/omap-usb-host.c10
-rw-r--r--drivers/mfd/pcf50633-core.c2
-rw-r--r--drivers/mfd/qcom_rpm.c581
-rw-r--r--drivers/mfd/retu-mfd.c2
-rw-r--r--drivers/mfd/rt5033.c142
-rw-r--r--drivers/mfd/rtsx_usb.c18
-rw-r--r--drivers/mfd/smsc-ece1099.c2
-rw-r--r--drivers/mfd/sun6i-prcm.c14
-rw-r--r--drivers/mfd/tps65217.c2
-rw-r--r--drivers/mfd/tps65218.c2
-rw-r--r--drivers/mfd/twl-core.c8
-rw-r--r--drivers/mfd/twl6040.c4
-rw-r--r--drivers/mfd/wm8994-core.c6
-rw-r--r--drivers/mmc/host/sunxi-mmc.c63
-rw-r--r--drivers/mtd/bcm47xxpart.c43
-rw-r--r--drivers/mtd/chips/map_ram.c1
-rw-r--r--drivers/mtd/chips/map_rom.c13
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c137
-rw-r--r--drivers/mtd/maps/physmap_of.c10
-rw-r--r--drivers/mtd/mtdblock.c10
-rw-r--r--drivers/mtd/mtdconcat.c3
-rw-r--r--drivers/mtd/mtdcore.c28
-rw-r--r--drivers/mtd/nand/Kconfig7
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/ams-delta.c6
-rw-r--r--drivers/mtd/nand/atmel_nand.c31
-rw-r--r--drivers/mtd/nand/denali.c40
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c9
-rw-r--r--drivers/mtd/nand/hisi504_nand.c891
-rw-r--r--drivers/mtd/nand/jz4740_nand.c29
-rw-r--r--drivers/mtd/nand/nand_base.c31
-rw-r--r--drivers/mtd/nand/nandsim.c7
-rw-r--r--drivers/mtd/nand/omap2.c31
-rw-r--r--drivers/mtd/nand/sunxi_nand.c2
-rw-r--r--drivers/mtd/nftlmount.c18
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c93
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c63
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/appletalk/Kconfig2
-rw-r--r--drivers/net/dsa/bcm_sf2.h2
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c7
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c7
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c47
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c175
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c7
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c122
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c57
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c54
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c246
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c24
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c44
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c119
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c143
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c9
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c8
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c32
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c18
-rw-r--r--drivers/net/ethernet/rocker/rocker.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c7
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c9
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h114
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c10
-rw-r--r--drivers/net/ethernet/sun/niu.c6
-rw-r--r--drivers/net/ethernet/ti/Kconfig4
-rw-r--r--drivers/net/ethernet/ti/cpsw.c9
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c5
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c2
-rw-r--r--drivers/net/macvtap.c7
-rw-r--r--drivers/net/phy/amd-xgbe-phy.c82
-rw-r--r--drivers/net/phy/phy.c23
-rw-r--r--drivers/net/team/team.c4
-rw-r--r--drivers/net/usb/Kconfig13
-rw-r--r--drivers/net/usb/asix_devices.c4
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/plusb.c5
-rw-r--r--drivers/net/virtio_net.c6
-rw-r--r--drivers/net/wan/cosa.c12
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c5
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig6
-rw-r--r--drivers/net/xen-netback/netback.c29
-rw-r--r--drivers/of/of_pci.c1
-rw-r--r--drivers/pci/pcie/aer/Kconfig2
-rw-r--r--drivers/platform/x86/Kconfig25
-rw-r--r--drivers/platform/x86/asus-laptop.c97
-rw-r--r--drivers/platform/x86/classmate-laptop.c2
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c7
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c77
-rw-r--r--drivers/platform/x86/samsung-laptop.c146
-rw-r--r--drivers/platform/x86/sony-laptop.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c24
-rw-r--r--drivers/platform/x86/toshiba_acpi.c1025
-rw-r--r--drivers/pnp/resource.c6
-rw-r--r--drivers/pwm/Kconfig24
-rw-r--r--drivers/pwm/Makefile2
-rw-r--r--drivers/pwm/core.c2
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c6
-rw-r--r--drivers/pwm/pwm-img.c249
-rw-r--r--drivers/pwm/pwm-sti.c30
-rw-r--r--drivers/pwm/pwm-sun4i.c366
-rw-r--r--drivers/pwm/pwm-tegra.c2
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c8
-rw-r--r--drivers/regulator/qcom_rpm-regulator.c1
-rw-r--r--drivers/rtc/Kconfig8
-rw-r--r--drivers/rtc/rtc-ds1685.c18
-rw-r--r--drivers/scsi/am53c974.c6
-rw-r--r--drivers/scsi/be2iscsi/be_main.c1
-rw-r--r--drivers/scsi/hpsa.c4
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c4
-rw-r--r--drivers/scsi/sg.c40
-rw-r--r--drivers/scsi/virtio_scsi.c6
-rw-r--r--drivers/scsi/wd719x.c1
-rw-r--r--drivers/sh/pm_runtime.c2
-rw-r--r--drivers/spi/Kconfig6
-rw-r--r--drivers/staging/board/Kconfig2
-rw-r--r--drivers/staging/emxx_udc/Kconfig2
-rw-r--r--drivers/staging/iio/Kconfig4
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c12
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h4
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c12
-rw-r--r--drivers/target/iscsi/iscsi_target.c105
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h883
-rw-r--r--drivers/target/iscsi/iscsi_target_datain_values.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_device.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_seq_pdu_list.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.h64
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tq.c30
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c12
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h1
-rw-r--r--drivers/target/target_core_file.c5
-rw-r--r--drivers/target/target_core_iblock.c5
-rw-r--r--drivers/target/target_core_pr.c25
-rw-r--r--drivers/target/target_core_sbc.c140
-rw-r--r--drivers/target/target_core_spc.c2
-rw-r--r--drivers/thermal/int340x_thermal/Makefile1
-rw-r--r--drivers/thermal/int340x_thermal/int3400_thermal.c14
-rw-r--r--drivers/thermal/int340x_thermal/int3402_thermal.c208
-rw-r--r--drivers/thermal/int340x_thermal/int3403_thermal.c208
-rw-r--r--drivers/thermal/int340x_thermal/int340x_thermal_zone.c276
-rw-r--r--drivers/thermal/int340x_thermal/int340x_thermal_zone.h68
-rw-r--r--drivers/thermal/int340x_thermal/processor_thermal_device.c92
-rw-r--r--drivers/thermal/intel_powerclamp.c1
-rw-r--r--drivers/thermal/intel_soc_dts_thermal.c46
-rw-r--r--drivers/thermal/of-thermal.c3
-rw-r--r--drivers/thermal/rcar_thermal.c26
-rw-r--r--drivers/thermal/rockchip_thermal.c36
-rw-r--r--drivers/thermal/samsung/Kconfig9
-rw-r--r--drivers/thermal/samsung/Makefile2
-rw-r--r--drivers/thermal/samsung/exynos_thermal_common.c427
-rw-r--r--drivers/thermal/samsung/exynos_thermal_common.h106
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c585
-rw-r--r--drivers/thermal/samsung/exynos_tmu.h77
-rw-r--r--drivers/thermal/samsung/exynos_tmu_data.c264
-rw-r--r--drivers/thermal/step_wise.c4
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c2
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c2
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/usb/gadget/Kconfig34
-rw-r--r--drivers/usb/gadget/legacy/Kconfig2
-rw-r--r--drivers/usb/gadget/udc/Kconfig4
-rw-r--r--drivers/usb/phy/Kconfig2
-rw-r--r--drivers/vfio/pci/vfio_pci.c21
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c60
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h1
-rw-r--r--drivers/vfio/vfio.c119
-rw-r--r--drivers/vfio/vfio_iommu_type1.c80
-rw-r--r--drivers/vhost/net.c25
-rw-r--r--drivers/vhost/scsi.c1068
-rw-r--r--drivers/virtio/Kconfig24
-rw-r--r--drivers/virtio/Makefile3
-rw-r--r--drivers/virtio/virtio.c5
-rw-r--r--drivers/virtio/virtio_balloon.c9
-rw-r--r--drivers/virtio/virtio_mmio.c131
-rw-r--r--drivers/virtio/virtio_pci_common.c94
-rw-r--r--drivers/virtio/virtio_pci_common.h43
-rw-r--r--drivers/virtio/virtio_pci_legacy.c76
-rw-r--r--drivers/virtio/virtio_pci_modern.c695
-rw-r--r--drivers/virtio/virtio_ring.c9
-rw-r--r--drivers/watchdog/Kconfig25
-rw-r--r--drivers/watchdog/Makefile2
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c21
-rw-r--r--drivers/watchdog/da9063_wdt.c32
-rw-r--r--drivers/watchdog/dw_wdt.c32
-rw-r--r--drivers/watchdog/gpio_wdt.c37
-rw-r--r--drivers/watchdog/hpwdt.c2
-rw-r--r--drivers/watchdog/imgpdc_wdt.c289
-rw-r--r--drivers/watchdog/imx2_wdt.c4
-rw-r--r--drivers/watchdog/it87_wdt.c6
-rw-r--r--drivers/watchdog/jz4740_wdt.c10
-rw-r--r--drivers/watchdog/mtk_wdt.c251
-rw-r--r--drivers/watchdog/omap_wdt.c2
-rw-r--r--drivers/watchdog/retu_wdt.c2
-rw-r--r--drivers/watchdog/rt2880_wdt.c9
-rw-r--r--drivers/watchdog/twl4030_wdt.c2
-rw-r--r--drivers/watchdog/w83627hf_wdt.c14
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/preempt.c44
-rw-r--r--drivers/xen/privcmd.c2
-rw-r--r--drivers/xen/xen-scsiback.c14
579 files changed, 30059 insertions, 10709 deletions
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index b18cd2151ddb..623b117ad1a2 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -55,6 +55,7 @@ acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
 ifdef CONFIG_ACPI_VIDEO
 acpi-y += video_detect.o
 endif
+acpi-y += acpi_lpat.o
 
 # These are (potentially) separate modules
 
diff --git a/drivers/acpi/acpi_lpat.c b/drivers/acpi/acpi_lpat.c
new file mode 100644
index 000000000000..feb61c1630eb
--- /dev/null
+++ b/drivers/acpi/acpi_lpat.c
@@ -0,0 +1,161 @@
+/*
+ * acpi_lpat.c - LPAT table processing functions
+ *
+ * Copyright (C) 2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_lpat.h>
+
+/**
+ * acpi_lpat_raw_to_temp(): Return temperature from raw value through
+ * LPAT conversion table
+ *
+ * @lpat_table: the temperature_raw mapping table structure
+ * @raw: the raw value, used as a key to get the temperature from the
+ *       above mapping table
+ *
+ * A positive converted temperature value will be returned on success,
+ * a negative errno will be returned in error cases.
+ */
+int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table,
+			  int raw)
+{
+	int i, delta_temp, delta_raw, temp;
+	struct acpi_lpat *lpat = lpat_table->lpat;
+
+	for (i = 0; i < lpat_table->lpat_count - 1; i++) {
+		if ((raw >= lpat[i].raw && raw <= lpat[i+1].raw) ||
+		    (raw <= lpat[i].raw && raw >= lpat[i+1].raw))
+			break;
+	}
+
+	if (i == lpat_table->lpat_count - 1)
+		return -ENOENT;
+
+	delta_temp = lpat[i+1].temp - lpat[i].temp;
+	delta_raw = lpat[i+1].raw - lpat[i].raw;
+	temp = lpat[i].temp + (raw - lpat[i].raw) * delta_temp / delta_raw;
+
+	return temp;
+}
+EXPORT_SYMBOL_GPL(acpi_lpat_raw_to_temp);
+
+/**
+ * acpi_lpat_temp_to_raw(): Return raw value from temperature through
+ * LPAT conversion table
+ *
+ * @lpat_table: the temperature_raw mapping table structure
+ * @temp: the temperature, used as a key to get the raw value from the
+ *        above mapping table
+ *
+ * A positive converted raw value will be returned on success,
+ * a negative errno will be returned in error cases.
+ */
+int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table,
+			  int temp)
+{
+	int i, delta_temp, delta_raw, raw;
+	struct acpi_lpat *lpat = lpat_table->lpat;
+
+	for (i = 0; i < lpat_table->lpat_count - 1; i++) {
+		if (temp >= lpat[i].temp && temp <= lpat[i+1].temp)
+			break;
+	}
+
+	if (i == lpat_table->lpat_count - 1)
+		return -ENOENT;
+
+	delta_temp = lpat[i+1].temp - lpat[i].temp;
+	delta_raw = lpat[i+1].raw - lpat[i].raw;
+	raw = lpat[i].raw + (temp - lpat[i].temp) * delta_raw / delta_temp;
+
+	return raw;
+}
+EXPORT_SYMBOL_GPL(acpi_lpat_temp_to_raw);
+
+/**
+ * acpi_lpat_get_conversion_table(): Parse ACPI LPAT table if present.
+ *
+ * @handle: Handle to acpi device
+ *
+ * Parse LPAT table to a struct of type acpi_lpat_table. On success
+ * it returns a pointer to newly allocated table. This table must
+ * be freed by the caller when finished processing, using a call to
+ * acpi_lpat_free_conversion_table.
+ */
+struct acpi_lpat_conversion_table *acpi_lpat_get_conversion_table(acpi_handle
+								  handle)
+{
+	struct acpi_lpat_conversion_table *lpat_table = NULL;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	union acpi_object *obj_p, *obj_e;
+	int *lpat, i;
+	acpi_status status;
+
+	status = acpi_evaluate_object(handle, "LPAT", NULL, &buffer);
+	if (ACPI_FAILURE(status))
+		return NULL;
+
+	obj_p = (union acpi_object *)buffer.pointer;
+	if (!obj_p || (obj_p->type != ACPI_TYPE_PACKAGE) ||
+	    (obj_p->package.count % 2) || (obj_p->package.count < 4))
+		goto out;
+
+	lpat = kcalloc(obj_p->package.count, sizeof(int), GFP_KERNEL);
+	if (!lpat)
+		goto out;
+
+	for (i = 0; i < obj_p->package.count; i++) {
+		obj_e = &obj_p->package.elements[i];
+		if (obj_e->type != ACPI_TYPE_INTEGER) {
+			kfree(lpat);
+			goto out;
+		}
+		lpat[i] = (s64)obj_e->integer.value;
+	}
+
+	lpat_table = kzalloc(sizeof(*lpat_table), GFP_KERNEL);
+	if (!lpat_table) {
+		kfree(lpat);
+		goto out;
+	}
+
+	lpat_table->lpat = (struct acpi_lpat *)lpat;
+	lpat_table->lpat_count = obj_p->package.count / 2;
+
+out:
+	kfree(buffer.pointer);
+	return lpat_table;
+}
+EXPORT_SYMBOL_GPL(acpi_lpat_get_conversion_table);
+
+/**
+ * acpi_lpat_free_conversion_table(): Free LPAT table.
+ *
+ * @lpat_table: the temperature_raw mapping table structure
+ *
+ * Frees the LPAT table previously allocated by a call to
+ * acpi_lpat_get_conversion_table.
+ */
+void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table
+				     *lpat_table)
+{
+	if (lpat_table) {
+		kfree(lpat_table->lpat);
+		kfree(lpat_table);
+	}
+}
+EXPORT_SYMBOL_GPL(acpi_lpat_free_conversion_table);
+
+MODULE_LICENSE("GPL");
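For orientation, this is how a driver is expected to consume the new helpers; a minimal sketch, assuming a valid acpi_handle in handle and a raw sensor reading in raw_reading (the real caller is the intel_pmic.c change further down):

	struct acpi_lpat_conversion_table *table;
	int temp;

	table = acpi_lpat_get_conversion_table(handle);	/* NULL if no LPAT */
	if (table) {
		temp = acpi_lpat_raw_to_temp(table, raw_reading);
		if (temp >= 0)
			pr_info("interpolated temperature: %d\n", temp);
		acpi_lpat_free_conversion_table(table);
	}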
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 02e835f3cf8a..657964e8ab7e 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -105,7 +105,7 @@ static void lpss_uart_setup(struct lpss_private_data *pdata)
 	}
 }
 
-static void byt_i2c_setup(struct lpss_private_data *pdata)
+static void lpss_deassert_reset(struct lpss_private_data *pdata)
 {
 	unsigned int offset;
 	u32 val;
@@ -114,9 +114,18 @@ static void byt_i2c_setup(struct lpss_private_data *pdata)
 	val = readl(pdata->mmio_base + offset);
 	val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
 	writel(val, pdata->mmio_base + offset);
+}
+
+#define LPSS_I2C_ENABLE			0x6c
+
+static void byt_i2c_setup(struct lpss_private_data *pdata)
+{
+	lpss_deassert_reset(pdata);
 
 	if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
 		pdata->fixed_clk_rate = 133000000;
+
+	writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
 }
 
 static struct lpss_device_desc lpt_dev_desc = {
@@ -125,7 +134,7 @@ static struct lpss_device_desc lpt_dev_desc = {
 };
 
 static struct lpss_device_desc lpt_i2c_dev_desc = {
-	.flags = LPSS_CLK | LPSS_LTR,
+	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
 	.prv_offset = 0x800,
 };
 
@@ -166,6 +175,12 @@ static struct lpss_device_desc byt_i2c_dev_desc = {
 	.setup = byt_i2c_setup,
 };
 
+static struct lpss_device_desc bsw_spi_dev_desc = {
+	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
+	.prv_offset = 0x400,
+	.setup = lpss_deassert_reset,
+};
+
 #else
 
 #define LPSS_ADDR(desc) (0UL)
@@ -198,7 +213,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
 	/* Braswell LPSS devices */
 	{ "80862288", LPSS_ADDR(byt_pwm_dev_desc) },
 	{ "8086228A", LPSS_ADDR(byt_uart_dev_desc) },
-	{ "8086228E", LPSS_ADDR(byt_spi_dev_desc) },
+	{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
 	{ "808622C1", LPSS_ADDR(byt_i2c_dev_desc) },
 
 	{ "INT3430", LPSS_ADDR(lpt_dev_desc) },
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 982b67faaaf3..a8dd2f763382 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -680,7 +680,7 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
 		/* Enable GPE for event processing (SCI_EVT=1) */
 		if (!resuming)
 			acpi_ec_submit_request(ec);
-		pr_info("+++++ EC started +++++\n");
+		pr_debug("EC started\n");
 	}
 	spin_unlock_irqrestore(&ec->lock, flags);
 }
@@ -712,7 +712,7 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
 		acpi_ec_complete_request(ec);
 		clear_bit(EC_FLAGS_STARTED, &ec->flags);
 		clear_bit(EC_FLAGS_STOPPED, &ec->flags);
-		pr_info("+++++ EC stopped +++++\n");
+		pr_debug("EC stopped\n");
 	}
 	spin_unlock_irqrestore(&ec->lock, flags);
 }
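Demoting these messages from pr_info() to pr_debug() removes them from the default log. On kernels built with CONFIG_DYNAMIC_DEBUG they can still be re-enabled per call site at runtime, e.g. (assuming debugfs is mounted at /sys/kernel/debug):

	# echo 'file ec.c +p' > /sys/kernel/debug/dynamic_debug/control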
diff --git a/drivers/acpi/pmic/intel_pmic.c b/drivers/acpi/pmic/intel_pmic.c
index a732e5d7e322..bd772cd56494 100644
--- a/drivers/acpi/pmic/intel_pmic.c
+++ b/drivers/acpi/pmic/intel_pmic.c
@@ -16,20 +16,15 @@
 #include <linux/module.h>
 #include <linux/acpi.h>
 #include <linux/regmap.h>
+#include <acpi/acpi_lpat.h>
 #include "intel_pmic.h"
 
 #define PMIC_POWER_OPREGION_ID		0x8d
 #define PMIC_THERMAL_OPREGION_ID	0x8c
 
-struct acpi_lpat {
-	int temp;
-	int raw;
-};
-
 struct intel_pmic_opregion {
 	struct mutex lock;
-	struct acpi_lpat *lpat;
-	int lpat_count;
+	struct acpi_lpat_conversion_table *lpat_table;
 	struct regmap *regmap;
 	struct intel_pmic_opregion_data *data;
 };
@@ -50,105 +45,6 @@ static int pmic_get_reg_bit(int address, struct pmic_table *table,
 	return -ENOENT;
 }
 
-/**
- * raw_to_temp(): Return temperature from raw value through LPAT table
- *
- * @lpat: the temperature_raw mapping table
- * @count: the count of the above mapping table
- * @raw: the raw value, used as a key to get the temperature from the
- *       above mapping table
- *
- * A positive value will be returned on success, a negative errno will
- * be returned in error cases.
- */
-static int raw_to_temp(struct acpi_lpat *lpat, int count, int raw)
-{
-	int i, delta_temp, delta_raw, temp;
-
-	for (i = 0; i < count - 1; i++) {
-		if ((raw >= lpat[i].raw && raw <= lpat[i+1].raw) ||
-		    (raw <= lpat[i].raw && raw >= lpat[i+1].raw))
-			break;
-	}
-
-	if (i == count - 1)
-		return -ENOENT;
-
-	delta_temp = lpat[i+1].temp - lpat[i].temp;
-	delta_raw = lpat[i+1].raw - lpat[i].raw;
-	temp = lpat[i].temp + (raw - lpat[i].raw) * delta_temp / delta_raw;
-
-	return temp;
-}
-
-/**
- * temp_to_raw(): Return raw value from temperature through LPAT table
- *
- * @lpat: the temperature_raw mapping table
- * @count: the count of the above mapping table
- * @temp: the temperature, used as a key to get the raw value from the
- *        above mapping table
- *
- * A positive value will be returned on success, a negative errno will
- * be returned in error cases.
- */
-static int temp_to_raw(struct acpi_lpat *lpat, int count, int temp)
-{
-	int i, delta_temp, delta_raw, raw;
-
-	for (i = 0; i < count - 1; i++) {
-		if (temp >= lpat[i].temp && temp <= lpat[i+1].temp)
-			break;
-	}
-
-	if (i == count - 1)
-		return -ENOENT;
-
-	delta_temp = lpat[i+1].temp - lpat[i].temp;
-	delta_raw = lpat[i+1].raw - lpat[i].raw;
-	raw = lpat[i].raw + (temp - lpat[i].temp) * delta_raw / delta_temp;
-
-	return raw;
-}
-
-static void pmic_thermal_lpat(struct intel_pmic_opregion *opregion,
-			      acpi_handle handle, struct device *dev)
-{
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-	union acpi_object *obj_p, *obj_e;
-	int *lpat, i;
-	acpi_status status;
-
-	status = acpi_evaluate_object(handle, "LPAT", NULL, &buffer);
-	if (ACPI_FAILURE(status))
-		return;
-
-	obj_p = (union acpi_object *)buffer.pointer;
-	if (!obj_p || (obj_p->type != ACPI_TYPE_PACKAGE) ||
-	    (obj_p->package.count % 2) || (obj_p->package.count < 4))
-		goto out;
-
-	lpat = devm_kmalloc(dev, sizeof(int) * obj_p->package.count,
-			    GFP_KERNEL);
-	if (!lpat)
-		goto out;
-
-	for (i = 0; i < obj_p->package.count; i++) {
-		obj_e = &obj_p->package.elements[i];
-		if (obj_e->type != ACPI_TYPE_INTEGER) {
-			devm_kfree(dev, lpat);
-			goto out;
-		}
-		lpat[i] = (s64)obj_e->integer.value;
-	}
-
-	opregion->lpat = (struct acpi_lpat *)lpat;
-	opregion->lpat_count = obj_p->package.count / 2;
-
-out:
-	kfree(buffer.pointer);
-}
-
 static acpi_status intel_pmic_power_handler(u32 function,
 		acpi_physical_address address, u32 bits, u64 *value64,
 		void *handler_context, void *region_context)
@@ -192,12 +88,12 @@ static int pmic_read_temp(struct intel_pmic_opregion *opregion,
 	if (raw_temp < 0)
 		return raw_temp;
 
-	if (!opregion->lpat) {
+	if (!opregion->lpat_table) {
 		*value = raw_temp;
 		return 0;
 	}
 
-	temp = raw_to_temp(opregion->lpat, opregion->lpat_count, raw_temp);
+	temp = acpi_lpat_raw_to_temp(opregion->lpat_table, raw_temp);
 	if (temp < 0)
 		return temp;
 
@@ -223,9 +119,8 @@ static int pmic_thermal_aux(struct intel_pmic_opregion *opregion, int reg,
 	if (!opregion->data->update_aux)
 		return -ENXIO;
 
-	if (opregion->lpat) {
-		raw_temp = temp_to_raw(opregion->lpat, opregion->lpat_count,
-				       *value);
+	if (opregion->lpat_table) {
+		raw_temp = acpi_lpat_temp_to_raw(opregion->lpat_table, *value);
 		if (raw_temp < 0)
 			return raw_temp;
 	} else {
@@ -314,6 +209,7 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
 {
 	acpi_status status;
 	struct intel_pmic_opregion *opregion;
+	int ret;
 
 	if (!dev || !regmap || !d)
 		return -EINVAL;
@@ -327,14 +223,16 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
 
 	mutex_init(&opregion->lock);
 	opregion->regmap = regmap;
-	pmic_thermal_lpat(opregion, handle, dev);
+	opregion->lpat_table = acpi_lpat_get_conversion_table(handle);
 
 	status = acpi_install_address_space_handler(handle,
 						    PMIC_POWER_OPREGION_ID,
 						    intel_pmic_power_handler,
 						    NULL, opregion);
-	if (ACPI_FAILURE(status))
-		return -ENODEV;
+	if (ACPI_FAILURE(status)) {
+		ret = -ENODEV;
+		goto out_error;
+	}
 
 	status = acpi_install_address_space_handler(handle,
 						    PMIC_THERMAL_OPREGION_ID,
@@ -343,11 +241,16 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
 	if (ACPI_FAILURE(status)) {
 		acpi_remove_address_space_handler(handle, PMIC_POWER_OPREGION_ID,
 						  intel_pmic_power_handler);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto out_error;
 	}
 
 	opregion->data = d;
 	return 0;
+
+out_error:
+	acpi_lpat_free_conversion_table(opregion->lpat_table);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(intel_pmic_install_opregion_handler);
 
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 4752b9939987..c723668e3e27 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -46,7 +46,7 @@ static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
 	if (len && reslen && reslen == len && start <= end)
 		return true;
 
-	pr_info("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
+	pr_debug("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
 		io ? "io" : "mem", start, end, len);
 
 	return false;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 88a4f99dd2a7..debd30917010 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -540,6 +540,15 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"),
 		},
 	},
+	{
+	 /* https://bugs.freedesktop.org/show_bug.cgi?id=87286 */
+	 .callback = video_disable_native_backlight,
+	 .ident = "SAMSUNG 900X3C/900X3D/900X3E/900X4C/900X4D",
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "900X3C/900X3D/900X3E/900X4C/900X4D"),
+		},
+	},
 
 	{
 	 /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index cbdfbbf98392..ceb32dd52a6c 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -37,17 +37,18 @@
 #include <linux/ptrace.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/t10-pi.h>
 #include <linux/types.h>
 #include <scsi/sg.h>
 #include <asm-generic/io-64-nonatomic-lo-hi.h>
 
+#define NVME_MINORS		(1U << MINORBITS)
 #define NVME_Q_DEPTH		1024
 #define NVME_AQ_DEPTH		64
 #define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
 #define ADMIN_TIMEOUT		(admin_timeout * HZ)
 #define SHUTDOWN_TIMEOUT	(shutdown_timeout * HZ)
-#define IOD_TIMEOUT		(retry_time * HZ)
 
 static unsigned char admin_timeout = 60;
 module_param(admin_timeout, byte, 0644);
@@ -57,10 +58,6 @@ unsigned char nvme_io_timeout = 30;
 module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
 
-static unsigned char retry_time = 30;
-module_param(retry_time, byte, 0644);
-MODULE_PARM_DESC(retry_time, "time in seconds to retry failed I/O");
-
 static unsigned char shutdown_timeout = 5;
 module_param(shutdown_timeout, byte, 0644);
 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
@@ -68,6 +65,9 @@ MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown")
 static int nvme_major;
 module_param(nvme_major, int, 0);
 
+static int nvme_char_major;
+module_param(nvme_char_major, int, 0);
+
 static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0);
 
@@ -76,7 +76,8 @@ static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
 static wait_queue_head_t nvme_kthread_wait;
-static struct notifier_block nvme_nb;
+
+static struct class *nvme_class;
 
 static void nvme_reset_failed_dev(struct work_struct *ws);
 static int nvme_process_cq(struct nvme_queue *nvmeq);
@@ -95,7 +96,6 @@ struct async_cmd_info {
  * commands and one for I/O commands).
  */
 struct nvme_queue {
-	struct llist_node node;
 	struct device *q_dmadev;
 	struct nvme_dev *dev;
 	char irqname[24];	/* nvme4294967295-65535\0 */
@@ -482,6 +482,115 @@ static int nvme_error_status(u16 status)
 	}
 }
 
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+	if (be32_to_cpu(pi->ref_tag) == v)
+		pi->ref_tag = cpu_to_be32(p);
+}
+
+static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+	if (be32_to_cpu(pi->ref_tag) == p)
+		pi->ref_tag = cpu_to_be32(v);
+}
+
+/**
+ * nvme_dif_remap - remaps ref tags to bip seed and physical lba
+ *
+ * The virtual start sector is the one that was originally submitted by the
+ * block layer. Due to partitioning, MD/DM cloning, etc. the actual physical
+ * start sector may be different. Remap protection information to match the
+ * physical LBA on writes, and back to the original seed on reads.
+ *
+ * Type 0 and 3 do not have a ref tag, so no remapping required.
+ */
+static void nvme_dif_remap(struct request *req,
+			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
+{
+	struct nvme_ns *ns = req->rq_disk->private_data;
+	struct bio_integrity_payload *bip;
+	struct t10_pi_tuple *pi;
+	void *p, *pmap;
+	u32 i, nlb, ts, phys, virt;
+
+	if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
+		return;
+
+	bip = bio_integrity(req->bio);
+	if (!bip)
+		return;
+
+	pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
+	if (!pmap)
+		return;
+
+	p = pmap;
+	virt = bip_get_seed(bip);
+	phys = nvme_block_nr(ns, blk_rq_pos(req));
+	nlb = (blk_rq_bytes(req) >> ns->lba_shift);
+	ts = ns->disk->integrity->tuple_size;
+
+	for (i = 0; i < nlb; i++, virt++, phys++) {
+		pi = (struct t10_pi_tuple *)p;
+		dif_swap(phys, virt, pi);
+		p += ts;
+	}
+	kunmap_atomic(pmap);
+}
+
+static int nvme_noop_verify(struct blk_integrity_iter *iter)
+{
+	return 0;
+}
+
+static int nvme_noop_generate(struct blk_integrity_iter *iter)
+{
+	return 0;
+}
+
+struct blk_integrity nvme_meta_noop = {
+	.name			= "NVME_META_NOOP",
+	.generate_fn		= nvme_noop_generate,
+	.verify_fn		= nvme_noop_verify,
+};
+
+static void nvme_init_integrity(struct nvme_ns *ns)
+{
+	struct blk_integrity integrity;
+
+	switch (ns->pi_type) {
+	case NVME_NS_DPS_PI_TYPE3:
+		integrity = t10_pi_type3_crc;
+		break;
+	case NVME_NS_DPS_PI_TYPE1:
+	case NVME_NS_DPS_PI_TYPE2:
+		integrity = t10_pi_type1_crc;
+		break;
+	default:
+		integrity = nvme_meta_noop;
+		break;
+	}
+	integrity.tuple_size = ns->ms;
+	blk_integrity_register(ns->disk, &integrity);
+	blk_queue_max_integrity_segments(ns->queue, 1);
+}
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+static void nvme_dif_remap(struct request *req,
+			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
+{
+}
+static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+}
+static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+}
+static void nvme_init_integrity(struct nvme_ns *ns)
+{
+}
+#endif
+
 static void req_completion(struct nvme_queue *nvmeq, void *ctx,
 						struct nvme_completion *cqe)
 {
@@ -512,9 +621,16 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
512 "completing aborted command with status:%04x\n", 621 "completing aborted command with status:%04x\n",
513 status); 622 status);
514 623
515 if (iod->nents) 624 if (iod->nents) {
516 dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, iod->nents, 625 dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, iod->nents,
517 rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 626 rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
627 if (blk_integrity_rq(req)) {
628 if (!rq_data_dir(req))
629 nvme_dif_remap(req, nvme_dif_complete);
630 dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->meta_sg, 1,
631 rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
632 }
633 }
518 nvme_free_iod(nvmeq->dev, iod); 634 nvme_free_iod(nvmeq->dev, iod);
519 635
520 blk_mq_complete_request(req); 636 blk_mq_complete_request(req);
@@ -670,6 +786,24 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 	cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
 	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
 	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+
+	if (blk_integrity_rq(req)) {
+		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg));
+		switch (ns->pi_type) {
+		case NVME_NS_DPS_PI_TYPE3:
+			control |= NVME_RW_PRINFO_PRCHK_GUARD;
+			break;
+		case NVME_NS_DPS_PI_TYPE1:
+		case NVME_NS_DPS_PI_TYPE2:
+			control |= NVME_RW_PRINFO_PRCHK_GUARD |
+					NVME_RW_PRINFO_PRCHK_REF;
+			cmnd->rw.reftag = cpu_to_le32(
+					nvme_block_nr(ns, blk_rq_pos(req)));
+			break;
+		}
+	} else if (ns->ms)
+		control |= NVME_RW_PRINFO_PRACT;
+
 	cmnd->rw.control = cpu_to_le16(control);
 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
 
@@ -690,6 +824,19 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_iod *iod;
 	enum dma_data_direction dma_dir;
 
+	/*
+	 * If formatted with metadata, require the block layer provide a buffer
+	 * unless this namespace is formatted such that the metadata can be
+	 * stripped/generated by the controller with PRACT=1.
+	 */
+	if (ns->ms && !blk_integrity_rq(req)) {
+		if (!(ns->pi_type && ns->ms == 8)) {
+			req->errors = -EFAULT;
+			blk_mq_complete_request(req);
+			return BLK_MQ_RQ_QUEUE_OK;
+		}
+	}
+
 	iod = nvme_alloc_iod(req, ns->dev, GFP_ATOMIC);
 	if (!iod)
 		return BLK_MQ_RQ_QUEUE_BUSY;
@@ -725,6 +872,21 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 				iod->nents, dma_dir);
 			goto retry_cmd;
 		}
+		if (blk_integrity_rq(req)) {
+			if (blk_rq_count_integrity_sg(req->q, req->bio) != 1)
+				goto error_cmd;
+
+			sg_init_table(iod->meta_sg, 1);
+			if (blk_rq_map_integrity_sg(
+					req->q, req->bio, iod->meta_sg) != 1)
+				goto error_cmd;
+
+			if (rq_data_dir(req))
+				nvme_dif_remap(req, nvme_dif_prep);
+
+			if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir))
+				goto error_cmd;
+		}
 	}
 
 	nvme_set_info(cmd, iod, req_completion);
@@ -817,14 +979,6 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
 	return IRQ_WAKE_THREAD;
 }
 
-static void nvme_abort_cmd_info(struct nvme_queue *nvmeq, struct nvme_cmd_info *
-								cmd_info)
-{
-	spin_lock_irq(&nvmeq->q_lock);
-	cancel_cmd_info(cmd_info, NULL);
-	spin_unlock_irq(&nvmeq->q_lock);
-}
-
 struct sync_cmd_info {
 	struct task_struct *task;
 	u32 result;
@@ -847,7 +1001,6 @@ static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
 static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd,
 						u32 *result, unsigned timeout)
 {
-	int ret;
 	struct sync_cmd_info cmdinfo;
 	struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = cmd_rq->nvmeq;
@@ -859,29 +1012,12 @@ static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd,
859 1012
860 nvme_set_info(cmd_rq, &cmdinfo, sync_completion); 1013 nvme_set_info(cmd_rq, &cmdinfo, sync_completion);
861 1014
862 set_current_state(TASK_KILLABLE); 1015 set_current_state(TASK_UNINTERRUPTIBLE);
863 ret = nvme_submit_cmd(nvmeq, cmd); 1016 nvme_submit_cmd(nvmeq, cmd);
864 if (ret) { 1017 schedule();
865 nvme_finish_cmd(nvmeq, req->tag, NULL);
866 set_current_state(TASK_RUNNING);
867 }
868 ret = schedule_timeout(timeout);
869
870 /*
871 * Ensure that sync_completion has either run, or that it will
872 * never run.
873 */
874 nvme_abort_cmd_info(nvmeq, blk_mq_rq_to_pdu(req));
875
876 /*
877 * We never got the completion
878 */
879 if (cmdinfo.status == -EINTR)
880 return -EINTR;
881 1018
882 if (result) 1019 if (result)
883 *result = cmdinfo.result; 1020 *result = cmdinfo.result;
884
885 return cmdinfo.status; 1021 return cmdinfo.status;
886} 1022}
887 1023
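The simplified submit path above relies on the classic park-and-wake pairing between the submitter and the completion handler; in this driver, sync_completion() does the waking. Roughly (a sketch, with hypothetical "my_" names):

	static int my_submit_sync(struct my_queue *q, struct my_cmd *cmd,
				  struct sync_cmd_info *cmdinfo)
	{
		cmdinfo->task = current;
		set_current_state(TASK_UNINTERRUPTIBLE);
		my_submit(q, cmd);	/* must not fail once we commit to sleeping */
		schedule();		/* parked until the completion wakes us */
		return cmdinfo->status;
	}

	/* completion side, typically running in interrupt context */
	static void my_sync_completion(struct sync_cmd_info *cmdinfo, int status)
	{
		cmdinfo->status = status;
		wake_up_process(cmdinfo->task);	/* makes the schedule() above return */
	}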
@@ -1158,29 +1294,18 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
1158 struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req); 1294 struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
1159 struct nvme_queue *nvmeq = cmd->nvmeq; 1295 struct nvme_queue *nvmeq = cmd->nvmeq;
1160 1296
1161 /*
1162 * The aborted req will be completed on receiving the abort req.
1163 * We enable the timer again. If hit twice, it'll cause a device reset,
1164 * as the device then is in a faulty state.
1165 */
1166 int ret = BLK_EH_RESET_TIMER;
1167
1168 dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag, 1297 dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
1169 nvmeq->qid); 1298 nvmeq->qid);
1170
1171 spin_lock_irq(&nvmeq->q_lock); 1299 spin_lock_irq(&nvmeq->q_lock);
1172 if (!nvmeq->dev->initialized) { 1300 nvme_abort_req(req);
1173 /*
1174 * Force cancelled command frees the request, which requires we
1175 * return BLK_EH_NOT_HANDLED.
1176 */
1177 nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
1178 ret = BLK_EH_NOT_HANDLED;
1179 } else
1180 nvme_abort_req(req);
1181 spin_unlock_irq(&nvmeq->q_lock); 1301 spin_unlock_irq(&nvmeq->q_lock);
1182 1302
1183 return ret; 1303 /*
                 1304         * The aborted req will be completed when the abort req is processed.
                 1305         * We enable the timer again. If it expires twice, it'll cause a device
                 1306         * reset, as the device is then in a faulty state.
1307 */
1308 return BLK_EH_RESET_TIMER;
1184} 1309}
1185 1310
1186static void nvme_free_queue(struct nvme_queue *nvmeq) 1311static void nvme_free_queue(struct nvme_queue *nvmeq)
@@ -1233,7 +1358,6 @@ static void nvme_clear_queue(struct nvme_queue *nvmeq)
1233 struct blk_mq_hw_ctx *hctx = nvmeq->hctx; 1358 struct blk_mq_hw_ctx *hctx = nvmeq->hctx;
1234 1359
1235 spin_lock_irq(&nvmeq->q_lock); 1360 spin_lock_irq(&nvmeq->q_lock);
1236 nvme_process_cq(nvmeq);
1237 if (hctx && hctx->tags) 1361 if (hctx && hctx->tags)
1238 blk_mq_tag_busy_iter(hctx, nvme_cancel_queue_ios, nvmeq); 1362 blk_mq_tag_busy_iter(hctx, nvme_cancel_queue_ios, nvmeq);
1239 spin_unlock_irq(&nvmeq->q_lock); 1363 spin_unlock_irq(&nvmeq->q_lock);
@@ -1256,7 +1380,10 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
1256 } 1380 }
1257 if (!qid && dev->admin_q) 1381 if (!qid && dev->admin_q)
1258 blk_mq_freeze_queue_start(dev->admin_q); 1382 blk_mq_freeze_queue_start(dev->admin_q);
1259 nvme_clear_queue(nvmeq); 1383
1384 spin_lock_irq(&nvmeq->q_lock);
1385 nvme_process_cq(nvmeq);
1386 spin_unlock_irq(&nvmeq->q_lock);
1260} 1387}
1261 1388
1262static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, 1389static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
@@ -1875,13 +2002,24 @@ static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
1875 return 0; 2002 return 0;
1876} 2003}
1877 2004
2005static void nvme_config_discard(struct nvme_ns *ns)
2006{
2007 u32 logical_block_size = queue_logical_block_size(ns->queue);
2008 ns->queue->limits.discard_zeroes_data = 0;
2009 ns->queue->limits.discard_alignment = logical_block_size;
2010 ns->queue->limits.discard_granularity = logical_block_size;
2011 ns->queue->limits.max_discard_sectors = 0xffffffff;
2012 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
2013}
2014
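One unit subtlety in nvme_config_discard() above: discard_granularity and discard_alignment are byte values, while max_discard_sectors is counted in 512-byte sectors regardless of the logical block size. A quick standalone check of what the 0xffffffff cap amounts to:

	#include <stdio.h>

	int main(void)
	{
		/* max_discard_sectors is in 512-byte sectors */
		unsigned long long max_bytes = 0xffffffffULL * 512;

		printf("max discard per request: %llu bytes (~2 TiB)\n", max_bytes);
		return 0;
	}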
1878static int nvme_revalidate_disk(struct gendisk *disk) 2015static int nvme_revalidate_disk(struct gendisk *disk)
1879{ 2016{
1880 struct nvme_ns *ns = disk->private_data; 2017 struct nvme_ns *ns = disk->private_data;
1881 struct nvme_dev *dev = ns->dev; 2018 struct nvme_dev *dev = ns->dev;
1882 struct nvme_id_ns *id; 2019 struct nvme_id_ns *id;
1883 dma_addr_t dma_addr; 2020 dma_addr_t dma_addr;
1884 int lbaf; 2021 int lbaf, pi_type, old_ms;
2022 unsigned short bs;
1885 2023
1886 id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr, 2024 id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
1887 GFP_KERNEL); 2025 GFP_KERNEL);
@@ -1890,16 +2028,51 @@ static int nvme_revalidate_disk(struct gendisk *disk)
1890 __func__); 2028 __func__);
1891 return 0; 2029 return 0;
1892 } 2030 }
2031 if (nvme_identify(dev, ns->ns_id, 0, dma_addr)) {
2032 dev_warn(&dev->pci_dev->dev,
2033 "identify failed ns:%d, setting capacity to 0\n",
2034 ns->ns_id);
2035 memset(id, 0, sizeof(*id));
2036 }
1893 2037
1894 if (nvme_identify(dev, ns->ns_id, 0, dma_addr)) 2038 old_ms = ns->ms;
1895 goto free; 2039 lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
1896
1897 lbaf = id->flbas & 0xf;
1898 ns->lba_shift = id->lbaf[lbaf].ds; 2040 ns->lba_shift = id->lbaf[lbaf].ds;
2041 ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
2042
2043 /*
                 2044         * If identify namespace failed, use a default 512 byte block size so
                 2045         * the block layer can function before failing I/O on the zero-capacity disk.
2046 */
2047 if (ns->lba_shift == 0)
2048 ns->lba_shift = 9;
2049 bs = 1 << ns->lba_shift;
2050
                 2051        /* XXX: PI support requires the metadata size to equal the T10 PI tuple size */
2052 pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
2053 id->dps & NVME_NS_DPS_PI_MASK : 0;
2054
2055 if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
2056 ns->ms != old_ms ||
2057 bs != queue_logical_block_size(disk->queue) ||
2058 (ns->ms && id->flbas & NVME_NS_FLBAS_META_EXT)))
2059 blk_integrity_unregister(disk);
2060
2061 ns->pi_type = pi_type;
2062 blk_queue_logical_block_size(ns->queue, bs);
2063
                 2064        if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) &&
                 2065                                        !(id->flbas & NVME_NS_FLBAS_META_EXT))
                 2066                nvme_init_integrity(ns);
2067
2068 if (id->ncap == 0 || (ns->ms && !blk_get_integrity(disk)))
2069 set_capacity(disk, 0);
2070 else
2071 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
2072
2073 if (dev->oncs & NVME_CTRL_ONCS_DSM)
2074 nvme_config_discard(ns);
1899 2075
1900 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
1901 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
1902 free:
1903 dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr); 2076 dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
1904 return 0; 2077 return 0;
1905} 2078}
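The ms == sizeof(struct t10_pi_tuple) comparison above keys off the 8-byte Type 1/2/3 protection tuple. Its layout, as a standalone sketch (the kernel's definition uses big-endian __be16/__be32 fields; plain integers are used here for illustration):

	#include <stdint.h>
	#include <stdio.h>

	struct t10_pi_tuple {
		uint16_t guard_tag;	/* CRC16 of the data block */
		uint16_t app_tag;	/* application-defined */
		uint32_t ref_tag;	/* typically the low 32 bits of the LBA */
	};

	int main(void)
	{
		/* 2 + 2 + 4 = 8 bytes: the "ns->ms == 8" case in nvme_queue_rq() */
		printf("T10 PI tuple: %zu bytes\n", sizeof(struct t10_pi_tuple));
		return 0;
	}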
@@ -1923,8 +2096,7 @@ static int nvme_kthread(void *data)
1923 spin_lock(&dev_list_lock); 2096 spin_lock(&dev_list_lock);
1924 list_for_each_entry_safe(dev, next, &dev_list, node) { 2097 list_for_each_entry_safe(dev, next, &dev_list, node) {
1925 int i; 2098 int i;
1926 if (readl(&dev->bar->csts) & NVME_CSTS_CFS && 2099 if (readl(&dev->bar->csts) & NVME_CSTS_CFS) {
1927 dev->initialized) {
1928 if (work_busy(&dev->reset_work)) 2100 if (work_busy(&dev->reset_work))
1929 continue; 2101 continue;
1930 list_del_init(&dev->node); 2102 list_del_init(&dev->node);
@@ -1956,30 +2128,16 @@ static int nvme_kthread(void *data)
1956 return 0; 2128 return 0;
1957} 2129}
1958 2130
1959static void nvme_config_discard(struct nvme_ns *ns) 2131static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
1960{
1961 u32 logical_block_size = queue_logical_block_size(ns->queue);
1962 ns->queue->limits.discard_zeroes_data = 0;
1963 ns->queue->limits.discard_alignment = logical_block_size;
1964 ns->queue->limits.discard_granularity = logical_block_size;
1965 ns->queue->limits.max_discard_sectors = 0xffffffff;
1966 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
1967}
1968
1969static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
1970 struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
1971{ 2132{
1972 struct nvme_ns *ns; 2133 struct nvme_ns *ns;
1973 struct gendisk *disk; 2134 struct gendisk *disk;
1974 int node = dev_to_node(&dev->pci_dev->dev); 2135 int node = dev_to_node(&dev->pci_dev->dev);
1975 int lbaf;
1976
1977 if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
1978 return NULL;
1979 2136
1980 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); 2137 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
1981 if (!ns) 2138 if (!ns)
1982 return NULL; 2139 return;
2140
1983 ns->queue = blk_mq_init_queue(&dev->tagset); 2141 ns->queue = blk_mq_init_queue(&dev->tagset);
1984 if (IS_ERR(ns->queue)) 2142 if (IS_ERR(ns->queue))
1985 goto out_free_ns; 2143 goto out_free_ns;
@@ -1995,9 +2153,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
1995 2153
1996 ns->ns_id = nsid; 2154 ns->ns_id = nsid;
1997 ns->disk = disk; 2155 ns->disk = disk;
 1998        lbaf = id->flbas & 0xf;                                          2156        ns->lba_shift = 9; /* default to 512 byte sectors until the disk is validated */
1999 ns->lba_shift = id->lbaf[lbaf].ds; 2157 list_add_tail(&ns->list, &dev->namespaces);
2000 ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); 2158
2001 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); 2159 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
2002 if (dev->max_hw_sectors) 2160 if (dev->max_hw_sectors)
2003 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); 2161 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
@@ -2011,21 +2169,26 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
2011 disk->fops = &nvme_fops; 2169 disk->fops = &nvme_fops;
2012 disk->private_data = ns; 2170 disk->private_data = ns;
2013 disk->queue = ns->queue; 2171 disk->queue = ns->queue;
2014 disk->driverfs_dev = &dev->pci_dev->dev; 2172 disk->driverfs_dev = dev->device;
2015 disk->flags = GENHD_FL_EXT_DEVT; 2173 disk->flags = GENHD_FL_EXT_DEVT;
2016 sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid); 2174 sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
2017 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
2018
2019 if (dev->oncs & NVME_CTRL_ONCS_DSM)
2020 nvme_config_discard(ns);
2021
2022 return ns;
2023 2175
2176 /*
2177 * Initialize capacity to 0 until we establish the namespace format and
                 2178         * set up integrity extensions if necessary. The revalidate_disk call
                 2179         * after add_disk lets the driver register for integrity if the format
2180 * requires it.
2181 */
2182 set_capacity(disk, 0);
2183 nvme_revalidate_disk(ns->disk);
2184 add_disk(ns->disk);
2185 if (ns->ms)
2186 revalidate_disk(ns->disk);
2187 return;
2024 out_free_queue: 2188 out_free_queue:
2025 blk_cleanup_queue(ns->queue); 2189 blk_cleanup_queue(ns->queue);
2026 out_free_ns: 2190 out_free_ns:
2027 kfree(ns); 2191 kfree(ns);
2028 return NULL;
2029} 2192}
2030 2193
2031static void nvme_create_io_queues(struct nvme_dev *dev) 2194static void nvme_create_io_queues(struct nvme_dev *dev)
@@ -2150,22 +2313,20 @@ static int nvme_dev_add(struct nvme_dev *dev)
2150 struct pci_dev *pdev = dev->pci_dev; 2313 struct pci_dev *pdev = dev->pci_dev;
2151 int res; 2314 int res;
2152 unsigned nn, i; 2315 unsigned nn, i;
2153 struct nvme_ns *ns;
2154 struct nvme_id_ctrl *ctrl; 2316 struct nvme_id_ctrl *ctrl;
2155 struct nvme_id_ns *id_ns;
2156 void *mem; 2317 void *mem;
2157 dma_addr_t dma_addr; 2318 dma_addr_t dma_addr;
2158 int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12; 2319 int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
2159 2320
2160 mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL); 2321 mem = dma_alloc_coherent(&pdev->dev, 4096, &dma_addr, GFP_KERNEL);
2161 if (!mem) 2322 if (!mem)
2162 return -ENOMEM; 2323 return -ENOMEM;
2163 2324
2164 res = nvme_identify(dev, 0, 1, dma_addr); 2325 res = nvme_identify(dev, 0, 1, dma_addr);
2165 if (res) { 2326 if (res) {
2166 dev_err(&pdev->dev, "Identify Controller failed (%d)\n", res); 2327 dev_err(&pdev->dev, "Identify Controller failed (%d)\n", res);
2167 res = -EIO; 2328 dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
2168 goto out; 2329 return -EIO;
2169 } 2330 }
2170 2331
2171 ctrl = mem; 2332 ctrl = mem;
@@ -2191,6 +2352,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
2191 } else 2352 } else
2192 dev->max_hw_sectors = max_hw_sectors; 2353 dev->max_hw_sectors = max_hw_sectors;
2193 } 2354 }
2355 dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
2194 2356
2195 dev->tagset.ops = &nvme_mq_ops; 2357 dev->tagset.ops = &nvme_mq_ops;
2196 dev->tagset.nr_hw_queues = dev->online_queues - 1; 2358 dev->tagset.nr_hw_queues = dev->online_queues - 1;
@@ -2203,33 +2365,12 @@ static int nvme_dev_add(struct nvme_dev *dev)
2203 dev->tagset.driver_data = dev; 2365 dev->tagset.driver_data = dev;
2204 2366
2205 if (blk_mq_alloc_tag_set(&dev->tagset)) 2367 if (blk_mq_alloc_tag_set(&dev->tagset))
2206 goto out; 2368 return 0;
2207
2208 id_ns = mem;
2209 for (i = 1; i <= nn; i++) {
2210 res = nvme_identify(dev, i, 0, dma_addr);
2211 if (res)
2212 continue;
2213
2214 if (id_ns->ncap == 0)
2215 continue;
2216
2217 res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
2218 dma_addr + 4096, NULL);
2219 if (res)
2220 memset(mem + 4096, 0, 4096);
2221 2369
2222 ns = nvme_alloc_ns(dev, i, mem, mem + 4096); 2370 for (i = 1; i <= nn; i++)
2223 if (ns) 2371 nvme_alloc_ns(dev, i);
2224 list_add_tail(&ns->list, &dev->namespaces);
2225 }
2226 list_for_each_entry(ns, &dev->namespaces, list)
2227 add_disk(ns->disk);
2228 res = 0;
2229 2372
2230 out: 2373 return 0;
2231 dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
2232 return res;
2233} 2374}
2234 2375
2235static int nvme_dev_map(struct nvme_dev *dev) 2376static int nvme_dev_map(struct nvme_dev *dev)
@@ -2358,8 +2499,6 @@ static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
2358static void nvme_del_queue_end(struct nvme_queue *nvmeq) 2499static void nvme_del_queue_end(struct nvme_queue *nvmeq)
2359{ 2500{
2360 struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx; 2501 struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
2361
2362 nvme_clear_queue(nvmeq);
2363 nvme_put_dq(dq); 2502 nvme_put_dq(dq);
2364} 2503}
2365 2504
@@ -2502,7 +2641,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
2502 int i; 2641 int i;
2503 u32 csts = -1; 2642 u32 csts = -1;
2504 2643
2505 dev->initialized = 0;
2506 nvme_dev_list_remove(dev); 2644 nvme_dev_list_remove(dev);
2507 2645
2508 if (dev->bar) { 2646 if (dev->bar) {
@@ -2513,7 +2651,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
2513 for (i = dev->queue_count - 1; i >= 0; i--) { 2651 for (i = dev->queue_count - 1; i >= 0; i--) {
2514 struct nvme_queue *nvmeq = dev->queues[i]; 2652 struct nvme_queue *nvmeq = dev->queues[i];
2515 nvme_suspend_queue(nvmeq); 2653 nvme_suspend_queue(nvmeq);
2516 nvme_clear_queue(nvmeq);
2517 } 2654 }
2518 } else { 2655 } else {
2519 nvme_disable_io_queues(dev); 2656 nvme_disable_io_queues(dev);
@@ -2521,6 +2658,9 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
2521 nvme_disable_queue(dev, 0); 2658 nvme_disable_queue(dev, 0);
2522 } 2659 }
2523 nvme_dev_unmap(dev); 2660 nvme_dev_unmap(dev);
2661
2662 for (i = dev->queue_count - 1; i >= 0; i--)
2663 nvme_clear_queue(dev->queues[i]);
2524} 2664}
2525 2665
2526static void nvme_dev_remove(struct nvme_dev *dev) 2666static void nvme_dev_remove(struct nvme_dev *dev)
@@ -2528,8 +2668,11 @@ static void nvme_dev_remove(struct nvme_dev *dev)
2528 struct nvme_ns *ns; 2668 struct nvme_ns *ns;
2529 2669
2530 list_for_each_entry(ns, &dev->namespaces, list) { 2670 list_for_each_entry(ns, &dev->namespaces, list) {
2531 if (ns->disk->flags & GENHD_FL_UP) 2671 if (ns->disk->flags & GENHD_FL_UP) {
2672 if (blk_get_integrity(ns->disk))
2673 blk_integrity_unregister(ns->disk);
2532 del_gendisk(ns->disk); 2674 del_gendisk(ns->disk);
2675 }
2533 if (!blk_queue_dying(ns->queue)) { 2676 if (!blk_queue_dying(ns->queue)) {
2534 blk_mq_abort_requeue_list(ns->queue); 2677 blk_mq_abort_requeue_list(ns->queue);
2535 blk_cleanup_queue(ns->queue); 2678 blk_cleanup_queue(ns->queue);
@@ -2611,6 +2754,7 @@ static void nvme_free_dev(struct kref *kref)
2611 struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref); 2754 struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
2612 2755
2613 pci_dev_put(dev->pci_dev); 2756 pci_dev_put(dev->pci_dev);
2757 put_device(dev->device);
2614 nvme_free_namespaces(dev); 2758 nvme_free_namespaces(dev);
2615 nvme_release_instance(dev); 2759 nvme_release_instance(dev);
2616 blk_mq_free_tag_set(&dev->tagset); 2760 blk_mq_free_tag_set(&dev->tagset);
@@ -2622,11 +2766,27 @@ static void nvme_free_dev(struct kref *kref)
2622 2766
2623static int nvme_dev_open(struct inode *inode, struct file *f) 2767static int nvme_dev_open(struct inode *inode, struct file *f)
2624{ 2768{
2625 struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev, 2769 struct nvme_dev *dev;
2626 miscdev); 2770 int instance = iminor(inode);
2627 kref_get(&dev->kref); 2771 int ret = -ENODEV;
2628 f->private_data = dev; 2772
2629 return 0; 2773 spin_lock(&dev_list_lock);
2774 list_for_each_entry(dev, &dev_list, node) {
2775 if (dev->instance == instance) {
2776 if (!dev->admin_q) {
2777 ret = -EWOULDBLOCK;
2778 break;
2779 }
2780 if (!kref_get_unless_zero(&dev->kref))
2781 break;
2782 f->private_data = dev;
2783 ret = 0;
2784 break;
2785 }
2786 }
2787 spin_unlock(&dev_list_lock);
2788
2789 return ret;
2630} 2790}
2631 2791
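The rewritten open() above is an instance of the standard lookup-under-lock pattern: take a reference only if the object's refcount has not already dropped to zero. In generic form (a sketch; the struct and list names are hypothetical):

	static struct obj *obj_find_get(int id)
	{
		struct obj *o, *found = NULL;

		spin_lock(&obj_list_lock);
		list_for_each_entry(o, &obj_list, node) {
			/* kref_get_unless_zero() fails if teardown already began */
			if (o->id == id && kref_get_unless_zero(&o->kref)) {
				found = o;
				break;
			}
		}
		spin_unlock(&obj_list_lock);

		return found;	/* caller drops with kref_put() when done */
	}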
2632static int nvme_dev_release(struct inode *inode, struct file *f) 2792static int nvme_dev_release(struct inode *inode, struct file *f)
@@ -2768,7 +2928,6 @@ static int nvme_dev_resume(struct nvme_dev *dev)
2768 nvme_unfreeze_queues(dev); 2928 nvme_unfreeze_queues(dev);
2769 nvme_set_irq_hints(dev); 2929 nvme_set_irq_hints(dev);
2770 } 2930 }
2771 dev->initialized = 1;
2772 return 0; 2931 return 0;
2773} 2932}
2774 2933
@@ -2799,6 +2958,7 @@ static void nvme_reset_workfn(struct work_struct *work)
2799 dev->reset_workfn(work); 2958 dev->reset_workfn(work);
2800} 2959}
2801 2960
2961static void nvme_async_probe(struct work_struct *work);
2802static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2962static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2803{ 2963{
2804 int node, result = -ENOMEM; 2964 int node, result = -ENOMEM;
@@ -2834,37 +2994,20 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2834 goto release; 2994 goto release;
2835 2995
2836 kref_init(&dev->kref); 2996 kref_init(&dev->kref);
2837 result = nvme_dev_start(dev); 2997 dev->device = device_create(nvme_class, &pdev->dev,
2838 if (result) 2998 MKDEV(nvme_char_major, dev->instance),
2999 dev, "nvme%d", dev->instance);
3000 if (IS_ERR(dev->device)) {
3001 result = PTR_ERR(dev->device);
2839 goto release_pools; 3002 goto release_pools;
3003 }
3004 get_device(dev->device);
2840 3005
2841 if (dev->online_queues > 1) 3006 INIT_WORK(&dev->probe_work, nvme_async_probe);
2842 result = nvme_dev_add(dev); 3007 schedule_work(&dev->probe_work);
2843 if (result)
2844 goto shutdown;
2845
2846 scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
2847 dev->miscdev.minor = MISC_DYNAMIC_MINOR;
2848 dev->miscdev.parent = &pdev->dev;
2849 dev->miscdev.name = dev->name;
2850 dev->miscdev.fops = &nvme_dev_fops;
2851 result = misc_register(&dev->miscdev);
2852 if (result)
2853 goto remove;
2854
2855 nvme_set_irq_hints(dev);
2856
2857 dev->initialized = 1;
2858 return 0; 3008 return 0;
2859 3009
2860 remove:
2861 nvme_dev_remove(dev);
2862 nvme_dev_remove_admin(dev);
2863 nvme_free_namespaces(dev);
2864 shutdown:
2865 nvme_dev_shutdown(dev);
2866 release_pools: 3010 release_pools:
2867 nvme_free_queues(dev, 0);
2868 nvme_release_prp_pools(dev); 3011 nvme_release_prp_pools(dev);
2869 release: 3012 release:
2870 nvme_release_instance(dev); 3013 nvme_release_instance(dev);
@@ -2877,6 +3020,29 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2877 return result; 3020 return result;
2878} 3021}
2879 3022
3023static void nvme_async_probe(struct work_struct *work)
3024{
3025 struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
3026 int result;
3027
3028 result = nvme_dev_start(dev);
3029 if (result)
3030 goto reset;
3031
3032 if (dev->online_queues > 1)
3033 result = nvme_dev_add(dev);
3034 if (result)
3035 goto reset;
3036
3037 nvme_set_irq_hints(dev);
3038 return;
3039 reset:
3040 if (!work_busy(&dev->reset_work)) {
3041 dev->reset_workfn = nvme_reset_failed_dev;
3042 queue_work(nvme_workq, &dev->reset_work);
3043 }
3044}
3045
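With probing moved to a work item, the matching rule is that every teardown path must flush that work first, as nvme_remove() now does before anything else. The contract in miniature (a sketch; the "my_" names are hypothetical):

	static void my_remove(struct pci_dev *pdev)
	{
		struct my_dev *dev = pci_get_drvdata(pdev);

		flush_work(&dev->probe_work);	/* async probe finished or never ran */
		flush_work(&dev->reset_work);	/* no reset still in flight */
		/* ...normal teardown is now safe... */
	}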
2880static void nvme_reset_notify(struct pci_dev *pdev, bool prepare) 3046static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
2881{ 3047{
2882 struct nvme_dev *dev = pci_get_drvdata(pdev); 3048 struct nvme_dev *dev = pci_get_drvdata(pdev);
@@ -2902,11 +3068,12 @@ static void nvme_remove(struct pci_dev *pdev)
2902 spin_unlock(&dev_list_lock); 3068 spin_unlock(&dev_list_lock);
2903 3069
2904 pci_set_drvdata(pdev, NULL); 3070 pci_set_drvdata(pdev, NULL);
3071 flush_work(&dev->probe_work);
2905 flush_work(&dev->reset_work); 3072 flush_work(&dev->reset_work);
2906 misc_deregister(&dev->miscdev);
2907 nvme_dev_shutdown(dev); 3073 nvme_dev_shutdown(dev);
2908 nvme_dev_remove(dev); 3074 nvme_dev_remove(dev);
2909 nvme_dev_remove_admin(dev); 3075 nvme_dev_remove_admin(dev);
3076 device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
2910 nvme_free_queues(dev, 0); 3077 nvme_free_queues(dev, 0);
2911 nvme_release_prp_pools(dev); 3078 nvme_release_prp_pools(dev);
2912 kref_put(&dev->kref, nvme_free_dev); 3079 kref_put(&dev->kref, nvme_free_dev);
@@ -2990,11 +3157,26 @@ static int __init nvme_init(void)
2990 else if (result > 0) 3157 else if (result > 0)
2991 nvme_major = result; 3158 nvme_major = result;
2992 3159
3160 result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
3161 &nvme_dev_fops);
3162 if (result < 0)
3163 goto unregister_blkdev;
3164 else if (result > 0)
3165 nvme_char_major = result;
3166
3167 nvme_class = class_create(THIS_MODULE, "nvme");
3168 if (!nvme_class)
3169 goto unregister_chrdev;
3170
2993 result = pci_register_driver(&nvme_driver); 3171 result = pci_register_driver(&nvme_driver);
2994 if (result) 3172 if (result)
2995 goto unregister_blkdev; 3173 goto destroy_class;
2996 return 0; 3174 return 0;
2997 3175
3176 destroy_class:
3177 class_destroy(nvme_class);
3178 unregister_chrdev:
3179 __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
2998 unregister_blkdev: 3180 unregister_blkdev:
2999 unregister_blkdev(nvme_major, "nvme"); 3181 unregister_blkdev(nvme_major, "nvme");
3000 kill_workq: 3182 kill_workq:
@@ -3005,9 +3187,10 @@ static int __init nvme_init(void)
3005static void __exit nvme_exit(void) 3187static void __exit nvme_exit(void)
3006{ 3188{
3007 pci_unregister_driver(&nvme_driver); 3189 pci_unregister_driver(&nvme_driver);
3008 unregister_hotcpu_notifier(&nvme_nb);
3009 unregister_blkdev(nvme_major, "nvme"); 3190 unregister_blkdev(nvme_major, "nvme");
3010 destroy_workqueue(nvme_workq); 3191 destroy_workqueue(nvme_workq);
3192 class_destroy(nvme_class);
3193 __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
3011 BUG_ON(nvme_thread && !IS_ERR(nvme_thread)); 3194 BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
3012 _nvme_check_size(); 3195 _nvme_check_size();
3013} 3196}
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 5e78568026c3..e10196e0182d 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -779,10 +779,8 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
779 struct nvme_dev *dev = ns->dev; 779 struct nvme_dev *dev = ns->dev;
780 dma_addr_t dma_addr; 780 dma_addr_t dma_addr;
781 void *mem; 781 void *mem;
782 struct nvme_id_ctrl *id_ctrl;
783 int res = SNTI_TRANSLATION_SUCCESS; 782 int res = SNTI_TRANSLATION_SUCCESS;
784 int nvme_sc; 783 int nvme_sc;
785 u8 ieee[4];
786 int xfer_len; 784 int xfer_len;
787 __be32 tmp_id = cpu_to_be32(ns->ns_id); 785 __be32 tmp_id = cpu_to_be32(ns->ns_id);
788 786
@@ -793,46 +791,60 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
793 goto out_dma; 791 goto out_dma;
794 } 792 }
795 793
796 /* nvme controller identify */ 794 memset(inq_response, 0, alloc_len);
797 nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
798 res = nvme_trans_status_code(hdr, nvme_sc);
799 if (res)
800 goto out_free;
801 if (nvme_sc) {
802 res = nvme_sc;
803 goto out_free;
804 }
805 id_ctrl = mem;
806
807 /* Since SCSI tried to save 4 bits... [SPC-4(r34) Table 591] */
808 ieee[0] = id_ctrl->ieee[0] << 4;
809 ieee[1] = id_ctrl->ieee[0] >> 4 | id_ctrl->ieee[1] << 4;
810 ieee[2] = id_ctrl->ieee[1] >> 4 | id_ctrl->ieee[2] << 4;
811 ieee[3] = id_ctrl->ieee[2] >> 4;
812
813 memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
814 inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE; /* Page Code */ 795 inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE; /* Page Code */
815 inq_response[3] = 20; /* Page Length */ 796 if (readl(&dev->bar->vs) >= NVME_VS(1, 1)) {
816 /* Designation Descriptor start */ 797 struct nvme_id_ns *id_ns = mem;
817 inq_response[4] = 0x01; /* Proto ID=0h | Code set=1h */ 798 void *eui = id_ns->eui64;
818 inq_response[5] = 0x03; /* PIV=0b | Asso=00b | Designator Type=3h */ 799 int len = sizeof(id_ns->eui64);
819 inq_response[6] = 0x00; /* Rsvd */
820 inq_response[7] = 16; /* Designator Length */
821 /* Designator start */
822 inq_response[8] = 0x60 | ieee[3]; /* NAA=6h | IEEE ID MSB, High nibble*/
823 inq_response[9] = ieee[2]; /* IEEE ID */
824 inq_response[10] = ieee[1]; /* IEEE ID */
825 inq_response[11] = ieee[0]; /* IEEE ID| Vendor Specific ID... */
826 inq_response[12] = (dev->pci_dev->vendor & 0xFF00) >> 8;
827 inq_response[13] = (dev->pci_dev->vendor & 0x00FF);
828 inq_response[14] = dev->serial[0];
829 inq_response[15] = dev->serial[1];
830 inq_response[16] = dev->model[0];
831 inq_response[17] = dev->model[1];
832 memcpy(&inq_response[18], &tmp_id, sizeof(u32));
833 /* Last 2 bytes are zero */
834 800
835 xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); 801 nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
802 res = nvme_trans_status_code(hdr, nvme_sc);
803 if (res)
804 goto out_free;
805 if (nvme_sc) {
806 res = nvme_sc;
807 goto out_free;
808 }
809
810 if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) {
811 if (bitmap_empty(eui, len * 8)) {
812 eui = id_ns->nguid;
813 len = sizeof(id_ns->nguid);
814 }
815 }
816 if (bitmap_empty(eui, len * 8))
817 goto scsi_string;
818
819 inq_response[3] = 4 + len; /* Page Length */
820 /* Designation Descriptor start */
821 inq_response[4] = 0x01; /* Proto ID=0h | Code set=1h */
822 inq_response[5] = 0x02; /* PIV=0b | Asso=00b | Designator Type=2h */
823 inq_response[6] = 0x00; /* Rsvd */
824 inq_response[7] = len; /* Designator Length */
825 memcpy(&inq_response[8], eui, len);
826 } else {
827 scsi_string:
828 if (alloc_len < 72) {
829 res = nvme_trans_completion(hdr,
830 SAM_STAT_CHECK_CONDITION,
831 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
832 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
833 goto out_free;
834 }
835 inq_response[3] = 0x48; /* Page Length */
836 /* Designation Descriptor start */
837 inq_response[4] = 0x03; /* Proto ID=0h | Code set=3h */
838 inq_response[5] = 0x08; /* PIV=0b | Asso=00b | Designator Type=8h */
839 inq_response[6] = 0x00; /* Rsvd */
840 inq_response[7] = 0x44; /* Designator Length */
841
842 sprintf(&inq_response[8], "%04x", dev->pci_dev->vendor);
843 memcpy(&inq_response[12], dev->model, sizeof(dev->model));
844 sprintf(&inq_response[52], "%04x", tmp_id);
845 memcpy(&inq_response[56], dev->serial, sizeof(dev->serial));
846 }
847 xfer_len = alloc_len;
836 res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); 848 res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
837 849
838 out_free: 850 out_free:
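The constants the rewritten page uses above come from SPC-4's designation descriptor header: code set 1h is binary paired with designator type 2h (EUI-64), and code set 3h is UTF-8 paired with type 8h (SCSI name string). A standalone sketch of that 4-byte header (the helper name is hypothetical):

	#include <stddef.h>
	#include <stdint.h>

	static size_t build_desig_hdr(uint8_t *p, uint8_t code_set,
				      uint8_t desig_type, uint8_t len)
	{
		p[0] = 0x00 | code_set;   /* PROTOCOL IDENTIFIER=0h | CODE SET */
		p[1] = 0x00 | desig_type; /* PIV=0 | ASSOCIATION=0 | DESIGNATOR TYPE */
		p[2] = 0x00;              /* reserved */
		p[3] = len;               /* DESIGNATOR LENGTH */
		return 4 + len;           /* total descriptor size */
	}

	/* EUI-64 path above:      build_desig_hdr(&inq_response[4], 0x1, 0x2, len);  */
	/* SCSI name string path:  build_desig_hdr(&inq_response[4], 0x3, 0x8, 0x44); */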
@@ -1600,7 +1612,7 @@ static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
1600 /* 10 Byte CDB */ 1612 /* 10 Byte CDB */
1601 *bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) + 1613 *bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) +
1602 parm_list[MODE_SELECT_10_BD_OFFSET + 1]; 1614 parm_list[MODE_SELECT_10_BD_OFFSET + 1];
1603 *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] && 1615 *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &
1604 MODE_SELECT_10_LLBAA_MASK; 1616 MODE_SELECT_10_LLBAA_MASK;
1605 } else { 1617 } else {
1606 /* 6 Byte CDB */ 1618 /* 6 Byte CDB */
@@ -2222,7 +2234,7 @@ static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2222 page_code = GET_INQ_PAGE_CODE(cmd); 2234 page_code = GET_INQ_PAGE_CODE(cmd);
2223 alloc_len = GET_INQ_ALLOC_LENGTH(cmd); 2235 alloc_len = GET_INQ_ALLOC_LENGTH(cmd);
2224 2236
2225 inq_response = kmalloc(STANDARD_INQUIRY_LENGTH, GFP_KERNEL); 2237 inq_response = kmalloc(alloc_len, GFP_KERNEL);
2226 if (inq_response == NULL) { 2238 if (inq_response == NULL) {
2227 res = -ENOMEM; 2239 res = -ENOMEM;
2228 goto out_mem; 2240 goto out_mem;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 8a86b62466f7..b40af3203089 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -38,6 +38,7 @@
38#include <linux/kernel.h> 38#include <linux/kernel.h>
39#include <linux/device.h> 39#include <linux/device.h>
40#include <linux/module.h> 40#include <linux/module.h>
41#include <linux/blk-mq.h>
41#include <linux/fs.h> 42#include <linux/fs.h>
42#include <linux/blkdev.h> 43#include <linux/blkdev.h>
43#include <linux/slab.h> 44#include <linux/slab.h>
@@ -340,9 +341,7 @@ struct rbd_device {
340 341
341 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */ 342 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
342 343
343 struct list_head rq_queue; /* incoming rq queue */
344 spinlock_t lock; /* queue, flags, open_count */ 344 spinlock_t lock; /* queue, flags, open_count */
345 struct work_struct rq_work;
346 345
347 struct rbd_image_header header; 346 struct rbd_image_header header;
348 unsigned long flags; /* possibly lock protected */ 347 unsigned long flags; /* possibly lock protected */
@@ -360,6 +359,9 @@ struct rbd_device {
360 atomic_t parent_ref; 359 atomic_t parent_ref;
361 struct rbd_device *parent; 360 struct rbd_device *parent;
362 361
362 /* Block layer tags. */
363 struct blk_mq_tag_set tag_set;
364
363 /* protects updating the header */ 365 /* protects updating the header */
364 struct rw_semaphore header_rwsem; 366 struct rw_semaphore header_rwsem;
365 367
@@ -1817,7 +1819,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1817 1819
1818 /* 1820 /*
1819 * We support a 64-bit length, but ultimately it has to be 1821 * We support a 64-bit length, but ultimately it has to be
1820 * passed to blk_end_request(), which takes an unsigned int. 1822 * passed to the block layer, which just supports a 32-bit
1823 * length field.
1821 */ 1824 */
1822 obj_request->xferred = osd_req->r_reply_op_len[0]; 1825 obj_request->xferred = osd_req->r_reply_op_len[0];
1823 rbd_assert(obj_request->xferred < (u64)UINT_MAX); 1826 rbd_assert(obj_request->xferred < (u64)UINT_MAX);
@@ -2275,7 +2278,10 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2275 more = obj_request->which < img_request->obj_request_count - 1; 2278 more = obj_request->which < img_request->obj_request_count - 1;
2276 } else { 2279 } else {
2277 rbd_assert(img_request->rq != NULL); 2280 rbd_assert(img_request->rq != NULL);
2278 more = blk_end_request(img_request->rq, result, xferred); 2281
2282 more = blk_update_request(img_request->rq, result, xferred);
2283 if (!more)
2284 __blk_mq_end_request(img_request->rq, result);
2279 } 2285 }
2280 2286
2281 return more; 2287 return more;
@@ -3304,8 +3310,10 @@ out:
3304 return ret; 3310 return ret;
3305} 3311}
3306 3312
3307static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq) 3313static void rbd_queue_workfn(struct work_struct *work)
3308{ 3314{
3315 struct request *rq = blk_mq_rq_from_pdu(work);
3316 struct rbd_device *rbd_dev = rq->q->queuedata;
3309 struct rbd_img_request *img_request; 3317 struct rbd_img_request *img_request;
3310 struct ceph_snap_context *snapc = NULL; 3318 struct ceph_snap_context *snapc = NULL;
3311 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT; 3319 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
@@ -3314,6 +3322,13 @@ static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq)
3314 u64 mapping_size; 3322 u64 mapping_size;
3315 int result; 3323 int result;
3316 3324
3325 if (rq->cmd_type != REQ_TYPE_FS) {
3326 dout("%s: non-fs request type %d\n", __func__,
3327 (int) rq->cmd_type);
3328 result = -EIO;
3329 goto err;
3330 }
3331
3317 if (rq->cmd_flags & REQ_DISCARD) 3332 if (rq->cmd_flags & REQ_DISCARD)
3318 op_type = OBJ_OP_DISCARD; 3333 op_type = OBJ_OP_DISCARD;
3319 else if (rq->cmd_flags & REQ_WRITE) 3334 else if (rq->cmd_flags & REQ_WRITE)
@@ -3359,6 +3374,8 @@ static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq)
3359 goto err_rq; /* Shouldn't happen */ 3374 goto err_rq; /* Shouldn't happen */
3360 } 3375 }
3361 3376
3377 blk_mq_start_request(rq);
3378
3362 down_read(&rbd_dev->header_rwsem); 3379 down_read(&rbd_dev->header_rwsem);
3363 mapping_size = rbd_dev->mapping.size; 3380 mapping_size = rbd_dev->mapping.size;
3364 if (op_type != OBJ_OP_READ) { 3381 if (op_type != OBJ_OP_READ) {
@@ -3404,53 +3421,18 @@ err_rq:
3404 rbd_warn(rbd_dev, "%s %llx at %llx result %d", 3421 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
3405 obj_op_name(op_type), length, offset, result); 3422 obj_op_name(op_type), length, offset, result);
3406 ceph_put_snap_context(snapc); 3423 ceph_put_snap_context(snapc);
3407 blk_end_request_all(rq, result); 3424err:
3425 blk_mq_end_request(rq, result);
3408} 3426}
3409 3427
3410static void rbd_request_workfn(struct work_struct *work) 3428static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
3429 const struct blk_mq_queue_data *bd)
3411{ 3430{
3412 struct rbd_device *rbd_dev = 3431 struct request *rq = bd->rq;
3413 container_of(work, struct rbd_device, rq_work); 3432 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3414 struct request *rq, *next;
3415 LIST_HEAD(requests);
3416
3417 spin_lock_irq(&rbd_dev->lock); /* rq->q->queue_lock */
3418 list_splice_init(&rbd_dev->rq_queue, &requests);
3419 spin_unlock_irq(&rbd_dev->lock);
3420 3433
3421 list_for_each_entry_safe(rq, next, &requests, queuelist) { 3434 queue_work(rbd_wq, work);
3422 list_del_init(&rq->queuelist); 3435 return BLK_MQ_RQ_QUEUE_OK;
3423 rbd_handle_request(rbd_dev, rq);
3424 }
3425}
3426
3427/*
3428 * Called with q->queue_lock held and interrupts disabled, possibly on
3429 * the way to schedule(). Do not sleep here!
3430 */
3431static void rbd_request_fn(struct request_queue *q)
3432{
3433 struct rbd_device *rbd_dev = q->queuedata;
3434 struct request *rq;
3435 int queued = 0;
3436
3437 rbd_assert(rbd_dev);
3438
3439 while ((rq = blk_fetch_request(q))) {
3440 /* Ignore any non-FS requests that filter through. */
3441 if (rq->cmd_type != REQ_TYPE_FS) {
3442 dout("%s: non-fs request type %d\n", __func__,
3443 (int) rq->cmd_type);
3444 __blk_end_request_all(rq, 0);
3445 continue;
3446 }
3447
3448 list_add_tail(&rq->queuelist, &rbd_dev->rq_queue);
3449 queued++;
3450 }
3451
3452 if (queued)
3453 queue_work(rbd_wq, &rbd_dev->rq_work);
3454} 3436}
3455 3437
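rbd's conversion leans on the blk-mq per-request "pdu": tag_set.cmd_size reserves driver-private bytes behind every struct request, and a pair of helpers converts back and forth. Isolated (a sketch with hypothetical "my_" names; the callback signature matches the blk-mq API this diff uses):

	static void my_workfn(struct work_struct *work)
	{
		/* recover the owning request from its embedded work item */
		struct request *rq = blk_mq_rq_from_pdu(work);

		/* ...service rq, then blk_mq_end_request(rq, result)... */
	}

	static int my_init_request(void *data, struct request *rq,
				   unsigned int hctx_idx, unsigned int request_idx,
				   unsigned int numa_node)
	{
		/* valid because cmd_size == sizeof(struct work_struct) */
		INIT_WORK(blk_mq_rq_to_pdu(rq), my_workfn);
		return 0;
	}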
3456/* 3438/*
@@ -3511,6 +3493,7 @@ static void rbd_free_disk(struct rbd_device *rbd_dev)
3511 del_gendisk(disk); 3493 del_gendisk(disk);
3512 if (disk->queue) 3494 if (disk->queue)
3513 blk_cleanup_queue(disk->queue); 3495 blk_cleanup_queue(disk->queue);
3496 blk_mq_free_tag_set(&rbd_dev->tag_set);
3514 } 3497 }
3515 put_disk(disk); 3498 put_disk(disk);
3516} 3499}
@@ -3694,7 +3677,7 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3694 3677
3695 ret = rbd_dev_header_info(rbd_dev); 3678 ret = rbd_dev_header_info(rbd_dev);
3696 if (ret) 3679 if (ret)
3697 return ret; 3680 goto out;
3698 3681
3699 /* 3682 /*
3700 * If there is a parent, see if it has disappeared due to the 3683 * If there is a parent, see if it has disappeared due to the
@@ -3703,30 +3686,46 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3703 if (rbd_dev->parent) { 3686 if (rbd_dev->parent) {
3704 ret = rbd_dev_v2_parent_info(rbd_dev); 3687 ret = rbd_dev_v2_parent_info(rbd_dev);
3705 if (ret) 3688 if (ret)
3706 return ret; 3689 goto out;
3707 } 3690 }
3708 3691
3709 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) { 3692 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
3710 if (rbd_dev->mapping.size != rbd_dev->header.image_size) 3693 rbd_dev->mapping.size = rbd_dev->header.image_size;
3711 rbd_dev->mapping.size = rbd_dev->header.image_size;
3712 } else { 3694 } else {
3713 /* validate mapped snapshot's EXISTS flag */ 3695 /* validate mapped snapshot's EXISTS flag */
3714 rbd_exists_validate(rbd_dev); 3696 rbd_exists_validate(rbd_dev);
3715 } 3697 }
3716 3698
3699out:
3717 up_write(&rbd_dev->header_rwsem); 3700 up_write(&rbd_dev->header_rwsem);
3718 3701 if (!ret && mapping_size != rbd_dev->mapping.size)
3719 if (mapping_size != rbd_dev->mapping.size)
3720 rbd_dev_update_size(rbd_dev); 3702 rbd_dev_update_size(rbd_dev);
3721 3703
3704 return ret;
3705}
3706
3707static int rbd_init_request(void *data, struct request *rq,
3708 unsigned int hctx_idx, unsigned int request_idx,
3709 unsigned int numa_node)
3710{
3711 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3712
3713 INIT_WORK(work, rbd_queue_workfn);
3722 return 0; 3714 return 0;
3723} 3715}
3724 3716
3717static struct blk_mq_ops rbd_mq_ops = {
3718 .queue_rq = rbd_queue_rq,
3719 .map_queue = blk_mq_map_queue,
3720 .init_request = rbd_init_request,
3721};
3722
3725static int rbd_init_disk(struct rbd_device *rbd_dev) 3723static int rbd_init_disk(struct rbd_device *rbd_dev)
3726{ 3724{
3727 struct gendisk *disk; 3725 struct gendisk *disk;
3728 struct request_queue *q; 3726 struct request_queue *q;
3729 u64 segment_size; 3727 u64 segment_size;
3728 int err;
3730 3729
3731 /* create gendisk info */ 3730 /* create gendisk info */
3732 disk = alloc_disk(single_major ? 3731 disk = alloc_disk(single_major ?
@@ -3744,10 +3743,25 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
3744 disk->fops = &rbd_bd_ops; 3743 disk->fops = &rbd_bd_ops;
3745 disk->private_data = rbd_dev; 3744 disk->private_data = rbd_dev;
3746 3745
3747 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock); 3746 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
3748 if (!q) 3747 rbd_dev->tag_set.ops = &rbd_mq_ops;
3748 rbd_dev->tag_set.queue_depth = BLKDEV_MAX_RQ;
3749 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
3750 rbd_dev->tag_set.flags =
3751 BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
3752 rbd_dev->tag_set.nr_hw_queues = 1;
3753 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
3754
3755 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
3756 if (err)
3749 goto out_disk; 3757 goto out_disk;
3750 3758
3759 q = blk_mq_init_queue(&rbd_dev->tag_set);
3760 if (IS_ERR(q)) {
3761 err = PTR_ERR(q);
3762 goto out_tag_set;
3763 }
3764
3751 /* We use the default size, but let's be explicit about it. */ 3765 /* We use the default size, but let's be explicit about it. */
3752 blk_queue_physical_block_size(q, SECTOR_SIZE); 3766 blk_queue_physical_block_size(q, SECTOR_SIZE);
3753 3767
@@ -3773,10 +3787,11 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
3773 rbd_dev->disk = disk; 3787 rbd_dev->disk = disk;
3774 3788
3775 return 0; 3789 return 0;
3790out_tag_set:
3791 blk_mq_free_tag_set(&rbd_dev->tag_set);
3776out_disk: 3792out_disk:
3777 put_disk(disk); 3793 put_disk(disk);
3778 3794 return err;
3779 return -ENOMEM;
3780} 3795}
3781 3796
3782/* 3797/*
@@ -4033,8 +4048,6 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
4033 return NULL; 4048 return NULL;
4034 4049
4035 spin_lock_init(&rbd_dev->lock); 4050 spin_lock_init(&rbd_dev->lock);
4036 INIT_LIST_HEAD(&rbd_dev->rq_queue);
4037 INIT_WORK(&rbd_dev->rq_work, rbd_request_workfn);
4038 rbd_dev->flags = 0; 4051 rbd_dev->flags = 0;
4039 atomic_set(&rbd_dev->parent_ref, 0); 4052 atomic_set(&rbd_dev->parent_ref, 0);
4040 INIT_LIST_HEAD(&rbd_dev->node); 4053 INIT_LIST_HEAD(&rbd_dev->node);
@@ -4274,32 +4287,22 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4274 } 4287 }
4275 4288
4276 /* 4289 /*
4277 * We always update the parent overlap. If it's zero we 4290 * We always update the parent overlap. If it's zero we issue
4278 * treat it specially. 4291 * a warning, as we will proceed as if there was no parent.
4279 */ 4292 */
4280 rbd_dev->parent_overlap = overlap;
4281 if (!overlap) { 4293 if (!overlap) {
4282
4283 /* A null parent_spec indicates it's the initial probe */
4284
4285 if (parent_spec) { 4294 if (parent_spec) {
4286 /* 4295 /* refresh, careful to warn just once */
4287 * The overlap has become zero, so the clone 4296 if (rbd_dev->parent_overlap)
4288 * must have been resized down to 0 at some 4297 rbd_warn(rbd_dev,
4289 * point. Treat this the same as a flatten. 4298 "clone now standalone (overlap became 0)");
4290 */
4291 rbd_dev_parent_put(rbd_dev);
4292 pr_info("%s: clone image now standalone\n",
4293 rbd_dev->disk->disk_name);
4294 } else { 4299 } else {
4295 /* 4300 /* initial probe */
4296 * For the initial probe, if we find the 4301 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
4297 * overlap is zero we just pretend there was
4298 * no parent image.
4299 */
4300 rbd_warn(rbd_dev, "ignoring parent with overlap 0");
4301 } 4302 }
4302 } 4303 }
4304 rbd_dev->parent_overlap = overlap;
4305
4303out: 4306out:
4304 ret = 0; 4307 ret = 0;
4305out_err: 4308out_err:
@@ -4771,36 +4774,6 @@ static inline size_t next_token(const char **buf)
4771} 4774}
4772 4775
4773/* 4776/*
4774 * Finds the next token in *buf, and if the provided token buffer is
4775 * big enough, copies the found token into it. The result, if
4776 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4777 * must be terminated with '\0' on entry.
4778 *
4779 * Returns the length of the token found (not including the '\0').
4780 * Return value will be 0 if no token is found, and it will be >=
4781 * token_size if the token would not fit.
4782 *
4783 * The *buf pointer will be updated to point beyond the end of the
4784 * found token. Note that this occurs even if the token buffer is
4785 * too small to hold it.
4786 */
4787static inline size_t copy_token(const char **buf,
4788 char *token,
4789 size_t token_size)
4790{
4791 size_t len;
4792
4793 len = next_token(buf);
4794 if (len < token_size) {
4795 memcpy(token, *buf, len);
4796 *(token + len) = '\0';
4797 }
4798 *buf += len;
4799
4800 return len;
4801}
4802
4803/*
4804 * Finds the next token in *buf, dynamically allocates a buffer big 4777 * Finds the next token in *buf, dynamically allocates a buffer big
4805 * enough to hold a copy of it, and copies the token into the new 4778 * enough to hold a copy of it, and copies the token into the new
4806 * buffer. The copy is guaranteed to be terminated with '\0'. Note 4779 * buffer. The copy is guaranteed to be terminated with '\0'. Note
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index cdfbd21e3597..655e570b9b31 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -28,8 +28,7 @@ struct virtio_blk_vq {
28 char name[VQ_NAME_LEN]; 28 char name[VQ_NAME_LEN];
29} ____cacheline_aligned_in_smp; 29} ____cacheline_aligned_in_smp;
30 30
31struct virtio_blk 31struct virtio_blk {
32{
33 struct virtio_device *vdev; 32 struct virtio_device *vdev;
34 33
35 /* The disk structure for the kernel. */ 34 /* The disk structure for the kernel. */
@@ -52,8 +51,7 @@ struct virtio_blk
52 struct virtio_blk_vq *vqs; 51 struct virtio_blk_vq *vqs;
53}; 52};
54 53
55struct virtblk_req 54struct virtblk_req {
56{
57 struct request *req; 55 struct request *req;
58 struct virtio_blk_outhdr out_hdr; 56 struct virtio_blk_outhdr out_hdr;
59 struct virtio_scsi_inhdr in_hdr; 57 struct virtio_scsi_inhdr in_hdr;
@@ -575,6 +573,12 @@ static int virtblk_probe(struct virtio_device *vdev)
575 u16 min_io_size; 573 u16 min_io_size;
576 u8 physical_block_exp, alignment_offset; 574 u8 physical_block_exp, alignment_offset;
577 575
576 if (!vdev->config->get) {
577 dev_err(&vdev->dev, "%s failure: config access disabled\n",
578 __func__);
579 return -EINVAL;
580 }
581
578 err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS), 582 err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
579 GFP_KERNEL); 583 GFP_KERNEL);
580 if (err < 0) 584 if (err < 0)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 8e233edd7a09..871bd3550cb0 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -528,7 +528,7 @@ out_cleanup:
528static inline void update_used_max(struct zram *zram, 528static inline void update_used_max(struct zram *zram,
529 const unsigned long pages) 529 const unsigned long pages)
530{ 530{
531 int old_max, cur_max; 531 unsigned long old_max, cur_max;
532 532
533 old_max = atomic_long_read(&zram->stats.max_used_pages); 533 old_max = atomic_long_read(&zram->stats.max_used_pages);
534 534
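Beyond the signedness fix, update_used_max() is the usual lock-free "ratchet a maximum" loop; zram implements it with atomic_long_cmpxchg, and the same idea in standalone C11 looks like this (illustration only):

	#include <stdatomic.h>

	static void update_max(atomic_ulong *max, unsigned long pages)
	{
		unsigned long old = atomic_load(max);

		/* retry until we publish the new maximum or observe a larger one */
		while (pages > old &&
		       !atomic_compare_exchange_weak(max, &old, pages))
			;
	}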
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 3ca2e1bf7bfa..8c1bf6190533 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -273,6 +273,7 @@ static const struct usb_device_id blacklist_table[] = {
273 { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL }, 273 { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
274 274
275 /* Intel Bluetooth devices */ 275 /* Intel Bluetooth devices */
276 { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
276 { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL }, 277 { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
277 { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL }, 278 { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
278 { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW }, 279 { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index ec318bf434a6..1786574536b2 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -157,12 +157,16 @@ static int ipmi_release(struct inode *inode, struct file *file)
157{ 157{
158 struct ipmi_file_private *priv = file->private_data; 158 struct ipmi_file_private *priv = file->private_data;
159 int rv; 159 int rv;
160 struct ipmi_recv_msg *msg, *next;
160 161
161 rv = ipmi_destroy_user(priv->user); 162 rv = ipmi_destroy_user(priv->user);
162 if (rv) 163 if (rv)
163 return rv; 164 return rv;
164 165
165 /* FIXME - free the messages in the list. */ 166 list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
167 ipmi_free_recv_msg(msg);
                 168
166 kfree(priv); 170 kfree(priv);
167 171
168 return 0; 172 return 0;
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 6b65fa4e0c55..9bb592872532 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -1483,14 +1483,10 @@ static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1483 smi_msg->msgid = msgid; 1483 smi_msg->msgid = msgid;
1484} 1484}
1485 1485
1486static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers, 1486static struct ipmi_smi_msg *smi_add_send_msg(ipmi_smi_t intf,
1487 struct ipmi_smi_msg *smi_msg, int priority) 1487 struct ipmi_smi_msg *smi_msg,
1488 int priority)
1488{ 1489{
1489 int run_to_completion = intf->run_to_completion;
1490 unsigned long flags;
1491
1492 if (!run_to_completion)
1493 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1494 if (intf->curr_msg) { 1490 if (intf->curr_msg) {
1495 if (priority > 0) 1491 if (priority > 0)
1496 list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs); 1492 list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
@@ -1500,8 +1496,25 @@ static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
1500 } else { 1496 } else {
1501 intf->curr_msg = smi_msg; 1497 intf->curr_msg = smi_msg;
1502 } 1498 }
1503 if (!run_to_completion) 1499
1500 return smi_msg;
1501}
1502
1503
1504static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
1505 struct ipmi_smi_msg *smi_msg, int priority)
1506{
1507 int run_to_completion = intf->run_to_completion;
1508
1509 if (run_to_completion) {
1510 smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1511 } else {
1512 unsigned long flags;
1513
1514 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1515 smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1504 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 1516 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
1517 }
1505 1518
1506 if (smi_msg) 1519 if (smi_msg)
1507 handlers->sender(intf->send_info, smi_msg); 1520 handlers->sender(intf->send_info, smi_msg);
@@ -1985,7 +1998,9 @@ static int smi_ipmb_proc_show(struct seq_file *m, void *v)
1985 seq_printf(m, "%x", intf->channels[0].address); 1998 seq_printf(m, "%x", intf->channels[0].address);
1986 for (i = 1; i < IPMI_MAX_CHANNELS; i++) 1999 for (i = 1; i < IPMI_MAX_CHANNELS; i++)
1987 seq_printf(m, " %x", intf->channels[i].address); 2000 seq_printf(m, " %x", intf->channels[i].address);
1988 return seq_putc(m, '\n'); 2001 seq_putc(m, '\n');
2002
2003 return seq_has_overflowed(m);
1989} 2004}
1990 2005
1991static int smi_ipmb_proc_open(struct inode *inode, struct file *file) 2006static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
@@ -2004,9 +2019,11 @@ static int smi_version_proc_show(struct seq_file *m, void *v)
2004{ 2019{
2005 ipmi_smi_t intf = m->private; 2020 ipmi_smi_t intf = m->private;
2006 2021
2007 return seq_printf(m, "%u.%u\n", 2022 seq_printf(m, "%u.%u\n",
2008 ipmi_version_major(&intf->bmc->id), 2023 ipmi_version_major(&intf->bmc->id),
2009 ipmi_version_minor(&intf->bmc->id)); 2024 ipmi_version_minor(&intf->bmc->id));
2025
2026 return seq_has_overflowed(m);
2010} 2027}
2011 2028
2012static int smi_version_proc_open(struct inode *inode, struct file *file) 2029static int smi_version_proc_open(struct inode *inode, struct file *file)
@@ -2353,11 +2370,28 @@ static struct attribute *bmc_dev_attrs[] = {
2353 &dev_attr_additional_device_support.attr, 2370 &dev_attr_additional_device_support.attr,
2354 &dev_attr_manufacturer_id.attr, 2371 &dev_attr_manufacturer_id.attr,
2355 &dev_attr_product_id.attr, 2372 &dev_attr_product_id.attr,
2373 &dev_attr_aux_firmware_revision.attr,
2374 &dev_attr_guid.attr,
2356 NULL 2375 NULL
2357}; 2376};
2358 2377
2378static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
2379 struct attribute *attr, int idx)
2380{
2381 struct device *dev = kobj_to_dev(kobj);
2382 struct bmc_device *bmc = to_bmc_device(dev);
2383 umode_t mode = attr->mode;
2384
2385 if (attr == &dev_attr_aux_firmware_revision.attr)
2386 return bmc->id.aux_firmware_revision_set ? mode : 0;
2387 if (attr == &dev_attr_guid.attr)
2388 return bmc->guid_set ? mode : 0;
2389 return mode;
2390}
2391
2359static struct attribute_group bmc_dev_attr_group = { 2392static struct attribute_group bmc_dev_attr_group = {
2360 .attrs = bmc_dev_attrs, 2393 .attrs = bmc_dev_attrs,
2394 .is_visible = bmc_dev_attr_is_visible,
2361}; 2395};
2362 2396
2363static const struct attribute_group *bmc_dev_attr_groups[] = { 2397static const struct attribute_group *bmc_dev_attr_groups[] = {
@@ -2380,13 +2414,6 @@ cleanup_bmc_device(struct kref *ref)
2380{ 2414{
2381 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount); 2415 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
2382 2416
2383 if (bmc->id.aux_firmware_revision_set)
2384 device_remove_file(&bmc->pdev.dev,
2385 &dev_attr_aux_firmware_revision);
2386 if (bmc->guid_set)
2387 device_remove_file(&bmc->pdev.dev,
2388 &dev_attr_guid);
2389
2390 platform_device_unregister(&bmc->pdev); 2417 platform_device_unregister(&bmc->pdev);
2391} 2418}
2392 2419
@@ -2407,33 +2434,6 @@ static void ipmi_bmc_unregister(ipmi_smi_t intf)
2407 mutex_unlock(&ipmidriver_mutex); 2434 mutex_unlock(&ipmidriver_mutex);
2408} 2435}
2409 2436
2410static int create_bmc_files(struct bmc_device *bmc)
2411{
2412 int err;
2413
2414 if (bmc->id.aux_firmware_revision_set) {
2415 err = device_create_file(&bmc->pdev.dev,
2416 &dev_attr_aux_firmware_revision);
2417 if (err)
2418 goto out;
2419 }
2420 if (bmc->guid_set) {
2421 err = device_create_file(&bmc->pdev.dev,
2422 &dev_attr_guid);
2423 if (err)
2424 goto out_aux_firm;
2425 }
2426
2427 return 0;
2428
2429out_aux_firm:
2430 if (bmc->id.aux_firmware_revision_set)
2431 device_remove_file(&bmc->pdev.dev,
2432 &dev_attr_aux_firmware_revision);
2433out:
2434 return err;
2435}
2436
2437static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum) 2437static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
2438{ 2438{
2439 int rv; 2439 int rv;
@@ -2522,15 +2522,6 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
2522 return rv; 2522 return rv;
2523 } 2523 }
2524 2524
2525 rv = create_bmc_files(bmc);
2526 if (rv) {
2527 mutex_lock(&ipmidriver_mutex);
2528 platform_device_unregister(&bmc->pdev);
2529 mutex_unlock(&ipmidriver_mutex);
2530
2531 return rv;
2532 }
2533
2534 dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, " 2525 dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
2535 "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 2526 "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2536 bmc->id.manufacturer_id, 2527 bmc->id.manufacturer_id,
@@ -4212,7 +4203,6 @@ static void need_waiter(ipmi_smi_t intf)
4212static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0); 4203static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
4213static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0); 4204static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
4214 4205
4215/* FIXME - convert these to slabs. */
4216static void free_smi_msg(struct ipmi_smi_msg *msg) 4206static void free_smi_msg(struct ipmi_smi_msg *msg)
4217{ 4207{
4218 atomic_dec(&smi_msg_inuse_count); 4208 atomic_dec(&smi_msg_inuse_count);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 967b73aa4e66..f6646ed3047e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -321,6 +321,18 @@ static int try_smi_init(struct smi_info *smi);
321static void cleanup_one_si(struct smi_info *to_clean); 321static void cleanup_one_si(struct smi_info *to_clean);
322static void cleanup_ipmi_si(void); 322static void cleanup_ipmi_si(void);
323 323
324#ifdef DEBUG_TIMING
325void debug_timestamp(char *msg)
326{
327 struct timespec64 t;
328
329 getnstimeofday64(&t);
330 pr_debug("**%s: %lld.%9.9ld\n", msg, (long long) t.tv_sec, t.tv_nsec);
331}
332#else
333#define debug_timestamp(x)
334#endif
335
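debug_timestamp() above and the kipmid busy-wait helpers later in this diff move to timespec64 as part of the y2038 work; the structural difference, with field types simplified from the kernel's definitions:

	/* 32-bit kernels: tv_sec is a 32-bit long and overflows in January 2038 */
	struct timespec   { long      tv_sec; long tv_nsec; };

	/* timespec64: the seconds field is 64-bit on every architecture */
	struct timespec64 { long long tv_sec; long tv_nsec; };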
324static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); 336static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
325static int register_xaction_notifier(struct notifier_block *nb) 337static int register_xaction_notifier(struct notifier_block *nb)
326{ 338{
@@ -358,9 +370,6 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode)
358static enum si_sm_result start_next_msg(struct smi_info *smi_info) 370static enum si_sm_result start_next_msg(struct smi_info *smi_info)
359{ 371{
360 int rv; 372 int rv;
361#ifdef DEBUG_TIMING
362 struct timeval t;
363#endif
364 373
365 if (!smi_info->waiting_msg) { 374 if (!smi_info->waiting_msg) {
366 smi_info->curr_msg = NULL; 375 smi_info->curr_msg = NULL;
@@ -370,10 +379,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
370 379
371 smi_info->curr_msg = smi_info->waiting_msg; 380 smi_info->curr_msg = smi_info->waiting_msg;
372 smi_info->waiting_msg = NULL; 381 smi_info->waiting_msg = NULL;
373#ifdef DEBUG_TIMING 382 debug_timestamp("Start2");
374 do_gettimeofday(&t);
375 printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
376#endif
377 err = atomic_notifier_call_chain(&xaction_notifier_list, 383 err = atomic_notifier_call_chain(&xaction_notifier_list,
378 0, smi_info); 384 0, smi_info);
379 if (err & NOTIFY_STOP_MASK) { 385 if (err & NOTIFY_STOP_MASK) {
@@ -582,12 +588,8 @@ static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
582static void handle_transaction_done(struct smi_info *smi_info) 588static void handle_transaction_done(struct smi_info *smi_info)
583{ 589{
584 struct ipmi_smi_msg *msg; 590 struct ipmi_smi_msg *msg;
585#ifdef DEBUG_TIMING
586 struct timeval t;
587 591
588 do_gettimeofday(&t); 592 debug_timestamp("Done");
589 printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
590#endif
591 switch (smi_info->si_state) { 593 switch (smi_info->si_state) {
592 case SI_NORMAL: 594 case SI_NORMAL:
593 if (!smi_info->curr_msg) 595 if (!smi_info->curr_msg)
@@ -929,24 +931,15 @@ static void sender(void *send_info,
929 struct smi_info *smi_info = send_info; 931 struct smi_info *smi_info = send_info;
930 enum si_sm_result result; 932 enum si_sm_result result;
931 unsigned long flags; 933 unsigned long flags;
932#ifdef DEBUG_TIMING
933 struct timeval t;
934#endif
935
936 BUG_ON(smi_info->waiting_msg);
937 smi_info->waiting_msg = msg;
938 934
939#ifdef DEBUG_TIMING 935 debug_timestamp("Enqueue");
940 do_gettimeofday(&t);
941 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
942#endif
943 936
944 if (smi_info->run_to_completion) { 937 if (smi_info->run_to_completion) {
945 /* 938 /*
946 * If we are running to completion, start it and run 939 * If we are running to completion, start it and run
947 * transactions until everything is clear. 940 * transactions until everything is clear.
948 */ 941 */
949 smi_info->curr_msg = smi_info->waiting_msg; 942 smi_info->curr_msg = msg;
950 smi_info->waiting_msg = NULL; 943 smi_info->waiting_msg = NULL;
951 944
952 /* 945 /*
@@ -964,6 +957,15 @@ static void sender(void *send_info,
964 } 957 }
965 958
966 spin_lock_irqsave(&smi_info->si_lock, flags); 959 spin_lock_irqsave(&smi_info->si_lock, flags);
960 /*
961 * The following two lines don't need to be under the lock for
962 * the lock's sake, but they do need SMP memory barriers to
963 * avoid getting things out of order. We are already claiming
964 * the lock, anyway, so just do it under the lock to avoid the
965 * ordering problem.
966 */
967 BUG_ON(smi_info->waiting_msg);
968 smi_info->waiting_msg = msg;
967 check_start_timer_thread(smi_info); 969 check_start_timer_thread(smi_info);
968 spin_unlock_irqrestore(&smi_info->si_lock, flags); 970 spin_unlock_irqrestore(&smi_info->si_lock, flags);
969} 971}
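The relocated BUG_ON() and waiting_msg assignment trade explicit SMP memory barriers for the acquire/release ordering the spinlock already provides, as the new comment explains. A userspace analogue of the same publish-under-lock idiom (names hypothetical; pthreads standing in for the kernel spinlock):

#include <assert.h>
#include <pthread.h>
#include <stddef.h>

struct smi_like {
	pthread_mutex_t lock;
	void *waiting_msg;
};

/*
 * Publishing the message while holding the lock gives the store the
 * acquire/release ordering the consumer side relies on, so no
 * open-coded memory barriers are needed - the point of the hunk above.
 */
static void enqueue(struct smi_like *s, void *msg)
{
	pthread_mutex_lock(&s->lock);
	assert(s->waiting_msg == NULL);	/* mirrors the BUG_ON() */
	s->waiting_msg = msg;
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct smi_like s = { PTHREAD_MUTEX_INITIALIZER, NULL };
	int msg;

	enqueue(&s, &msg);
	return 0;
}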
@@ -989,18 +991,18 @@ static void set_run_to_completion(void *send_info, bool i_run_to_completion)
989 * we are spinning in kipmid looking for something and not delaying 991 * we are spinning in kipmid looking for something and not delaying
990 * between checks 992 * between checks
991 */ 993 */
992static inline void ipmi_si_set_not_busy(struct timespec *ts) 994static inline void ipmi_si_set_not_busy(struct timespec64 *ts)
993{ 995{
994 ts->tv_nsec = -1; 996 ts->tv_nsec = -1;
995} 997}
996static inline int ipmi_si_is_busy(struct timespec *ts) 998static inline int ipmi_si_is_busy(struct timespec64 *ts)
997{ 999{
998 return ts->tv_nsec != -1; 1000 return ts->tv_nsec != -1;
999} 1001}
1000 1002
1001static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result, 1003static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
1002 const struct smi_info *smi_info, 1004 const struct smi_info *smi_info,
1003 struct timespec *busy_until) 1005 struct timespec64 *busy_until)
1004{ 1006{
1005 unsigned int max_busy_us = 0; 1007 unsigned int max_busy_us = 0;
1006 1008
@@ -1009,12 +1011,13 @@ static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
1009 if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY) 1011 if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
1010 ipmi_si_set_not_busy(busy_until); 1012 ipmi_si_set_not_busy(busy_until);
1011 else if (!ipmi_si_is_busy(busy_until)) { 1013 else if (!ipmi_si_is_busy(busy_until)) {
1012 getnstimeofday(busy_until); 1014 getnstimeofday64(busy_until);
1013 timespec_add_ns(busy_until, max_busy_us*NSEC_PER_USEC); 1015 timespec64_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
1014 } else { 1016 } else {
1015 struct timespec now; 1017 struct timespec64 now;
1016 getnstimeofday(&now); 1018
1017 if (unlikely(timespec_compare(&now, busy_until) > 0)) { 1019 getnstimeofday64(&now);
1020 if (unlikely(timespec64_compare(&now, busy_until) > 0)) {
1018 ipmi_si_set_not_busy(busy_until); 1021 ipmi_si_set_not_busy(busy_until);
1019 return 0; 1022 return 0;
1020 } 1023 }
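These hunks move kipmid's busy-wait bookkeeping from struct timespec to the y2038-safe struct timespec64, keeping tv_nsec == -1 as the "not busy" sentinel. A self-contained userspace sketch of the same window logic, using POSIX struct timespec and clock_gettime() as assumed stand-ins for the kernel's timespec64 helpers:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC	1000000000L
#define NSEC_PER_USEC	1000L

/* tv_nsec == -1 marks "not busy", exactly as in the driver. */
static void set_not_busy(struct timespec *ts) { ts->tv_nsec = -1; }
static bool is_busy(const struct timespec *ts) { return ts->tv_nsec != -1; }

static void add_ns(struct timespec *ts, long ns)
{
	ts->tv_nsec += ns;
	while (ts->tv_nsec >= NSEC_PER_SEC) {
		ts->tv_nsec -= NSEC_PER_SEC;
		ts->tv_sec++;
	}
}

int main(void)
{
	struct timespec busy_until, now;
	long max_busy_us = 100;

	set_not_busy(&busy_until);
	if (!is_busy(&busy_until)) {
		/* Open a busy window of max_busy_us from now. */
		clock_gettime(CLOCK_MONOTONIC, &busy_until);
		add_ns(&busy_until, max_busy_us * NSEC_PER_USEC);
	}

	clock_gettime(CLOCK_MONOTONIC, &now);
	if (now.tv_sec > busy_until.tv_sec ||
	    (now.tv_sec == busy_until.tv_sec && now.tv_nsec > busy_until.tv_nsec))
		set_not_busy(&busy_until);	/* window expired: stop spinning */

	printf("still busy-waiting: %d\n", is_busy(&busy_until));
	return 0;
}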
@@ -1037,7 +1040,7 @@ static int ipmi_thread(void *data)
1037 struct smi_info *smi_info = data; 1040 struct smi_info *smi_info = data;
1038 unsigned long flags; 1041 unsigned long flags;
1039 enum si_sm_result smi_result; 1042 enum si_sm_result smi_result;
1040 struct timespec busy_until; 1043 struct timespec64 busy_until;
1041 1044
1042 ipmi_si_set_not_busy(&busy_until); 1045 ipmi_si_set_not_busy(&busy_until);
1043 set_user_nice(current, MAX_NICE); 1046 set_user_nice(current, MAX_NICE);
@@ -1128,15 +1131,10 @@ static void smi_timeout(unsigned long data)
1128 unsigned long jiffies_now; 1131 unsigned long jiffies_now;
1129 long time_diff; 1132 long time_diff;
1130 long timeout; 1133 long timeout;
1131#ifdef DEBUG_TIMING
1132 struct timeval t;
1133#endif
1134 1134
1135 spin_lock_irqsave(&(smi_info->si_lock), flags); 1135 spin_lock_irqsave(&(smi_info->si_lock), flags);
1136#ifdef DEBUG_TIMING 1136 debug_timestamp("Timer");
1137 do_gettimeofday(&t); 1137
1138 printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1139#endif
1140 jiffies_now = jiffies; 1138 jiffies_now = jiffies;
1141 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) 1139 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
1142 * SI_USEC_PER_JIFFY); 1140 * SI_USEC_PER_JIFFY);
@@ -1173,18 +1171,13 @@ static irqreturn_t si_irq_handler(int irq, void *data)
1173{ 1171{
1174 struct smi_info *smi_info = data; 1172 struct smi_info *smi_info = data;
1175 unsigned long flags; 1173 unsigned long flags;
1176#ifdef DEBUG_TIMING
1177 struct timeval t;
1178#endif
1179 1174
1180 spin_lock_irqsave(&(smi_info->si_lock), flags); 1175 spin_lock_irqsave(&(smi_info->si_lock), flags);
1181 1176
1182 smi_inc_stat(smi_info, interrupts); 1177 smi_inc_stat(smi_info, interrupts);
1183 1178
1184#ifdef DEBUG_TIMING 1179 debug_timestamp("Interrupt");
1185 do_gettimeofday(&t); 1180
1186 printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1187#endif
1188 smi_event_handler(smi_info, 0); 1181 smi_event_handler(smi_info, 0);
1189 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 1182 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1190 return IRQ_HANDLED; 1183 return IRQ_HANDLED;
@@ -2038,18 +2031,13 @@ static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
2038{ 2031{
2039 struct smi_info *smi_info = context; 2032 struct smi_info *smi_info = context;
2040 unsigned long flags; 2033 unsigned long flags;
2041#ifdef DEBUG_TIMING
2042 struct timeval t;
2043#endif
2044 2034
2045 spin_lock_irqsave(&(smi_info->si_lock), flags); 2035 spin_lock_irqsave(&(smi_info->si_lock), flags);
2046 2036
2047 smi_inc_stat(smi_info, interrupts); 2037 smi_inc_stat(smi_info, interrupts);
2048 2038
2049#ifdef DEBUG_TIMING 2039 debug_timestamp("ACPI_GPE");
2050 do_gettimeofday(&t); 2040
2051 printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
2052#endif
2053 smi_event_handler(smi_info, 0); 2041 smi_event_handler(smi_info, 0);
2054 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 2042 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
2055 2043
@@ -2071,7 +2059,6 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
2071 if (!info->irq) 2059 if (!info->irq)
2072 return 0; 2060 return 0;
2073 2061
2074 /* FIXME - is level triggered right? */
2075 status = acpi_install_gpe_handler(NULL, 2062 status = acpi_install_gpe_handler(NULL,
2076 info->irq, 2063 info->irq,
2077 ACPI_GPE_LEVEL_TRIGGERED, 2064 ACPI_GPE_LEVEL_TRIGGERED,
@@ -2998,7 +2985,9 @@ static int smi_type_proc_show(struct seq_file *m, void *v)
2998{ 2985{
2999 struct smi_info *smi = m->private; 2986 struct smi_info *smi = m->private;
3000 2987
3001 return seq_printf(m, "%s\n", si_to_str[smi->si_type]); 2988 seq_printf(m, "%s\n", si_to_str[smi->si_type]);
2989
2990 return seq_has_overflowed(m);
3002} 2991}
3003 2992
3004static int smi_type_proc_open(struct inode *inode, struct file *file) 2993static int smi_type_proc_open(struct inode *inode, struct file *file)
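seq_printf() stops returning a useful value in this series, so ->show() callbacks now print unconditionally and report truncation through seq_has_overflowed(), letting the seq_file core retry with a larger buffer. A hedged sketch of a conforming callback (the function and field are hypothetical; only the convention comes from the patch):

#include <linux/seq_file.h>

/*
 * Hypothetical ->show() using the new convention: print first, then
 * let seq_has_overflowed() report whether the buffer was too small so
 * the seq_file core can retry with a bigger one.
 */
static int example_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "state=%s\n", "ok");

	return seq_has_overflowed(m);
}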
@@ -3060,16 +3049,18 @@ static int smi_params_proc_show(struct seq_file *m, void *v)
3060{ 3049{
3061 struct smi_info *smi = m->private; 3050 struct smi_info *smi = m->private;
3062 3051
3063 return seq_printf(m, 3052 seq_printf(m,
3064 "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", 3053 "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
3065 si_to_str[smi->si_type], 3054 si_to_str[smi->si_type],
3066 addr_space_to_str[smi->io.addr_type], 3055 addr_space_to_str[smi->io.addr_type],
3067 smi->io.addr_data, 3056 smi->io.addr_data,
3068 smi->io.regspacing, 3057 smi->io.regspacing,
3069 smi->io.regsize, 3058 smi->io.regsize,
3070 smi->io.regshift, 3059 smi->io.regshift,
3071 smi->irq, 3060 smi->irq,
3072 smi->slave_addr); 3061 smi->slave_addr);
3062
3063 return seq_has_overflowed(m);
3073} 3064}
3074 3065
3075static int smi_params_proc_open(struct inode *inode, struct file *file) 3066static int smi_params_proc_open(struct inode *inode, struct file *file)
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 982b96323f82..f6e378dac5f5 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1097,8 +1097,6 @@ static int ssif_remove(struct i2c_client *client)
1097 if (!ssif_info) 1097 if (!ssif_info)
1098 return 0; 1098 return 0;
1099 1099
1100 i2c_set_clientdata(client, NULL);
1101
1102 /* 1100 /*
1103 * After this point, we won't deliver anything asynchronously 1101 * After this point, we won't deliver anything asynchronously
1104 * to the message handler. We can unregister ourselves. 1102 * to the message handler. We can unregister ourselves.
@@ -1198,7 +1196,9 @@ static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info)
1198 1196
1199static int smi_type_proc_show(struct seq_file *m, void *v) 1197static int smi_type_proc_show(struct seq_file *m, void *v)
1200{ 1198{
1201 return seq_puts(m, "ssif\n"); 1199 seq_puts(m, "ssif\n");
1200
1201 return seq_has_overflowed(m);
1202} 1202}
1203 1203
1204static int smi_type_proc_open(struct inode *inode, struct file *file) 1204static int smi_type_proc_open(struct inode *inode, struct file *file)
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 26afb56a8073..fae2dbbf5745 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1986,7 +1986,10 @@ static int virtcons_probe(struct virtio_device *vdev)
1986 bool multiport; 1986 bool multiport;
1987 bool early = early_put_chars != NULL; 1987 bool early = early_put_chars != NULL;
1988 1988
1989 if (!vdev->config->get) { 1989 /* We only need a config space if features are offered */
1990 if (!vdev->config->get &&
1991 (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)
1992 || virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT))) {
1990 dev_err(&vdev->dev, "%s failure: config access disabled\n", 1993 dev_err(&vdev->dev, "%s failure: config access disabled\n",
1991 __func__); 1994 __func__);
1992 return -EINVAL; 1995 return -EINVAL;
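With this check, a transport that lacks config-space access can still probe a console that negotiated no config-dependent features; code that does read config fields keeps guarding the access with virtio_has_feature(). A sketch of that guard using the standard virtio_cread() accessor (the helper function itself is hypothetical; the feature bit and struct virtio_console_config field are the usual virtio console API):

#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_console.h>

/*
 * Hypothetical helper: only touch config space when the device
 * offered the feature that defines the field, falling back to a
 * default otherwise.
 */
static u16 console_cols(struct virtio_device *vdev)
{
	u16 cols = 80;

	if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
		virtio_cread(vdev, struct virtio_console_config, cols, &cols);

	return cols;
}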
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 91f86131bb7a..0b474a04730f 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -102,12 +102,12 @@ config COMMON_CLK_AXI_CLKGEN
102 Support for the Analog Devices axi-clkgen pcore clock generator for Xilinx 102 Support for the Analog Devices axi-clkgen pcore clock generator for Xilinx
103 FPGAs. It is commonly used in Analog Devices' reference designs. 103 FPGAs. It is commonly used in Analog Devices' reference designs.
104 104
105config CLK_PPC_CORENET 105config CLK_QORIQ
106 bool "Clock driver for PowerPC corenet platforms" 106 bool "Clock driver for Freescale QorIQ platforms"
107 depends on PPC_E500MC && OF 107 depends on (PPC_E500MC || ARM) && OF
108 ---help--- 108 ---help---
109 This adds the clock driver support for Freescale PowerPC corenet 109 This adds the clock driver support for Freescale QorIQ platforms
110 platforms using the common clock framework. 110 using the common clock framework.
111 111
112config COMMON_CLK_XGENE 112config COMMON_CLK_XGENE
113 bool "Clock driver for APM XGene SoC" 113 bool "Clock driver for APM XGene SoC"
@@ -135,6 +135,14 @@ config COMMON_CLK_PXA
135 ---help--- 135 ---help---
136 Support for the Marvell PXA SoC. 136 Support for the Marvell PXA SoC.
137 137
138config COMMON_CLK_CDCE706
139 tristate "Clock driver for TI CDCE706 clock synthesizer"
140 depends on I2C
141 select REGMAP_I2C
142 select RATIONAL
143 ---help---
144 This driver supports the TI CDCE706 programmable 3-PLL clock synthesizer.
145
138source "drivers/clk/qcom/Kconfig" 146source "drivers/clk/qcom/Kconfig"
139 147
140endmenu 148endmenu
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index d5fba5bc6e1b..d478ceb69c5f 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -16,9 +16,11 @@ endif
16 16
17# hardware specific clock types 17# hardware specific clock types
18# please keep this section sorted lexicographically by file/directory path name 18# please keep this section sorted lexicographically by file/directory path name
19obj-$(CONFIG_MACH_ASM9260) += clk-asm9260.o
19obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o 20obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o
20obj-$(CONFIG_ARCH_AXXIA) += clk-axm5516.o 21obj-$(CONFIG_ARCH_AXXIA) += clk-axm5516.o
21obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o 22obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o
23obj-$(CONFIG_COMMON_CLK_CDCE706) += clk-cdce706.o
22obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o 24obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
23obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o 25obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
24obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o 26obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
@@ -30,7 +32,7 @@ obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o
30obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o 32obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
31obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o 33obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
32obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o 34obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o
33obj-$(CONFIG_CLK_PPC_CORENET) += clk-ppc-corenet.o 35obj-$(CONFIG_CLK_QORIQ) += clk-qoriq.o
34obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o 36obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o
35obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o 37obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o
36obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o 38obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index bbdb1b985c91..86c8a073dcc3 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -56,6 +56,8 @@ static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
56 56
57static long clk_programmable_determine_rate(struct clk_hw *hw, 57static long clk_programmable_determine_rate(struct clk_hw *hw,
58 unsigned long rate, 58 unsigned long rate,
59 unsigned long min_rate,
60 unsigned long max_rate,
59 unsigned long *best_parent_rate, 61 unsigned long *best_parent_rate,
60 struct clk_hw **best_parent_hw) 62 struct clk_hw **best_parent_hw)
61{ 63{
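This two-parameter widening of ->determine_rate recurs across every clock provider in the merge: the core now threads the consumer-imposed min_rate/max_rate range into the callback. A hedged sketch of a driver honoring the new arguments (hypothetical callback; the core pre-loads *best_parent_rate with the current parent rate):

#include <linux/clk-provider.h>
#include <linux/errno.h>
#include <linux/kernel.h>

/*
 * Hypothetical ->determine_rate() using the widened signature: clamp
 * the request into the consumer-imposed range before rounding. The
 * core pre-loads *best_parent_rate with the current parent rate, so a
 * driver that never reparents can leave it and *best_parent_hw alone.
 */
static long example_determine_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long min_rate,
				   unsigned long max_rate,
				   unsigned long *best_parent_rate,
				   struct clk_hw **best_parent_hw)
{
	unsigned long div;

	rate = clamp(rate, min_rate, max_rate);
	if (!rate)
		return -EINVAL;

	div = DIV_ROUND_UP(*best_parent_rate, rate);

	return *best_parent_rate / div;
}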
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index 1c06f6f3a8c5..05abae89262e 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -1032,6 +1032,8 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
1032} 1032}
1033 1033
1034static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate, 1034static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
1035 unsigned long min_rate,
1036 unsigned long max_rate,
1035 unsigned long *best_parent_rate, struct clk_hw **best_parent) 1037 unsigned long *best_parent_rate, struct clk_hw **best_parent)
1036{ 1038{
1037 struct kona_clk *bcm_clk = to_kona_clk(hw); 1039 struct kona_clk *bcm_clk = to_kona_clk(hw);
diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c
new file mode 100644
index 000000000000..88f4ff6916fe
--- /dev/null
+++ b/drivers/clk/clk-asm9260.c
@@ -0,0 +1,348 @@
1/*
2 * Copyright (c) 2014 Oleksij Rempel <linux@rempel-privat.de>.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/clk.h>
18#include <linux/clkdev.h>
19#include <linux/err.h>
20#include <linux/io.h>
21#include <linux/clk-provider.h>
22#include <linux/spinlock.h>
23#include <linux/of.h>
24#include <linux/of_address.h>
25#include <dt-bindings/clock/alphascale,asm9260.h>
26
27#define HW_AHBCLKCTRL0 0x0020
28#define HW_AHBCLKCTRL1 0x0030
29#define HW_SYSPLLCTRL 0x0100
30#define HW_MAINCLKSEL 0x0120
31#define HW_MAINCLKUEN 0x0124
32#define HW_UARTCLKSEL 0x0128
33#define HW_UARTCLKUEN 0x012c
34#define HW_I2S0CLKSEL 0x0130
35#define HW_I2S0CLKUEN 0x0134
36#define HW_I2S1CLKSEL 0x0138
37#define HW_I2S1CLKUEN 0x013c
38#define HW_WDTCLKSEL 0x0160
39#define HW_WDTCLKUEN 0x0164
40#define HW_CLKOUTCLKSEL 0x0170
41#define HW_CLKOUTCLKUEN 0x0174
42#define HW_CPUCLKDIV 0x017c
43#define HW_SYSAHBCLKDIV 0x0180
44#define HW_I2S0MCLKDIV 0x0190
45#define HW_I2S0SCLKDIV 0x0194
46#define HW_I2S1MCLKDIV 0x0188
47#define HW_I2S1SCLKDIV 0x018c
48#define HW_UART0CLKDIV 0x0198
49#define HW_UART1CLKDIV 0x019c
50#define HW_UART2CLKDIV 0x01a0
51#define HW_UART3CLKDIV 0x01a4
52#define HW_UART4CLKDIV 0x01a8
53#define HW_UART5CLKDIV 0x01ac
54#define HW_UART6CLKDIV 0x01b0
55#define HW_UART7CLKDIV 0x01b4
56#define HW_UART8CLKDIV 0x01b8
57#define HW_UART9CLKDIV 0x01bc
58#define HW_SPI0CLKDIV 0x01c0
59#define HW_SPI1CLKDIV 0x01c4
60#define HW_QUADSPICLKDIV 0x01c8
61#define HW_SSP0CLKDIV 0x01d0
62#define HW_NANDCLKDIV 0x01d4
63#define HW_TRACECLKDIV 0x01e0
64#define HW_CAMMCLKDIV 0x01e8
65#define HW_WDTCLKDIV 0x01ec
66#define HW_CLKOUTCLKDIV 0x01f4
67#define HW_MACCLKDIV 0x01f8
68#define HW_LCDCLKDIV 0x01fc
69#define HW_ADCANACLKDIV 0x0200
70
71static struct clk *clks[MAX_CLKS];
72static struct clk_onecell_data clk_data;
73static DEFINE_SPINLOCK(asm9260_clk_lock);
74
75struct asm9260_div_clk {
76 unsigned int idx;
77 const char *name;
78 const char *parent_name;
79 u32 reg;
80};
81
82struct asm9260_gate_data {
83 unsigned int idx;
84 const char *name;
85 const char *parent_name;
86 u32 reg;
87 u8 bit_idx;
88 unsigned long flags;
89};
90
91struct asm9260_mux_clock {
92 u8 mask;
93 u32 *table;
94 const char *name;
95 const char **parent_names;
96 u8 num_parents;
97 unsigned long offset;
98 unsigned long flags;
99};
100
101static void __iomem *base;
102
103static const struct asm9260_div_clk asm9260_div_clks[] __initconst = {
104 { CLKID_SYS_CPU, "cpu_div", "main_gate", HW_CPUCLKDIV },
105 { CLKID_SYS_AHB, "ahb_div", "cpu_div", HW_SYSAHBCLKDIV },
106
107	/* i2s has two dividers: one for the external mclk only and an
108	 * internal divider for all clocks. */
109 { CLKID_SYS_I2S0M, "i2s0m_div", "i2s0_mclk", HW_I2S0MCLKDIV },
110 { CLKID_SYS_I2S1M, "i2s1m_div", "i2s1_mclk", HW_I2S1MCLKDIV },
111 { CLKID_SYS_I2S0S, "i2s0s_div", "i2s0_gate", HW_I2S0SCLKDIV },
112 { CLKID_SYS_I2S1S, "i2s1s_div", "i2s0_gate", HW_I2S1SCLKDIV },
113
114 { CLKID_SYS_UART0, "uart0_div", "uart_gate", HW_UART0CLKDIV },
115 { CLKID_SYS_UART1, "uart1_div", "uart_gate", HW_UART1CLKDIV },
116 { CLKID_SYS_UART2, "uart2_div", "uart_gate", HW_UART2CLKDIV },
117 { CLKID_SYS_UART3, "uart3_div", "uart_gate", HW_UART3CLKDIV },
118 { CLKID_SYS_UART4, "uart4_div", "uart_gate", HW_UART4CLKDIV },
119 { CLKID_SYS_UART5, "uart5_div", "uart_gate", HW_UART5CLKDIV },
120 { CLKID_SYS_UART6, "uart6_div", "uart_gate", HW_UART6CLKDIV },
121 { CLKID_SYS_UART7, "uart7_div", "uart_gate", HW_UART7CLKDIV },
122 { CLKID_SYS_UART8, "uart8_div", "uart_gate", HW_UART8CLKDIV },
123 { CLKID_SYS_UART9, "uart9_div", "uart_gate", HW_UART9CLKDIV },
124
125 { CLKID_SYS_SPI0, "spi0_div", "main_gate", HW_SPI0CLKDIV },
126 { CLKID_SYS_SPI1, "spi1_div", "main_gate", HW_SPI1CLKDIV },
127 { CLKID_SYS_QUADSPI, "quadspi_div", "main_gate", HW_QUADSPICLKDIV },
128 { CLKID_SYS_SSP0, "ssp0_div", "main_gate", HW_SSP0CLKDIV },
129 { CLKID_SYS_NAND, "nand_div", "main_gate", HW_NANDCLKDIV },
130 { CLKID_SYS_TRACE, "trace_div", "main_gate", HW_TRACECLKDIV },
131 { CLKID_SYS_CAMM, "camm_div", "main_gate", HW_CAMMCLKDIV },
132 { CLKID_SYS_MAC, "mac_div", "main_gate", HW_MACCLKDIV },
133 { CLKID_SYS_LCD, "lcd_div", "main_gate", HW_LCDCLKDIV },
134 { CLKID_SYS_ADCANA, "adcana_div", "main_gate", HW_ADCANACLKDIV },
135
136 { CLKID_SYS_WDT, "wdt_div", "wdt_gate", HW_WDTCLKDIV },
137 { CLKID_SYS_CLKOUT, "clkout_div", "clkout_gate", HW_CLKOUTCLKDIV },
138};
139
140static const struct asm9260_gate_data asm9260_mux_gates[] __initconst = {
141 { 0, "main_gate", "main_mux", HW_MAINCLKUEN, 0 },
142 { 0, "uart_gate", "uart_mux", HW_UARTCLKUEN, 0 },
143 { 0, "i2s0_gate", "i2s0_mux", HW_I2S0CLKUEN, 0 },
144 { 0, "i2s1_gate", "i2s1_mux", HW_I2S1CLKUEN, 0 },
145 { 0, "wdt_gate", "wdt_mux", HW_WDTCLKUEN, 0 },
146 { 0, "clkout_gate", "clkout_mux", HW_CLKOUTCLKUEN, 0 },
147};
148static const struct asm9260_gate_data asm9260_ahb_gates[] __initconst = {
149 /* ahb gates */
150 { CLKID_AHB_ROM, "rom", "ahb_div",
151 HW_AHBCLKCTRL0, 1, CLK_IGNORE_UNUSED},
152 { CLKID_AHB_RAM, "ram", "ahb_div",
153 HW_AHBCLKCTRL0, 2, CLK_IGNORE_UNUSED},
154 { CLKID_AHB_GPIO, "gpio", "ahb_div",
155 HW_AHBCLKCTRL0, 4 },
156 { CLKID_AHB_MAC, "mac", "ahb_div",
157 HW_AHBCLKCTRL0, 5 },
158 { CLKID_AHB_EMI, "emi", "ahb_div",
159 HW_AHBCLKCTRL0, 6, CLK_IGNORE_UNUSED},
160 { CLKID_AHB_USB0, "usb0", "ahb_div",
161 HW_AHBCLKCTRL0, 7 },
162 { CLKID_AHB_USB1, "usb1", "ahb_div",
163 HW_AHBCLKCTRL0, 8 },
164 { CLKID_AHB_DMA0, "dma0", "ahb_div",
165 HW_AHBCLKCTRL0, 9 },
166 { CLKID_AHB_DMA1, "dma1", "ahb_div",
167 HW_AHBCLKCTRL0, 10 },
168 { CLKID_AHB_UART0, "uart0", "ahb_div",
169 HW_AHBCLKCTRL0, 11 },
170 { CLKID_AHB_UART1, "uart1", "ahb_div",
171 HW_AHBCLKCTRL0, 12 },
172 { CLKID_AHB_UART2, "uart2", "ahb_div",
173 HW_AHBCLKCTRL0, 13 },
174 { CLKID_AHB_UART3, "uart3", "ahb_div",
175 HW_AHBCLKCTRL0, 14 },
176 { CLKID_AHB_UART4, "uart4", "ahb_div",
177 HW_AHBCLKCTRL0, 15 },
178 { CLKID_AHB_UART5, "uart5", "ahb_div",
179 HW_AHBCLKCTRL0, 16 },
180 { CLKID_AHB_UART6, "uart6", "ahb_div",
181 HW_AHBCLKCTRL0, 17 },
182 { CLKID_AHB_UART7, "uart7", "ahb_div",
183 HW_AHBCLKCTRL0, 18 },
184 { CLKID_AHB_UART8, "uart8", "ahb_div",
185 HW_AHBCLKCTRL0, 19 },
186 { CLKID_AHB_UART9, "uart9", "ahb_div",
187 HW_AHBCLKCTRL0, 20 },
188 { CLKID_AHB_I2S0, "i2s0", "ahb_div",
189 HW_AHBCLKCTRL0, 21 },
190 { CLKID_AHB_I2C0, "i2c0", "ahb_div",
191 HW_AHBCLKCTRL0, 22 },
192 { CLKID_AHB_I2C1, "i2c1", "ahb_div",
193 HW_AHBCLKCTRL0, 23 },
194 { CLKID_AHB_SSP0, "ssp0", "ahb_div",
195 HW_AHBCLKCTRL0, 24 },
196 { CLKID_AHB_IOCONFIG, "ioconf", "ahb_div",
197 HW_AHBCLKCTRL0, 25 },
198 { CLKID_AHB_WDT, "wdt", "ahb_div",
199 HW_AHBCLKCTRL0, 26 },
200 { CLKID_AHB_CAN0, "can0", "ahb_div",
201 HW_AHBCLKCTRL0, 27 },
202 { CLKID_AHB_CAN1, "can1", "ahb_div",
203 HW_AHBCLKCTRL0, 28 },
204 { CLKID_AHB_MPWM, "mpwm", "ahb_div",
205 HW_AHBCLKCTRL0, 29 },
206 { CLKID_AHB_SPI0, "spi0", "ahb_div",
207 HW_AHBCLKCTRL0, 30 },
208 { CLKID_AHB_SPI1, "spi1", "ahb_div",
209 HW_AHBCLKCTRL0, 31 },
210
211 { CLKID_AHB_QEI, "qei", "ahb_div",
212 HW_AHBCLKCTRL1, 0 },
213 { CLKID_AHB_QUADSPI0, "quadspi0", "ahb_div",
214 HW_AHBCLKCTRL1, 1 },
215 { CLKID_AHB_CAMIF, "capmif", "ahb_div",
216 HW_AHBCLKCTRL1, 2 },
217 { CLKID_AHB_LCDIF, "lcdif", "ahb_div",
218 HW_AHBCLKCTRL1, 3 },
219 { CLKID_AHB_TIMER0, "timer0", "ahb_div",
220 HW_AHBCLKCTRL1, 4 },
221 { CLKID_AHB_TIMER1, "timer1", "ahb_div",
222 HW_AHBCLKCTRL1, 5 },
223 { CLKID_AHB_TIMER2, "timer2", "ahb_div",
224 HW_AHBCLKCTRL1, 6 },
225 { CLKID_AHB_TIMER3, "timer3", "ahb_div",
226 HW_AHBCLKCTRL1, 7 },
227 { CLKID_AHB_IRQ, "irq", "ahb_div",
228 HW_AHBCLKCTRL1, 8, CLK_IGNORE_UNUSED},
229 { CLKID_AHB_RTC, "rtc", "ahb_div",
230 HW_AHBCLKCTRL1, 9 },
231 { CLKID_AHB_NAND, "nand", "ahb_div",
232 HW_AHBCLKCTRL1, 10 },
233 { CLKID_AHB_ADC0, "adc0", "ahb_div",
234 HW_AHBCLKCTRL1, 11 },
235 { CLKID_AHB_LED, "led", "ahb_div",
236 HW_AHBCLKCTRL1, 12 },
237 { CLKID_AHB_DAC0, "dac0", "ahb_div",
238 HW_AHBCLKCTRL1, 13 },
239 { CLKID_AHB_LCD, "lcd", "ahb_div",
240 HW_AHBCLKCTRL1, 14 },
241 { CLKID_AHB_I2S1, "i2s1", "ahb_div",
242 HW_AHBCLKCTRL1, 15 },
243 { CLKID_AHB_MAC1, "mac1", "ahb_div",
244 HW_AHBCLKCTRL1, 16 },
245};
246
247static const char __initdata *main_mux_p[] = { NULL, NULL };
248static const char __initdata *i2s0_mux_p[] = { NULL, NULL, "i2s0m_div"};
249static const char __initdata *i2s1_mux_p[] = { NULL, NULL, "i2s1m_div"};
250static const char __initdata *clkout_mux_p[] = { NULL, NULL, "rtc"};
251static u32 three_mux_table[] = {0, 1, 3};
252
253static struct asm9260_mux_clock asm9260_mux_clks[] __initdata = {
254 { 1, three_mux_table, "main_mux", main_mux_p,
255 ARRAY_SIZE(main_mux_p), HW_MAINCLKSEL, },
256 { 1, three_mux_table, "uart_mux", main_mux_p,
257 ARRAY_SIZE(main_mux_p), HW_UARTCLKSEL, },
258 { 1, three_mux_table, "wdt_mux", main_mux_p,
259 ARRAY_SIZE(main_mux_p), HW_WDTCLKSEL, },
260 { 3, three_mux_table, "i2s0_mux", i2s0_mux_p,
261 ARRAY_SIZE(i2s0_mux_p), HW_I2S0CLKSEL, },
262 { 3, three_mux_table, "i2s1_mux", i2s1_mux_p,
263 ARRAY_SIZE(i2s1_mux_p), HW_I2S1CLKSEL, },
264 { 3, three_mux_table, "clkout_mux", clkout_mux_p,
265 ARRAY_SIZE(clkout_mux_p), HW_CLKOUTCLKSEL, },
266};
267
268static void __init asm9260_acc_init(struct device_node *np)
269{
270 struct clk *clk;
271 const char *ref_clk, *pll_clk = "pll";
272 u32 rate;
273 int n;
274 u32 accuracy = 0;
275
276 base = of_io_request_and_map(np, 0, np->name);
277 if (!base)
278 panic("%s: unable to map resource", np->name);
279
280 /* register pll */
281 rate = (ioread32(base + HW_SYSPLLCTRL) & 0xffff) * 1000000;
282
283 ref_clk = of_clk_get_parent_name(np, 0);
284 accuracy = clk_get_accuracy(__clk_lookup(ref_clk));
285 clk = clk_register_fixed_rate_with_accuracy(NULL, pll_clk,
286 ref_clk, 0, rate, accuracy);
287
288 if (IS_ERR(clk))
289 panic("%s: can't register REFCLK. Check DT!", np->name);
290
291 for (n = 0; n < ARRAY_SIZE(asm9260_mux_clks); n++) {
292 const struct asm9260_mux_clock *mc = &asm9260_mux_clks[n];
293
294 mc->parent_names[0] = ref_clk;
295 mc->parent_names[1] = pll_clk;
296 clk = clk_register_mux_table(NULL, mc->name, mc->parent_names,
297 mc->num_parents, mc->flags, base + mc->offset,
298 0, mc->mask, 0, mc->table, &asm9260_clk_lock);
299 }
300
301 /* clock mux gate cells */
302 for (n = 0; n < ARRAY_SIZE(asm9260_mux_gates); n++) {
303 const struct asm9260_gate_data *gd = &asm9260_mux_gates[n];
304
305 clk = clk_register_gate(NULL, gd->name,
306 gd->parent_name, gd->flags | CLK_SET_RATE_PARENT,
307 base + gd->reg, gd->bit_idx, 0, &asm9260_clk_lock);
308 }
309
310 /* clock div cells */
311 for (n = 0; n < ARRAY_SIZE(asm9260_div_clks); n++) {
312 const struct asm9260_div_clk *dc = &asm9260_div_clks[n];
313
314 clks[dc->idx] = clk_register_divider(NULL, dc->name,
315 dc->parent_name, CLK_SET_RATE_PARENT,
316 base + dc->reg, 0, 8, CLK_DIVIDER_ONE_BASED,
317 &asm9260_clk_lock);
318 }
319
320 /* clock ahb gate cells */
321 for (n = 0; n < ARRAY_SIZE(asm9260_ahb_gates); n++) {
322 const struct asm9260_gate_data *gd = &asm9260_ahb_gates[n];
323
324 clks[gd->idx] = clk_register_gate(NULL, gd->name,
325 gd->parent_name, gd->flags, base + gd->reg,
326 gd->bit_idx, 0, &asm9260_clk_lock);
327 }
328
329 /* check for errors on leaf clocks */
330 for (n = 0; n < MAX_CLKS; n++) {
331 if (!IS_ERR(clks[n]))
332 continue;
333
334 pr_err("%s: Unable to register leaf clock %d\n",
335 np->full_name, n);
336 goto fail;
337 }
338
339 /* register clk-provider */
340 clk_data.clks = clks;
341 clk_data.clk_num = MAX_CLKS;
342 of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
343 return;
344fail:
345 iounmap(base);
346}
347CLK_OF_DECLARE(asm9260_acc, "alphascale,asm9260-clock-controller",
348 asm9260_acc_init);
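Since the controller exports its leaf clocks through of_clk_add_provider() with the one-cell scheme, consumers reference them by the CLKID_* indices from dt-bindings/clock/alphascale,asm9260.h. A hedged sketch of a consumer driver picking up one of these clocks through the generic clk API (the device and probe function are hypothetical):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	/* Resolved through the node's "clocks" phandle plus CLKID_* cell. */
	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "clock rate: %lu Hz\n", clk_get_rate(clk));
	return 0;
}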
diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c
new file mode 100644
index 000000000000..c386ad25beb4
--- /dev/null
+++ b/drivers/clk/clk-cdce706.c
@@ -0,0 +1,700 @@
1/*
2 * TI CDCE706 programmable 3-PLL clock synthesizer driver
3 *
4 * Copyright (c) 2014 Cadence Design Systems Inc.
5 *
6 * Reference: http://www.ti.com/lit/ds/symlink/cdce706.pdf
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/clk-provider.h>
14#include <linux/delay.h>
15#include <linux/i2c.h>
16#include <linux/interrupt.h>
17#include <linux/mod_devicetable.h>
18#include <linux/module.h>
19#include <linux/of.h>
20#include <linux/rational.h>
21#include <linux/regmap.h>
22#include <linux/slab.h>
23
24#define CDCE706_CLKIN_CLOCK 10
25#define CDCE706_CLKIN_SOURCE 11
26#define CDCE706_PLL_M_LOW(pll) (1 + 3 * (pll))
27#define CDCE706_PLL_N_LOW(pll) (2 + 3 * (pll))
28#define CDCE706_PLL_HI(pll) (3 + 3 * (pll))
29#define CDCE706_PLL_MUX 3
30#define CDCE706_PLL_FVCO 6
31#define CDCE706_DIVIDER(div) (13 + (div))
32#define CDCE706_CLKOUT(out) (19 + (out))
33
34#define CDCE706_CLKIN_CLOCK_MASK 0x10
35#define CDCE706_CLKIN_SOURCE_SHIFT 6
36#define CDCE706_CLKIN_SOURCE_MASK 0xc0
37#define CDCE706_CLKIN_SOURCE_LVCMOS 0x40
38
39#define CDCE706_PLL_MUX_MASK(pll) (0x80 >> (pll))
40#define CDCE706_PLL_LOW_M_MASK 0xff
41#define CDCE706_PLL_LOW_N_MASK 0xff
42#define CDCE706_PLL_HI_M_MASK 0x1
43#define CDCE706_PLL_HI_N_MASK 0x1e
44#define CDCE706_PLL_HI_N_SHIFT 1
45#define CDCE706_PLL_M_MAX 0x1ff
46#define CDCE706_PLL_N_MAX 0xfff
47#define CDCE706_PLL_FVCO_MASK(pll) (0x80 >> (pll))
48#define CDCE706_PLL_FREQ_MIN 80000000
49#define CDCE706_PLL_FREQ_MAX 300000000
50#define CDCE706_PLL_FREQ_HI 180000000
51
52#define CDCE706_DIVIDER_PLL(div) (9 + (div) - ((div) > 2) - ((div) > 4))
53#define CDCE706_DIVIDER_PLL_SHIFT(div) ((div) < 2 ? 5 : 3 * ((div) & 1))
54#define CDCE706_DIVIDER_PLL_MASK(div) (0x7 << CDCE706_DIVIDER_PLL_SHIFT(div))
55#define CDCE706_DIVIDER_DIVIDER_MASK 0x7f
56#define CDCE706_DIVIDER_DIVIDER_MAX 0x7f
57
58#define CDCE706_CLKOUT_DIVIDER_MASK 0x7
59#define CDCE706_CLKOUT_ENABLE_MASK 0x8
60
61static struct regmap_config cdce706_regmap_config = {
62 .reg_bits = 8,
63 .val_bits = 8,
64 .val_format_endian = REGMAP_ENDIAN_NATIVE,
65};
66
67#define to_hw_data(phw) (container_of((phw), struct cdce706_hw_data, hw))
68
69struct cdce706_hw_data {
70 struct cdce706_dev_data *dev_data;
71 unsigned idx;
72 unsigned parent;
73 struct clk *clk;
74 struct clk_hw hw;
75 unsigned div;
76 unsigned mul;
77 unsigned mux;
78};
79
80struct cdce706_dev_data {
81 struct i2c_client *client;
82 struct regmap *regmap;
83 struct clk_onecell_data onecell;
84 struct clk *clks[6];
85 struct clk *clkin_clk[2];
86 const char *clkin_name[2];
87 struct cdce706_hw_data clkin[1];
88 struct cdce706_hw_data pll[3];
89 struct cdce706_hw_data divider[6];
90 struct cdce706_hw_data clkout[6];
91};
92
93static const char * const cdce706_source_name[] = {
94 "clk_in0", "clk_in1",
95};
96
97static const char *cdce706_clkin_name[] = {
98 "clk_in",
99};
100
101static const char * const cdce706_pll_name[] = {
102 "pll1", "pll2", "pll3",
103};
104
105static const char *cdce706_divider_parent_name[] = {
106 "clk_in", "pll1", "pll2", "pll2", "pll3",
107};
108
109static const char *cdce706_divider_name[] = {
110 "p0", "p1", "p2", "p3", "p4", "p5",
111};
112
113static const char * const cdce706_clkout_name[] = {
114 "clk_out0", "clk_out1", "clk_out2", "clk_out3", "clk_out4", "clk_out5",
115};
116
117static int cdce706_reg_read(struct cdce706_dev_data *dev_data, unsigned reg,
118 unsigned *val)
119{
120 int rc = regmap_read(dev_data->regmap, reg | 0x80, val);
121
122 if (rc < 0)
123 dev_err(&dev_data->client->dev, "error reading reg %u", reg);
124 return rc;
125}
126
127static int cdce706_reg_write(struct cdce706_dev_data *dev_data, unsigned reg,
128 unsigned val)
129{
130 int rc = regmap_write(dev_data->regmap, reg | 0x80, val);
131
132 if (rc < 0)
133 dev_err(&dev_data->client->dev, "error writing reg %u", reg);
134 return rc;
135}
136
137static int cdce706_reg_update(struct cdce706_dev_data *dev_data, unsigned reg,
138 unsigned mask, unsigned val)
139{
140 int rc = regmap_update_bits(dev_data->regmap, reg | 0x80, mask, val);
141
142 if (rc < 0)
143 dev_err(&dev_data->client->dev, "error updating reg %u", reg);
144 return rc;
145}
146
147static int cdce706_clkin_set_parent(struct clk_hw *hw, u8 index)
148{
149 struct cdce706_hw_data *hwd = to_hw_data(hw);
150
151 hwd->parent = index;
152 return 0;
153}
154
155static u8 cdce706_clkin_get_parent(struct clk_hw *hw)
156{
157 struct cdce706_hw_data *hwd = to_hw_data(hw);
158
159 return hwd->parent;
160}
161
162static const struct clk_ops cdce706_clkin_ops = {
163 .set_parent = cdce706_clkin_set_parent,
164 .get_parent = cdce706_clkin_get_parent,
165};
166
167static unsigned long cdce706_pll_recalc_rate(struct clk_hw *hw,
168 unsigned long parent_rate)
169{
170 struct cdce706_hw_data *hwd = to_hw_data(hw);
171
172 dev_dbg(&hwd->dev_data->client->dev,
173 "%s, pll: %d, mux: %d, mul: %u, div: %u\n",
174 __func__, hwd->idx, hwd->mux, hwd->mul, hwd->div);
175
176 if (!hwd->mux) {
177 if (hwd->div && hwd->mul) {
178 u64 res = (u64)parent_rate * hwd->mul;
179
180 do_div(res, hwd->div);
181 return res;
182 }
183 } else {
184 if (hwd->div)
185 return parent_rate / hwd->div;
186 }
187 return 0;
188}
189
190static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
191 unsigned long *parent_rate)
192{
193 struct cdce706_hw_data *hwd = to_hw_data(hw);
194 unsigned long mul, div;
195 u64 res;
196
197 dev_dbg(&hwd->dev_data->client->dev,
198 "%s, rate: %lu, parent_rate: %lu\n",
199 __func__, rate, *parent_rate);
200
201 rational_best_approximation(rate, *parent_rate,
202 CDCE706_PLL_N_MAX, CDCE706_PLL_M_MAX,
203 &mul, &div);
204 hwd->mul = mul;
205 hwd->div = div;
206
207 dev_dbg(&hwd->dev_data->client->dev,
208 "%s, pll: %d, mul: %lu, div: %lu\n",
209 __func__, hwd->idx, mul, div);
210
211 res = (u64)*parent_rate * hwd->mul;
212 do_div(res, hwd->div);
213 return res;
214}
215
216static int cdce706_pll_set_rate(struct clk_hw *hw, unsigned long rate,
217 unsigned long parent_rate)
218{
219 struct cdce706_hw_data *hwd = to_hw_data(hw);
220 unsigned long mul = hwd->mul, div = hwd->div;
221 int err;
222
223 dev_dbg(&hwd->dev_data->client->dev,
224 "%s, pll: %d, mul: %lu, div: %lu\n",
225 __func__, hwd->idx, mul, div);
226
227 err = cdce706_reg_update(hwd->dev_data,
228 CDCE706_PLL_HI(hwd->idx),
229 CDCE706_PLL_HI_M_MASK | CDCE706_PLL_HI_N_MASK,
230 ((div >> 8) & CDCE706_PLL_HI_M_MASK) |
231 ((mul >> (8 - CDCE706_PLL_HI_N_SHIFT)) &
232 CDCE706_PLL_HI_N_MASK));
233 if (err < 0)
234 return err;
235
236 err = cdce706_reg_write(hwd->dev_data,
237 CDCE706_PLL_M_LOW(hwd->idx),
238 div & CDCE706_PLL_LOW_M_MASK);
239 if (err < 0)
240 return err;
241
242 err = cdce706_reg_write(hwd->dev_data,
243 CDCE706_PLL_N_LOW(hwd->idx),
244 mul & CDCE706_PLL_LOW_N_MASK);
245 if (err < 0)
246 return err;
247
248 err = cdce706_reg_update(hwd->dev_data,
249 CDCE706_PLL_FVCO,
250 CDCE706_PLL_FVCO_MASK(hwd->idx),
251 rate > CDCE706_PLL_FREQ_HI ?
252 CDCE706_PLL_FVCO_MASK(hwd->idx) : 0);
253 return err;
254}
255
256static const struct clk_ops cdce706_pll_ops = {
257 .recalc_rate = cdce706_pll_recalc_rate,
258 .round_rate = cdce706_pll_round_rate,
259 .set_rate = cdce706_pll_set_rate,
260};
261
262static int cdce706_divider_set_parent(struct clk_hw *hw, u8 index)
263{
264 struct cdce706_hw_data *hwd = to_hw_data(hw);
265
266 if (hwd->parent == index)
267 return 0;
268 hwd->parent = index;
269 return cdce706_reg_update(hwd->dev_data,
270 CDCE706_DIVIDER_PLL(hwd->idx),
271 CDCE706_DIVIDER_PLL_MASK(hwd->idx),
272 index << CDCE706_DIVIDER_PLL_SHIFT(hwd->idx));
273}
274
275static u8 cdce706_divider_get_parent(struct clk_hw *hw)
276{
277 struct cdce706_hw_data *hwd = to_hw_data(hw);
278
279 return hwd->parent;
280}
281
282static unsigned long cdce706_divider_recalc_rate(struct clk_hw *hw,
283 unsigned long parent_rate)
284{
285 struct cdce706_hw_data *hwd = to_hw_data(hw);
286
287 dev_dbg(&hwd->dev_data->client->dev,
288 "%s, divider: %d, div: %u\n",
289 __func__, hwd->idx, hwd->div);
290 if (hwd->div)
291 return parent_rate / hwd->div;
292 return 0;
293}
294
295static long cdce706_divider_round_rate(struct clk_hw *hw, unsigned long rate,
296 unsigned long *parent_rate)
297{
298 struct cdce706_hw_data *hwd = to_hw_data(hw);
299 struct cdce706_dev_data *cdce = hwd->dev_data;
300 unsigned long mul, div;
301
302 dev_dbg(&hwd->dev_data->client->dev,
303 "%s, rate: %lu, parent_rate: %lu\n",
304 __func__, rate, *parent_rate);
305
306 rational_best_approximation(rate, *parent_rate,
307 1, CDCE706_DIVIDER_DIVIDER_MAX,
308 &mul, &div);
309 if (!mul)
310 div = CDCE706_DIVIDER_DIVIDER_MAX;
311
312 if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
313 unsigned long best_diff = rate;
314 unsigned long best_div = 0;
315 struct clk *gp_clk = cdce->clkin_clk[cdce->clkin[0].parent];
316 unsigned long gp_rate = gp_clk ? clk_get_rate(gp_clk) : 0;
317
318 for (div = CDCE706_PLL_FREQ_MIN / rate; best_diff &&
319 div <= CDCE706_PLL_FREQ_MAX / rate; ++div) {
320 unsigned long n, m;
321 unsigned long diff;
322 unsigned long div_rate;
323 u64 div_rate64;
324
325 if (rate * div < CDCE706_PLL_FREQ_MIN)
326 continue;
327
328 rational_best_approximation(rate * div, gp_rate,
329 CDCE706_PLL_N_MAX,
330 CDCE706_PLL_M_MAX,
331 &n, &m);
332 div_rate64 = (u64)gp_rate * n;
333 do_div(div_rate64, m);
334 do_div(div_rate64, div);
335 div_rate = div_rate64;
336 diff = max(div_rate, rate) - min(div_rate, rate);
337
338 if (diff < best_diff) {
339 best_diff = diff;
340 best_div = div;
341 dev_dbg(&hwd->dev_data->client->dev,
342 "%s, %lu * %lu / %lu / %lu = %lu\n",
343 __func__, gp_rate, n, m, div, div_rate);
344 }
345 }
346
347 div = best_div;
348
349 dev_dbg(&hwd->dev_data->client->dev,
350 "%s, altering parent rate: %lu -> %lu\n",
351 __func__, *parent_rate, rate * div);
352 *parent_rate = rate * div;
353 }
354 hwd->div = div;
355
356 dev_dbg(&hwd->dev_data->client->dev,
357 "%s, divider: %d, div: %lu\n",
358 __func__, hwd->idx, div);
359
360 return *parent_rate / div;
361}
362
363static int cdce706_divider_set_rate(struct clk_hw *hw, unsigned long rate,
364 unsigned long parent_rate)
365{
366 struct cdce706_hw_data *hwd = to_hw_data(hw);
367
368 dev_dbg(&hwd->dev_data->client->dev,
369 "%s, divider: %d, div: %u\n",
370 __func__, hwd->idx, hwd->div);
371
372 return cdce706_reg_update(hwd->dev_data,
373 CDCE706_DIVIDER(hwd->idx),
374 CDCE706_DIVIDER_DIVIDER_MASK,
375 hwd->div);
376}
377
378static const struct clk_ops cdce706_divider_ops = {
379 .set_parent = cdce706_divider_set_parent,
380 .get_parent = cdce706_divider_get_parent,
381 .recalc_rate = cdce706_divider_recalc_rate,
382 .round_rate = cdce706_divider_round_rate,
383 .set_rate = cdce706_divider_set_rate,
384};
385
386static int cdce706_clkout_prepare(struct clk_hw *hw)
387{
388 struct cdce706_hw_data *hwd = to_hw_data(hw);
389
390 return cdce706_reg_update(hwd->dev_data, CDCE706_CLKOUT(hwd->idx),
391 CDCE706_CLKOUT_ENABLE_MASK,
392 CDCE706_CLKOUT_ENABLE_MASK);
393}
394
395static void cdce706_clkout_unprepare(struct clk_hw *hw)
396{
397 struct cdce706_hw_data *hwd = to_hw_data(hw);
398
399 cdce706_reg_update(hwd->dev_data, CDCE706_CLKOUT(hwd->idx),
400 CDCE706_CLKOUT_ENABLE_MASK, 0);
401}
402
403static int cdce706_clkout_set_parent(struct clk_hw *hw, u8 index)
404{
405 struct cdce706_hw_data *hwd = to_hw_data(hw);
406
407 if (hwd->parent == index)
408 return 0;
409 hwd->parent = index;
410 return cdce706_reg_update(hwd->dev_data,
411 CDCE706_CLKOUT(hwd->idx),
412 CDCE706_CLKOUT_ENABLE_MASK, index);
413}
414
415static u8 cdce706_clkout_get_parent(struct clk_hw *hw)
416{
417 struct cdce706_hw_data *hwd = to_hw_data(hw);
418
419 return hwd->parent;
420}
421
422static unsigned long cdce706_clkout_recalc_rate(struct clk_hw *hw,
423 unsigned long parent_rate)
424{
425 return parent_rate;
426}
427
428static long cdce706_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
429 unsigned long *parent_rate)
430{
431 *parent_rate = rate;
432 return rate;
433}
434
435static int cdce706_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
436 unsigned long parent_rate)
437{
438 return 0;
439}
440
441static const struct clk_ops cdce706_clkout_ops = {
442 .prepare = cdce706_clkout_prepare,
443 .unprepare = cdce706_clkout_unprepare,
444 .set_parent = cdce706_clkout_set_parent,
445 .get_parent = cdce706_clkout_get_parent,
446 .recalc_rate = cdce706_clkout_recalc_rate,
447 .round_rate = cdce706_clkout_round_rate,
448 .set_rate = cdce706_clkout_set_rate,
449};
450
451static int cdce706_register_hw(struct cdce706_dev_data *cdce,
452 struct cdce706_hw_data *hw, unsigned num_hw,
453 const char * const *clk_names,
454 struct clk_init_data *init)
455{
456 unsigned i;
457
458 for (i = 0; i < num_hw; ++i, ++hw) {
459 init->name = clk_names[i];
460 hw->dev_data = cdce;
461 hw->idx = i;
462 hw->hw.init = init;
463 hw->clk = devm_clk_register(&cdce->client->dev,
464 &hw->hw);
465 if (IS_ERR(hw->clk)) {
466 dev_err(&cdce->client->dev, "Failed to register %s\n",
467 clk_names[i]);
468 return PTR_ERR(hw->clk);
469 }
470 }
471 return 0;
472}
473
474static int cdce706_register_clkin(struct cdce706_dev_data *cdce)
475{
476 struct clk_init_data init = {
477 .ops = &cdce706_clkin_ops,
478 .parent_names = cdce->clkin_name,
479 .num_parents = ARRAY_SIZE(cdce->clkin_name),
480 };
481 unsigned i;
482 int ret;
483 unsigned clock, source;
484
485 for (i = 0; i < ARRAY_SIZE(cdce->clkin_name); ++i) {
486 struct clk *parent = devm_clk_get(&cdce->client->dev,
487 cdce706_source_name[i]);
488
489 if (IS_ERR(parent)) {
490 cdce->clkin_name[i] = cdce706_source_name[i];
491 } else {
492 cdce->clkin_name[i] = __clk_get_name(parent);
493 cdce->clkin_clk[i] = parent;
494 }
495 }
496
497 ret = cdce706_reg_read(cdce, CDCE706_CLKIN_SOURCE, &source);
498 if (ret < 0)
499 return ret;
500 if ((source & CDCE706_CLKIN_SOURCE_MASK) ==
501 CDCE706_CLKIN_SOURCE_LVCMOS) {
502 ret = cdce706_reg_read(cdce, CDCE706_CLKIN_CLOCK, &clock);
503 if (ret < 0)
504 return ret;
505 cdce->clkin[0].parent = !!(clock & CDCE706_CLKIN_CLOCK_MASK);
506 }
507
508 ret = cdce706_register_hw(cdce, cdce->clkin,
509 ARRAY_SIZE(cdce->clkin),
510 cdce706_clkin_name, &init);
511 return ret;
512}
513
514static int cdce706_register_plls(struct cdce706_dev_data *cdce)
515{
516 struct clk_init_data init = {
517 .ops = &cdce706_pll_ops,
518 .parent_names = cdce706_clkin_name,
519 .num_parents = ARRAY_SIZE(cdce706_clkin_name),
520 };
521 unsigned i;
522 int ret;
523 unsigned mux;
524
525 ret = cdce706_reg_read(cdce, CDCE706_PLL_MUX, &mux);
526 if (ret < 0)
527 return ret;
528
529 for (i = 0; i < ARRAY_SIZE(cdce->pll); ++i) {
530 unsigned m, n, v;
531
532 ret = cdce706_reg_read(cdce, CDCE706_PLL_M_LOW(i), &m);
533 if (ret < 0)
534 return ret;
535 ret = cdce706_reg_read(cdce, CDCE706_PLL_N_LOW(i), &n);
536 if (ret < 0)
537 return ret;
538 ret = cdce706_reg_read(cdce, CDCE706_PLL_HI(i), &v);
539 if (ret < 0)
540 return ret;
541 cdce->pll[i].div = m | ((v & CDCE706_PLL_HI_M_MASK) << 8);
542 cdce->pll[i].mul = n | ((v & CDCE706_PLL_HI_N_MASK) <<
543 (8 - CDCE706_PLL_HI_N_SHIFT));
544 cdce->pll[i].mux = mux & CDCE706_PLL_MUX_MASK(i);
545 dev_dbg(&cdce->client->dev,
546 "%s: i: %u, div: %u, mul: %u, mux: %d\n", __func__, i,
547 cdce->pll[i].div, cdce->pll[i].mul, cdce->pll[i].mux);
548 }
549
550 ret = cdce706_register_hw(cdce, cdce->pll,
551 ARRAY_SIZE(cdce->pll),
552 cdce706_pll_name, &init);
553 return ret;
554}
555
556static int cdce706_register_dividers(struct cdce706_dev_data *cdce)
557{
558 struct clk_init_data init = {
559 .ops = &cdce706_divider_ops,
560 .parent_names = cdce706_divider_parent_name,
561 .num_parents = ARRAY_SIZE(cdce706_divider_parent_name),
562 .flags = CLK_SET_RATE_PARENT,
563 };
564 unsigned i;
565 int ret;
566
567 for (i = 0; i < ARRAY_SIZE(cdce->divider); ++i) {
568 unsigned val;
569
570 ret = cdce706_reg_read(cdce, CDCE706_DIVIDER_PLL(i), &val);
571 if (ret < 0)
572 return ret;
573 cdce->divider[i].parent =
574 (val & CDCE706_DIVIDER_PLL_MASK(i)) >>
575 CDCE706_DIVIDER_PLL_SHIFT(i);
576
577 ret = cdce706_reg_read(cdce, CDCE706_DIVIDER(i), &val);
578 if (ret < 0)
579 return ret;
580 cdce->divider[i].div = val & CDCE706_DIVIDER_DIVIDER_MASK;
581 dev_dbg(&cdce->client->dev,
582 "%s: i: %u, parent: %u, div: %u\n", __func__, i,
583 cdce->divider[i].parent, cdce->divider[i].div);
584 }
585
586 ret = cdce706_register_hw(cdce, cdce->divider,
587 ARRAY_SIZE(cdce->divider),
588 cdce706_divider_name, &init);
589 return ret;
590}
591
592static int cdce706_register_clkouts(struct cdce706_dev_data *cdce)
593{
594 struct clk_init_data init = {
595 .ops = &cdce706_clkout_ops,
596 .parent_names = cdce706_divider_name,
597 .num_parents = ARRAY_SIZE(cdce706_divider_name),
598 .flags = CLK_SET_RATE_PARENT,
599 };
600 unsigned i;
601 int ret;
602
603 for (i = 0; i < ARRAY_SIZE(cdce->clkout); ++i) {
604 unsigned val;
605
606 ret = cdce706_reg_read(cdce, CDCE706_CLKOUT(i), &val);
607 if (ret < 0)
608 return ret;
609 cdce->clkout[i].parent = val & CDCE706_CLKOUT_DIVIDER_MASK;
610 dev_dbg(&cdce->client->dev,
611 "%s: i: %u, parent: %u\n", __func__, i,
612 cdce->clkout[i].parent);
613 }
614
615 ret = cdce706_register_hw(cdce, cdce->clkout,
616 ARRAY_SIZE(cdce->clkout),
617 cdce706_clkout_name, &init);
618 for (i = 0; i < ARRAY_SIZE(cdce->clkout); ++i)
619 cdce->clks[i] = cdce->clkout[i].clk;
620
621 return ret;
622}
623
624static int cdce706_probe(struct i2c_client *client,
625 const struct i2c_device_id *id)
626{
627 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
628 struct cdce706_dev_data *cdce;
629 int ret;
630
631 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
632 return -EIO;
633
634 cdce = devm_kzalloc(&client->dev, sizeof(*cdce), GFP_KERNEL);
635 if (!cdce)
636 return -ENOMEM;
637
638 cdce->client = client;
639 cdce->regmap = devm_regmap_init_i2c(client, &cdce706_regmap_config);
640 if (IS_ERR(cdce->regmap)) {
641 dev_err(&client->dev, "Failed to initialize regmap\n");
642 return -EINVAL;
643 }
644
645 i2c_set_clientdata(client, cdce);
646
647 ret = cdce706_register_clkin(cdce);
648 if (ret < 0)
649 return ret;
650 ret = cdce706_register_plls(cdce);
651 if (ret < 0)
652 return ret;
653 ret = cdce706_register_dividers(cdce);
654 if (ret < 0)
655 return ret;
656 ret = cdce706_register_clkouts(cdce);
657 if (ret < 0)
658 return ret;
659 cdce->onecell.clks = cdce->clks;
660 cdce->onecell.clk_num = ARRAY_SIZE(cdce->clks);
661 ret = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get,
662 &cdce->onecell);
663
664 return ret;
665}
666
667static int cdce706_remove(struct i2c_client *client)
668{
669 return 0;
670}
671
672
673#ifdef CONFIG_OF
674static const struct of_device_id cdce706_dt_match[] = {
675 { .compatible = "ti,cdce706" },
676 { },
677};
678MODULE_DEVICE_TABLE(of, cdce706_dt_match);
679#endif
680
681static const struct i2c_device_id cdce706_id[] = {
682 { "cdce706", 0 },
683 { }
684};
685MODULE_DEVICE_TABLE(i2c, cdce706_id);
686
687static struct i2c_driver cdce706_i2c_driver = {
688 .driver = {
689 .name = "cdce706",
690 .of_match_table = of_match_ptr(cdce706_dt_match),
691 },
692 .probe = cdce706_probe,
693 .remove = cdce706_remove,
694 .id_table = cdce706_id,
695};
696module_i2c_driver(cdce706_i2c_driver);
697
698MODULE_AUTHOR("Max Filippov <jcmvbkbc@gmail.com>");
699MODULE_DESCRIPTION("TI CDCE 706 clock synthesizer driver");
700MODULE_LICENSE("GPL");
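The PLL paths above lean on lib/rational's rational_best_approximation() to pick an N/M pair within the 12-bit/9-bit hardware limits. A self-contained userspace illustration of the same search done by brute force, using the mask limits from the driver (the parent and target rates are made-up example values):

#include <stdio.h>
#include <stdint.h>

/*
 * Brute-force stand-in for the kernel's rational_best_approximation():
 * find mul/div with mul <= N_MAX and div <= M_MAX that minimizes the
 * error of parent * mul / div against the target, matching the PLL
 * limits in the driver above.
 */
#define N_MAX 0xfff
#define M_MAX 0x1ff

int main(void)
{
	uint64_t parent = 27000000, target = 196608000;
	uint64_t best_err = target, best_mul = 1, best_div = 1;

	for (uint64_t div = 1; div <= M_MAX; div++) {
		uint64_t mul = (target * div + parent / 2) / parent;
		if (mul == 0 || mul > N_MAX)
			continue;
		uint64_t rate = parent * mul / div;
		uint64_t err = rate > target ? rate - target : target - rate;
		if (err < best_err) {
			best_err = err;
			best_mul = mul;
			best_div = div;
		}
	}
	printf("N=%llu M=%llu -> %llu Hz (err %llu)\n",
	       (unsigned long long)best_mul, (unsigned long long)best_div,
	       (unsigned long long)(parent * best_mul / best_div),
	       (unsigned long long)best_err);
	return 0;
}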
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 4386697236a7..956b7e54fa1c 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -27,7 +27,7 @@ static u8 clk_composite_get_parent(struct clk_hw *hw)
27 const struct clk_ops *mux_ops = composite->mux_ops; 27 const struct clk_ops *mux_ops = composite->mux_ops;
28 struct clk_hw *mux_hw = composite->mux_hw; 28 struct clk_hw *mux_hw = composite->mux_hw;
29 29
30 mux_hw->clk = hw->clk; 30 __clk_hw_set_clk(mux_hw, hw);
31 31
32 return mux_ops->get_parent(mux_hw); 32 return mux_ops->get_parent(mux_hw);
33} 33}
@@ -38,7 +38,7 @@ static int clk_composite_set_parent(struct clk_hw *hw, u8 index)
38 const struct clk_ops *mux_ops = composite->mux_ops; 38 const struct clk_ops *mux_ops = composite->mux_ops;
39 struct clk_hw *mux_hw = composite->mux_hw; 39 struct clk_hw *mux_hw = composite->mux_hw;
40 40
41 mux_hw->clk = hw->clk; 41 __clk_hw_set_clk(mux_hw, hw);
42 42
43 return mux_ops->set_parent(mux_hw, index); 43 return mux_ops->set_parent(mux_hw, index);
44} 44}
@@ -50,12 +50,14 @@ static unsigned long clk_composite_recalc_rate(struct clk_hw *hw,
50 const struct clk_ops *rate_ops = composite->rate_ops; 50 const struct clk_ops *rate_ops = composite->rate_ops;
51 struct clk_hw *rate_hw = composite->rate_hw; 51 struct clk_hw *rate_hw = composite->rate_hw;
52 52
53 rate_hw->clk = hw->clk; 53 __clk_hw_set_clk(rate_hw, hw);
54 54
55 return rate_ops->recalc_rate(rate_hw, parent_rate); 55 return rate_ops->recalc_rate(rate_hw, parent_rate);
56} 56}
57 57
58static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate, 58static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
59 unsigned long min_rate,
60 unsigned long max_rate,
59 unsigned long *best_parent_rate, 61 unsigned long *best_parent_rate,
60 struct clk_hw **best_parent_p) 62 struct clk_hw **best_parent_p)
61{ 63{
@@ -72,8 +74,10 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
72 int i; 74 int i;
73 75
74 if (rate_hw && rate_ops && rate_ops->determine_rate) { 76 if (rate_hw && rate_ops && rate_ops->determine_rate) {
75 rate_hw->clk = hw->clk; 77 __clk_hw_set_clk(rate_hw, hw);
76 return rate_ops->determine_rate(rate_hw, rate, best_parent_rate, 78 return rate_ops->determine_rate(rate_hw, rate, min_rate,
79 max_rate,
80 best_parent_rate,
77 best_parent_p); 81 best_parent_p);
78 } else if (rate_hw && rate_ops && rate_ops->round_rate && 82 } else if (rate_hw && rate_ops && rate_ops->round_rate &&
79 mux_hw && mux_ops && mux_ops->set_parent) { 83 mux_hw && mux_ops && mux_ops->set_parent) {
@@ -116,8 +120,9 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
116 120
117 return best_rate; 121 return best_rate;
118 } else if (mux_hw && mux_ops && mux_ops->determine_rate) { 122 } else if (mux_hw && mux_ops && mux_ops->determine_rate) {
119 mux_hw->clk = hw->clk; 123 __clk_hw_set_clk(mux_hw, hw);
120 return mux_ops->determine_rate(mux_hw, rate, best_parent_rate, 124 return mux_ops->determine_rate(mux_hw, rate, min_rate,
125 max_rate, best_parent_rate,
121 best_parent_p); 126 best_parent_p);
122 } else { 127 } else {
123 pr_err("clk: clk_composite_determine_rate function called, but no mux or rate callback set!\n"); 128 pr_err("clk: clk_composite_determine_rate function called, but no mux or rate callback set!\n");
@@ -132,7 +137,7 @@ static long clk_composite_round_rate(struct clk_hw *hw, unsigned long rate,
132 const struct clk_ops *rate_ops = composite->rate_ops; 137 const struct clk_ops *rate_ops = composite->rate_ops;
133 struct clk_hw *rate_hw = composite->rate_hw; 138 struct clk_hw *rate_hw = composite->rate_hw;
134 139
135 rate_hw->clk = hw->clk; 140 __clk_hw_set_clk(rate_hw, hw);
136 141
137 return rate_ops->round_rate(rate_hw, rate, prate); 142 return rate_ops->round_rate(rate_hw, rate, prate);
138} 143}
@@ -144,7 +149,7 @@ static int clk_composite_set_rate(struct clk_hw *hw, unsigned long rate,
144 const struct clk_ops *rate_ops = composite->rate_ops; 149 const struct clk_ops *rate_ops = composite->rate_ops;
145 struct clk_hw *rate_hw = composite->rate_hw; 150 struct clk_hw *rate_hw = composite->rate_hw;
146 151
147 rate_hw->clk = hw->clk; 152 __clk_hw_set_clk(rate_hw, hw);
148 153
149 return rate_ops->set_rate(rate_hw, rate, parent_rate); 154 return rate_ops->set_rate(rate_hw, rate, parent_rate);
150} 155}
@@ -155,7 +160,7 @@ static int clk_composite_is_enabled(struct clk_hw *hw)
155 const struct clk_ops *gate_ops = composite->gate_ops; 160 const struct clk_ops *gate_ops = composite->gate_ops;
156 struct clk_hw *gate_hw = composite->gate_hw; 161 struct clk_hw *gate_hw = composite->gate_hw;
157 162
158 gate_hw->clk = hw->clk; 163 __clk_hw_set_clk(gate_hw, hw);
159 164
160 return gate_ops->is_enabled(gate_hw); 165 return gate_ops->is_enabled(gate_hw);
161} 166}
@@ -166,7 +171,7 @@ static int clk_composite_enable(struct clk_hw *hw)
166 const struct clk_ops *gate_ops = composite->gate_ops; 171 const struct clk_ops *gate_ops = composite->gate_ops;
167 struct clk_hw *gate_hw = composite->gate_hw; 172 struct clk_hw *gate_hw = composite->gate_hw;
168 173
169 gate_hw->clk = hw->clk; 174 __clk_hw_set_clk(gate_hw, hw);
170 175
171 return gate_ops->enable(gate_hw); 176 return gate_ops->enable(gate_hw);
172} 177}
@@ -177,7 +182,7 @@ static void clk_composite_disable(struct clk_hw *hw)
177 const struct clk_ops *gate_ops = composite->gate_ops; 182 const struct clk_ops *gate_ops = composite->gate_ops;
178 struct clk_hw *gate_hw = composite->gate_hw; 183 struct clk_hw *gate_hw = composite->gate_hw;
179 184
180 gate_hw->clk = hw->clk; 185 __clk_hw_set_clk(gate_hw, hw);
181 186
182 gate_ops->disable(gate_hw); 187 gate_ops->disable(gate_hw);
183} 188}
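Every hunk in this file swaps a bare clk-pointer copy for __clk_hw_set_clk(). A sketch of the helper, assuming the definition added to drivers/clk/clk.h in this series: with the new per-user clk handles, a borrowed mux/rate/gate clk_hw needs the owning clock's core pointer as well as its clk pointer, which the old "mux_hw->clk = hw->clk" assignment no longer covers.

/*
 * Assumed definition from this series' drivers/clk/clk.h: lend the
 * composite wrapper's clk *and* core pointers to the delegate clk_hw
 * before calling through its ops.
 */
static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
{
	dst->clk = src->clk;
	dst->core = src->core;
}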
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index c0a842b335c5..db7f8bce7467 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -30,7 +30,7 @@
30 30
31#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw) 31#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
32 32
33#define div_mask(d) ((1 << ((d)->width)) - 1) 33#define div_mask(width) ((1 << (width)) - 1)
34 34
35static unsigned int _get_table_maxdiv(const struct clk_div_table *table) 35static unsigned int _get_table_maxdiv(const struct clk_div_table *table)
36{ 36{
@@ -54,15 +54,16 @@ static unsigned int _get_table_mindiv(const struct clk_div_table *table)
54 return mindiv; 54 return mindiv;
55} 55}
56 56
57static unsigned int _get_maxdiv(struct clk_divider *divider) 57static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width,
58 unsigned long flags)
58{ 59{
59 if (divider->flags & CLK_DIVIDER_ONE_BASED) 60 if (flags & CLK_DIVIDER_ONE_BASED)
60 return div_mask(divider); 61 return div_mask(width);
61 if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) 62 if (flags & CLK_DIVIDER_POWER_OF_TWO)
62 return 1 << div_mask(divider); 63 return 1 << div_mask(width);
63 if (divider->table) 64 if (table)
64 return _get_table_maxdiv(divider->table); 65 return _get_table_maxdiv(table);
65 return div_mask(divider) + 1; 66 return div_mask(width) + 1;
66} 67}
67 68
68static unsigned int _get_table_div(const struct clk_div_table *table, 69static unsigned int _get_table_div(const struct clk_div_table *table,
@@ -76,14 +77,15 @@ static unsigned int _get_table_div(const struct clk_div_table *table,
76 return 0; 77 return 0;
77} 78}
78 79
79static unsigned int _get_div(struct clk_divider *divider, unsigned int val) 80static unsigned int _get_div(const struct clk_div_table *table,
81 unsigned int val, unsigned long flags)
80{ 82{
81 if (divider->flags & CLK_DIVIDER_ONE_BASED) 83 if (flags & CLK_DIVIDER_ONE_BASED)
82 return val; 84 return val;
83 if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) 85 if (flags & CLK_DIVIDER_POWER_OF_TWO)
84 return 1 << val; 86 return 1 << val;
85 if (divider->table) 87 if (table)
86 return _get_table_div(divider->table, val); 88 return _get_table_div(table, val);
87 return val + 1; 89 return val + 1;
88} 90}
89 91
@@ -98,29 +100,28 @@ static unsigned int _get_table_val(const struct clk_div_table *table,
98 return 0; 100 return 0;
99} 101}
100 102
101static unsigned int _get_val(struct clk_divider *divider, unsigned int div) 103static unsigned int _get_val(const struct clk_div_table *table,
104 unsigned int div, unsigned long flags)
102{ 105{
103 if (divider->flags & CLK_DIVIDER_ONE_BASED) 106 if (flags & CLK_DIVIDER_ONE_BASED)
104 return div; 107 return div;
105 if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) 108 if (flags & CLK_DIVIDER_POWER_OF_TWO)
106 return __ffs(div); 109 return __ffs(div);
107 if (divider->table) 110 if (table)
108 return _get_table_val(divider->table, div); 111 return _get_table_val(table, div);
109 return div - 1; 112 return div - 1;
110} 113}
111 114
112static unsigned long clk_divider_recalc_rate(struct clk_hw *hw, 115unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
113 unsigned long parent_rate) 116 unsigned int val,
117 const struct clk_div_table *table,
118 unsigned long flags)
114{ 119{
115 struct clk_divider *divider = to_clk_divider(hw); 120 unsigned int div;
116 unsigned int div, val;
117 121
118 val = clk_readl(divider->reg) >> divider->shift; 122 div = _get_div(table, val, flags);
119 val &= div_mask(divider);
120
121 div = _get_div(divider, val);
122 if (!div) { 123 if (!div) {
123 WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO), 124 WARN(!(flags & CLK_DIVIDER_ALLOW_ZERO),
124 "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n", 125 "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
125 __clk_get_name(hw->clk)); 126 __clk_get_name(hw->clk));
126 return parent_rate; 127 return parent_rate;
@@ -128,6 +129,20 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
128 129
129 return DIV_ROUND_UP(parent_rate, div); 130 return DIV_ROUND_UP(parent_rate, div);
130} 131}
132EXPORT_SYMBOL_GPL(divider_recalc_rate);
133
134static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
135 unsigned long parent_rate)
136{
137 struct clk_divider *divider = to_clk_divider(hw);
138 unsigned int val;
139
140 val = clk_readl(divider->reg) >> divider->shift;
141 val &= div_mask(divider->width);
142
143 return divider_recalc_rate(hw, parent_rate, val, divider->table,
144 divider->flags);
145}
131 146
132/* 147/*
133 * The reverse of DIV_ROUND_UP: The maximum number which 148 * The reverse of DIV_ROUND_UP: The maximum number which
@@ -146,12 +161,13 @@ static bool _is_valid_table_div(const struct clk_div_table *table,
146 return false; 161 return false;
147} 162}
148 163
149static bool _is_valid_div(struct clk_divider *divider, unsigned int div) 164static bool _is_valid_div(const struct clk_div_table *table, unsigned int div,
165 unsigned long flags)
150{ 166{
151 if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) 167 if (flags & CLK_DIVIDER_POWER_OF_TWO)
152 return is_power_of_2(div); 168 return is_power_of_2(div);
153 if (divider->table) 169 if (table)
154 return _is_valid_table_div(divider->table, div); 170 return _is_valid_table_div(table, div);
155 return true; 171 return true;
156} 172}
157 173
@@ -191,71 +207,76 @@ static int _round_down_table(const struct clk_div_table *table, int div)
191 return down; 207 return down;
192} 208}
193 209
194static int _div_round_up(struct clk_divider *divider, 210static int _div_round_up(const struct clk_div_table *table,
195 unsigned long parent_rate, unsigned long rate) 211 unsigned long parent_rate, unsigned long rate,
212 unsigned long flags)
196{ 213{
197 int div = DIV_ROUND_UP(parent_rate, rate); 214 int div = DIV_ROUND_UP(parent_rate, rate);
198 215
199 if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) 216 if (flags & CLK_DIVIDER_POWER_OF_TWO)
200 div = __roundup_pow_of_two(div); 217 div = __roundup_pow_of_two(div);
201 if (divider->table) 218 if (table)
202 div = _round_up_table(divider->table, div); 219 div = _round_up_table(table, div);
203 220
204 return div; 221 return div;
205} 222}
206 223
207static int _div_round_closest(struct clk_divider *divider, 224static int _div_round_closest(const struct clk_div_table *table,
208 unsigned long parent_rate, unsigned long rate) 225 unsigned long parent_rate, unsigned long rate,
226 unsigned long flags)
209{ 227{
210 int up, down, div; 228 int up, down, div;
211 229
212 up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate); 230 up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate);
213 231
214 if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) { 232 if (flags & CLK_DIVIDER_POWER_OF_TWO) {
215 up = __roundup_pow_of_two(div); 233 up = __roundup_pow_of_two(div);
216 down = __rounddown_pow_of_two(div); 234 down = __rounddown_pow_of_two(div);
217 } else if (divider->table) { 235 } else if (table) {
218 up = _round_up_table(divider->table, div); 236 up = _round_up_table(table, div);
219 down = _round_down_table(divider->table, div); 237 down = _round_down_table(table, div);
220 } 238 }
221 239
222 return (up - div) <= (div - down) ? up : down; 240 return (up - div) <= (div - down) ? up : down;
223} 241}
224 242
225static int _div_round(struct clk_divider *divider, unsigned long parent_rate, 243static int _div_round(const struct clk_div_table *table,
226 unsigned long rate) 244 unsigned long parent_rate, unsigned long rate,
245 unsigned long flags)
227{ 246{
228 if (divider->flags & CLK_DIVIDER_ROUND_CLOSEST) 247 if (flags & CLK_DIVIDER_ROUND_CLOSEST)
229 return _div_round_closest(divider, parent_rate, rate); 248 return _div_round_closest(table, parent_rate, rate, flags);
230 249
231 return _div_round_up(divider, parent_rate, rate); 250 return _div_round_up(table, parent_rate, rate, flags);
232} 251}
233 252
234static bool _is_best_div(struct clk_divider *divider, 253static bool _is_best_div(unsigned long rate, unsigned long now,
235 unsigned long rate, unsigned long now, unsigned long best) 254 unsigned long best, unsigned long flags)
236{ 255{
237 if (divider->flags & CLK_DIVIDER_ROUND_CLOSEST) 256 if (flags & CLK_DIVIDER_ROUND_CLOSEST)
238 return abs(rate - now) < abs(rate - best); 257 return abs(rate - now) < abs(rate - best);
239 258
240 return now <= rate && now > best; 259 return now <= rate && now > best;
241} 260}
242 261
243static int _next_div(struct clk_divider *divider, int div) 262static int _next_div(const struct clk_div_table *table, int div,
263 unsigned long flags)
244{ 264{
245 div++; 265 div++;
246 266
247 if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) 267 if (flags & CLK_DIVIDER_POWER_OF_TWO)
248 return __roundup_pow_of_two(div); 268 return __roundup_pow_of_two(div);
249 if (divider->table) 269 if (table)
250 return _round_up_table(divider->table, div); 270 return _round_up_table(table, div);
251 271
252 return div; 272 return div;
253} 273}
254 274
255static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, 275static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
256 unsigned long *best_parent_rate) 276 unsigned long *best_parent_rate,
277 const struct clk_div_table *table, u8 width,
278 unsigned long flags)
257{ 279{
258 struct clk_divider *divider = to_clk_divider(hw);
259 int i, bestdiv = 0; 280 int i, bestdiv = 0;
260 unsigned long parent_rate, best = 0, now, maxdiv; 281 unsigned long parent_rate, best = 0, now, maxdiv;
261 unsigned long parent_rate_saved = *best_parent_rate; 282 unsigned long parent_rate_saved = *best_parent_rate;
@@ -263,19 +284,11 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
263 if (!rate) 284 if (!rate)
264 rate = 1; 285 rate = 1;
265 286
266 /* if read only, just return current value */ 287 maxdiv = _get_maxdiv(table, width, flags);
267 if (divider->flags & CLK_DIVIDER_READ_ONLY) {
268 bestdiv = readl(divider->reg) >> divider->shift;
269 bestdiv &= div_mask(divider);
270 bestdiv = _get_div(divider, bestdiv);
271 return bestdiv;
272 }
273
274 maxdiv = _get_maxdiv(divider);
275 288
276 if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) { 289 if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
277 parent_rate = *best_parent_rate; 290 parent_rate = *best_parent_rate;
278 bestdiv = _div_round(divider, parent_rate, rate); 291 bestdiv = _div_round(table, parent_rate, rate, flags);
279 bestdiv = bestdiv == 0 ? 1 : bestdiv; 292 bestdiv = bestdiv == 0 ? 1 : bestdiv;
280 bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv; 293 bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
281 return bestdiv; 294 return bestdiv;
@@ -287,8 +300,8 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
287 */ 300 */
288 maxdiv = min(ULONG_MAX / rate, maxdiv); 301 maxdiv = min(ULONG_MAX / rate, maxdiv);
289 302
290 for (i = 1; i <= maxdiv; i = _next_div(divider, i)) { 303 for (i = 1; i <= maxdiv; i = _next_div(table, i, flags)) {
291 if (!_is_valid_div(divider, i)) 304 if (!_is_valid_div(table, i, flags))
292 continue; 305 continue;
293 if (rate * i == parent_rate_saved) { 306 if (rate * i == parent_rate_saved) {
294 /* 307 /*
@@ -302,7 +315,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
302 parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 315 parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
303 MULT_ROUND_UP(rate, i)); 316 MULT_ROUND_UP(rate, i));
304 now = DIV_ROUND_UP(parent_rate, i); 317 now = DIV_ROUND_UP(parent_rate, i);
305 if (_is_best_div(divider, rate, now, best)) { 318 if (_is_best_div(rate, now, best, flags)) {
306 bestdiv = i; 319 bestdiv = i;
307 best = now; 320 best = now;
308 *best_parent_rate = parent_rate; 321 *best_parent_rate = parent_rate;
@@ -310,48 +323,79 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
310 } 323 }
311 324
312 if (!bestdiv) { 325 if (!bestdiv) {
313 bestdiv = _get_maxdiv(divider); 326 bestdiv = _get_maxdiv(table, width, flags);
314 *best_parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 1); 327 *best_parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 1);
315 } 328 }
316 329
317 return bestdiv; 330 return bestdiv;
318} 331}
319 332
320static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, 333long divider_round_rate(struct clk_hw *hw, unsigned long rate,
321 unsigned long *prate) 334 unsigned long *prate, const struct clk_div_table *table,
335 u8 width, unsigned long flags)
322{ 336{
323 int div; 337 int div;
324 div = clk_divider_bestdiv(hw, rate, prate); 338
339 div = clk_divider_bestdiv(hw, rate, prate, table, width, flags);
325 340
326 return DIV_ROUND_UP(*prate, div); 341 return DIV_ROUND_UP(*prate, div);
327} 342}
343EXPORT_SYMBOL_GPL(divider_round_rate);
328 344
329static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, 345static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
330 unsigned long parent_rate) 346 unsigned long *prate)
331{ 347{
332 struct clk_divider *divider = to_clk_divider(hw); 348 struct clk_divider *divider = to_clk_divider(hw);
349 int bestdiv;
350
351 /* if read only, just return current value */
352 if (divider->flags & CLK_DIVIDER_READ_ONLY) {
353 bestdiv = readl(divider->reg) >> divider->shift;
354 bestdiv &= div_mask(divider->width);
355 bestdiv = _get_div(divider->table, bestdiv, divider->flags);
356 return bestdiv;
357 }
358
359 return divider_round_rate(hw, rate, prate, divider->table,
360 divider->width, divider->flags);
361}
362
363int divider_get_val(unsigned long rate, unsigned long parent_rate,
364 const struct clk_div_table *table, u8 width,
365 unsigned long flags)
366{
333 unsigned int div, value; 367 unsigned int div, value;
334 unsigned long flags = 0;
335 u32 val;
336 368
337 div = DIV_ROUND_UP(parent_rate, rate); 369 div = DIV_ROUND_UP(parent_rate, rate);
338 370
339 if (!_is_valid_div(divider, div)) 371 if (!_is_valid_div(table, div, flags))
340 return -EINVAL; 372 return -EINVAL;
341 373
342 value = _get_val(divider, div); 374 value = _get_val(table, div, flags);
375
376 return min_t(unsigned int, value, div_mask(width));
377}
378EXPORT_SYMBOL_GPL(divider_get_val);
379
380static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
381 unsigned long parent_rate)
382{
383 struct clk_divider *divider = to_clk_divider(hw);
384 unsigned int value;
385 unsigned long flags = 0;
386 u32 val;
343 387
344 if (value > div_mask(divider)) 388 value = divider_get_val(rate, parent_rate, divider->table,
345 value = div_mask(divider); 389 divider->width, divider->flags);
346 390
347 if (divider->lock) 391 if (divider->lock)
348 spin_lock_irqsave(divider->lock, flags); 392 spin_lock_irqsave(divider->lock, flags);
349 393
350 if (divider->flags & CLK_DIVIDER_HIWORD_MASK) { 394 if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
351 val = div_mask(divider) << (divider->shift + 16); 395 val = div_mask(divider->width) << (divider->shift + 16);
352 } else { 396 } else {
353 val = clk_readl(divider->reg); 397 val = clk_readl(divider->reg);
354 val &= ~(div_mask(divider) << divider->shift); 398 val &= ~(div_mask(divider->width) << divider->shift);
355 } 399 }
356 val |= value << divider->shift; 400 val |= value << divider->shift;
357 clk_writel(val, divider->reg); 401 clk_writel(val, divider->reg);
@@ -463,3 +507,19 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
463 width, clk_divider_flags, table, lock); 507 width, clk_divider_flags, table, lock);
464} 508}
465EXPORT_SYMBOL_GPL(clk_register_divider_table); 509EXPORT_SYMBOL_GPL(clk_register_divider_table);
510
511void clk_unregister_divider(struct clk *clk)
512{
513 struct clk_divider *div;
514 struct clk_hw *hw;
515
516 hw = __clk_get_hw(clk);
517 if (!hw)
518 return;
519
520 div = to_clk_divider(hw);
521
522 clk_unregister(clk);
523 kfree(div);
524}
525EXPORT_SYMBOL_GPL(clk_unregister_divider);
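
The clk-divider.c changes above factor the divisor math out of struct clk_divider so that the newly exported divider_recalc_rate(), divider_round_rate() and divider_get_val() can be reused by drivers whose divider field lives in a composite or vendor-specific register. A minimal sketch of such reuse follows; struct foo_div, to_foo_div() and the register layout are hypothetical, only the exported helpers and their signatures come from the patch:

struct foo_div {
        struct clk_hw hw;
        void __iomem *reg;
        u8 shift;
        u8 width;
        const struct clk_div_table *table;
        unsigned long flags;
};
#define to_foo_div(_hw) container_of(_hw, struct foo_div, hw)

static unsigned long foo_div_recalc_rate(struct clk_hw *hw,
                                         unsigned long parent_rate)
{
        struct foo_div *fd = to_foo_div(hw);
        unsigned int val = (readl(fd->reg) >> fd->shift) &
                           ((1 << fd->width) - 1);

        return divider_recalc_rate(hw, parent_rate, val, fd->table,
                                   fd->flags);
}

static int foo_div_set_rate(struct clk_hw *hw, unsigned long rate,
                            unsigned long parent_rate)
{
        struct foo_div *fd = to_foo_div(hw);
        int value = divider_get_val(rate, parent_rate, fd->table,
                                    fd->width, fd->flags);

        if (value < 0)
                return value;

        /* read-modify-write of the divider field only */
        writel((readl(fd->reg) & ~(((1 << fd->width) - 1) << fd->shift)) |
               (value << fd->shift), fd->reg);

        return 0;
}

A matching .round_rate would delegate to divider_round_rate() in the same way, passing the driver's own table, width and flags.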
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 51fd87fb7ba6..3f0e4200cb5d 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -128,7 +128,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
128 struct clk_init_data init; 128 struct clk_init_data init;
129 129
130 if (clk_gate_flags & CLK_GATE_HIWORD_MASK) { 130 if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
131 if (bit_idx > 16) { 131 if (bit_idx > 15) {
132 pr_err("gate bit exceeds LOWORD field\n"); 132 pr_err("gate bit exceeds LOWORD field\n");
133 return ERR_PTR(-EINVAL); 133 return ERR_PTR(-EINVAL);
134 } 134 }
@@ -162,3 +162,19 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
162 return clk; 162 return clk;
163} 163}
164EXPORT_SYMBOL_GPL(clk_register_gate); 164EXPORT_SYMBOL_GPL(clk_register_gate);
165
166void clk_unregister_gate(struct clk *clk)
167{
168 struct clk_gate *gate;
169 struct clk_hw *hw;
170
171 hw = __clk_get_hw(clk);
172 if (!hw)
173 return;
174
175 gate = to_clk_gate(hw);
176
177 clk_unregister(clk);
178 kfree(gate);
179}
180EXPORT_SYMBOL_GPL(clk_unregister_gate);
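
The one-line clk-gate.c change above is an off-by-one fix: with CLK_GATE_HIWORD_MASK the upper 16 bits of the register act as a write-enable mask for the lower 16, so a valid gate bit must sit in bits 0-15; bit_idx 16 would shift its mask bit past the top of the 32-bit register. A simplified sketch of the hiword write pattern, mirroring what clk_gate_endisable() does (ignoring CLK_GATE_SET_TO_DISABLE for brevity):

static void hiword_gate_write(void __iomem *reg, u8 bit_idx, bool enable)
{
        u32 val = BIT(bit_idx + 16);    /* write-enable for bit_idx */

        if (enable)
                val |= BIT(bit_idx);    /* new value of the gate bit */

        /* hardware leaves all bits without a set mask bit untouched */
        writel(val, reg);
}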
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 6e1ecf94bf58..69a094c3783d 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -177,3 +177,19 @@ struct clk *clk_register_mux(struct device *dev, const char *name,
177 NULL, lock); 177 NULL, lock);
178} 178}
179EXPORT_SYMBOL_GPL(clk_register_mux); 179EXPORT_SYMBOL_GPL(clk_register_mux);
180
181void clk_unregister_mux(struct clk *clk)
182{
183 struct clk_mux *mux;
184 struct clk_hw *hw;
185
186 hw = __clk_get_hw(clk);
187 if (!hw)
188 return;
189
190 mux = to_clk_mux(hw);
191
192 clk_unregister(clk);
193 kfree(mux);
194}
195EXPORT_SYMBOL_GPL(clk_unregister_mux);
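
clk_unregister_divider(), clk_unregister_gate() and clk_unregister_mux() all follow the same pattern: recover the wrapper struct from the hw pointer, unregister the clk, then free the wrapper that clk_register_*() allocated and that a plain clk_unregister() would leak. Usage is symmetric with registration; a sketch with hypothetical names and error handling kept minimal:

static int foo_setup_mux(struct foo_priv *priv)
{
        priv->mux = clk_register_mux(NULL, "foo_mux", foo_parents,
                                     ARRAY_SIZE(foo_parents), 0,
                                     priv->base + FOO_MUX_REG, 0, 2, 0,
                                     &priv->lock);
        return PTR_ERR_OR_ZERO(priv->mux);
}

static void foo_teardown_mux(struct foo_priv *priv)
{
        /* frees the struct clk_mux too; plain clk_unregister()
         * would leave it behind */
        clk_unregister_mux(priv->mux);
}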
diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-qoriq.c
index 0a47d6f49cd6..cda90a971e39 100644
--- a/drivers/clk/clk-ppc-corenet.c
+++ b/drivers/clk/clk-qoriq.c
@@ -5,8 +5,11 @@
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 * 7 *
8 * clock driver for Freescale PowerPC corenet SoCs. 8 * clock driver for Freescale QorIQ SoCs.
9 */ 9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
10#include <linux/clk-provider.h> 13#include <linux/clk-provider.h>
11#include <linux/io.h> 14#include <linux/io.h>
12#include <linux/kernel.h> 15#include <linux/kernel.h>
@@ -19,6 +22,7 @@
19struct cmux_clk { 22struct cmux_clk {
20 struct clk_hw hw; 23 struct clk_hw hw;
21 void __iomem *reg; 24 void __iomem *reg;
25 unsigned int clk_per_pll;
22 u32 flags; 26 u32 flags;
23}; 27};
24 28
@@ -27,14 +31,12 @@ struct cmux_clk {
27#define CLKSEL_ADJUST BIT(0) 31#define CLKSEL_ADJUST BIT(0)
28#define to_cmux_clk(p) container_of(p, struct cmux_clk, hw) 32#define to_cmux_clk(p) container_of(p, struct cmux_clk, hw)
29 33
30static unsigned int clocks_per_pll;
31
32static int cmux_set_parent(struct clk_hw *hw, u8 idx) 34static int cmux_set_parent(struct clk_hw *hw, u8 idx)
33{ 35{
34 struct cmux_clk *clk = to_cmux_clk(hw); 36 struct cmux_clk *clk = to_cmux_clk(hw);
35 u32 clksel; 37 u32 clksel;
36 38
37 clksel = ((idx / clocks_per_pll) << 2) + idx % clocks_per_pll; 39 clksel = ((idx / clk->clk_per_pll) << 2) + idx % clk->clk_per_pll;
38 if (clk->flags & CLKSEL_ADJUST) 40 if (clk->flags & CLKSEL_ADJUST)
39 clksel += 8; 41 clksel += 8;
40 clksel = (clksel & 0xf) << CLKSEL_SHIFT; 42 clksel = (clksel & 0xf) << CLKSEL_SHIFT;
@@ -52,12 +54,12 @@ static u8 cmux_get_parent(struct clk_hw *hw)
52 clksel = (clksel >> CLKSEL_SHIFT) & 0xf; 54 clksel = (clksel >> CLKSEL_SHIFT) & 0xf;
53 if (clk->flags & CLKSEL_ADJUST) 55 if (clk->flags & CLKSEL_ADJUST)
54 clksel -= 8; 56 clksel -= 8;
55 clksel = (clksel >> 2) * clocks_per_pll + clksel % 4; 57 clksel = (clksel >> 2) * clk->clk_per_pll + clksel % 4;
56 58
57 return clksel; 59 return clksel;
58} 60}
59 61
60const struct clk_ops cmux_ops = { 62static const struct clk_ops cmux_ops = {
61 .get_parent = cmux_get_parent, 63 .get_parent = cmux_get_parent,
62 .set_parent = cmux_set_parent, 64 .set_parent = cmux_set_parent,
63}; 65};
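
The cmux hunks above replace the file-global clocks_per_pll with a per-mux clk_per_pll (looked up further down by counting the parent PLL's clock-output-names through the clocks phandle), so clockgen blocks whose PLLs have different output counts can coexist. The CLKSEL field packs the PLL number into bits [3:2] and the PLL output into bits [1:0]; an illustrative round-trip of the mapping used by cmux_set_parent()/cmux_get_parent():

/* Parent index <-> CLKSEL encoding, as in the hunks above. */
static u32 idx_to_clksel(unsigned int idx, unsigned int clk_per_pll)
{
        return ((idx / clk_per_pll) << 2) + idx % clk_per_pll;
}

static unsigned int clksel_to_idx(u32 clksel, unsigned int clk_per_pll)
{
        return (clksel >> 2) * clk_per_pll + clksel % 4;
}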
@@ -72,6 +74,7 @@ static void __init core_mux_init(struct device_node *np)
72 u32 offset; 74 u32 offset;
73 const char *clk_name; 75 const char *clk_name;
74 const char **parent_names; 76 const char **parent_names;
77 struct of_phandle_args clkspec;
75 78
76 rc = of_property_read_u32(np, "reg", &offset); 79 rc = of_property_read_u32(np, "reg", &offset);
77 if (rc) { 80 if (rc) {
@@ -85,32 +88,40 @@ static void __init core_mux_init(struct device_node *np)
85 pr_err("%s: get clock count error\n", np->name); 88 pr_err("%s: get clock count error\n", np->name);
86 return; 89 return;
87 } 90 }
88 parent_names = kzalloc((sizeof(char *) * count), GFP_KERNEL); 91 parent_names = kcalloc(count, sizeof(char *), GFP_KERNEL);
89 if (!parent_names) { 92 if (!parent_names)
90 pr_err("%s: could not allocate parent_names\n", __func__);
91 return; 93 return;
92 }
93 94
94 for (i = 0; i < count; i++) 95 for (i = 0; i < count; i++)
95 parent_names[i] = of_clk_get_parent_name(np, i); 96 parent_names[i] = of_clk_get_parent_name(np, i);
96 97
97 cmux_clk = kzalloc(sizeof(struct cmux_clk), GFP_KERNEL); 98 cmux_clk = kzalloc(sizeof(*cmux_clk), GFP_KERNEL);
98 if (!cmux_clk) { 99 if (!cmux_clk)
99 pr_err("%s: could not allocate cmux_clk\n", __func__);
100 goto err_name; 100 goto err_name;
101 } 101
102 cmux_clk->reg = of_iomap(np, 0); 102 cmux_clk->reg = of_iomap(np, 0);
103 if (!cmux_clk->reg) { 103 if (!cmux_clk->reg) {
104 pr_err("%s: could not map register\n", __func__); 104 pr_err("%s: could not map register\n", __func__);
105 goto err_clk; 105 goto err_clk;
106 } 106 }
107 107
108 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0,
109 &clkspec);
110 if (rc) {
111 pr_err("%s: parse clock node error\n", __func__);
112 goto err_clk;
113 }
114
115 cmux_clk->clk_per_pll = of_property_count_strings(clkspec.np,
116 "clock-output-names");
117 of_node_put(clkspec.np);
118
108 node = of_find_compatible_node(NULL, NULL, "fsl,p4080-clockgen"); 119 node = of_find_compatible_node(NULL, NULL, "fsl,p4080-clockgen");
109 if (node && (offset >= 0x80)) 120 if (node && (offset >= 0x80))
110 cmux_clk->flags = CLKSEL_ADJUST; 121 cmux_clk->flags = CLKSEL_ADJUST;
111 122
112 rc = of_property_read_string_index(np, "clock-output-names", 123 rc = of_property_read_string_index(np, "clock-output-names",
113 0, &clk_name); 124 0, &clk_name);
114 if (rc) { 125 if (rc) {
115 pr_err("%s: read clock names error\n", np->name); 126 pr_err("%s: read clock names error\n", np->name);
116 goto err_clk; 127 goto err_clk;
@@ -132,7 +143,7 @@ static void __init core_mux_init(struct device_node *np)
132 rc = of_clk_add_provider(np, of_clk_src_simple_get, clk); 143 rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
133 if (rc) { 144 if (rc) {
134 pr_err("Could not register clock provider for node:%s\n", 145 pr_err("Could not register clock provider for node:%s\n",
135 np->name); 146 np->name);
136 goto err_clk; 147 goto err_clk;
137 } 148 }
138 goto err_name; 149 goto err_name;
@@ -155,7 +166,7 @@ static void __init core_pll_init(struct device_node *np)
155 166
156 base = of_iomap(np, 0); 167 base = of_iomap(np, 0);
157 if (!base) { 168 if (!base) {
158 pr_err("clk-ppc: iomap error\n"); 169 pr_err("iomap error\n");
159 return; 170 return;
160 } 171 }
161 172
@@ -181,24 +192,17 @@ static void __init core_pll_init(struct device_node *np)
181 goto err_map; 192 goto err_map;
182 } 193 }
183 194
184 /* output clock number per PLL */ 195 subclks = kcalloc(count, sizeof(struct clk *), GFP_KERNEL);
185 clocks_per_pll = count; 196 if (!subclks)
186
187 subclks = kzalloc(sizeof(struct clk *) * count, GFP_KERNEL);
188 if (!subclks) {
189 pr_err("%s: could not allocate subclks\n", __func__);
190 goto err_map; 197 goto err_map;
191 }
192 198
193 onecell_data = kzalloc(sizeof(struct clk_onecell_data), GFP_KERNEL); 199 onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
194 if (!onecell_data) { 200 if (!onecell_data)
195 pr_err("%s: could not allocate onecell_data\n", __func__);
196 goto err_clks; 201 goto err_clks;
197 }
198 202
199 for (i = 0; i < count; i++) { 203 for (i = 0; i < count; i++) {
200 rc = of_property_read_string_index(np, "clock-output-names", 204 rc = of_property_read_string_index(np, "clock-output-names",
201 i, &clk_name); 205 i, &clk_name);
202 if (rc) { 206 if (rc) {
203 pr_err("%s: could not get clock names\n", np->name); 207 pr_err("%s: could not get clock names\n", np->name);
204 goto err_cell; 208 goto err_cell;
@@ -230,7 +234,7 @@ static void __init core_pll_init(struct device_node *np)
230 rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data); 234 rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
231 if (rc) { 235 if (rc) {
232 pr_err("Could not register clk provider for node:%s\n", 236 pr_err("Could not register clk provider for node:%s\n",
233 np->name); 237 np->name);
234 goto err_cell; 238 goto err_cell;
235 } 239 }
236 240
@@ -252,7 +256,7 @@ static void __init sysclk_init(struct device_node *node)
252 u32 rate; 256 u32 rate;
253 257
254 if (!np) { 258 if (!np) {
255 pr_err("ppc-clk: could not get parent node\n"); 259 pr_err("could not get parent node\n");
256 return; 260 return;
257 } 261 }
258 262
@@ -268,39 +272,91 @@ static void __init sysclk_init(struct device_node *node)
268 of_clk_add_provider(np, of_clk_src_simple_get, clk); 272 of_clk_add_provider(np, of_clk_src_simple_get, clk);
269} 273}
270 274
271static const struct of_device_id clk_match[] __initconst = { 275static void __init pltfrm_pll_init(struct device_node *np)
272 { .compatible = "fsl,qoriq-sysclk-1.0", .data = sysclk_init, },
273 { .compatible = "fsl,qoriq-sysclk-2.0", .data = sysclk_init, },
274 { .compatible = "fsl,qoriq-core-pll-1.0", .data = core_pll_init, },
275 { .compatible = "fsl,qoriq-core-pll-2.0", .data = core_pll_init, },
276 { .compatible = "fsl,qoriq-core-mux-1.0", .data = core_mux_init, },
277 { .compatible = "fsl,qoriq-core-mux-2.0", .data = core_mux_init, },
278 {}
279};
280
281static int __init ppc_corenet_clk_probe(struct platform_device *pdev)
282{ 276{
283 of_clk_init(clk_match); 277 void __iomem *base;
278 uint32_t mult;
279 const char *parent_name, *clk_name;
280 int i, _errno;
281 struct clk_onecell_data *cod;
284 282
285 return 0; 283 base = of_iomap(np, 0);
286} 284 if (!base) {
285 pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name);
286 return;
287 }
287 288
288static const struct of_device_id ppc_clk_ids[] __initconst = { 289 /* Get the multiple of PLL */
289 { .compatible = "fsl,qoriq-clockgen-1.0", }, 290 mult = ioread32be(base);
290 { .compatible = "fsl,qoriq-clockgen-2.0", },
291 {}
292};
293 291
294static struct platform_driver ppc_corenet_clk_driver = { 292 iounmap(base);
295 .driver = {
296 .name = "ppc_corenet_clock",
297 .of_match_table = ppc_clk_ids,
298 },
299 .probe = ppc_corenet_clk_probe,
300};
301 293
302static int __init ppc_corenet_clk_init(void) 294 /* Check if this PLL is disabled */
303{ 295 if (mult & PLL_KILL) {
304 return platform_driver_register(&ppc_corenet_clk_driver); 296 pr_debug("%s(): %s: Disabled\n", __func__, np->name);
297 return;
298 }
299 mult = (mult & GENMASK(6, 1)) >> 1;
300
301 parent_name = of_clk_get_parent_name(np, 0);
302 if (!parent_name) {
303 pr_err("%s(): %s: of_clk_get_parent_name() failed\n",
304 __func__, np->name);
305 return;
306 }
307
308 i = of_property_count_strings(np, "clock-output-names");
309 if (i < 0) {
310 pr_err("%s(): %s: of_property_count_strings(clock-output-names) = %d\n",
311 __func__, np->name, i);
312 return;
313 }
314
315 cod = kmalloc(sizeof(*cod) + i * sizeof(struct clk *), GFP_KERNEL);
316 if (!cod)
317 return;
318 cod->clks = (struct clk **)(cod + 1);
319 cod->clk_num = i;
320
321 for (i = 0; i < cod->clk_num; i++) {
322 _errno = of_property_read_string_index(np, "clock-output-names",
323 i, &clk_name);
324 if (_errno < 0) {
325 pr_err("%s(): %s: of_property_read_string_index(clock-output-names) = %d\n",
326 __func__, np->name, _errno);
327 goto return_clk_unregister;
328 }
329
330 cod->clks[i] = clk_register_fixed_factor(NULL, clk_name,
331 parent_name, 0, mult, 1 + i);
332 if (IS_ERR(cod->clks[i])) {
333 pr_err("%s(): %s: clk_register_fixed_factor(%s) = %ld\n",
334 __func__, np->name,
335 clk_name, PTR_ERR(cod->clks[i]));
336 goto return_clk_unregister;
337 }
338 }
339
340 _errno = of_clk_add_provider(np, of_clk_src_onecell_get, cod);
341 if (_errno < 0) {
342 pr_err("%s(): %s: of_clk_add_provider() = %d\n",
343 __func__, np->name, _errno);
344 goto return_clk_unregister;
345 }
346
347 return;
348
349return_clk_unregister:
350 while (--i >= 0)
351 clk_unregister(cod->clks[i]);
352 kfree(cod);
305} 353}
306subsys_initcall(ppc_corenet_clk_init); 354
355CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
356CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
357CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);
358CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init);
359CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init);
360CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init);
361CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init);
362CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init);
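
Beyond the rename to clk-qoriq.c, the tail of the file shows the structural change: the driver no longer registers a platform driver from a subsys_initcall and runs of_clk_init() from its probe; each compatible is declared with CLK_OF_DECLARE() so the core picks the init callbacks up directly, and the new platform-PLL compatibles slot into the same table. The mechanism, sketched for a hypothetical provider:

/* CLK_OF_DECLARE() records an init callback keyed by compatible;
 * of_clk_init(NULL), invoked from architecture early-boot code,
 * calls it for every matching device-tree node. */
static void __init foo_clk_init(struct device_node *np)
{
        /* map registers, register clks, of_clk_add_provider(), ... */
}
CLK_OF_DECLARE(foo_clk, "vendor,foo-clockgen", foo_clk_init);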
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 642cf37124d3..eb0152961d3c 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -9,7 +9,7 @@
9 * Standard functionality for the common clock API. See Documentation/clk.txt 9 * Standard functionality for the common clock API. See Documentation/clk.txt
10 */ 10 */
11 11
12#include <linux/clk-private.h> 12#include <linux/clk-provider.h>
13#include <linux/clk/clk-conf.h> 13#include <linux/clk/clk-conf.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/mutex.h> 15#include <linux/mutex.h>
@@ -37,6 +37,55 @@ static HLIST_HEAD(clk_root_list);
37static HLIST_HEAD(clk_orphan_list); 37static HLIST_HEAD(clk_orphan_list);
38static LIST_HEAD(clk_notifier_list); 38static LIST_HEAD(clk_notifier_list);
39 39
40static long clk_core_get_accuracy(struct clk_core *clk);
41static unsigned long clk_core_get_rate(struct clk_core *clk);
42static int clk_core_get_phase(struct clk_core *clk);
43static bool clk_core_is_prepared(struct clk_core *clk);
44static bool clk_core_is_enabled(struct clk_core *clk);
45static struct clk_core *clk_core_lookup(const char *name);
46
47/*** private data structures ***/
48
49struct clk_core {
50 const char *name;
51 const struct clk_ops *ops;
52 struct clk_hw *hw;
53 struct module *owner;
54 struct clk_core *parent;
55 const char **parent_names;
56 struct clk_core **parents;
57 u8 num_parents;
58 u8 new_parent_index;
59 unsigned long rate;
60 unsigned long req_rate;
61 unsigned long new_rate;
62 struct clk_core *new_parent;
63 struct clk_core *new_child;
64 unsigned long flags;
65 unsigned int enable_count;
66 unsigned int prepare_count;
67 unsigned long accuracy;
68 int phase;
69 struct hlist_head children;
70 struct hlist_node child_node;
71 struct hlist_node debug_node;
72 struct hlist_head clks;
73 unsigned int notifier_count;
74#ifdef CONFIG_DEBUG_FS
75 struct dentry *dentry;
76#endif
77 struct kref ref;
78};
79
80struct clk {
81 struct clk_core *core;
82 const char *dev_id;
83 const char *con_id;
84 unsigned long min_rate;
85 unsigned long max_rate;
86 struct hlist_node child_node;
87};
88
40/*** locking ***/ 89/*** locking ***/
41static void clk_prepare_lock(void) 90static void clk_prepare_lock(void)
42{ 91{
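
The hunk above is the heart of the clk.c rework: the old all-in-one struct clk (previously exposed via clk-private.h, whose include is dropped at the top of the file) is split into struct clk_core, one per hardware clock, and a small per-consumer struct clk carrying only the consumer identity and its min/max rate window, linked into core->clks. Most of the hunks that follow are the mechanical fallout: internal helpers become clk_core_*() taking a struct clk_core, while public entry points unwrap clk->core first. A sketch of what the split means for consumers (illustrative; assumes both lookups resolve to the same hardware clock):

static int foo_probe(struct platform_device *pdev)
{
        struct clk *a, *b;

        a = clk_get(&pdev->dev, "baud");        /* handle 1 */
        b = clk_get(&pdev->dev, "baud");        /* handle 2 */

        /* a and b are now distinct struct clk instances sharing one
         * clk_core: enable counts and the rate are common, while the
         * min_rate/max_rate window travels with each handle. */

        clk_put(b);
        clk_put(a);
        return 0;
}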
@@ -114,7 +163,8 @@ static struct hlist_head *orphan_list[] = {
114 NULL, 163 NULL,
115}; 164};
116 165
117static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level) 166static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
167 int level)
118{ 168{
119 if (!c) 169 if (!c)
120 return; 170 return;
@@ -122,14 +172,14 @@ static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
122 seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n", 172 seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
123 level * 3 + 1, "", 173 level * 3 + 1, "",
124 30 - level * 3, c->name, 174 30 - level * 3, c->name,
125 c->enable_count, c->prepare_count, clk_get_rate(c), 175 c->enable_count, c->prepare_count, clk_core_get_rate(c),
126 clk_get_accuracy(c), clk_get_phase(c)); 176 clk_core_get_accuracy(c), clk_core_get_phase(c));
127} 177}
128 178
129static void clk_summary_show_subtree(struct seq_file *s, struct clk *c, 179static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
130 int level) 180 int level)
131{ 181{
132 struct clk *child; 182 struct clk_core *child;
133 183
134 if (!c) 184 if (!c)
135 return; 185 return;
@@ -142,7 +192,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
142 192
143static int clk_summary_show(struct seq_file *s, void *data) 193static int clk_summary_show(struct seq_file *s, void *data)
144{ 194{
145 struct clk *c; 195 struct clk_core *c;
146 struct hlist_head **lists = (struct hlist_head **)s->private; 196 struct hlist_head **lists = (struct hlist_head **)s->private;
147 197
148 seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n"); 198 seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n");
@@ -172,7 +222,7 @@ static const struct file_operations clk_summary_fops = {
172 .release = single_release, 222 .release = single_release,
173}; 223};
174 224
175static void clk_dump_one(struct seq_file *s, struct clk *c, int level) 225static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
176{ 226{
177 if (!c) 227 if (!c)
178 return; 228 return;
@@ -180,14 +230,14 @@ static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
180 seq_printf(s, "\"%s\": { ", c->name); 230 seq_printf(s, "\"%s\": { ", c->name);
181 seq_printf(s, "\"enable_count\": %d,", c->enable_count); 231 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
182 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); 232 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
183 seq_printf(s, "\"rate\": %lu", clk_get_rate(c)); 233 seq_printf(s, "\"rate\": %lu", clk_core_get_rate(c));
184 seq_printf(s, "\"accuracy\": %lu", clk_get_accuracy(c)); 234 seq_printf(s, "\"accuracy\": %lu", clk_core_get_accuracy(c));
185 seq_printf(s, "\"phase\": %d", clk_get_phase(c)); 235 seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
186} 236}
187 237
188static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level) 238static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
189{ 239{
190 struct clk *child; 240 struct clk_core *child;
191 241
192 if (!c) 242 if (!c)
193 return; 243 return;
@@ -204,7 +254,7 @@ static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
204 254
205static int clk_dump(struct seq_file *s, void *data) 255static int clk_dump(struct seq_file *s, void *data)
206{ 256{
207 struct clk *c; 257 struct clk_core *c;
208 bool first_node = true; 258 bool first_node = true;
209 struct hlist_head **lists = (struct hlist_head **)s->private; 259 struct hlist_head **lists = (struct hlist_head **)s->private;
210 260
@@ -240,7 +290,7 @@ static const struct file_operations clk_dump_fops = {
240 .release = single_release, 290 .release = single_release,
241}; 291};
242 292
243static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry) 293static int clk_debug_create_one(struct clk_core *clk, struct dentry *pdentry)
244{ 294{
245 struct dentry *d; 295 struct dentry *d;
246 int ret = -ENOMEM; 296 int ret = -ENOMEM;
@@ -315,7 +365,7 @@ out:
315 * initialized. Otherwise it bails out early since the debugfs clk tree 365 * initialized. Otherwise it bails out early since the debugfs clk tree
316 * will be created lazily by clk_debug_init as part of a late_initcall. 366 * will be created lazily by clk_debug_init as part of a late_initcall.
317 */ 367 */
318static int clk_debug_register(struct clk *clk) 368static int clk_debug_register(struct clk_core *clk)
319{ 369{
320 int ret = 0; 370 int ret = 0;
321 371
@@ -340,16 +390,12 @@ unlock:
340 * debugfs clk tree if clk->dentry points to debugfs created by 390 * debugfs clk tree if clk->dentry points to debugfs created by
341 * clk_debug_register in __clk_init. 391 * clk_debug_register in __clk_init.
342 */ 392 */
343static void clk_debug_unregister(struct clk *clk) 393static void clk_debug_unregister(struct clk_core *clk)
344{ 394{
345 mutex_lock(&clk_debug_lock); 395 mutex_lock(&clk_debug_lock);
346 if (!clk->dentry)
347 goto out;
348
349 hlist_del_init(&clk->debug_node); 396 hlist_del_init(&clk->debug_node);
350 debugfs_remove_recursive(clk->dentry); 397 debugfs_remove_recursive(clk->dentry);
351 clk->dentry = NULL; 398 clk->dentry = NULL;
352out:
353 mutex_unlock(&clk_debug_lock); 399 mutex_unlock(&clk_debug_lock);
354} 400}
355 401
@@ -358,8 +404,9 @@ struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
358{ 404{
359 struct dentry *d = NULL; 405 struct dentry *d = NULL;
360 406
361 if (hw->clk->dentry) 407 if (hw->core->dentry)
362 d = debugfs_create_file(name, mode, hw->clk->dentry, data, fops); 408 d = debugfs_create_file(name, mode, hw->core->dentry, data,
409 fops);
363 410
364 return d; 411 return d;
365} 412}
@@ -379,7 +426,7 @@ EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
379 */ 426 */
380static int __init clk_debug_init(void) 427static int __init clk_debug_init(void)
381{ 428{
382 struct clk *clk; 429 struct clk_core *clk;
383 struct dentry *d; 430 struct dentry *d;
384 431
385 rootdir = debugfs_create_dir("clk", NULL); 432 rootdir = debugfs_create_dir("clk", NULL);
@@ -418,22 +465,20 @@ static int __init clk_debug_init(void)
418} 465}
419late_initcall(clk_debug_init); 466late_initcall(clk_debug_init);
420#else 467#else
421static inline int clk_debug_register(struct clk *clk) { return 0; } 468static inline int clk_debug_register(struct clk_core *clk) { return 0; }
422static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent) 469static inline void clk_debug_reparent(struct clk_core *clk,
470 struct clk_core *new_parent)
423{ 471{
424} 472}
425static inline void clk_debug_unregister(struct clk *clk) 473static inline void clk_debug_unregister(struct clk_core *clk)
426{ 474{
427} 475}
428#endif 476#endif
429 477
430/* caller must hold prepare_lock */ 478/* caller must hold prepare_lock */
431static void clk_unprepare_unused_subtree(struct clk *clk) 479static void clk_unprepare_unused_subtree(struct clk_core *clk)
432{ 480{
433 struct clk *child; 481 struct clk_core *child;
434
435 if (!clk)
436 return;
437 482
438 hlist_for_each_entry(child, &clk->children, child_node) 483 hlist_for_each_entry(child, &clk->children, child_node)
439 clk_unprepare_unused_subtree(child); 484 clk_unprepare_unused_subtree(child);
@@ -444,7 +489,7 @@ static void clk_unprepare_unused_subtree(struct clk *clk)
444 if (clk->flags & CLK_IGNORE_UNUSED) 489 if (clk->flags & CLK_IGNORE_UNUSED)
445 return; 490 return;
446 491
447 if (__clk_is_prepared(clk)) { 492 if (clk_core_is_prepared(clk)) {
448 if (clk->ops->unprepare_unused) 493 if (clk->ops->unprepare_unused)
449 clk->ops->unprepare_unused(clk->hw); 494 clk->ops->unprepare_unused(clk->hw);
450 else if (clk->ops->unprepare) 495 else if (clk->ops->unprepare)
@@ -453,14 +498,11 @@ static void clk_unprepare_unused_subtree(struct clk *clk)
453} 498}
454 499
455/* caller must hold prepare_lock */ 500/* caller must hold prepare_lock */
456static void clk_disable_unused_subtree(struct clk *clk) 501static void clk_disable_unused_subtree(struct clk_core *clk)
457{ 502{
458 struct clk *child; 503 struct clk_core *child;
459 unsigned long flags; 504 unsigned long flags;
460 505
461 if (!clk)
462 goto out;
463
464 hlist_for_each_entry(child, &clk->children, child_node) 506 hlist_for_each_entry(child, &clk->children, child_node)
465 clk_disable_unused_subtree(child); 507 clk_disable_unused_subtree(child);
466 508
@@ -477,7 +519,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
477 * sequence. call .disable_unused if available, otherwise fall 519 * sequence. call .disable_unused if available, otherwise fall
478 * back to .disable 520 * back to .disable
479 */ 521 */
480 if (__clk_is_enabled(clk)) { 522 if (clk_core_is_enabled(clk)) {
481 if (clk->ops->disable_unused) 523 if (clk->ops->disable_unused)
482 clk->ops->disable_unused(clk->hw); 524 clk->ops->disable_unused(clk->hw);
483 else if (clk->ops->disable) 525 else if (clk->ops->disable)
@@ -486,9 +528,6 @@ static void clk_disable_unused_subtree(struct clk *clk)
486 528
487unlock_out: 529unlock_out:
488 clk_enable_unlock(flags); 530 clk_enable_unlock(flags);
489
490out:
491 return;
492} 531}
493 532
494static bool clk_ignore_unused; 533static bool clk_ignore_unused;
@@ -501,7 +540,7 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
501 540
502static int clk_disable_unused(void) 541static int clk_disable_unused(void)
503{ 542{
504 struct clk *clk; 543 struct clk_core *clk;
505 544
506 if (clk_ignore_unused) { 545 if (clk_ignore_unused) {
507 pr_warn("clk: Not disabling unused clocks\n"); 546 pr_warn("clk: Not disabling unused clocks\n");
@@ -532,48 +571,65 @@ late_initcall_sync(clk_disable_unused);
532 571
533const char *__clk_get_name(struct clk *clk) 572const char *__clk_get_name(struct clk *clk)
534{ 573{
535 return !clk ? NULL : clk->name; 574 return !clk ? NULL : clk->core->name;
536} 575}
537EXPORT_SYMBOL_GPL(__clk_get_name); 576EXPORT_SYMBOL_GPL(__clk_get_name);
538 577
539struct clk_hw *__clk_get_hw(struct clk *clk) 578struct clk_hw *__clk_get_hw(struct clk *clk)
540{ 579{
541 return !clk ? NULL : clk->hw; 580 return !clk ? NULL : clk->core->hw;
542} 581}
543EXPORT_SYMBOL_GPL(__clk_get_hw); 582EXPORT_SYMBOL_GPL(__clk_get_hw);
544 583
545u8 __clk_get_num_parents(struct clk *clk) 584u8 __clk_get_num_parents(struct clk *clk)
546{ 585{
547 return !clk ? 0 : clk->num_parents; 586 return !clk ? 0 : clk->core->num_parents;
548} 587}
549EXPORT_SYMBOL_GPL(__clk_get_num_parents); 588EXPORT_SYMBOL_GPL(__clk_get_num_parents);
550 589
551struct clk *__clk_get_parent(struct clk *clk) 590struct clk *__clk_get_parent(struct clk *clk)
552{ 591{
553 return !clk ? NULL : clk->parent; 592 if (!clk)
593 return NULL;
594
595 /* TODO: Create a per-user clk and change callers to call clk_put */
596 return !clk->core->parent ? NULL : clk->core->parent->hw->clk;
554} 597}
555EXPORT_SYMBOL_GPL(__clk_get_parent); 598EXPORT_SYMBOL_GPL(__clk_get_parent);
556 599
557struct clk *clk_get_parent_by_index(struct clk *clk, u8 index) 600static struct clk_core *clk_core_get_parent_by_index(struct clk_core *clk,
601 u8 index)
558{ 602{
559 if (!clk || index >= clk->num_parents) 603 if (!clk || index >= clk->num_parents)
560 return NULL; 604 return NULL;
561 else if (!clk->parents) 605 else if (!clk->parents)
562 return __clk_lookup(clk->parent_names[index]); 606 return clk_core_lookup(clk->parent_names[index]);
563 else if (!clk->parents[index]) 607 else if (!clk->parents[index])
564 return clk->parents[index] = 608 return clk->parents[index] =
565 __clk_lookup(clk->parent_names[index]); 609 clk_core_lookup(clk->parent_names[index]);
566 else 610 else
567 return clk->parents[index]; 611 return clk->parents[index];
568} 612}
613
614struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
615{
616 struct clk_core *parent;
617
618 if (!clk)
619 return NULL;
620
621 parent = clk_core_get_parent_by_index(clk->core, index);
622
623 return !parent ? NULL : parent->hw->clk;
624}
569EXPORT_SYMBOL_GPL(clk_get_parent_by_index); 625EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
570 626
571unsigned int __clk_get_enable_count(struct clk *clk) 627unsigned int __clk_get_enable_count(struct clk *clk)
572{ 628{
573 return !clk ? 0 : clk->enable_count; 629 return !clk ? 0 : clk->core->enable_count;
574} 630}
575 631
576unsigned long __clk_get_rate(struct clk *clk) 632static unsigned long clk_core_get_rate_nolock(struct clk_core *clk)
577{ 633{
578 unsigned long ret; 634 unsigned long ret;
579 635
@@ -593,9 +649,17 @@ unsigned long __clk_get_rate(struct clk *clk)
593out: 649out:
594 return ret; 650 return ret;
595} 651}
652
653unsigned long __clk_get_rate(struct clk *clk)
654{
655 if (!clk)
656 return 0;
657
658 return clk_core_get_rate_nolock(clk->core);
659}
596EXPORT_SYMBOL_GPL(__clk_get_rate); 660EXPORT_SYMBOL_GPL(__clk_get_rate);
597 661
598static unsigned long __clk_get_accuracy(struct clk *clk) 662static unsigned long __clk_get_accuracy(struct clk_core *clk)
599{ 663{
600 if (!clk) 664 if (!clk)
601 return 0; 665 return 0;
@@ -605,11 +669,11 @@ static unsigned long __clk_get_accuracy(struct clk *clk)
605 669
606unsigned long __clk_get_flags(struct clk *clk) 670unsigned long __clk_get_flags(struct clk *clk)
607{ 671{
608 return !clk ? 0 : clk->flags; 672 return !clk ? 0 : clk->core->flags;
609} 673}
610EXPORT_SYMBOL_GPL(__clk_get_flags); 674EXPORT_SYMBOL_GPL(__clk_get_flags);
611 675
612bool __clk_is_prepared(struct clk *clk) 676static bool clk_core_is_prepared(struct clk_core *clk)
613{ 677{
614 int ret; 678 int ret;
615 679
@@ -630,7 +694,15 @@ out:
630 return !!ret; 694 return !!ret;
631} 695}
632 696
633bool __clk_is_enabled(struct clk *clk) 697bool __clk_is_prepared(struct clk *clk)
698{
699 if (!clk)
700 return false;
701
702 return clk_core_is_prepared(clk->core);
703}
704
705static bool clk_core_is_enabled(struct clk_core *clk)
634{ 706{
635 int ret; 707 int ret;
636 708
@@ -650,12 +722,21 @@ bool __clk_is_enabled(struct clk *clk)
650out: 722out:
651 return !!ret; 723 return !!ret;
652} 724}
725
726bool __clk_is_enabled(struct clk *clk)
727{
728 if (!clk)
729 return false;
730
731 return clk_core_is_enabled(clk->core);
732}
653EXPORT_SYMBOL_GPL(__clk_is_enabled); 733EXPORT_SYMBOL_GPL(__clk_is_enabled);
654 734
655static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk) 735static struct clk_core *__clk_lookup_subtree(const char *name,
736 struct clk_core *clk)
656{ 737{
657 struct clk *child; 738 struct clk_core *child;
658 struct clk *ret; 739 struct clk_core *ret;
659 740
660 if (!strcmp(clk->name, name)) 741 if (!strcmp(clk->name, name))
661 return clk; 742 return clk;
@@ -669,10 +750,10 @@ static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
669 return NULL; 750 return NULL;
670} 751}
671 752
672struct clk *__clk_lookup(const char *name) 753static struct clk_core *clk_core_lookup(const char *name)
673{ 754{
674 struct clk *root_clk; 755 struct clk_core *root_clk;
675 struct clk *ret; 756 struct clk_core *ret;
676 757
677 if (!name) 758 if (!name)
678 return NULL; 759 return NULL;
@@ -694,42 +775,53 @@ struct clk *__clk_lookup(const char *name)
694 return NULL; 775 return NULL;
695} 776}
696 777
697/* 778static bool mux_is_better_rate(unsigned long rate, unsigned long now,
698 * Helper for finding best parent to provide a given frequency. This can be used 779 unsigned long best, unsigned long flags)
699 * directly as a determine_rate callback (e.g. for a mux), or from a more
700 * complex clock that may combine a mux with other operations.
701 */
702long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
703 unsigned long *best_parent_rate,
704 struct clk_hw **best_parent_p)
705{ 780{
706 struct clk *clk = hw->clk, *parent, *best_parent = NULL; 781 if (flags & CLK_MUX_ROUND_CLOSEST)
782 return abs(now - rate) < abs(best - rate);
783
784 return now <= rate && now > best;
785}
786
787static long
788clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate,
789 unsigned long min_rate,
790 unsigned long max_rate,
791 unsigned long *best_parent_rate,
792 struct clk_hw **best_parent_p,
793 unsigned long flags)
794{
795 struct clk_core *core = hw->core, *parent, *best_parent = NULL;
707 int i, num_parents; 796 int i, num_parents;
708 unsigned long parent_rate, best = 0; 797 unsigned long parent_rate, best = 0;
709 798
710 /* if NO_REPARENT flag set, pass through to current parent */ 799 /* if NO_REPARENT flag set, pass through to current parent */
711 if (clk->flags & CLK_SET_RATE_NO_REPARENT) { 800 if (core->flags & CLK_SET_RATE_NO_REPARENT) {
712 parent = clk->parent; 801 parent = core->parent;
713 if (clk->flags & CLK_SET_RATE_PARENT) 802 if (core->flags & CLK_SET_RATE_PARENT)
714 best = __clk_round_rate(parent, rate); 803 best = __clk_determine_rate(parent ? parent->hw : NULL,
804 rate, min_rate, max_rate);
715 else if (parent) 805 else if (parent)
716 best = __clk_get_rate(parent); 806 best = clk_core_get_rate_nolock(parent);
717 else 807 else
718 best = __clk_get_rate(clk); 808 best = clk_core_get_rate_nolock(core);
719 goto out; 809 goto out;
720 } 810 }
721 811
722 /* find the parent that can provide the fastest rate <= rate */ 812 /* find the parent that can provide the fastest rate <= rate */
723 num_parents = clk->num_parents; 813 num_parents = core->num_parents;
724 for (i = 0; i < num_parents; i++) { 814 for (i = 0; i < num_parents; i++) {
725 parent = clk_get_parent_by_index(clk, i); 815 parent = clk_core_get_parent_by_index(core, i);
726 if (!parent) 816 if (!parent)
727 continue; 817 continue;
728 if (clk->flags & CLK_SET_RATE_PARENT) 818 if (core->flags & CLK_SET_RATE_PARENT)
729 parent_rate = __clk_round_rate(parent, rate); 819 parent_rate = __clk_determine_rate(parent->hw, rate,
820 min_rate,
821 max_rate);
730 else 822 else
731 parent_rate = __clk_get_rate(parent); 823 parent_rate = clk_core_get_rate_nolock(parent);
732 if (parent_rate <= rate && parent_rate > best) { 824 if (mux_is_better_rate(rate, parent_rate, best, flags)) {
733 best_parent = parent; 825 best_parent = parent;
734 best = parent_rate; 826 best = parent_rate;
735 } 827 }
@@ -742,11 +834,63 @@ out:
742 834
743 return best; 835 return best;
744} 836}
837
838struct clk *__clk_lookup(const char *name)
839{
840 struct clk_core *core = clk_core_lookup(name);
841
842 return !core ? NULL : core->hw->clk;
843}
844
845static void clk_core_get_boundaries(struct clk_core *clk,
846 unsigned long *min_rate,
847 unsigned long *max_rate)
848{
849 struct clk *clk_user;
850
851 *min_rate = 0;
852 *max_rate = ULONG_MAX;
853
854 hlist_for_each_entry(clk_user, &clk->clks, child_node)
855 *min_rate = max(*min_rate, clk_user->min_rate);
856
857 hlist_for_each_entry(clk_user, &clk->clks, child_node)
858 *max_rate = min(*max_rate, clk_user->max_rate);
859}
860
861/*
862 * Helper for finding best parent to provide a given frequency. This can be used
863 * directly as a determine_rate callback (e.g. for a mux), or from a more
864 * complex clock that may combine a mux with other operations.
865 */
866long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
867 unsigned long min_rate,
868 unsigned long max_rate,
869 unsigned long *best_parent_rate,
870 struct clk_hw **best_parent_p)
871{
872 return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
873 best_parent_rate,
874 best_parent_p, 0);
875}
745EXPORT_SYMBOL_GPL(__clk_mux_determine_rate); 876EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
746 877
878long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
879 unsigned long min_rate,
880 unsigned long max_rate,
881 unsigned long *best_parent_rate,
882 struct clk_hw **best_parent_p)
883{
884 return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
885 best_parent_rate,
886 best_parent_p,
887 CLK_MUX_ROUND_CLOSEST);
888}
889EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
890
747/*** clk api ***/ 891/*** clk api ***/
748 892
749void __clk_unprepare(struct clk *clk) 893static void clk_core_unprepare(struct clk_core *clk)
750{ 894{
751 if (!clk) 895 if (!clk)
752 return; 896 return;
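
clk_mux_determine_rate_flags() above is where the per-user constraints meet rate propagation: the [min_rate, max_rate] window aggregated by clk_core_get_boundaries() is passed down through __clk_determine_rate(), so a parent is never asked for a rate some consumer has forbidden, and CLK_MUX_ROUND_CLOSEST selects between the two rounding policies in mux_is_better_rate(). The .determine_rate callback signature grows accordingly; a provider that simply defers to the generic mux helper would now look like this (sketch, hypothetical driver name):

static long foo_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
                                   unsigned long min_rate,
                                   unsigned long max_rate,
                                   unsigned long *best_parent_rate,
                                   struct clk_hw **best_parent_hw)
{
        /* pick the parent giving the best rate within [min, max] */
        return __clk_mux_determine_rate(hw, rate, min_rate, max_rate,
                                        best_parent_rate, best_parent_hw);
}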
@@ -762,7 +906,7 @@ void __clk_unprepare(struct clk *clk)
762 if (clk->ops->unprepare) 906 if (clk->ops->unprepare)
763 clk->ops->unprepare(clk->hw); 907 clk->ops->unprepare(clk->hw);
764 908
765 __clk_unprepare(clk->parent); 909 clk_core_unprepare(clk->parent);
766} 910}
767 911
768/** 912/**
@@ -782,12 +926,12 @@ void clk_unprepare(struct clk *clk)
782 return; 926 return;
783 927
784 clk_prepare_lock(); 928 clk_prepare_lock();
785 __clk_unprepare(clk); 929 clk_core_unprepare(clk->core);
786 clk_prepare_unlock(); 930 clk_prepare_unlock();
787} 931}
788EXPORT_SYMBOL_GPL(clk_unprepare); 932EXPORT_SYMBOL_GPL(clk_unprepare);
789 933
790int __clk_prepare(struct clk *clk) 934static int clk_core_prepare(struct clk_core *clk)
791{ 935{
792 int ret = 0; 936 int ret = 0;
793 937
@@ -795,14 +939,14 @@ int __clk_prepare(struct clk *clk)
795 return 0; 939 return 0;
796 940
797 if (clk->prepare_count == 0) { 941 if (clk->prepare_count == 0) {
798 ret = __clk_prepare(clk->parent); 942 ret = clk_core_prepare(clk->parent);
799 if (ret) 943 if (ret)
800 return ret; 944 return ret;
801 945
802 if (clk->ops->prepare) { 946 if (clk->ops->prepare) {
803 ret = clk->ops->prepare(clk->hw); 947 ret = clk->ops->prepare(clk->hw);
804 if (ret) { 948 if (ret) {
805 __clk_unprepare(clk->parent); 949 clk_core_unprepare(clk->parent);
806 return ret; 950 return ret;
807 } 951 }
808 } 952 }
@@ -829,15 +973,18 @@ int clk_prepare(struct clk *clk)
829{ 973{
830 int ret; 974 int ret;
831 975
976 if (!clk)
977 return 0;
978
832 clk_prepare_lock(); 979 clk_prepare_lock();
833 ret = __clk_prepare(clk); 980 ret = clk_core_prepare(clk->core);
834 clk_prepare_unlock(); 981 clk_prepare_unlock();
835 982
836 return ret; 983 return ret;
837} 984}
838EXPORT_SYMBOL_GPL(clk_prepare); 985EXPORT_SYMBOL_GPL(clk_prepare);
839 986
840static void __clk_disable(struct clk *clk) 987static void clk_core_disable(struct clk_core *clk)
841{ 988{
842 if (!clk) 989 if (!clk)
843 return; 990 return;
@@ -851,7 +998,15 @@ static void __clk_disable(struct clk *clk)
851 if (clk->ops->disable) 998 if (clk->ops->disable)
852 clk->ops->disable(clk->hw); 999 clk->ops->disable(clk->hw);
853 1000
854 __clk_disable(clk->parent); 1001 clk_core_disable(clk->parent);
1002}
1003
1004static void __clk_disable(struct clk *clk)
1005{
1006 if (!clk)
1007 return;
1008
1009 clk_core_disable(clk->core);
855} 1010}
856 1011
857/** 1012/**
@@ -879,7 +1034,7 @@ void clk_disable(struct clk *clk)
879} 1034}
880EXPORT_SYMBOL_GPL(clk_disable); 1035EXPORT_SYMBOL_GPL(clk_disable);
881 1036
882static int __clk_enable(struct clk *clk) 1037static int clk_core_enable(struct clk_core *clk)
883{ 1038{
884 int ret = 0; 1039 int ret = 0;
885 1040
@@ -890,7 +1045,7 @@ static int __clk_enable(struct clk *clk)
890 return -ESHUTDOWN; 1045 return -ESHUTDOWN;
891 1046
892 if (clk->enable_count == 0) { 1047 if (clk->enable_count == 0) {
893 ret = __clk_enable(clk->parent); 1048 ret = clk_core_enable(clk->parent);
894 1049
895 if (ret) 1050 if (ret)
896 return ret; 1051 return ret;
@@ -898,7 +1053,7 @@ static int __clk_enable(struct clk *clk)
898 if (clk->ops->enable) { 1053 if (clk->ops->enable) {
899 ret = clk->ops->enable(clk->hw); 1054 ret = clk->ops->enable(clk->hw);
900 if (ret) { 1055 if (ret) {
901 __clk_disable(clk->parent); 1056 clk_core_disable(clk->parent);
902 return ret; 1057 return ret;
903 } 1058 }
904 } 1059 }
@@ -908,6 +1063,14 @@ static int __clk_enable(struct clk *clk)
908 return 0; 1063 return 0;
909} 1064}
910 1065
1066static int __clk_enable(struct clk *clk)
1067{
1068 if (!clk)
1069 return 0;
1070
1071 return clk_core_enable(clk->core);
1072}
1073
911/** 1074/**
912 * clk_enable - ungate a clock 1075 * clk_enable - ungate a clock
913 * @clk: the clk being ungated 1076 * @clk: the clk being ungated
@@ -934,17 +1097,13 @@ int clk_enable(struct clk *clk)
934} 1097}
935EXPORT_SYMBOL_GPL(clk_enable); 1098EXPORT_SYMBOL_GPL(clk_enable);
936 1099
937/** 1100static unsigned long clk_core_round_rate_nolock(struct clk_core *clk,
938 * __clk_round_rate - round the given rate for a clk 1101 unsigned long rate,
939 * @clk: round the rate of this clock 1102 unsigned long min_rate,
940 * @rate: the rate which is to be rounded 1103 unsigned long max_rate)
941 *
942 * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate
943 */
944unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
945{ 1104{
946 unsigned long parent_rate = 0; 1105 unsigned long parent_rate = 0;
947 struct clk *parent; 1106 struct clk_core *parent;
948 struct clk_hw *parent_hw; 1107 struct clk_hw *parent_hw;
949 1108
950 if (!clk) 1109 if (!clk)
@@ -956,15 +1115,59 @@ unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
956 1115
957 if (clk->ops->determine_rate) { 1116 if (clk->ops->determine_rate) {
958 parent_hw = parent ? parent->hw : NULL; 1117 parent_hw = parent ? parent->hw : NULL;
959 return clk->ops->determine_rate(clk->hw, rate, &parent_rate, 1118 return clk->ops->determine_rate(clk->hw, rate,
960 &parent_hw); 1119 min_rate, max_rate,
1120 &parent_rate, &parent_hw);
961 } else if (clk->ops->round_rate) 1121 } else if (clk->ops->round_rate)
962 return clk->ops->round_rate(clk->hw, rate, &parent_rate); 1122 return clk->ops->round_rate(clk->hw, rate, &parent_rate);
963 else if (clk->flags & CLK_SET_RATE_PARENT) 1123 else if (clk->flags & CLK_SET_RATE_PARENT)
964 return __clk_round_rate(clk->parent, rate); 1124 return clk_core_round_rate_nolock(clk->parent, rate, min_rate,
1125 max_rate);
965 else 1126 else
966 return clk->rate; 1127 return clk->rate;
967} 1128}
1129
1130/**
1131 * __clk_determine_rate - get the closest rate actually supported by a clock
1132 * @hw: determine the rate of this clock
1133 * @rate: target rate
1134 * @min_rate: returned rate must be greater than this rate
1135 * @max_rate: returned rate must be less than this rate
1136 *
1137 * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate and
1138 * .determine_rate.
1139 */
1140unsigned long __clk_determine_rate(struct clk_hw *hw,
1141 unsigned long rate,
1142 unsigned long min_rate,
1143 unsigned long max_rate)
1144{
1145 if (!hw)
1146 return 0;
1147
1148 return clk_core_round_rate_nolock(hw->core, rate, min_rate, max_rate);
1149}
1150EXPORT_SYMBOL_GPL(__clk_determine_rate);
1151
1152/**
1153 * __clk_round_rate - round the given rate for a clk
1154 * @clk: round the rate of this clock
1155 * @rate: the rate which is to be rounded
1156 *
1157 * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate
1158 */
1159unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
1160{
1161 unsigned long min_rate;
1162 unsigned long max_rate;
1163
1164 if (!clk)
1165 return 0;
1166
1167 clk_core_get_boundaries(clk->core, &min_rate, &max_rate);
1168
1169 return clk_core_round_rate_nolock(clk->core, rate, min_rate, max_rate);
1170}
968EXPORT_SYMBOL_GPL(__clk_round_rate); 1171EXPORT_SYMBOL_GPL(__clk_round_rate);
969 1172
970/** 1173/**
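
The split above gives providers and consumers separate entry points: __clk_determine_rate() takes an explicit window so a .determine_rate implementation can forward whatever bounds it was handed, while __clk_round_rate() keeps its two-argument form and derives the boundaries from every user of the clock. That is what makes a consumer-side constraint effective even for callers that never mention ranges. A hedged illustration, assuming the consumer-facing clk_set_rate_range() helper introduced elsewhere in this series alongside the per-handle fields:

static int foo_constrain(struct clk *uart_clk)
{
        int ret;

        /* constrain this handle; the window is intersected with every
         * other user's in clk_core_get_boundaries() */
        ret = clk_set_rate_range(uart_clk, 9600 * 16, 115200 * 16);
        if (ret)
                return ret;

        /* rounding now happens inside the aggregated window */
        return clk_round_rate(uart_clk, 115200 * 16) > 0 ? 0 : -EINVAL;
}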
@@ -980,6 +1183,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
980{ 1183{
981 unsigned long ret; 1184 unsigned long ret;
982 1185
1186 if (!clk)
1187 return 0;
1188
983 clk_prepare_lock(); 1189 clk_prepare_lock();
984 ret = __clk_round_rate(clk, rate); 1190 ret = __clk_round_rate(clk, rate);
985 clk_prepare_unlock(); 1191 clk_prepare_unlock();
@@ -1002,22 +1208,21 @@ EXPORT_SYMBOL_GPL(clk_round_rate);
1002 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if 1208 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
1003 * a driver returns that. 1209 * a driver returns that.
1004 */ 1210 */
1005static int __clk_notify(struct clk *clk, unsigned long msg, 1211static int __clk_notify(struct clk_core *clk, unsigned long msg,
1006 unsigned long old_rate, unsigned long new_rate) 1212 unsigned long old_rate, unsigned long new_rate)
1007{ 1213{
1008 struct clk_notifier *cn; 1214 struct clk_notifier *cn;
1009 struct clk_notifier_data cnd; 1215 struct clk_notifier_data cnd;
1010 int ret = NOTIFY_DONE; 1216 int ret = NOTIFY_DONE;
1011 1217
1012 cnd.clk = clk;
1013 cnd.old_rate = old_rate; 1218 cnd.old_rate = old_rate;
1014 cnd.new_rate = new_rate; 1219 cnd.new_rate = new_rate;
1015 1220
1016 list_for_each_entry(cn, &clk_notifier_list, node) { 1221 list_for_each_entry(cn, &clk_notifier_list, node) {
1017 if (cn->clk == clk) { 1222 if (cn->clk->core == clk) {
1223 cnd.clk = cn->clk;
1018 ret = srcu_notifier_call_chain(&cn->notifier_head, msg, 1224 ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
1019 &cnd); 1225 &cnd);
1020 break;
1021 } 1226 }
1022 } 1227 }
1023 1228
@@ -1035,10 +1240,10 @@ static int __clk_notify(struct clk *clk, unsigned long msg,
1035 * 1240 *
1036 * Caller must hold prepare_lock. 1241 * Caller must hold prepare_lock.
1037 */ 1242 */
1038static void __clk_recalc_accuracies(struct clk *clk) 1243static void __clk_recalc_accuracies(struct clk_core *clk)
1039{ 1244{
1040 unsigned long parent_accuracy = 0; 1245 unsigned long parent_accuracy = 0;
1041 struct clk *child; 1246 struct clk_core *child;
1042 1247
1043 if (clk->parent) 1248 if (clk->parent)
1044 parent_accuracy = clk->parent->accuracy; 1249 parent_accuracy = clk->parent->accuracy;
@@ -1053,6 +1258,20 @@ static void __clk_recalc_accuracies(struct clk *clk)
1053 __clk_recalc_accuracies(child); 1258 __clk_recalc_accuracies(child);
1054} 1259}
1055 1260
1261static long clk_core_get_accuracy(struct clk_core *clk)
1262{
1263 unsigned long accuracy;
1264
1265 clk_prepare_lock();
1266 if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
1267 __clk_recalc_accuracies(clk);
1268
1269 accuracy = __clk_get_accuracy(clk);
1270 clk_prepare_unlock();
1271
1272 return accuracy;
1273}
1274
1056/** 1275/**
1057 * clk_get_accuracy - return the accuracy of clk 1276 * clk_get_accuracy - return the accuracy of clk
1058 * @clk: the clk whose accuracy is being returned 1277 * @clk: the clk whose accuracy is being returned
@@ -1064,20 +1283,15 @@ static void __clk_recalc_accuracies(struct clk *clk)
1064 */ 1283 */
1065long clk_get_accuracy(struct clk *clk) 1284long clk_get_accuracy(struct clk *clk)
1066{ 1285{
1067 unsigned long accuracy; 1286 if (!clk)
1068 1287 return 0;
1069 clk_prepare_lock();
1070 if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
1071 __clk_recalc_accuracies(clk);
1072
1073 accuracy = __clk_get_accuracy(clk);
1074 clk_prepare_unlock();
1075 1288
1076 return accuracy; 1289 return clk_core_get_accuracy(clk->core);
1077} 1290}
1078EXPORT_SYMBOL_GPL(clk_get_accuracy); 1291EXPORT_SYMBOL_GPL(clk_get_accuracy);
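A short consumer-side sketch of the accuracy API, with a hypothetical threshold; clk_get_accuracy() reports the clock's accuracy in parts per billion:

	#include <linux/clk.h>

	/* Hypothetical: refuse a reference clock worse than 50 ppm. */
	static int foo_check_refclk(struct clk *refclk)
	{
		long ppb = clk_get_accuracy(refclk);

		return ppb > 50000 ? -EIO : 0;	/* 50000 ppb == 50 ppm */
	}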
1079 1292
1080static unsigned long clk_recalc(struct clk *clk, unsigned long parent_rate) 1293static unsigned long clk_recalc(struct clk_core *clk,
1294 unsigned long parent_rate)
1081{ 1295{
1082 if (clk->ops->recalc_rate) 1296 if (clk->ops->recalc_rate)
1083 return clk->ops->recalc_rate(clk->hw, parent_rate); 1297 return clk->ops->recalc_rate(clk->hw, parent_rate);
@@ -1098,11 +1312,11 @@ static unsigned long clk_recalc(struct clk *clk, unsigned long parent_rate)
1098 * 1312 *
1099 * Caller must hold prepare_lock. 1313 * Caller must hold prepare_lock.
1100 */ 1314 */
1101static void __clk_recalc_rates(struct clk *clk, unsigned long msg) 1315static void __clk_recalc_rates(struct clk_core *clk, unsigned long msg)
1102{ 1316{
1103 unsigned long old_rate; 1317 unsigned long old_rate;
1104 unsigned long parent_rate = 0; 1318 unsigned long parent_rate = 0;
1105 struct clk *child; 1319 struct clk_core *child;
1106 1320
1107 old_rate = clk->rate; 1321 old_rate = clk->rate;
1108 1322
@@ -1122,15 +1336,7 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
1122 __clk_recalc_rates(child, msg); 1336 __clk_recalc_rates(child, msg);
1123} 1337}
1124 1338
1125/** 1339static unsigned long clk_core_get_rate(struct clk_core *clk)
1126 * clk_get_rate - return the rate of clk
1127 * @clk: the clk whose rate is being returned
1128 *
1129 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
1130 * is set, which means a recalc_rate will be issued.
1131 * If clk is NULL then returns 0.
1132 */
1133unsigned long clk_get_rate(struct clk *clk)
1134{ 1340{
1135 unsigned long rate; 1341 unsigned long rate;
1136 1342
@@ -1139,14 +1345,32 @@ unsigned long clk_get_rate(struct clk *clk)
1139 if (clk && (clk->flags & CLK_GET_RATE_NOCACHE)) 1345 if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
1140 __clk_recalc_rates(clk, 0); 1346 __clk_recalc_rates(clk, 0);
1141 1347
1142 rate = __clk_get_rate(clk); 1348 rate = clk_core_get_rate_nolock(clk);
1143 clk_prepare_unlock(); 1349 clk_prepare_unlock();
1144 1350
1145 return rate; 1351 return rate;
1146} 1352}
1353EXPORT_SYMBOL_GPL(clk_core_get_rate);
1354
1355/**
1356 * clk_get_rate - return the rate of clk
1357 * @clk: the clk whose rate is being returned
1358 *
1359 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
1360 * is set, which means a recalc_rate will be issued.
1361 * If clk is NULL then returns 0.
1362 */
1363unsigned long clk_get_rate(struct clk *clk)
1364{
1365 if (!clk)
1366 return 0;
1367
1368 return clk_core_get_rate(clk->core);
1369}
1147EXPORT_SYMBOL_GPL(clk_get_rate); 1370EXPORT_SYMBOL_GPL(clk_get_rate);
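Since clk_set_rate() (below) rounds the request to what the hardware supports, consumers generally read back the programmed rate rather than trusting the requested one. A sketch under those assumptions:

	#include <linux/clk.h>

	/* Hypothetical: return the rate actually achieved, 0 on failure. */
	static unsigned long foo_program_rate(struct clk *clk,
					      unsigned long rate)
	{
		if (clk_set_rate(clk, rate))
			return 0;

		return clk_get_rate(clk);	/* may differ from 'rate' */
	}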
1148 1371
1149static int clk_fetch_parent_index(struct clk *clk, struct clk *parent) 1372static int clk_fetch_parent_index(struct clk_core *clk,
1373 struct clk_core *parent)
1150{ 1374{
1151 int i; 1375 int i;
1152 1376
@@ -1160,7 +1384,7 @@ static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
1160 /* 1384 /*
1161 * find index of new parent clock using cached parent ptrs, 1385 * find index of new parent clock using cached parent ptrs,
1162 * or if not yet cached, use string name comparison and cache 1386 * or if not yet cached, use string name comparison and cache
1163 * them now to avoid future calls to __clk_lookup. 1387 * them now to avoid future calls to clk_core_lookup.
1164 */ 1388 */
1165 for (i = 0; i < clk->num_parents; i++) { 1389 for (i = 0; i < clk->num_parents; i++) {
1166 if (clk->parents[i] == parent) 1390 if (clk->parents[i] == parent)
@@ -1170,7 +1394,7 @@ static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
1170 continue; 1394 continue;
1171 1395
1172 if (!strcmp(clk->parent_names[i], parent->name)) { 1396 if (!strcmp(clk->parent_names[i], parent->name)) {
1173 clk->parents[i] = __clk_lookup(parent->name); 1397 clk->parents[i] = clk_core_lookup(parent->name);
1174 return i; 1398 return i;
1175 } 1399 }
1176 } 1400 }
@@ -1178,7 +1402,7 @@ static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
1178 return -EINVAL; 1402 return -EINVAL;
1179} 1403}
1180 1404
1181static void clk_reparent(struct clk *clk, struct clk *new_parent) 1405static void clk_reparent(struct clk_core *clk, struct clk_core *new_parent)
1182{ 1406{
1183 hlist_del(&clk->child_node); 1407 hlist_del(&clk->child_node);
1184 1408
@@ -1195,10 +1419,11 @@ static void clk_reparent(struct clk *clk, struct clk *new_parent)
1195 clk->parent = new_parent; 1419 clk->parent = new_parent;
1196} 1420}
1197 1421
1198static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent) 1422static struct clk_core *__clk_set_parent_before(struct clk_core *clk,
1423 struct clk_core *parent)
1199{ 1424{
1200 unsigned long flags; 1425 unsigned long flags;
1201 struct clk *old_parent = clk->parent; 1426 struct clk_core *old_parent = clk->parent;
1202 1427
1203 /* 1428 /*
1204 * Migrate prepare state between parents and prevent race with 1429 * Migrate prepare state between parents and prevent race with
@@ -1218,9 +1443,9 @@ static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
1218 * See also: Comment for clk_set_parent() below. 1443 * See also: Comment for clk_set_parent() below.
1219 */ 1444 */
1220 if (clk->prepare_count) { 1445 if (clk->prepare_count) {
1221 __clk_prepare(parent); 1446 clk_core_prepare(parent);
1222 clk_enable(parent); 1447 clk_core_enable(parent);
1223 clk_enable(clk); 1448 clk_core_enable(clk);
1224 } 1449 }
1225 1450
1226 /* update the clk tree topology */ 1451 /* update the clk tree topology */
@@ -1231,25 +1456,27 @@ static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
1231 return old_parent; 1456 return old_parent;
1232} 1457}
1233 1458
1234static void __clk_set_parent_after(struct clk *clk, struct clk *parent, 1459static void __clk_set_parent_after(struct clk_core *core,
1235 struct clk *old_parent) 1460 struct clk_core *parent,
1461 struct clk_core *old_parent)
1236{ 1462{
1237 /* 1463 /*
1238 * Finish the migration of prepare state and undo the changes done 1464 * Finish the migration of prepare state and undo the changes done
1239 * for preventing a race with clk_enable(). 1465 * for preventing a race with clk_enable().
1240 */ 1466 */
1241 if (clk->prepare_count) { 1467 if (core->prepare_count) {
1242 clk_disable(clk); 1468 clk_core_disable(core);
1243 clk_disable(old_parent); 1469 clk_core_disable(old_parent);
1244 __clk_unprepare(old_parent); 1470 clk_core_unprepare(old_parent);
1245 } 1471 }
1246} 1472}
1247 1473
1248static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index) 1474static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent,
1475 u8 p_index)
1249{ 1476{
1250 unsigned long flags; 1477 unsigned long flags;
1251 int ret = 0; 1478 int ret = 0;
1252 struct clk *old_parent; 1479 struct clk_core *old_parent;
1253 1480
1254 old_parent = __clk_set_parent_before(clk, parent); 1481 old_parent = __clk_set_parent_before(clk, parent);
1255 1482
@@ -1263,9 +1490,9 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
1263 clk_enable_unlock(flags); 1490 clk_enable_unlock(flags);
1264 1491
1265 if (clk->prepare_count) { 1492 if (clk->prepare_count) {
1266 clk_disable(clk); 1493 clk_core_disable(clk);
1267 clk_disable(parent); 1494 clk_core_disable(parent);
1268 __clk_unprepare(parent); 1495 clk_core_unprepare(parent);
1269 } 1496 }
1270 return ret; 1497 return ret;
1271 } 1498 }
@@ -1291,9 +1518,10 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
1291 * 1518 *
1292 * Caller must hold prepare_lock. 1519 * Caller must hold prepare_lock.
1293 */ 1520 */
1294static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate) 1521static int __clk_speculate_rates(struct clk_core *clk,
1522 unsigned long parent_rate)
1295{ 1523{
1296 struct clk *child; 1524 struct clk_core *child;
1297 unsigned long new_rate; 1525 unsigned long new_rate;
1298 int ret = NOTIFY_DONE; 1526 int ret = NOTIFY_DONE;
1299 1527
@@ -1319,10 +1547,10 @@ out:
1319 return ret; 1547 return ret;
1320} 1548}
1321 1549
1322static void clk_calc_subtree(struct clk *clk, unsigned long new_rate, 1550static void clk_calc_subtree(struct clk_core *clk, unsigned long new_rate,
1323 struct clk *new_parent, u8 p_index) 1551 struct clk_core *new_parent, u8 p_index)
1324{ 1552{
1325 struct clk *child; 1553 struct clk_core *child;
1326 1554
1327 clk->new_rate = new_rate; 1555 clk->new_rate = new_rate;
1328 clk->new_parent = new_parent; 1556 clk->new_parent = new_parent;
@@ -1342,13 +1570,16 @@ static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
1342 * calculate the new rates returning the topmost clock that has to be 1570 * calculate the new rates returning the topmost clock that has to be
1343 * changed. 1571 * changed.
1344 */ 1572 */
1345static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate) 1573static struct clk_core *clk_calc_new_rates(struct clk_core *clk,
1574 unsigned long rate)
1346{ 1575{
1347 struct clk *top = clk; 1576 struct clk_core *top = clk;
1348 struct clk *old_parent, *parent; 1577 struct clk_core *old_parent, *parent;
1349 struct clk_hw *parent_hw; 1578 struct clk_hw *parent_hw;
1350 unsigned long best_parent_rate = 0; 1579 unsigned long best_parent_rate = 0;
1351 unsigned long new_rate; 1580 unsigned long new_rate;
1581 unsigned long min_rate;
1582 unsigned long max_rate;
1352 int p_index = 0; 1583 int p_index = 0;
1353 1584
1354 /* sanity */ 1585 /* sanity */
@@ -1360,16 +1591,22 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
1360 if (parent) 1591 if (parent)
1361 best_parent_rate = parent->rate; 1592 best_parent_rate = parent->rate;
1362 1593
1594 clk_core_get_boundaries(clk, &min_rate, &max_rate);
1595
1363 /* find the closest rate and parent clk/rate */ 1596 /* find the closest rate and parent clk/rate */
1364 if (clk->ops->determine_rate) { 1597 if (clk->ops->determine_rate) {
1365 parent_hw = parent ? parent->hw : NULL; 1598 parent_hw = parent ? parent->hw : NULL;
1366 new_rate = clk->ops->determine_rate(clk->hw, rate, 1599 new_rate = clk->ops->determine_rate(clk->hw, rate,
1600 min_rate,
1601 max_rate,
1367 &best_parent_rate, 1602 &best_parent_rate,
1368 &parent_hw); 1603 &parent_hw);
1369 parent = parent_hw ? parent_hw->clk : NULL; 1604 parent = parent_hw ? parent_hw->core : NULL;
1370 } else if (clk->ops->round_rate) { 1605 } else if (clk->ops->round_rate) {
1371 new_rate = clk->ops->round_rate(clk->hw, rate, 1606 new_rate = clk->ops->round_rate(clk->hw, rate,
1372 &best_parent_rate); 1607 &best_parent_rate);
1608 if (new_rate < min_rate || new_rate > max_rate)
1609 return NULL;
1373 } else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) { 1610 } else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
1374 /* pass-through clock without adjustable parent */ 1611 /* pass-through clock without adjustable parent */
1375 clk->new_rate = clk->rate; 1612 clk->new_rate = clk->rate;
@@ -1390,7 +1627,7 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
1390 } 1627 }
1391 1628
1392 /* try finding the new parent index */ 1629 /* try finding the new parent index */
1393 if (parent) { 1630 if (parent && clk->num_parents > 1) {
1394 p_index = clk_fetch_parent_index(clk, parent); 1631 p_index = clk_fetch_parent_index(clk, parent);
1395 if (p_index < 0) { 1632 if (p_index < 0) {
1396 pr_debug("%s: clk %s can not be parent of clk %s\n", 1633 pr_debug("%s: clk %s can not be parent of clk %s\n",
@@ -1414,9 +1651,10 @@ out:
1414 * so that in case of an error we can walk down the whole tree again and 1651 * so that in case of an error we can walk down the whole tree again and
1415 * abort the change. 1652 * abort the change.
1416 */ 1653 */
1417static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event) 1654static struct clk_core *clk_propagate_rate_change(struct clk_core *clk,
1655 unsigned long event)
1418{ 1656{
1419 struct clk *child, *tmp_clk, *fail_clk = NULL; 1657 struct clk_core *child, *tmp_clk, *fail_clk = NULL;
1420 int ret = NOTIFY_DONE; 1658 int ret = NOTIFY_DONE;
1421 1659
1422 if (clk->rate == clk->new_rate) 1660 if (clk->rate == clk->new_rate)
@@ -1451,14 +1689,14 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
1451 * walk down a subtree and set the new rates notifying the rate 1689 * walk down a subtree and set the new rates notifying the rate
1452 * change on the way 1690 * change on the way
1453 */ 1691 */
1454static void clk_change_rate(struct clk *clk) 1692static void clk_change_rate(struct clk_core *clk)
1455{ 1693{
1456 struct clk *child; 1694 struct clk_core *child;
1457 struct hlist_node *tmp; 1695 struct hlist_node *tmp;
1458 unsigned long old_rate; 1696 unsigned long old_rate;
1459 unsigned long best_parent_rate = 0; 1697 unsigned long best_parent_rate = 0;
1460 bool skip_set_rate = false; 1698 bool skip_set_rate = false;
1461 struct clk *old_parent; 1699 struct clk_core *old_parent;
1462 1700
1463 old_rate = clk->rate; 1701 old_rate = clk->rate;
1464 1702
@@ -1506,6 +1744,45 @@ static void clk_change_rate(struct clk *clk)
1506 clk_change_rate(clk->new_child); 1744 clk_change_rate(clk->new_child);
1507} 1745}
1508 1746
1747static int clk_core_set_rate_nolock(struct clk_core *clk,
1748 unsigned long req_rate)
1749{
1750 struct clk_core *top, *fail_clk;
1751 unsigned long rate = req_rate;
1752 int ret = 0;
1753
1754 if (!clk)
1755 return 0;
1756
1757 /* bail early if nothing to do */
1758 if (rate == clk_core_get_rate_nolock(clk))
1759 return 0;
1760
1761 if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count)
1762 return -EBUSY;
1763
1764 /* calculate new rates and get the topmost changed clock */
1765 top = clk_calc_new_rates(clk, rate);
1766 if (!top)
1767 return -EINVAL;
1768
1769 /* notify that we are about to change rates */
1770 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
1771 if (fail_clk) {
1772 pr_debug("%s: failed to set %s rate\n", __func__,
1773 fail_clk->name);
1774 clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
1775 return -EBUSY;
1776 }
1777
1778 /* change the rates */
1779 clk_change_rate(top);
1780
1781 clk->req_rate = req_rate;
1782
1783 return ret;
1784}
1785
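The PRE_RATE_CHANGE/ABORT_RATE_CHANGE walk above is what feeds clk rate notifiers. A sketch of a consumer on the receiving end, assuming a hypothetical peripheral that must veto rates above a limit and re-program itself afterwards:

	#include <linux/clk.h>
	#include <linux/notifier.h>

	/* Hypothetical consumer-side notifier callback. */
	static int foo_clk_notify(struct notifier_block *nb,
				  unsigned long event, void *data)
	{
		struct clk_notifier_data *ndata = data;

		switch (event) {
		case PRE_RATE_CHANGE:
			if (ndata->new_rate > 100000000)	/* 100 MHz cap */
				return NOTIFY_BAD;	/* aborts the change */
			return NOTIFY_OK;
		case POST_RATE_CHANGE:
			/* re-program dividers for ndata->new_rate here */
			return NOTIFY_OK;
		case ABORT_RATE_CHANGE:
			/* undo whatever PRE_RATE_CHANGE set up */
			return NOTIFY_OK;
		default:
			return NOTIFY_DONE;
		}
	}

	static struct notifier_block foo_clk_nb = {
		.notifier_call = foo_clk_notify,
	};

	/* e.g. in probe(): clk_notifier_register(clk, &foo_clk_nb); */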
1509/** 1786/**
1510 * clk_set_rate - specify a new rate for clk 1787 * clk_set_rate - specify a new rate for clk
1511 * @clk: the clk whose rate is being changed 1788 * @clk: the clk whose rate is being changed
@@ -1529,8 +1806,7 @@ static void clk_change_rate(struct clk *clk)
1529 */ 1806 */
1530int clk_set_rate(struct clk *clk, unsigned long rate) 1807int clk_set_rate(struct clk *clk, unsigned long rate)
1531{ 1808{
1532 struct clk *top, *fail_clk; 1809 int ret;
1533 int ret = 0;
1534 1810
1535 if (!clk) 1811 if (!clk)
1536 return 0; 1812 return 0;
@@ -1538,41 +1814,81 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
1538 /* prevent racing with updates to the clock topology */ 1814 /* prevent racing with updates to the clock topology */
1539 clk_prepare_lock(); 1815 clk_prepare_lock();
1540 1816
1541 /* bail early if nothing to do */ 1817 ret = clk_core_set_rate_nolock(clk->core, rate);
1542 if (rate == clk_get_rate(clk))
1543 goto out;
1544 1818
1545 if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) { 1819 clk_prepare_unlock();
1546 ret = -EBUSY;
1547 goto out;
1548 }
1549 1820
1550 /* calculate new rates and get the topmost changed clock */ 1821 return ret;
1551 top = clk_calc_new_rates(clk, rate); 1822}
1552 if (!top) { 1823EXPORT_SYMBOL_GPL(clk_set_rate);
1553 ret = -EINVAL;
1554 goto out;
1555 }
1556 1824
1557 /* notify that we are about to change rates */ 1825/**
1558 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE); 1826 * clk_set_rate_range - set a rate range for a clock source
1559 if (fail_clk) { 1827 * @clk: clock source
1560 pr_debug("%s: failed to set %s rate\n", __func__, 1828 * @min: desired minimum clock rate in Hz, inclusive
1561 fail_clk->name); 1829 * @max: desired maximum clock rate in Hz, inclusive
1562 clk_propagate_rate_change(top, ABORT_RATE_CHANGE); 1830 *
1563 ret = -EBUSY; 1831 * Returns success (0) or negative errno.
1564 goto out; 1832 */
1833int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
1834{
1835 int ret = 0;
1836
1837 if (!clk)
1838 return 0;
1839
1840 if (min > max) {
1841 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
1842 __func__, clk->core->name, clk->dev_id, clk->con_id,
1843 min, max);
1844 return -EINVAL;
1565 } 1845 }
1566 1846
1567 /* change the rates */ 1847 clk_prepare_lock();
1568 clk_change_rate(top); 1848
1849 if (min != clk->min_rate || max != clk->max_rate) {
1850 clk->min_rate = min;
1851 clk->max_rate = max;
1852 ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
1853 }
1569 1854
1570out:
1571 clk_prepare_unlock(); 1855 clk_prepare_unlock();
1572 1856
1573 return ret; 1857 return ret;
1574} 1858}
1575EXPORT_SYMBOL_GPL(clk_set_rate); 1859EXPORT_SYMBOL_GPL(clk_set_rate_range);
1860
1861/**
1862 * clk_set_min_rate - set a minimum clock rate for a clock source
1863 * @clk: clock source
1864 * @rate: desired minimum clock rate in Hz, inclusive
1865 *
1866 * Returns success (0) or negative errno.
1867 */
1868int clk_set_min_rate(struct clk *clk, unsigned long rate)
1869{
1870 if (!clk)
1871 return 0;
1872
1873 return clk_set_rate_range(clk, rate, clk->max_rate);
1874}
1875EXPORT_SYMBOL_GPL(clk_set_min_rate);
1876
1877/**
1878 * clk_set_max_rate - set a maximum clock rate for a clock source
1879 * @clk: clock source
1880 * @rate: desired maximum clock rate in Hz, inclusive
1881 *
1882 * Returns success (0) or negative errno.
1883 */
1884int clk_set_max_rate(struct clk *clk, unsigned long rate)
1885{
1886 if (!clk)
1887 return 0;
1888
1889 return clk_set_rate_range(clk, clk->min_rate, rate);
1890}
1891EXPORT_SYMBOL_GPL(clk_set_max_rate);
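Taken together, the three new calls let each per-user handle publish a rate window that the core honours on every subsequent rate change. A consumer-side sketch with hypothetical bounds:

	#include <linux/clk.h>

	/* Hypothetical: keep a bus clock between 200 MHz and 400 MHz. */
	static int foo_constrain_bus(struct clk *clk)
	{
		int ret = clk_set_rate_range(clk, 200000000, 400000000);

		if (ret)
			return ret;

		/* Tighten only the lower bound later on. */
		return clk_set_min_rate(clk, 266000000);
	}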
1576 1892
1577/** 1893/**
1578 * clk_get_parent - return the parent of a clk 1894 * clk_get_parent - return the parent of a clk
@@ -1599,11 +1915,11 @@ EXPORT_SYMBOL_GPL(clk_get_parent);
1599 * 1915 *
1600 * For single-parent clocks without .get_parent, first check to see if the 1916 * For single-parent clocks without .get_parent, first check to see if the
1601 * .parents array exists, and if so use it to avoid an expensive tree 1917 * .parents array exists, and if so use it to avoid an expensive tree
1602 * traversal. If .parents does not exist then walk the tree with __clk_lookup. 1918 * traversal. If .parents does not exist then walk the tree.
1603 */ 1919 */
1604static struct clk *__clk_init_parent(struct clk *clk) 1920static struct clk_core *__clk_init_parent(struct clk_core *clk)
1605{ 1921{
1606 struct clk *ret = NULL; 1922 struct clk_core *ret = NULL;
1607 u8 index; 1923 u8 index;
1608 1924
1609 /* handle the trivial cases */ 1925 /* handle the trivial cases */
@@ -1613,7 +1929,7 @@ static struct clk *__clk_init_parent(struct clk *clk)
1613 1929
1614 if (clk->num_parents == 1) { 1930 if (clk->num_parents == 1) {
1615 if (IS_ERR_OR_NULL(clk->parent)) 1931 if (IS_ERR_OR_NULL(clk->parent))
1616 clk->parent = __clk_lookup(clk->parent_names[0]); 1932 clk->parent = clk_core_lookup(clk->parent_names[0]);
1617 ret = clk->parent; 1933 ret = clk->parent;
1618 goto out; 1934 goto out;
1619 } 1935 }
@@ -1627,8 +1943,8 @@ static struct clk *__clk_init_parent(struct clk *clk)
1627 1943
1628 /* 1944 /*
1629 * Do our best to cache parent clocks in clk->parents. This prevents 1945 * Do our best to cache parent clocks in clk->parents. This prevents
1630 * unnecessary and expensive calls to __clk_lookup. We don't set 1946 * unnecessary and expensive lookups. We don't set clk->parent here;
1631 * clk->parent here; that is done by the calling function 1947 * that is done by the calling function.
1632 */ 1948 */
1633 1949
1634 index = clk->ops->get_parent(clk->hw); 1950 index = clk->ops->get_parent(clk->hw);
@@ -1638,13 +1954,14 @@ static struct clk *__clk_init_parent(struct clk *clk)
1638 kcalloc(clk->num_parents, sizeof(struct clk *), 1954 kcalloc(clk->num_parents, sizeof(struct clk *),
1639 GFP_KERNEL); 1955 GFP_KERNEL);
1640 1956
1641 ret = clk_get_parent_by_index(clk, index); 1957 ret = clk_core_get_parent_by_index(clk, index);
1642 1958
1643out: 1959out:
1644 return ret; 1960 return ret;
1645} 1961}
1646 1962
1647void __clk_reparent(struct clk *clk, struct clk *new_parent) 1963static void clk_core_reparent(struct clk_core *clk,
1964 struct clk_core *new_parent)
1648{ 1965{
1649 clk_reparent(clk, new_parent); 1966 clk_reparent(clk, new_parent);
1650 __clk_recalc_accuracies(clk); 1967 __clk_recalc_accuracies(clk);
@@ -1652,23 +1969,40 @@ void __clk_reparent(struct clk *clk, struct clk *new_parent)
1652} 1969}
1653 1970
1654/** 1971/**
1655 * clk_set_parent - switch the parent of a mux clk 1972 * clk_has_parent - check if a clock is a possible parent for another
1656 * @clk: the mux clk whose input we are switching 1973 * @clk: clock source
1657 * @parent: the new input to clk 1974 * @parent: parent clock source
1658 * 1975 *
1659 * Re-parent clk to use parent as its new input source. If clk is in 1976 * This function can be used in drivers that need to check that a clock can be
1660 * prepared state, the clk will get enabled for the duration of this call. If 1977 * the parent of another without actually changing the parent.
1661 * that's not acceptable for a specific clk (Eg: the consumer can't handle
1662 * that, the reparenting is glitchy in hardware, etc), use the
1663 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
1664 *
1665 * After successfully changing clk's parent clk_set_parent will update the
1666 * clk topology, sysfs topology and propagate rate recalculation via
1667 * __clk_recalc_rates.
1668 * 1978 *
1669 * Returns 0 on success, -EERROR otherwise. 1979 * Returns true if @parent is a possible parent for @clk, false otherwise.
1670 */ 1980 */
1671int clk_set_parent(struct clk *clk, struct clk *parent) 1981bool clk_has_parent(struct clk *clk, struct clk *parent)
1982{
1983 struct clk_core *core, *parent_core;
1984 unsigned int i;
1985
1986 /* NULL clocks should be nops, so return success if either is NULL. */
1987 if (!clk || !parent)
1988 return true;
1989
1990 core = clk->core;
1991 parent_core = parent->core;
1992
1993 /* Optimize for the case where the parent is already the parent. */
1994 if (core->parent == parent_core)
1995 return true;
1996
1997 for (i = 0; i < core->num_parents; i++)
1998 if (strcmp(core->parent_names[i], parent_core->name) == 0)
1999 return true;
2000
2001 return false;
2002}
2003EXPORT_SYMBOL_GPL(clk_has_parent);
2004
2005static int clk_core_set_parent(struct clk_core *clk, struct clk_core *parent)
1672{ 2006{
1673 int ret = 0; 2007 int ret = 0;
1674 int p_index = 0; 2008 int p_index = 0;
@@ -1728,6 +2062,31 @@ out:
1728 2062
1729 return ret; 2063 return ret;
1730} 2064}
2065
2066/**
2067 * clk_set_parent - switch the parent of a mux clk
2068 * @clk: the mux clk whose input we are switching
2069 * @parent: the new input to clk
2070 *
2071 * Re-parent clk to use parent as its new input source. If clk is in
2072 * prepared state, the clk will get enabled for the duration of this call. If
 2073 * that's not acceptable for a specific clk (e.g. the consumer can't handle
2074 * that, the reparenting is glitchy in hardware, etc), use the
2075 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
2076 *
2077 * After successfully changing clk's parent clk_set_parent will update the
2078 * clk topology, sysfs topology and propagate rate recalculation via
2079 * __clk_recalc_rates.
2080 *
 2081 * Returns 0 on success, a negative errno otherwise.
2082 */
2083int clk_set_parent(struct clk *clk, struct clk *parent)
2084{
2085 if (!clk)
2086 return 0;
2087
2088 return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
2089}
1731EXPORT_SYMBOL_GPL(clk_set_parent); 2090EXPORT_SYMBOL_GPL(clk_set_parent);
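clk_has_parent() above pairs naturally with clk_set_parent(): a driver can validate a candidate topology without triggering the reparent machinery. A sketch with hypothetical clocks:

	#include <linux/clk.h>

	/* Hypothetical: prefer the PLL as mux input, else fall back to xtal. */
	static int foo_pick_mux_input(struct clk *mux, struct clk *pll,
				      struct clk *xtal)
	{
		if (clk_has_parent(mux, pll))
			return clk_set_parent(mux, pll);

		return clk_set_parent(mux, xtal);
	}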
1732 2091
1733/** 2092/**
@@ -1764,13 +2123,13 @@ int clk_set_phase(struct clk *clk, int degrees)
1764 2123
1765 clk_prepare_lock(); 2124 clk_prepare_lock();
1766 2125
1767 if (!clk->ops->set_phase) 2126 if (!clk->core->ops->set_phase)
1768 goto out_unlock; 2127 goto out_unlock;
1769 2128
1770 ret = clk->ops->set_phase(clk->hw, degrees); 2129 ret = clk->core->ops->set_phase(clk->core->hw, degrees);
1771 2130
1772 if (!ret) 2131 if (!ret)
1773 clk->phase = degrees; 2132 clk->core->phase = degrees;
1774 2133
1775out_unlock: 2134out_unlock:
1776 clk_prepare_unlock(); 2135 clk_prepare_unlock();
@@ -1778,15 +2137,9 @@ out_unlock:
1778out: 2137out:
1779 return ret; 2138 return ret;
1780} 2139}
2140EXPORT_SYMBOL_GPL(clk_set_phase);
1781 2141
1782/** 2142static int clk_core_get_phase(struct clk_core *clk)
1783 * clk_get_phase - return the phase shift of a clock signal
1784 * @clk: clock signal source
1785 *
1786 * Returns the phase shift of a clock node in degrees, otherwise returns
1787 * -EERROR.
1788 */
1789int clk_get_phase(struct clk *clk)
1790{ 2143{
1791 int ret = 0; 2144 int ret = 0;
1792 2145
@@ -1800,28 +2153,48 @@ int clk_get_phase(struct clk *clk)
1800out: 2153out:
1801 return ret; 2154 return ret;
1802} 2155}
2156EXPORT_SYMBOL_GPL(clk_get_phase);
2157
2158/**
2159 * clk_get_phase - return the phase shift of a clock signal
2160 * @clk: clock signal source
2161 *
2162 * Returns the phase shift of a clock node in degrees, otherwise returns
 2163 * a negative errno.
2164 */
2165int clk_get_phase(struct clk *clk)
2166{
2167 if (!clk)
2168 return 0;
2169
2170 return clk_core_get_phase(clk->core);
2171}
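A small sketch of the phase pair in use, under a hypothetical quadrature requirement; clk_set_phase() caches the value the hardware accepted, which clk_get_phase() then reports back:

	#include <linux/clk.h>

	/* Hypothetical: shift a sampling clock by 90 degrees and verify. */
	static int foo_set_quadrature(struct clk *clk)
	{
		int ret = clk_set_phase(clk, 90);

		if (ret)
			return ret;

		return clk_get_phase(clk) == 90 ? 0 : -EIO;
	}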
1803 2172
1804/** 2173/**
1805 * __clk_init - initialize the data structures in a struct clk 2174 * __clk_init - initialize the data structures in a struct clk
1806 * @dev: device initializing this clk, placeholder for now 2175 * @dev: device initializing this clk, placeholder for now
1807 * @clk: clk being initialized 2176 * @clk: clk being initialized
1808 * 2177 *
1809 * Initializes the lists in struct clk, queries the hardware for the 2178 * Initializes the lists in struct clk_core, queries the hardware for the
1810 * parent and rate and sets them both. 2179 * parent and rate and sets them both.
1811 */ 2180 */
1812int __clk_init(struct device *dev, struct clk *clk) 2181static int __clk_init(struct device *dev, struct clk *clk_user)
1813{ 2182{
1814 int i, ret = 0; 2183 int i, ret = 0;
1815 struct clk *orphan; 2184 struct clk_core *orphan;
1816 struct hlist_node *tmp2; 2185 struct hlist_node *tmp2;
2186 struct clk_core *clk;
2187 unsigned long rate;
1817 2188
1818 if (!clk) 2189 if (!clk_user)
1819 return -EINVAL; 2190 return -EINVAL;
1820 2191
2192 clk = clk_user->core;
2193
1821 clk_prepare_lock(); 2194 clk_prepare_lock();
1822 2195
1823 /* check to see if a clock with this name is already registered */ 2196 /* check to see if a clock with this name is already registered */
1824 if (__clk_lookup(clk->name)) { 2197 if (clk_core_lookup(clk->name)) {
1825 pr_debug("%s: clk %s already initialized\n", 2198 pr_debug("%s: clk %s already initialized\n",
1826 __func__, clk->name); 2199 __func__, clk->name);
1827 ret = -EEXIST; 2200 ret = -EEXIST;
@@ -1873,7 +2246,7 @@ int __clk_init(struct device *dev, struct clk *clk)
1873 clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *), 2246 clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *),
1874 GFP_KERNEL); 2247 GFP_KERNEL);
1875 /* 2248 /*
1876 * __clk_lookup returns NULL for parents that have not been 2249 * clk_core_lookup returns NULL for parents that have not been
1877 * clk_init'd; thus any access to clk->parents[] must check 2250 * clk_init'd; thus any access to clk->parents[] must check
1878 * for a NULL pointer. We can always perform lazy lookups for 2251 * for a NULL pointer. We can always perform lazy lookups for
1879 * missing parents later on. 2252 * missing parents later on.
@@ -1881,7 +2254,7 @@ int __clk_init(struct device *dev, struct clk *clk)
1881 if (clk->parents) 2254 if (clk->parents)
1882 for (i = 0; i < clk->num_parents; i++) 2255 for (i = 0; i < clk->num_parents; i++)
1883 clk->parents[i] = 2256 clk->parents[i] =
1884 __clk_lookup(clk->parent_names[i]); 2257 clk_core_lookup(clk->parent_names[i]);
1885 } 2258 }
1886 2259
1887 clk->parent = __clk_init_parent(clk); 2260 clk->parent = __clk_init_parent(clk);
@@ -1936,12 +2309,13 @@ int __clk_init(struct device *dev, struct clk *clk)
1936 * then rate is set to zero. 2309 * then rate is set to zero.
1937 */ 2310 */
1938 if (clk->ops->recalc_rate) 2311 if (clk->ops->recalc_rate)
1939 clk->rate = clk->ops->recalc_rate(clk->hw, 2312 rate = clk->ops->recalc_rate(clk->hw,
1940 __clk_get_rate(clk->parent)); 2313 clk_core_get_rate_nolock(clk->parent));
1941 else if (clk->parent) 2314 else if (clk->parent)
1942 clk->rate = clk->parent->rate; 2315 rate = clk->parent->rate;
1943 else 2316 else
1944 clk->rate = 0; 2317 rate = 0;
2318 clk->rate = clk->req_rate = rate;
1945 2319
1946 /* 2320 /*
1947 * walk the list of orphan clocks and reparent any that are children of 2321 * walk the list of orphan clocks and reparent any that are children of
@@ -1951,13 +2325,13 @@ int __clk_init(struct device *dev, struct clk *clk)
1951 if (orphan->num_parents && orphan->ops->get_parent) { 2325 if (orphan->num_parents && orphan->ops->get_parent) {
1952 i = orphan->ops->get_parent(orphan->hw); 2326 i = orphan->ops->get_parent(orphan->hw);
1953 if (!strcmp(clk->name, orphan->parent_names[i])) 2327 if (!strcmp(clk->name, orphan->parent_names[i]))
1954 __clk_reparent(orphan, clk); 2328 clk_core_reparent(orphan, clk);
1955 continue; 2329 continue;
1956 } 2330 }
1957 2331
1958 for (i = 0; i < orphan->num_parents; i++) 2332 for (i = 0; i < orphan->num_parents; i++)
1959 if (!strcmp(clk->name, orphan->parent_names[i])) { 2333 if (!strcmp(clk->name, orphan->parent_names[i])) {
1960 __clk_reparent(orphan, clk); 2334 clk_core_reparent(orphan, clk);
1961 break; 2335 break;
1962 } 2336 }
1963 } 2337 }
@@ -1983,47 +2357,39 @@ out:
1983 return ret; 2357 return ret;
1984} 2358}
1985 2359
1986/** 2360struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
1987 * __clk_register - register a clock and return a cookie. 2361 const char *con_id)
1988 *
1989 * Same as clk_register, except that the .clk field inside hw shall point to a
1990 * preallocated (generally statically allocated) struct clk. None of the fields
1991 * of the struct clk need to be initialized.
1992 *
1993 * The data pointed to by .init and .clk field shall NOT be marked as init
1994 * data.
1995 *
1996 * __clk_register is only exposed via clk-private.h and is intended for use with
1997 * very large numbers of clocks that need to be statically initialized. It is
1998 * a layering violation to include clk-private.h from any code which implements
1999 * a clock's .ops; as such any statically initialized clock data MUST be in a
2000 * separate C file from the logic that implements its operations. Returns 0
2001 * on success, otherwise an error code.
2002 */
2003struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
2004{ 2362{
2005 int ret;
2006 struct clk *clk; 2363 struct clk *clk;
2007 2364
2008 clk = hw->clk; 2365 /* This is to allow this function to be chained to others */
2009 clk->name = hw->init->name; 2366 if (!hw || IS_ERR(hw))
2010 clk->ops = hw->init->ops; 2367 return (struct clk *) hw;
2011 clk->hw = hw;
2012 clk->flags = hw->init->flags;
2013 clk->parent_names = hw->init->parent_names;
2014 clk->num_parents = hw->init->num_parents;
2015 if (dev && dev->driver)
2016 clk->owner = dev->driver->owner;
2017 else
2018 clk->owner = NULL;
2019 2368
2020 ret = __clk_init(dev, clk); 2369 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
2021 if (ret) 2370 if (!clk)
2022 return ERR_PTR(ret); 2371 return ERR_PTR(-ENOMEM);
2372
2373 clk->core = hw->core;
2374 clk->dev_id = dev_id;
2375 clk->con_id = con_id;
2376 clk->max_rate = ULONG_MAX;
2377
2378 clk_prepare_lock();
2379 hlist_add_head(&clk->child_node, &hw->core->clks);
2380 clk_prepare_unlock();
2023 2381
2024 return clk; 2382 return clk;
2025} 2383}
2026EXPORT_SYMBOL_GPL(__clk_register); 2384
2385void __clk_free_clk(struct clk *clk)
2386{
2387 clk_prepare_lock();
2388 hlist_del(&clk->child_node);
2389 clk_prepare_unlock();
2390
2391 kfree(clk);
2392}
2027 2393
2028/** 2394/**
2029 * clk_register - allocate a new clock, register it and return an opaque cookie 2395 * clk_register - allocate a new clock, register it and return an opaque cookie
@@ -2039,7 +2405,7 @@ EXPORT_SYMBOL_GPL(__clk_register);
2039struct clk *clk_register(struct device *dev, struct clk_hw *hw) 2405struct clk *clk_register(struct device *dev, struct clk_hw *hw)
2040{ 2406{
2041 int i, ret; 2407 int i, ret;
2042 struct clk *clk; 2408 struct clk_core *clk;
2043 2409
2044 clk = kzalloc(sizeof(*clk), GFP_KERNEL); 2410 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
2045 if (!clk) { 2411 if (!clk) {
@@ -2060,7 +2426,7 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
2060 clk->hw = hw; 2426 clk->hw = hw;
2061 clk->flags = hw->init->flags; 2427 clk->flags = hw->init->flags;
2062 clk->num_parents = hw->init->num_parents; 2428 clk->num_parents = hw->init->num_parents;
2063 hw->clk = clk; 2429 hw->core = clk;
2064 2430
2065 /* allocate local copy in case parent_names is __initdata */ 2431 /* allocate local copy in case parent_names is __initdata */
2066 clk->parent_names = kcalloc(clk->num_parents, sizeof(char *), 2432 clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
@@ -2084,9 +2450,21 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
2084 } 2450 }
2085 } 2451 }
2086 2452
2087 ret = __clk_init(dev, clk); 2453 INIT_HLIST_HEAD(&clk->clks);
2454
2455 hw->clk = __clk_create_clk(hw, NULL, NULL);
2456 if (IS_ERR(hw->clk)) {
2457 pr_err("%s: could not allocate per-user clk\n", __func__);
2458 ret = PTR_ERR(hw->clk);
2459 goto fail_parent_names_copy;
2460 }
2461
2462 ret = __clk_init(dev, hw->clk);
2088 if (!ret) 2463 if (!ret)
2089 return clk; 2464 return hw->clk;
2465
2466 __clk_free_clk(hw->clk);
2467 hw->clk = NULL;
2090 2468
2091fail_parent_names_copy: 2469fail_parent_names_copy:
2092 while (--i >= 0) 2470 while (--i >= 0)
@@ -2107,7 +2485,7 @@ EXPORT_SYMBOL_GPL(clk_register);
2107 */ 2485 */
2108static void __clk_release(struct kref *ref) 2486static void __clk_release(struct kref *ref)
2109{ 2487{
2110 struct clk *clk = container_of(ref, struct clk, ref); 2488 struct clk_core *clk = container_of(ref, struct clk_core, ref);
2111 int i = clk->num_parents; 2489 int i = clk->num_parents;
2112 2490
2113 kfree(clk->parents); 2491 kfree(clk->parents);
@@ -2165,12 +2543,13 @@ void clk_unregister(struct clk *clk)
2165 if (!clk || WARN_ON_ONCE(IS_ERR(clk))) 2543 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
2166 return; 2544 return;
2167 2545
2168 clk_debug_unregister(clk); 2546 clk_debug_unregister(clk->core);
2169 2547
2170 clk_prepare_lock(); 2548 clk_prepare_lock();
2171 2549
2172 if (clk->ops == &clk_nodrv_ops) { 2550 if (clk->core->ops == &clk_nodrv_ops) {
2173 pr_err("%s: unregistered clock: %s\n", __func__, clk->name); 2551 pr_err("%s: unregistered clock: %s\n", __func__,
2552 clk->core->name);
2174 return; 2553 return;
2175 } 2554 }
2176 /* 2555 /*
@@ -2178,24 +2557,25 @@ void clk_unregister(struct clk *clk)
2178 * a reference to this clock. 2557 * a reference to this clock.
2179 */ 2558 */
2180 flags = clk_enable_lock(); 2559 flags = clk_enable_lock();
2181 clk->ops = &clk_nodrv_ops; 2560 clk->core->ops = &clk_nodrv_ops;
2182 clk_enable_unlock(flags); 2561 clk_enable_unlock(flags);
2183 2562
2184 if (!hlist_empty(&clk->children)) { 2563 if (!hlist_empty(&clk->core->children)) {
2185 struct clk *child; 2564 struct clk_core *child;
2186 struct hlist_node *t; 2565 struct hlist_node *t;
2187 2566
2188 /* Reparent all children to the orphan list. */ 2567 /* Reparent all children to the orphan list. */
2189 hlist_for_each_entry_safe(child, t, &clk->children, child_node) 2568 hlist_for_each_entry_safe(child, t, &clk->core->children,
2190 clk_set_parent(child, NULL); 2569 child_node)
2570 clk_core_set_parent(child, NULL);
2191 } 2571 }
2192 2572
2193 hlist_del_init(&clk->child_node); 2573 hlist_del_init(&clk->core->child_node);
2194 2574
2195 if (clk->prepare_count) 2575 if (clk->core->prepare_count)
2196 pr_warn("%s: unregistering prepared clock: %s\n", 2576 pr_warn("%s: unregistering prepared clock: %s\n",
2197 __func__, clk->name); 2577 __func__, clk->core->name);
2198 kref_put(&clk->ref, __clk_release); 2578 kref_put(&clk->core->ref, __clk_release);
2199 2579
2200 clk_prepare_unlock(); 2580 clk_prepare_unlock();
2201} 2581}
@@ -2263,11 +2643,13 @@ EXPORT_SYMBOL_GPL(devm_clk_unregister);
2263 */ 2643 */
2264int __clk_get(struct clk *clk) 2644int __clk_get(struct clk *clk)
2265{ 2645{
2266 if (clk) { 2646 struct clk_core *core = !clk ? NULL : clk->core;
2267 if (!try_module_get(clk->owner)) 2647
2648 if (core) {
2649 if (!try_module_get(core->owner))
2268 return 0; 2650 return 0;
2269 2651
2270 kref_get(&clk->ref); 2652 kref_get(&core->ref);
2271 } 2653 }
2272 return 1; 2654 return 1;
2273} 2655}
@@ -2280,11 +2662,20 @@ void __clk_put(struct clk *clk)
2280 return; 2662 return;
2281 2663
2282 clk_prepare_lock(); 2664 clk_prepare_lock();
2283 owner = clk->owner; 2665
2284 kref_put(&clk->ref, __clk_release); 2666 hlist_del(&clk->child_node);
2667 if (clk->min_rate > clk->core->req_rate ||
2668 clk->max_rate < clk->core->req_rate)
2669 clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
2670
2671 owner = clk->core->owner;
2672 kref_put(&clk->core->ref, __clk_release);
2673
2285 clk_prepare_unlock(); 2674 clk_prepare_unlock();
2286 2675
2287 module_put(owner); 2676 module_put(owner);
2677
2678 kfree(clk);
2288} 2679}
2289 2680
2290/*** clk rate change notifiers ***/ 2681/*** clk rate change notifiers ***/
@@ -2339,7 +2730,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
2339 2730
2340 ret = srcu_notifier_chain_register(&cn->notifier_head, nb); 2731 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
2341 2732
2342 clk->notifier_count++; 2733 clk->core->notifier_count++;
2343 2734
2344out: 2735out:
2345 clk_prepare_unlock(); 2736 clk_prepare_unlock();
@@ -2376,7 +2767,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
2376 if (cn->clk == clk) { 2767 if (cn->clk == clk) {
2377 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); 2768 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
2378 2769
2379 clk->notifier_count--; 2770 clk->core->notifier_count--;
2380 2771
2381 /* XXX the notifier code should handle this better */ 2772 /* XXX the notifier code should handle this better */
2382 if (!cn->notifier_head.head) { 2773 if (!cn->notifier_head.head) {
@@ -2506,7 +2897,8 @@ void of_clk_del_provider(struct device_node *np)
2506} 2897}
2507EXPORT_SYMBOL_GPL(of_clk_del_provider); 2898EXPORT_SYMBOL_GPL(of_clk_del_provider);
2508 2899
2509struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec) 2900struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
2901 const char *dev_id, const char *con_id)
2510{ 2902{
2511 struct of_clk_provider *provider; 2903 struct of_clk_provider *provider;
2512 struct clk *clk = ERR_PTR(-EPROBE_DEFER); 2904 struct clk *clk = ERR_PTR(-EPROBE_DEFER);
@@ -2515,8 +2907,17 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
2515 list_for_each_entry(provider, &of_clk_providers, link) { 2907 list_for_each_entry(provider, &of_clk_providers, link) {
2516 if (provider->node == clkspec->np) 2908 if (provider->node == clkspec->np)
2517 clk = provider->get(clkspec, provider->data); 2909 clk = provider->get(clkspec, provider->data);
2518 if (!IS_ERR(clk)) 2910 if (!IS_ERR(clk)) {
2911 clk = __clk_create_clk(__clk_get_hw(clk), dev_id,
2912 con_id);
2913
2914 if (!IS_ERR(clk) && !__clk_get(clk)) {
2915 __clk_free_clk(clk);
2916 clk = ERR_PTR(-ENOENT);
2917 }
2918
2519 break; 2919 break;
2920 }
2520 } 2921 }
2521 2922
2522 return clk; 2923 return clk;
@@ -2527,7 +2928,7 @@ struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
2527 struct clk *clk; 2928 struct clk *clk;
2528 2929
2529 mutex_lock(&of_clk_mutex); 2930 mutex_lock(&of_clk_mutex);
2530 clk = __of_clk_get_from_provider(clkspec); 2931 clk = __of_clk_get_from_provider(clkspec, NULL, __func__);
2531 mutex_unlock(&of_clk_mutex); 2932 mutex_unlock(&of_clk_mutex);
2532 2933
2533 return clk; 2934 return clk;
diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h
index c798138f023f..ba845408cc3e 100644
--- a/drivers/clk/clk.h
+++ b/drivers/clk/clk.h
@@ -9,9 +9,31 @@
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 11
12struct clk_hw;
13
12#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) 14#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
13struct clk *of_clk_get_by_clkspec(struct of_phandle_args *clkspec); 15struct clk *of_clk_get_by_clkspec(struct of_phandle_args *clkspec);
14struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec); 16struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
17 const char *dev_id, const char *con_id);
15void of_clk_lock(void); 18void of_clk_lock(void);
16void of_clk_unlock(void); 19void of_clk_unlock(void);
17#endif 20#endif
21
22#ifdef CONFIG_COMMON_CLK
23struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
24 const char *con_id);
25void __clk_free_clk(struct clk *clk);
26#else
27/* All these casts to avoid ifdefs in clkdev... */
28static inline struct clk *
29__clk_create_clk(struct clk_hw *hw, const char *dev_id, const char *con_id)
30{
31 return (struct clk *)hw;
32}
33static inline void __clk_free_clk(struct clk *clk) { }
34static inline struct clk_hw *__clk_get_hw(struct clk *clk)
35{
36 return (struct clk_hw *)clk;
37}
38
39#endif
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index da4bda8b7fc7..043fd3633373 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -19,6 +19,7 @@
19#include <linux/mutex.h> 19#include <linux/mutex.h>
20#include <linux/clk.h> 20#include <linux/clk.h>
21#include <linux/clkdev.h> 21#include <linux/clkdev.h>
22#include <linux/clk-provider.h>
22#include <linux/of.h> 23#include <linux/of.h>
23 24
24#include "clk.h" 25#include "clk.h"
@@ -28,6 +29,20 @@ static DEFINE_MUTEX(clocks_mutex);
28 29
29#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) 30#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
30 31
32static struct clk *__of_clk_get_by_clkspec(struct of_phandle_args *clkspec,
33 const char *dev_id, const char *con_id)
34{
35 struct clk *clk;
36
37 if (!clkspec)
38 return ERR_PTR(-EINVAL);
39
40 of_clk_lock();
41 clk = __of_clk_get_from_provider(clkspec, dev_id, con_id);
42 of_clk_unlock();
43 return clk;
44}
45
31/** 46/**
 32 * of_clk_get_by_clkspec() - Lookup a clock from a clock provider 47 * of_clk_get_by_clkspec() - Lookup a clock from a clock provider
33 * @clkspec: pointer to a clock specifier data structure 48 * @clkspec: pointer to a clock specifier data structure
@@ -38,22 +53,11 @@ static DEFINE_MUTEX(clocks_mutex);
38 */ 53 */
39struct clk *of_clk_get_by_clkspec(struct of_phandle_args *clkspec) 54struct clk *of_clk_get_by_clkspec(struct of_phandle_args *clkspec)
40{ 55{
41 struct clk *clk; 56 return __of_clk_get_by_clkspec(clkspec, NULL, __func__);
42
43 if (!clkspec)
44 return ERR_PTR(-EINVAL);
45
46 of_clk_lock();
47 clk = __of_clk_get_from_provider(clkspec);
48
49 if (!IS_ERR(clk) && !__clk_get(clk))
50 clk = ERR_PTR(-ENOENT);
51
52 of_clk_unlock();
53 return clk;
54} 57}
55 58
56struct clk *of_clk_get(struct device_node *np, int index) 59static struct clk *__of_clk_get(struct device_node *np, int index,
60 const char *dev_id, const char *con_id)
57{ 61{
58 struct of_phandle_args clkspec; 62 struct of_phandle_args clkspec;
59 struct clk *clk; 63 struct clk *clk;
@@ -67,22 +71,21 @@ struct clk *of_clk_get(struct device_node *np, int index)
67 if (rc) 71 if (rc)
68 return ERR_PTR(rc); 72 return ERR_PTR(rc);
69 73
70 clk = of_clk_get_by_clkspec(&clkspec); 74 clk = __of_clk_get_by_clkspec(&clkspec, dev_id, con_id);
71 of_node_put(clkspec.np); 75 of_node_put(clkspec.np);
76
72 return clk; 77 return clk;
73} 78}
79
80struct clk *of_clk_get(struct device_node *np, int index)
81{
82 return __of_clk_get(np, index, np->full_name, NULL);
83}
74EXPORT_SYMBOL(of_clk_get); 84EXPORT_SYMBOL(of_clk_get);
75 85
76/** 86static struct clk *__of_clk_get_by_name(struct device_node *np,
77 * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node 87 const char *dev_id,
78 * @np: pointer to clock consumer node 88 const char *name)
79 * @name: name of consumer's clock input, or NULL for the first clock reference
80 *
81 * This function parses the clocks and clock-names properties,
82 * and uses them to look up the struct clk from the registered list of clock
83 * providers.
84 */
85struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
86{ 89{
87 struct clk *clk = ERR_PTR(-ENOENT); 90 struct clk *clk = ERR_PTR(-ENOENT);
88 91
@@ -97,10 +100,10 @@ struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
97 */ 100 */
98 if (name) 101 if (name)
99 index = of_property_match_string(np, "clock-names", name); 102 index = of_property_match_string(np, "clock-names", name);
100 clk = of_clk_get(np, index); 103 clk = __of_clk_get(np, index, dev_id, name);
101 if (!IS_ERR(clk)) 104 if (!IS_ERR(clk)) {
102 break; 105 break;
103 else if (name && index >= 0) { 106 } else if (name && index >= 0) {
104 if (PTR_ERR(clk) != -EPROBE_DEFER) 107 if (PTR_ERR(clk) != -EPROBE_DEFER)
105 pr_err("ERROR: could not get clock %s:%s(%i)\n", 108 pr_err("ERROR: could not get clock %s:%s(%i)\n",
106 np->full_name, name ? name : "", index); 109 np->full_name, name ? name : "", index);
@@ -119,7 +122,33 @@ struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
119 122
120 return clk; 123 return clk;
121} 124}
125
126/**
127 * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
128 * @np: pointer to clock consumer node
129 * @name: name of consumer's clock input, or NULL for the first clock reference
130 *
131 * This function parses the clocks and clock-names properties,
132 * and uses them to look up the struct clk from the registered list of clock
133 * providers.
134 */
135struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
136{
137 if (!np)
138 return ERR_PTR(-ENOENT);
139
140 return __of_clk_get_by_name(np, np->full_name, name);
141}
122EXPORT_SYMBOL(of_clk_get_by_name); 142EXPORT_SYMBOL(of_clk_get_by_name);
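A consumer-side sketch of the DT lookup path, assuming a made-up node with named clocks (neither the node nor the names come from this patch):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/of.h>

	/*
	 * Assumed device tree fragment:
	 *
	 *	uart@f0000000 {
	 *		clocks = <&pcc 12>, <&pcc 13>;
	 *		clock-names = "baud", "bus";
	 *	};
	 */
	static int foo_get_baud_clk(struct device_node *np)
	{
		struct clk *baud = of_clk_get_by_name(np, "baud");

		if (IS_ERR(baud))
			return PTR_ERR(baud);

		/* use it ... then drop the per-user handle */
		clk_put(baud);
		return 0;
	}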
143
144#else /* defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) */
145
146static struct clk *__of_clk_get_by_name(struct device_node *np,
147 const char *dev_id,
148 const char *name)
149{
150 return ERR_PTR(-ENOENT);
151}
123#endif 152#endif
124 153
125/* 154/*
@@ -168,14 +197,28 @@ static struct clk_lookup *clk_find(const char *dev_id, const char *con_id)
168struct clk *clk_get_sys(const char *dev_id, const char *con_id) 197struct clk *clk_get_sys(const char *dev_id, const char *con_id)
169{ 198{
170 struct clk_lookup *cl; 199 struct clk_lookup *cl;
200 struct clk *clk = NULL;
171 201
172 mutex_lock(&clocks_mutex); 202 mutex_lock(&clocks_mutex);
203
173 cl = clk_find(dev_id, con_id); 204 cl = clk_find(dev_id, con_id);
174 if (cl && !__clk_get(cl->clk)) 205 if (!cl)
206 goto out;
207
208 clk = __clk_create_clk(__clk_get_hw(cl->clk), dev_id, con_id);
209 if (IS_ERR(clk))
210 goto out;
211
212 if (!__clk_get(clk)) {
213 __clk_free_clk(clk);
175 cl = NULL; 214 cl = NULL;
215 goto out;
216 }
217
218out:
176 mutex_unlock(&clocks_mutex); 219 mutex_unlock(&clocks_mutex);
177 220
178 return cl ? cl->clk : ERR_PTR(-ENOENT); 221 return cl ? clk : ERR_PTR(-ENOENT);
179} 222}
180EXPORT_SYMBOL(clk_get_sys); 223EXPORT_SYMBOL(clk_get_sys);
181 224
@@ -185,10 +228,8 @@ struct clk *clk_get(struct device *dev, const char *con_id)
185 struct clk *clk; 228 struct clk *clk;
186 229
187 if (dev) { 230 if (dev) {
188 clk = of_clk_get_by_name(dev->of_node, con_id); 231 clk = __of_clk_get_by_name(dev->of_node, dev_id, con_id);
189 if (!IS_ERR(clk)) 232 if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
190 return clk;
191 if (PTR_ERR(clk) == -EPROBE_DEFER)
192 return clk; 233 return clk;
193 } 234 }
194 235
@@ -331,6 +372,7 @@ int clk_register_clkdev(struct clk *clk, const char *con_id,
331 372
332 return 0; 373 return 0;
333} 374}
375EXPORT_SYMBOL(clk_register_clkdev);
334 376
335/** 377/**
336 * clk_register_clkdevs - register a set of clk_lookup for a struct clk 378 * clk_register_clkdevs - register a set of clk_lookup for a struct clk
diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
index 007144f81f50..2e4f6d432beb 100644
--- a/drivers/clk/hisilicon/clk-hi3620.c
+++ b/drivers/clk/hisilicon/clk-hi3620.c
@@ -295,6 +295,8 @@ static unsigned long mmc_clk_recalc_rate(struct clk_hw *hw,
295} 295}
296 296
297static long mmc_clk_determine_rate(struct clk_hw *hw, unsigned long rate, 297static long mmc_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
298 unsigned long min_rate,
299 unsigned long max_rate,
298 unsigned long *best_parent_rate, 300 unsigned long *best_parent_rate,
299 struct clk_hw **best_parent_p) 301 struct clk_hw **best_parent_p)
300{ 302{
diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c
index 48fa53c7ce5e..de6a873175d2 100644
--- a/drivers/clk/mmp/clk-mix.c
+++ b/drivers/clk/mmp/clk-mix.c
@@ -202,6 +202,8 @@ error:
202} 202}
203 203
204static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate, 204static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate,
205 unsigned long min_rate,
206 unsigned long max_rate,
205 unsigned long *best_parent_rate, 207 unsigned long *best_parent_rate,
206 struct clk_hw **best_parent_clk) 208 struct clk_hw **best_parent_clk)
207{ 209{
diff --git a/drivers/clk/pxa/Makefile b/drivers/clk/pxa/Makefile
index 38e915344605..38e37bf6b821 100644
--- a/drivers/clk/pxa/Makefile
+++ b/drivers/clk/pxa/Makefile
@@ -1,3 +1,4 @@
1obj-y += clk-pxa.o 1obj-y += clk-pxa.o
2obj-$(CONFIG_PXA25x) += clk-pxa25x.o 2obj-$(CONFIG_PXA25x) += clk-pxa25x.o
3obj-$(CONFIG_PXA27x) += clk-pxa27x.o 3obj-$(CONFIG_PXA27x) += clk-pxa27x.o
4obj-$(CONFIG_PXA3xx) += clk-pxa3xx.o
diff --git a/drivers/clk/pxa/clk-pxa.c b/drivers/clk/pxa/clk-pxa.c
index 4e834753ab09..29cee9e8d4d9 100644
--- a/drivers/clk/pxa/clk-pxa.c
+++ b/drivers/clk/pxa/clk-pxa.c
@@ -46,7 +46,7 @@ static unsigned long cken_recalc_rate(struct clk_hw *hw,
46 fix = &pclk->lp; 46 fix = &pclk->lp;
47 else 47 else
48 fix = &pclk->hp; 48 fix = &pclk->hp;
49 fix->hw.clk = hw->clk; 49 __clk_hw_set_clk(&fix->hw, hw);
50 return clk_fixed_factor_ops.recalc_rate(&fix->hw, parent_rate); 50 return clk_fixed_factor_ops.recalc_rate(&fix->hw, parent_rate);
51} 51}
52 52
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
new file mode 100644
index 000000000000..39f891bba09a
--- /dev/null
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -0,0 +1,364 @@
1/*
2 * Marvell PXA3xxx family clocks
3 *
4 * Copyright (C) 2014 Robert Jarzmik
5 *
6 * Heavily inspired from former arch/arm/mach-pxa/pxa3xx.c
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * For non-devicetree platforms. Once pxa is fully converted to devicetree, this
13 * should go away.
14 */
15#include <linux/io.h>
16#include <linux/clk.h>
17#include <linux/clk-provider.h>
18#include <linux/clkdev.h>
19#include <linux/of.h>
20#include <mach/smemc.h>
21#include <mach/pxa3xx-regs.h>
22
23#include <dt-bindings/clock/pxa-clock.h>
24#include "clk-pxa.h"
25
26#define KHz 1000
27#define MHz (1000 * 1000)
28
29enum {
30 PXA_CORE_60Mhz = 0,
31 PXA_CORE_RUN,
32 PXA_CORE_TURBO,
33};
34
35enum {
36 PXA_BUS_60Mhz = 0,
37 PXA_BUS_HSS,
38};
39
40/* crystal frequency to HSIO bus frequency multiplier (HSS) */
41static unsigned char hss_mult[4] = { 8, 12, 16, 24 };
42
43/* crystal frequency to static memory controller multiplier (SMCFS) */
44static unsigned int smcfs_mult[8] = { 6, 0, 8, 0, 0, 16, };
45static unsigned int df_clkdiv[4] = { 1, 2, 4, 1 };
46
47static const char * const get_freq_khz[] = {
48 "core", "ring_osc_60mhz", "run", "cpll", "system_bus"
49};
50
51/*
52 * Get the clock frequency as reflected by ACSR and the turbo flag.
 53 * We assume these values have been applied via a frequency change sequence (fcs).
 54 * If info is not zero, we also display the current settings.
55 */
56unsigned int pxa3xx_get_clk_frequency_khz(int info)
57{
58 struct clk *clk;
59 unsigned long clks[5];
60 int i;
61
62 for (i = 0; i < 5; i++) {
63 clk = clk_get(NULL, get_freq_khz[i]);
64 if (IS_ERR(clk)) {
65 clks[i] = 0;
66 } else {
67 clks[i] = clk_get_rate(clk);
68 clk_put(clk);
69 }
70 }
71 if (info) {
72 pr_info("RO Mode clock: %ld.%02ldMHz\n",
73 clks[1] / 1000000, (clks[0] % 1000000) / 10000);
74 pr_info("Run Mode clock: %ld.%02ldMHz\n",
75 clks[2] / 1000000, (clks[1] % 1000000) / 10000);
76 pr_info("Turbo Mode clock: %ld.%02ldMHz\n",
77 clks[3] / 1000000, (clks[2] % 1000000) / 10000);
78 pr_info("System bus clock: %ld.%02ldMHz\n",
79 clks[4] / 1000000, (clks[4] % 1000000) / 10000);
80 }
81 return (unsigned int)clks[0];
82}
83
84static unsigned long clk_pxa3xx_ac97_get_rate(struct clk_hw *hw,
85 unsigned long parent_rate)
86{
87 unsigned long ac97_div, rate;
88
89 ac97_div = AC97_DIV;
90
 91 /* This may lose precision for some rates but won't for the
92 * standard 24.576MHz.
93 */
94 rate = parent_rate / 2;
95 rate /= ((ac97_div >> 12) & 0x7fff);
96 rate *= (ac97_div & 0xfff);
97
98 return rate;
99}
100PARENTS(clk_pxa3xx_ac97) = { "spll_624mhz" };
101RATE_RO_OPS(clk_pxa3xx_ac97, "ac97");
102
103static unsigned long clk_pxa3xx_smemc_get_rate(struct clk_hw *hw,
104 unsigned long parent_rate)
105{
106 unsigned long acsr = ACSR;
107 unsigned long memclkcfg = __raw_readl(MEMCLKCFG);
108
109 return (parent_rate / 48) * smcfs_mult[(acsr >> 23) & 0x7] /
110 df_clkdiv[(memclkcfg >> 16) & 0x3];
111}
112PARENTS(clk_pxa3xx_smemc) = { "spll_624mhz" };
113RATE_RO_OPS(clk_pxa3xx_smemc, "smemc");
114
115static bool pxa3xx_is_ring_osc_forced(void)
116{
117 unsigned long acsr = ACSR;
118
119 return acsr & ACCR_D0CS;
120}
121
122PARENTS(pxa3xx_pbus) = { "ring_osc_60mhz", "spll_624mhz" };
123PARENTS(pxa3xx_32Khz_bus) = { "osc_32_768khz", "osc_32_768khz" };
124PARENTS(pxa3xx_13MHz_bus) = { "osc_13mhz", "osc_13mhz" };
125PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
126PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
127PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
128
129#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
130#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
131 div_hp, bit, is_lp, flags) \
132 PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp, \
133 mult_hp, div_hp, is_lp, CKEN_AB(bit), \
134 (CKEN_ ## bit % 32), flags)
135#define PXA3XX_PBUS_CKEN(dev_id, con_id, bit, mult_lp, div_lp, \
136 mult_hp, div_hp, delay) \
137 PXA3XX_CKEN(dev_id, con_id, pxa3xx_pbus_parents, mult_lp, \
138 div_lp, mult_hp, div_hp, bit, pxa3xx_is_ring_osc_forced, 0)
139#define PXA3XX_CKEN_1RATE(dev_id, con_id, bit, parents) \
140 PXA_CKEN_1RATE(dev_id, con_id, bit, parents, \
141 CKEN_AB(bit), (CKEN_ ## bit % 32), 0)
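
/*
 * Illustration (values from the FFUART entry below): a PBUS clock
 * declared with mult/div 1/4 and 1/42 runs at ring_osc_60mhz / 4 =
 * 15 MHz while the ring oscillator is forced (D0CS set), and at
 * spll_624mhz / 42 ~= 14.857 MHz otherwise; CKEN_AB() picks CKENA for
 * CKEN bits 0..31 and CKENB for the rest, with the bit taken modulo 32.
 */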
142
143static struct desc_clk_cken pxa3xx_clocks[] __initdata = {
144 PXA3XX_PBUS_CKEN("pxa2xx-uart.0", NULL, FFUART, 1, 4, 1, 42, 1),
145 PXA3XX_PBUS_CKEN("pxa2xx-uart.1", NULL, BTUART, 1, 4, 1, 42, 1),
146 PXA3XX_PBUS_CKEN("pxa2xx-uart.2", NULL, STUART, 1, 4, 1, 42, 1),
147 PXA3XX_PBUS_CKEN("pxa2xx-i2c.0", NULL, I2C, 2, 5, 1, 19, 0),
148 PXA3XX_PBUS_CKEN("pxa27x-udc", NULL, UDC, 1, 4, 1, 13, 5),
149 PXA3XX_PBUS_CKEN("pxa27x-ohci", NULL, USBH, 1, 4, 1, 13, 0),
150 PXA3XX_PBUS_CKEN("pxa3xx-u2d", NULL, USB2, 1, 4, 1, 13, 0),
151 PXA3XX_PBUS_CKEN("pxa27x-pwm.0", NULL, PWM0, 1, 6, 1, 48, 0),
152 PXA3XX_PBUS_CKEN("pxa27x-pwm.1", NULL, PWM1, 1, 6, 1, 48, 0),
153 PXA3XX_PBUS_CKEN("pxa2xx-mci.0", NULL, MMC1, 1, 4, 1, 24, 0),
154 PXA3XX_PBUS_CKEN("pxa2xx-mci.1", NULL, MMC2, 1, 4, 1, 24, 0),
155 PXA3XX_PBUS_CKEN("pxa2xx-mci.2", NULL, MMC3, 1, 4, 1, 24, 0),
156
157 PXA3XX_CKEN_1RATE("pxa27x-keypad", NULL, KEYPAD,
158 pxa3xx_32Khz_bus_parents),
159 PXA3XX_CKEN_1RATE("pxa3xx-ssp.0", NULL, SSP1, pxa3xx_13MHz_bus_parents),
160 PXA3XX_CKEN_1RATE("pxa3xx-ssp.1", NULL, SSP2, pxa3xx_13MHz_bus_parents),
161 PXA3XX_CKEN_1RATE("pxa3xx-ssp.2", NULL, SSP3, pxa3xx_13MHz_bus_parents),
162 PXA3XX_CKEN_1RATE("pxa3xx-ssp.3", NULL, SSP4, pxa3xx_13MHz_bus_parents),
163
164 PXA3XX_CKEN(NULL, "AC97CLK", pxa3xx_ac97_bus_parents, 1, 4, 1, 1, AC97,
165 pxa3xx_is_ring_osc_forced, 0),
166 PXA3XX_CKEN(NULL, "CAMCLK", pxa3xx_sbus_parents, 1, 2, 1, 1, CAMERA,
167 pxa3xx_is_ring_osc_forced, 0),
168 PXA3XX_CKEN("pxa2xx-fb", NULL, pxa3xx_sbus_parents, 1, 1, 1, 1, LCD,
169 pxa3xx_is_ring_osc_forced, 0),
170 PXA3XX_CKEN("pxa2xx-pcmcia", NULL, pxa3xx_smemcbus_parents, 1, 4,
171 1, 1, SMC, pxa3xx_is_ring_osc_forced, CLK_IGNORE_UNUSED),
172};
173
174static struct desc_clk_cken pxa300_310_clocks[] __initdata = {
175
176 PXA3XX_PBUS_CKEN("pxa3xx-gcu", NULL, PXA300_GCU, 1, 1, 1, 1, 0),
177 PXA3XX_PBUS_CKEN("pxa3xx-nand", NULL, NAND, 1, 2, 1, 4, 0),
178 PXA3XX_CKEN_1RATE("pxa3xx-gpio", NULL, GPIO, pxa3xx_13MHz_bus_parents),
179};
180
181static struct desc_clk_cken pxa320_clocks[] __initdata = {
182 PXA3XX_PBUS_CKEN("pxa3xx-nand", NULL, NAND, 1, 2, 1, 6, 0),
183 PXA3XX_PBUS_CKEN("pxa3xx-gcu", NULL, PXA320_GCU, 1, 1, 1, 1, 0),
184 PXA3XX_CKEN_1RATE("pxa3xx-gpio", NULL, GPIO, pxa3xx_13MHz_bus_parents),
185};
186
187static struct desc_clk_cken pxa93x_clocks[] __initdata = {
188
189 PXA3XX_PBUS_CKEN("pxa3xx-gcu", NULL, PXA300_GCU, 1, 1, 1, 1, 0),
190 PXA3XX_PBUS_CKEN("pxa3xx-nand", NULL, NAND, 1, 2, 1, 4, 0),
191 PXA3XX_CKEN_1RATE("pxa93x-gpio", NULL, GPIO, pxa3xx_13MHz_bus_parents),
192};
193
194static unsigned long clk_pxa3xx_system_bus_get_rate(struct clk_hw *hw,
195 unsigned long parent_rate)
196{
197 unsigned long acsr = ACSR;
198 unsigned int hss = (acsr >> 14) & 0x3;
199
200 if (pxa3xx_is_ring_osc_forced())
201 return parent_rate;
202 return parent_rate / 48 * hss_mult[hss];
203}
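
/*
 * Worked example: spll_624mhz / 48 = 13 MHz, so the HSS field selects
 * 104, 156, 208 or 312 MHz (hss_mult = 8/12/16/24); with the ring
 * oscillator forced, the bus simply follows its 60 MHz parent.
 */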
204
205static u8 clk_pxa3xx_system_bus_get_parent(struct clk_hw *hw)
206{
207 if (pxa3xx_is_ring_osc_forced())
208 return PXA_BUS_60Mhz;
209 else
210 return PXA_BUS_HSS;
211}
212
213PARENTS(clk_pxa3xx_system_bus) = { "ring_osc_60mhz", "spll_624mhz" };
214MUX_RO_RATE_RO_OPS(clk_pxa3xx_system_bus, "system_bus");
215
216static unsigned long clk_pxa3xx_core_get_rate(struct clk_hw *hw,
217 unsigned long parent_rate)
218{
219 return parent_rate;
220}
221
222static u8 clk_pxa3xx_core_get_parent(struct clk_hw *hw)
223{
224 unsigned long xclkcfg;
225 unsigned int t;
226
227 if (pxa3xx_is_ring_osc_forced())
228 return PXA_CORE_60Mhz;
229
230 /* Read XCLKCFG register turbo bit */
231 __asm__ __volatile__("mrc\tp14, 0, %0, c6, c0, 0" : "=r"(xclkcfg));
232 t = xclkcfg & 0x1;
233
234 if (t)
235 return PXA_CORE_TURBO;
236 return PXA_CORE_RUN;
237}
238PARENTS(clk_pxa3xx_core) = { "ring_osc_60mhz", "run", "cpll" };
239MUX_RO_RATE_RO_OPS(clk_pxa3xx_core, "core");
240
241static unsigned long clk_pxa3xx_run_get_rate(struct clk_hw *hw,
242 unsigned long parent_rate)
243{
244 unsigned long acsr = ACSR;
245 unsigned int xn = (acsr & ACCR_XN_MASK) >> 8;
246 unsigned int t, xclkcfg;
247
248 /* Read XCLKCFG register turbo bit */
249 __asm__ __volatile__("mrc\tp14, 0, %0, c6, c0, 0" : "=r"(xclkcfg));
250 t = xclkcfg & 0x1;
251
252 return t ? (parent_rate / xn) * 2 : parent_rate;
253}
254PARENTS(clk_pxa3xx_run) = { "cpll" };
255RATE_RO_OPS(clk_pxa3xx_run, "run");
256
257static unsigned long clk_pxa3xx_cpll_get_rate(struct clk_hw *hw,
258 unsigned long parent_rate)
259{
260 unsigned long acsr = ACSR;
261 unsigned int xn = (acsr & ACCR_XN_MASK) >> 8;
262 unsigned int xl = acsr & ACCR_XL_MASK;
263 unsigned int t, xclkcfg;
264
265 /* Read XCLKCFG register turbo bit */
266 __asm__ __volatile__("mrc\tp14, 0, %0, c6, c0, 0" : "=r"(xclkcfg));
267 t = xclkcfg & 0x1;
268
269 pr_info("RJK: parent_rate=%lu, xl=%u, xn=%u\n", parent_rate, xl, xn);
270 return t ? parent_rate * xl * xn : parent_rate * xl;
271}
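
/*
 * Worked example (hypothetical ACSR contents): with xl = 16 and the
 * turbo bit clear, cpll = 13 MHz * 16 = 208 MHz; with xn = 2 and turbo
 * set, it would report 416 MHz.
 */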
272PARENTS(clk_pxa3xx_cpll) = { "osc_13mhz" };
273RATE_RO_OPS(clk_pxa3xx_cpll, "cpll");
274
275static void __init pxa3xx_register_core(void)
276{
277 clk_register_clk_pxa3xx_cpll();
278 clk_register_clk_pxa3xx_run();
279
280 clkdev_pxa_register(CLK_CORE, "core", NULL,
281 clk_register_clk_pxa3xx_core());
282}
283
284static void __init pxa3xx_register_plls(void)
285{
286 clk_register_fixed_rate(NULL, "osc_13mhz", NULL,
287 CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
288 13 * MHz);
289 clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
290 CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
291 32768);
292 clk_register_fixed_rate(NULL, "ring_osc_120mhz", NULL,
293 CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
294 120 * MHz);
295 clk_register_fixed_rate(NULL, "clk_dummy", NULL, CLK_IS_ROOT, 0);
296 clk_register_fixed_factor(NULL, "spll_624mhz", "osc_13mhz", 0, 48, 1);
297 clk_register_fixed_factor(NULL, "ring_osc_60mhz", "ring_osc_120mhz",
298 0, 1, 2);
299}
300
301#define DUMMY_CLK(_con_id, _dev_id, _parent) \
302 { .con_id = _con_id, .dev_id = _dev_id, .parent = _parent }
303struct dummy_clk {
304 const char *con_id;
305 const char *dev_id;
306 const char *parent;
307};
308static struct dummy_clk dummy_clks[] __initdata = {
309 DUMMY_CLK(NULL, "pxa93x-gpio", "osc_13mhz"),
310 DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
311 DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
312 DUMMY_CLK(NULL, "pxa3xx-pwri2c.1", "osc_13mhz"),
313};
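
/*
 * Each entry becomes a 1:1 fixed-factor copy of its parent, registered
 * only so that existing clk_get() consumers resolve to a live clock.
 */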
314
315static void __init pxa3xx_dummy_clocks_init(void)
316{
317 struct clk *clk;
318 struct dummy_clk *d;
319 const char *name;
320 int i;
321
322 for (i = 0; i < ARRAY_SIZE(dummy_clks); i++) {
323 d = &dummy_clks[i];
324 name = d->dev_id ? d->dev_id : d->con_id;
325 clk = clk_register_fixed_factor(NULL, name, d->parent, 0, 1, 1);
326 clk_register_clkdev(clk, d->con_id, d->dev_id);
327 }
328}
329
330static void __init pxa3xx_base_clocks_init(void)
331{
332 pxa3xx_register_plls();
333 pxa3xx_register_core();
334 clk_register_clk_pxa3xx_system_bus();
335 clk_register_clk_pxa3xx_ac97();
336 clk_register_clk_pxa3xx_smemc();
337 clk_register_gate(NULL, "CLK_POUT", "osc_13mhz", 0,
338 (void __iomem *)&OSCC, 11, 0, NULL);
339}
340
341int __init pxa3xx_clocks_init(void)
342{
343 int ret;
344
345 pxa3xx_base_clocks_init();
346 pxa3xx_dummy_clocks_init();
347 ret = clk_pxa_cken_init(pxa3xx_clocks, ARRAY_SIZE(pxa3xx_clocks));
348 if (ret)
349 return ret;
350 if (cpu_is_pxa320())
351 return clk_pxa_cken_init(pxa320_clocks,
352 ARRAY_SIZE(pxa320_clocks));
353 if (cpu_is_pxa300() || cpu_is_pxa310())
354 return clk_pxa_cken_init(pxa300_310_clocks,
355 ARRAY_SIZE(pxa300_310_clocks));
356 return clk_pxa_cken_init(pxa93x_clocks, ARRAY_SIZE(pxa93x_clocks));
357}
358
359static void __init pxa3xx_dt_clocks_init(struct device_node *np)
360{
361 pxa3xx_clocks_init();
362 clk_pxa_dt_common_init(np);
363}
364CLK_OF_DECLARE(pxa_clks, "marvell,pxa300-clocks", pxa3xx_dt_clocks_init);
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 1107351ed346..0d7ab52b7ab0 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -29,6 +29,15 @@ config IPQ_GCC_806X
29 Say Y if you want to use peripheral devices such as UART, SPI, 29 Say Y if you want to use peripheral devices such as UART, SPI,
30 i2c, USB, SD/eMMC, etc. 30 i2c, USB, SD/eMMC, etc.
31 31
32config IPQ_LCC_806X
33 tristate "IPQ806x LPASS Clock Controller"
34 select IPQ_GCC_806X
35 depends on COMMON_CLK_QCOM
36 help
37 Support for the LPASS clock controller on ipq806x devices.
38 Say Y if you want to use audio devices such as i2s, pcm,
39 S/PDIF, etc.
40
32config MSM_GCC_8660 41config MSM_GCC_8660
33 tristate "MSM8660 Global Clock Controller" 42 tristate "MSM8660 Global Clock Controller"
34 depends on COMMON_CLK_QCOM 43 depends on COMMON_CLK_QCOM
@@ -45,6 +54,15 @@ config MSM_GCC_8960
45 Say Y if you want to use peripheral devices such as UART, SPI, 54 Say Y if you want to use peripheral devices such as UART, SPI,
46 i2c, USB, SD/eMMC, SATA, PCIe, etc. 55 i2c, USB, SD/eMMC, SATA, PCIe, etc.
47 56
57config MSM_LCC_8960
58 tristate "APQ8064/MSM8960 LPASS Clock Controller"
59 select MSM_GCC_8960
60 depends on COMMON_CLK_QCOM
61 help
62 Support for the LPASS clock controller on apq8064/msm8960 devices.
63 Say Y if you want to use audio devices such as i2s, pcm,
64 SLIMBus, etc.
65
48config MSM_MMCC_8960 66config MSM_MMCC_8960
49 tristate "MSM8960 Multimedia Clock Controller" 67 tristate "MSM8960 Multimedia Clock Controller"
50 select MSM_GCC_8960 68 select MSM_GCC_8960
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 783cfb24faa4..617826469595 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -6,13 +6,17 @@ clk-qcom-y += clk-pll.o
6clk-qcom-y += clk-rcg.o 6clk-qcom-y += clk-rcg.o
7clk-qcom-y += clk-rcg2.o 7clk-qcom-y += clk-rcg2.o
8clk-qcom-y += clk-branch.o 8clk-qcom-y += clk-branch.o
9clk-qcom-y += clk-regmap-divider.o
10clk-qcom-y += clk-regmap-mux.o
9clk-qcom-y += reset.o 11clk-qcom-y += reset.o
10 12
11obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o 13obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
12obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o 14obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
13obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o 15obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
16obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
14obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o 17obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
15obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o 18obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
19obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
16obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o 20obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
17obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o 21obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
18obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o 22obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c
index 60873a7f45d9..b4325f65a1bf 100644
--- a/drivers/clk/qcom/clk-pll.c
+++ b/drivers/clk/qcom/clk-pll.c
@@ -141,6 +141,7 @@ struct pll_freq_tbl *find_freq(const struct pll_freq_tbl *f, unsigned long rate)
141 141
142static long 142static long
143clk_pll_determine_rate(struct clk_hw *hw, unsigned long rate, 143clk_pll_determine_rate(struct clk_hw *hw, unsigned long rate,
144 unsigned long min_rate, unsigned long max_rate,
144 unsigned long *p_rate, struct clk_hw **p) 145 unsigned long *p_rate, struct clk_hw **p)
145{ 146{
146 struct clk_pll *pll = to_clk_pll(hw); 147 struct clk_pll *pll = to_clk_pll(hw);
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
index 0b93972c8807..0039bd7d3965 100644
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -368,6 +368,7 @@ clk_dyn_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
368 368
369static long _freq_tbl_determine_rate(struct clk_hw *hw, 369static long _freq_tbl_determine_rate(struct clk_hw *hw,
370 const struct freq_tbl *f, unsigned long rate, 370 const struct freq_tbl *f, unsigned long rate,
371 unsigned long min_rate, unsigned long max_rate,
371 unsigned long *p_rate, struct clk_hw **p_hw) 372 unsigned long *p_rate, struct clk_hw **p_hw)
372{ 373{
373 unsigned long clk_flags; 374 unsigned long clk_flags;
@@ -397,22 +398,27 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
397} 398}
398 399
399static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate, 400static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
401 unsigned long min_rate, unsigned long max_rate,
400 unsigned long *p_rate, struct clk_hw **p) 402 unsigned long *p_rate, struct clk_hw **p)
401{ 403{
402 struct clk_rcg *rcg = to_clk_rcg(hw); 404 struct clk_rcg *rcg = to_clk_rcg(hw);
403 405
404 return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p); 406 return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate,
407 max_rate, p_rate, p);
405} 408}
406 409
407static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate, 410static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
411 unsigned long min_rate, unsigned long max_rate,
408 unsigned long *p_rate, struct clk_hw **p) 412 unsigned long *p_rate, struct clk_hw **p)
409{ 413{
410 struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw); 414 struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
411 415
412 return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p); 416 return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate,
417 max_rate, p_rate, p);
413} 418}
414 419
415static long clk_rcg_bypass_determine_rate(struct clk_hw *hw, unsigned long rate, 420static long clk_rcg_bypass_determine_rate(struct clk_hw *hw, unsigned long rate,
421 unsigned long min_rate, unsigned long max_rate,
416 unsigned long *p_rate, struct clk_hw **p_hw) 422 unsigned long *p_rate, struct clk_hw **p_hw)
417{ 423{
418 struct clk_rcg *rcg = to_clk_rcg(hw); 424 struct clk_rcg *rcg = to_clk_rcg(hw);
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 08b8b3729f53..742acfa18d63 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -208,6 +208,7 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
208} 208}
209 209
210static long clk_rcg2_determine_rate(struct clk_hw *hw, unsigned long rate, 210static long clk_rcg2_determine_rate(struct clk_hw *hw, unsigned long rate,
211 unsigned long min_rate, unsigned long max_rate,
211 unsigned long *p_rate, struct clk_hw **p) 212 unsigned long *p_rate, struct clk_hw **p)
212{ 213{
213 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 214 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -361,6 +362,8 @@ static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
361} 362}
362 363
363static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate, 364static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
365 unsigned long min_rate,
366 unsigned long max_rate,
364 unsigned long *p_rate, struct clk_hw **p) 367 unsigned long *p_rate, struct clk_hw **p)
365{ 368{
366 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 369 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -412,6 +415,7 @@ const struct clk_ops clk_edp_pixel_ops = {
412EXPORT_SYMBOL_GPL(clk_edp_pixel_ops); 415EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
413 416
414static long clk_byte_determine_rate(struct clk_hw *hw, unsigned long rate, 417static long clk_byte_determine_rate(struct clk_hw *hw, unsigned long rate,
418 unsigned long min_rate, unsigned long max_rate,
415 unsigned long *p_rate, struct clk_hw **p_hw) 419 unsigned long *p_rate, struct clk_hw **p_hw)
416{ 420{
417 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 421 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -476,6 +480,8 @@ static const struct frac_entry frac_table_pixel[] = {
476}; 480};
477 481
478static long clk_pixel_determine_rate(struct clk_hw *hw, unsigned long rate, 482static long clk_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
483 unsigned long min_rate,
484 unsigned long max_rate,
479 unsigned long *p_rate, struct clk_hw **p) 485 unsigned long *p_rate, struct clk_hw **p)
480{ 486{
481 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 487 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
diff --git a/drivers/clk/qcom/clk-regmap-divider.c b/drivers/clk/qcom/clk-regmap-divider.c
new file mode 100644
index 000000000000..53484912301e
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-divider.c
@@ -0,0 +1,70 @@
1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/kernel.h>
15#include <linux/bitops.h>
16#include <linux/regmap.h>
17#include <linux/export.h>
18
19#include "clk-regmap-divider.h"
20
21static inline struct clk_regmap_div *to_clk_regmap_div(struct clk_hw *hw)
22{
23 return container_of(to_clk_regmap(hw), struct clk_regmap_div, clkr);
24}
25
26static long div_round_rate(struct clk_hw *hw, unsigned long rate,
27 unsigned long *prate)
28{
29 struct clk_regmap_div *divider = to_clk_regmap_div(hw);
30
31 return divider_round_rate(hw, rate, prate, NULL, divider->width,
32 CLK_DIVIDER_ROUND_CLOSEST);
33}
34
35static int div_set_rate(struct clk_hw *hw, unsigned long rate,
36 unsigned long parent_rate)
37{
38 struct clk_regmap_div *divider = to_clk_regmap_div(hw);
39 struct clk_regmap *clkr = &divider->clkr;
40 u32 div;
41
42 div = divider_get_val(rate, parent_rate, NULL, divider->width,
43 CLK_DIVIDER_ROUND_CLOSEST);
44
45 return regmap_update_bits(clkr->regmap, divider->reg,
46 (BIT(divider->width) - 1) << divider->shift,
47 div << divider->shift);
48}
49
50static unsigned long div_recalc_rate(struct clk_hw *hw,
51 unsigned long parent_rate)
52{
53 struct clk_regmap_div *divider = to_clk_regmap_div(hw);
54 struct clk_regmap *clkr = &divider->clkr;
55 u32 div;
56
57 regmap_read(clkr->regmap, divider->reg, &div);
58 div >>= divider->shift;
59 div &= BIT(divider->width) - 1;
60
61 return divider_recalc_rate(hw, parent_rate, div, NULL,
62 CLK_DIVIDER_ROUND_CLOSEST);
63}
64
65const struct clk_ops clk_regmap_div_ops = {
66 .round_rate = div_round_rate,
67 .set_rate = div_set_rate,
68 .recalc_rate = div_recalc_rate,
69};
70EXPORT_SYMBOL_GPL(clk_regmap_div_ops);
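
/*
 * Usage sketch (hypothetical names, mirroring mi2s_div_clk in
 * lcc-ipq806x.c below):
 *
 *	static struct clk_regmap_div foo_div = {
 *		.reg = 0x48, .shift = 10, .width = 4,
 *		.clkr.hw.init = &(struct clk_init_data){
 *			.name = "foo_div",
 *			.parent_names = (const char *[]){ "foo_src" },
 *			.num_parents = 1,
 *			.ops = &clk_regmap_div_ops,
 *		},
 *	};
 */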
diff --git a/drivers/clk/qcom/clk-regmap-divider.h b/drivers/clk/qcom/clk-regmap-divider.h
new file mode 100644
index 000000000000..fc4492e3a827
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-divider.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef __QCOM_CLK_REGMAP_DIVIDER_H__
15#define __QCOM_CLK_REGMAP_DIVIDER_H__
16
17#include <linux/clk-provider.h>
18#include "clk-regmap.h"
19
20struct clk_regmap_div {
21 u32 reg;
22 u32 shift;
23 u32 width;
24 struct clk_regmap clkr;
25};
26
27extern const struct clk_ops clk_regmap_div_ops;
28
29#endif
diff --git a/drivers/clk/qcom/clk-regmap-mux.c b/drivers/clk/qcom/clk-regmap-mux.c
new file mode 100644
index 000000000000..cae3071f384c
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-mux.c
@@ -0,0 +1,59 @@
1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/kernel.h>
15#include <linux/bitops.h>
16#include <linux/regmap.h>
17#include <linux/export.h>
18
19#include "clk-regmap-mux.h"
20
21static inline struct clk_regmap_mux *to_clk_regmap_mux(struct clk_hw *hw)
22{
23 return container_of(to_clk_regmap(hw), struct clk_regmap_mux, clkr);
24}
25
26static u8 mux_get_parent(struct clk_hw *hw)
27{
28 struct clk_regmap_mux *mux = to_clk_regmap_mux(hw);
29 struct clk_regmap *clkr = to_clk_regmap(hw);
30 unsigned int mask = GENMASK(mux->width - 1, 0);
31 unsigned int val;
32
33 regmap_read(clkr->regmap, mux->reg, &val);
34
35 val >>= mux->shift;
36 val &= mask;
37
38 return val;
39}
40
41static int mux_set_parent(struct clk_hw *hw, u8 index)
42{
43 struct clk_regmap_mux *mux = to_clk_regmap_mux(hw);
44 struct clk_regmap *clkr = to_clk_regmap(hw);
45 unsigned int mask = GENMASK(mux->width + mux->shift - 1, mux->shift);
46 unsigned int val;
47
48 val = index;
49 val <<= mux->shift;
50
51 return regmap_update_bits(clkr->regmap, mux->reg, mask, val);
52}
53
54const struct clk_ops clk_regmap_mux_closest_ops = {
55 .get_parent = mux_get_parent,
56 .set_parent = mux_set_parent,
57 .determine_rate = __clk_mux_determine_rate_closest,
58};
59EXPORT_SYMBOL_GPL(clk_regmap_mux_closest_ops);
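
/*
 * Usage sketch: instances fill in .reg/.shift/.width and set
 * .ops = &clk_regmap_mux_closest_ops; rate requests are then steered to
 * whichever parent gets closest, via __clk_mux_determine_rate_closest().
 * See mi2s_bit_clk in lcc-ipq806x.c below for a real instance.
 */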
diff --git a/drivers/clk/qcom/clk-regmap-mux.h b/drivers/clk/qcom/clk-regmap-mux.h
new file mode 100644
index 000000000000..5cec76154fda
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-mux.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef __QCOM_CLK_REGMAP_MUX_H__
15#define __QCOM_CLK_REGMAP_MUX_H__
16
17#include <linux/clk-provider.h>
18#include "clk-regmap.h"
19
20struct clk_regmap_mux {
21 u32 reg;
22 u32 shift;
23 u32 width;
24 struct clk_regmap clkr;
25};
26
27extern const struct clk_ops clk_regmap_mux_closest_ops;
28
29#endif
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index afed5eb0691e..cbdc31dea7f4 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -75,6 +75,17 @@ static struct clk_pll pll3 = {
75 }, 75 },
76}; 76};
77 77
78static struct clk_regmap pll4_vote = {
79 .enable_reg = 0x34c0,
80 .enable_mask = BIT(4),
81 .hw.init = &(struct clk_init_data){
82 .name = "pll4_vote",
83 .parent_names = (const char *[]){ "pll4" },
84 .num_parents = 1,
85 .ops = &clk_pll_vote_ops,
86 },
87};
88
78static struct clk_pll pll8 = { 89static struct clk_pll pll8 = {
79 .l_reg = 0x3144, 90 .l_reg = 0x3144,
80 .m_reg = 0x3148, 91 .m_reg = 0x3148,
@@ -2163,6 +2174,7 @@ static struct clk_regmap *gcc_ipq806x_clks[] = {
2163 [PLL0] = &pll0.clkr, 2174 [PLL0] = &pll0.clkr,
2164 [PLL0_VOTE] = &pll0_vote, 2175 [PLL0_VOTE] = &pll0_vote,
2165 [PLL3] = &pll3.clkr, 2176 [PLL3] = &pll3.clkr,
2177 [PLL4_VOTE] = &pll4_vote,
2166 [PLL8] = &pll8.clkr, 2178 [PLL8] = &pll8.clkr,
2167 [PLL8_VOTE] = &pll8_vote, 2179 [PLL8_VOTE] = &pll8_vote,
2168 [PLL14] = &pll14.clkr, 2180 [PLL14] = &pll14.clkr,
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
new file mode 100644
index 000000000000..121ffde25dc3
--- /dev/null
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -0,0 +1,473 @@
1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/kernel.h>
15#include <linux/bitops.h>
16#include <linux/err.h>
17#include <linux/platform_device.h>
18#include <linux/module.h>
19#include <linux/of.h>
20#include <linux/of_device.h>
21#include <linux/clk-provider.h>
22#include <linux/regmap.h>
23
24#include <dt-bindings/clock/qcom,lcc-ipq806x.h>
25
26#include "common.h"
27#include "clk-regmap.h"
28#include "clk-pll.h"
29#include "clk-rcg.h"
30#include "clk-branch.h"
31#include "clk-regmap-divider.h"
32#include "clk-regmap-mux.h"
33
34static struct clk_pll pll4 = {
35 .l_reg = 0x4,
36 .m_reg = 0x8,
37 .n_reg = 0xc,
38 .config_reg = 0x14,
39 .mode_reg = 0x0,
40 .status_reg = 0x18,
41 .status_bit = 16,
42 .clkr.hw.init = &(struct clk_init_data){
43 .name = "pll4",
44 .parent_names = (const char *[]){ "pxo" },
45 .num_parents = 1,
46 .ops = &clk_pll_ops,
47 },
48};
49
50static const struct pll_config pll4_config = {
51 .l = 0xf,
52 .m = 0x91,
53 .n = 0xc7,
54 .vco_val = 0x0,
55 .vco_mask = BIT(17) | BIT(16),
56 .pre_div_val = 0x0,
57 .pre_div_mask = BIT(19),
58 .post_div_val = 0x0,
59 .post_div_mask = BIT(21) | BIT(20),
60 .mn_ena_mask = BIT(22),
61 .main_output_mask = BIT(23),
62};
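
/*
 * Assuming the 25 MHz pxo typical of ipq806x boards, this programs PLL4
 * to 25 MHz * (0xf + 0x91/0xc7) = 25 MHz * (15 + 145/199) ~= 393.216 MHz,
 * the rate the frequency tables below are computed against.
 */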
63
64#define P_PXO 0
65#define P_PLL4 1
66
67static const u8 lcc_pxo_pll4_map[] = {
68 [P_PXO] = 0,
69 [P_PLL4] = 2,
70};
71
72static const char *lcc_pxo_pll4[] = {
73 "pxo",
74 "pll4_vote",
75};
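
/*
 * The hardware source-select encoding is 0 for pxo and 2 for pll4_vote;
 * the freq_tbl entries below index this map through P_PXO/P_PLL4.
 */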
76
77static struct freq_tbl clk_tbl_aif_mi2s[] = {
78 { 1024000, P_PLL4, 4, 1, 96 },
79 { 1411200, P_PLL4, 4, 2, 139 },
80 { 1536000, P_PLL4, 4, 1, 64 },
81 { 2048000, P_PLL4, 4, 1, 48 },
82 { 2116800, P_PLL4, 4, 2, 93 },
83 { 2304000, P_PLL4, 4, 2, 85 },
84 { 2822400, P_PLL4, 4, 6, 209 },
85 { 3072000, P_PLL4, 4, 1, 32 },
86 { 3175200, P_PLL4, 4, 1, 31 },
87 { 4096000, P_PLL4, 4, 1, 24 },
88 { 4233600, P_PLL4, 4, 9, 209 },
89 { 4608000, P_PLL4, 4, 3, 64 },
90 { 5644800, P_PLL4, 4, 12, 209 },
91 { 6144000, P_PLL4, 4, 1, 16 },
92 { 6350400, P_PLL4, 4, 2, 31 },
93 { 8192000, P_PLL4, 4, 1, 12 },
94 { 8467200, P_PLL4, 4, 18, 209 },
95 { 9216000, P_PLL4, 4, 3, 32 },
96 { 11289600, P_PLL4, 4, 24, 209 },
97 { 12288000, P_PLL4, 4, 1, 8 },
98 { 12700800, P_PLL4, 4, 27, 209 },
99 { 13824000, P_PLL4, 4, 9, 64 },
100 { 16384000, P_PLL4, 4, 1, 6 },
101 { 16934400, P_PLL4, 4, 41, 238 },
102 { 18432000, P_PLL4, 4, 3, 16 },
103 { 22579200, P_PLL4, 2, 24, 209 },
104 { 24576000, P_PLL4, 4, 1, 4 },
105 { 27648000, P_PLL4, 4, 9, 32 },
106 { 33868800, P_PLL4, 4, 41, 119 },
107 { 36864000, P_PLL4, 4, 3, 8 },
108 { 45158400, P_PLL4, 1, 24, 209 },
109 { 49152000, P_PLL4, 4, 1, 2 },
110 { 50803200, P_PLL4, 1, 27, 209 },
111 { }
112};
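
/*
 * Each row is { rate, source, pre_div, m, n }; e.g. the 1024000 row
 * yields 393.216 MHz / 4 * 1 / 96 = 1.024 MHz with PLL4 at its expected
 * 393.216 MHz.
 */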
113
114static struct clk_rcg mi2s_osr_src = {
115 .ns_reg = 0x48,
116 .md_reg = 0x4c,
117 .mn = {
118 .mnctr_en_bit = 8,
119 .mnctr_reset_bit = 7,
120 .mnctr_mode_shift = 5,
121 .n_val_shift = 24,
122 .m_val_shift = 8,
123 .width = 8,
124 },
125 .p = {
126 .pre_div_shift = 3,
127 .pre_div_width = 2,
128 },
129 .s = {
130 .src_sel_shift = 0,
131 .parent_map = lcc_pxo_pll4_map,
132 },
133 .freq_tbl = clk_tbl_aif_mi2s,
134 .clkr = {
135 .enable_reg = 0x48,
136 .enable_mask = BIT(9),
137 .hw.init = &(struct clk_init_data){
138 .name = "mi2s_osr_src",
139 .parent_names = lcc_pxo_pll4,
140 .num_parents = 2,
141 .ops = &clk_rcg_ops,
142 .flags = CLK_SET_RATE_GATE,
143 },
144 },
145};
146
147static const char *lcc_mi2s_parents[] = {
148 "mi2s_osr_src",
149};
150
151static struct clk_branch mi2s_osr_clk = {
152 .halt_reg = 0x50,
153 .halt_bit = 1,
154 .halt_check = BRANCH_HALT_ENABLE,
155 .clkr = {
156 .enable_reg = 0x48,
157 .enable_mask = BIT(17),
158 .hw.init = &(struct clk_init_data){
159 .name = "mi2s_osr_clk",
160 .parent_names = lcc_mi2s_parents,
161 .num_parents = 1,
162 .ops = &clk_branch_ops,
163 .flags = CLK_SET_RATE_PARENT,
164 },
165 },
166};
167
168static struct clk_regmap_div mi2s_div_clk = {
169 .reg = 0x48,
170 .shift = 10,
171 .width = 4,
172 .clkr = {
173 .hw.init = &(struct clk_init_data){
174 .name = "mi2s_div_clk",
175 .parent_names = lcc_mi2s_parents,
176 .num_parents = 1,
177 .ops = &clk_regmap_div_ops,
178 },
179 },
180};
181
182static struct clk_branch mi2s_bit_div_clk = {
183 .halt_reg = 0x50,
184 .halt_bit = 0,
185 .halt_check = BRANCH_HALT_ENABLE,
186 .clkr = {
187 .enable_reg = 0x48,
188 .enable_mask = BIT(15),
189 .hw.init = &(struct clk_init_data){
190 .name = "mi2s_bit_div_clk",
191 .parent_names = (const char *[]){ "mi2s_div_clk" },
192 .num_parents = 1,
193 .ops = &clk_branch_ops,
194 .flags = CLK_SET_RATE_PARENT,
195 },
196 },
197};
198
199
200static struct clk_regmap_mux mi2s_bit_clk = {
201 .reg = 0x48,
202 .shift = 14,
203 .width = 1,
204 .clkr = {
205 .hw.init = &(struct clk_init_data){
206 .name = "mi2s_bit_clk",
207 .parent_names = (const char *[]){
208 "mi2s_bit_div_clk",
209 "mi2s_codec_clk",
210 },
211 .num_parents = 2,
212 .ops = &clk_regmap_mux_closest_ops,
213 .flags = CLK_SET_RATE_PARENT,
214 },
215 },
216};
217
218static struct freq_tbl clk_tbl_pcm[] = {
219 { 64000, P_PLL4, 4, 1, 1536 },
220 { 128000, P_PLL4, 4, 1, 768 },
221 { 256000, P_PLL4, 4, 1, 384 },
222 { 512000, P_PLL4, 4, 1, 192 },
223 { 1024000, P_PLL4, 4, 1, 96 },
224 { 2048000, P_PLL4, 4, 1, 48 },
225 { },
226};
227
228static struct clk_rcg pcm_src = {
229 .ns_reg = 0x54,
230 .md_reg = 0x58,
231 .mn = {
232 .mnctr_en_bit = 8,
233 .mnctr_reset_bit = 7,
234 .mnctr_mode_shift = 5,
235 .n_val_shift = 16,
236 .m_val_shift = 16,
237 .width = 16,
238 },
239 .p = {
240 .pre_div_shift = 3,
241 .pre_div_width = 2,
242 },
243 .s = {
244 .src_sel_shift = 0,
245 .parent_map = lcc_pxo_pll4_map,
246 },
247 .freq_tbl = clk_tbl_pcm,
248 .clkr = {
249 .enable_reg = 0x54,
250 .enable_mask = BIT(9),
251 .hw.init = &(struct clk_init_data){
252 .name = "pcm_src",
253 .parent_names = lcc_pxo_pll4,
254 .num_parents = 2,
255 .ops = &clk_rcg_ops,
256 .flags = CLK_SET_RATE_GATE,
257 },
258 },
259};
260
261static struct clk_branch pcm_clk_out = {
262 .halt_reg = 0x5c,
263 .halt_bit = 0,
264 .halt_check = BRANCH_HALT_ENABLE,
265 .clkr = {
266 .enable_reg = 0x54,
267 .enable_mask = BIT(11),
268 .hw.init = &(struct clk_init_data){
269 .name = "pcm_clk_out",
270 .parent_names = (const char *[]){ "pcm_src" },
271 .num_parents = 1,
272 .ops = &clk_branch_ops,
273 .flags = CLK_SET_RATE_PARENT,
274 },
275 },
276};
277
278static struct clk_regmap_mux pcm_clk = {
279 .reg = 0x54,
280 .shift = 10,
281 .width = 1,
282 .clkr = {
283 .hw.init = &(struct clk_init_data){
284 .name = "pcm_clk",
285 .parent_names = (const char *[]){
286 "pcm_clk_out",
287 "pcm_codec_clk",
288 },
289 .num_parents = 2,
290 .ops = &clk_regmap_mux_closest_ops,
291 .flags = CLK_SET_RATE_PARENT,
292 },
293 },
294};
295
296static struct freq_tbl clk_tbl_aif_osr[] = {
297 { 22050, P_PLL4, 1, 147, 20480 },
298 { 32000, P_PLL4, 1, 1, 96 },
299 { 44100, P_PLL4, 1, 147, 10240 },
300 { 48000, P_PLL4, 1, 1, 64 },
301 { 88200, P_PLL4, 1, 147, 5120 },
302 { 96000, P_PLL4, 1, 1, 32 },
303 { 176400, P_PLL4, 1, 147, 2560 },
304 { 192000, P_PLL4, 1, 1, 16 },
305 { },
306};
307
308static struct clk_rcg spdif_src = {
309 .ns_reg = 0xcc,
310 .md_reg = 0xd0,
311 .mn = {
312 .mnctr_en_bit = 8,
313 .mnctr_reset_bit = 7,
314 .mnctr_mode_shift = 5,
315 .n_val_shift = 16,
316 .m_val_shift = 16,
317 .width = 8,
318 },
319 .p = {
320 .pre_div_shift = 3,
321 .pre_div_width = 2,
322 },
323 .s = {
324 .src_sel_shift = 0,
325 .parent_map = lcc_pxo_pll4_map,
326 },
327 .freq_tbl = clk_tbl_aif_osr,
328 .clkr = {
329 .enable_reg = 0xcc,
330 .enable_mask = BIT(9),
331 .hw.init = &(struct clk_init_data){
332 .name = "spdif_src",
333 .parent_names = lcc_pxo_pll4,
334 .num_parents = 2,
335 .ops = &clk_rcg_ops,
336 .flags = CLK_SET_RATE_GATE,
337 },
338 },
339};
340
341static const char *lcc_spdif_parents[] = {
342 "spdif_src",
343};
344
345static struct clk_branch spdif_clk = {
346 .halt_reg = 0xd4,
347 .halt_bit = 1,
348 .halt_check = BRANCH_HALT_ENABLE,
349 .clkr = {
350 .enable_reg = 0xcc,
351 .enable_mask = BIT(12),
352 .hw.init = &(struct clk_init_data){
353 .name = "spdif_clk",
354 .parent_names = lcc_spdif_parents,
355 .num_parents = 1,
356 .ops = &clk_branch_ops,
357 .flags = CLK_SET_RATE_PARENT,
358 },
359 },
360};
361
362static struct freq_tbl clk_tbl_ahbix[] = {
363 { 131072, P_PLL4, 1, 1, 3 },
364 { },
365};
366
367static struct clk_rcg ahbix_clk = {
368 .ns_reg = 0x38,
369 .md_reg = 0x3c,
370 .mn = {
371 .mnctr_en_bit = 8,
372 .mnctr_reset_bit = 7,
373 .mnctr_mode_shift = 5,
374 .n_val_shift = 24,
375 .m_val_shift = 8,
376 .width = 8,
377 },
378 .p = {
379 .pre_div_shift = 3,
380 .pre_div_width = 2,
381 },
382 .s = {
383 .src_sel_shift = 0,
384 .parent_map = lcc_pxo_pll4_map,
385 },
386 .freq_tbl = clk_tbl_ahbix,
387 .clkr = {
388 .enable_reg = 0x38,
389 .enable_mask = BIT(10), /* toggle the gfmux to select mn/pxo */
390 .hw.init = &(struct clk_init_data){
391 .name = "ahbix",
392 .parent_names = lcc_pxo_pll4,
393 .num_parents = 2,
394 .ops = &clk_rcg_ops,
395 .flags = CLK_SET_RATE_GATE,
396 },
397 },
398};
399
400static struct clk_regmap *lcc_ipq806x_clks[] = {
401 [PLL4] = &pll4.clkr,
402 [MI2S_OSR_SRC] = &mi2s_osr_src.clkr,
403 [MI2S_OSR_CLK] = &mi2s_osr_clk.clkr,
404 [MI2S_DIV_CLK] = &mi2s_div_clk.clkr,
405 [MI2S_BIT_DIV_CLK] = &mi2s_bit_div_clk.clkr,
406 [MI2S_BIT_CLK] = &mi2s_bit_clk.clkr,
407 [PCM_SRC] = &pcm_src.clkr,
408 [PCM_CLK_OUT] = &pcm_clk_out.clkr,
409 [PCM_CLK] = &pcm_clk.clkr,
410 [SPDIF_SRC] = &spdif_src.clkr,
411 [SPDIF_CLK] = &spdif_clk.clkr,
412 [AHBIX_CLK] = &ahbix_clk.clkr,
413};
414
415static const struct regmap_config lcc_ipq806x_regmap_config = {
416 .reg_bits = 32,
417 .reg_stride = 4,
418 .val_bits = 32,
419 .max_register = 0xfc,
420 .fast_io = true,
421};
422
423static const struct qcom_cc_desc lcc_ipq806x_desc = {
424 .config = &lcc_ipq806x_regmap_config,
425 .clks = lcc_ipq806x_clks,
426 .num_clks = ARRAY_SIZE(lcc_ipq806x_clks),
427};
428
429static const struct of_device_id lcc_ipq806x_match_table[] = {
430 { .compatible = "qcom,lcc-ipq8064" },
431 { }
432};
433MODULE_DEVICE_TABLE(of, lcc_ipq806x_match_table);
434
435static int lcc_ipq806x_probe(struct platform_device *pdev)
436{
437 u32 val;
438 struct regmap *regmap;
439
440 regmap = qcom_cc_map(pdev, &lcc_ipq806x_desc);
441 if (IS_ERR(regmap))
442 return PTR_ERR(regmap);
443
444 /* Configure the rate of PLL4 if the bootloader hasn't already */
445 regmap_read(regmap, 0x0, &val);
446 if (!val)
447 clk_pll_configure_sr(&pll4, regmap, &pll4_config, true);
448 /* Enable PLL4 source on the LPASS Primary PLL Mux */
449 regmap_write(regmap, 0xc4, 0x1);
450
451 return qcom_cc_really_probe(pdev, &lcc_ipq806x_desc, regmap);
452}
453
454static int lcc_ipq806x_remove(struct platform_device *pdev)
455{
456 qcom_cc_remove(pdev);
457 return 0;
458}
459
460static struct platform_driver lcc_ipq806x_driver = {
461 .probe = lcc_ipq806x_probe,
462 .remove = lcc_ipq806x_remove,
463 .driver = {
464 .name = "lcc-ipq806x",
465 .owner = THIS_MODULE,
466 .of_match_table = lcc_ipq806x_match_table,
467 },
468};
469module_platform_driver(lcc_ipq806x_driver);
470
471MODULE_DESCRIPTION("QCOM LCC IPQ806x Driver");
472MODULE_LICENSE("GPL v2");
473MODULE_ALIAS("platform:lcc-ipq806x");
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
new file mode 100644
index 000000000000..a75a408cfccd
--- /dev/null
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -0,0 +1,585 @@
1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/kernel.h>
15#include <linux/bitops.h>
16#include <linux/err.h>
17#include <linux/platform_device.h>
18#include <linux/module.h>
19#include <linux/of.h>
20#include <linux/of_device.h>
21#include <linux/clk-provider.h>
22#include <linux/regmap.h>
23
24#include <dt-bindings/clock/qcom,lcc-msm8960.h>
25
26#include "common.h"
27#include "clk-regmap.h"
28#include "clk-pll.h"
29#include "clk-rcg.h"
30#include "clk-branch.h"
31#include "clk-regmap-divider.h"
32#include "clk-regmap-mux.h"
33
34static struct clk_pll pll4 = {
35 .l_reg = 0x4,
36 .m_reg = 0x8,
37 .n_reg = 0xc,
38 .config_reg = 0x14,
39 .mode_reg = 0x0,
40 .status_reg = 0x18,
41 .status_bit = 16,
42 .clkr.hw.init = &(struct clk_init_data){
43 .name = "pll4",
44 .parent_names = (const char *[]){ "pxo" },
45 .num_parents = 1,
46 .ops = &clk_pll_ops,
47 },
48};
49
50#define P_PXO 0
51#define P_PLL4 1
52
53static const u8 lcc_pxo_pll4_map[] = {
54 [P_PXO] = 0,
55 [P_PLL4] = 2,
56};
57
58static const char *lcc_pxo_pll4[] = {
59 "pxo",
60 "pll4_vote",
61};
62
63static struct freq_tbl clk_tbl_aif_osr_492[] = {
64 { 512000, P_PLL4, 4, 1, 240 },
65 { 768000, P_PLL4, 4, 1, 160 },
66 { 1024000, P_PLL4, 4, 1, 120 },
67 { 1536000, P_PLL4, 4, 1, 80 },
68 { 2048000, P_PLL4, 4, 1, 60 },
69 { 3072000, P_PLL4, 4, 1, 40 },
70 { 4096000, P_PLL4, 4, 1, 30 },
71 { 6144000, P_PLL4, 4, 1, 20 },
72 { 8192000, P_PLL4, 4, 1, 15 },
73 { 12288000, P_PLL4, 4, 1, 10 },
74 { 24576000, P_PLL4, 4, 1, 5 },
75 { 27000000, P_PXO, 1, 0, 0 },
76 { }
77};
78
79static struct freq_tbl clk_tbl_aif_osr_393[] = {
80 { 512000, P_PLL4, 4, 1, 192 },
81 { 768000, P_PLL4, 4, 1, 128 },
82 { 1024000, P_PLL4, 4, 1, 96 },
83 { 1536000, P_PLL4, 4, 1, 64 },
84 { 2048000, P_PLL4, 4, 1, 48 },
85 { 3072000, P_PLL4, 4, 1, 32 },
86 { 4096000, P_PLL4, 4, 1, 24 },
87 { 6144000, P_PLL4, 4, 1, 16 },
88 { 8192000, P_PLL4, 4, 1, 12 },
89 { 12288000, P_PLL4, 4, 1, 8 },
90 { 24576000, P_PLL4, 4, 1, 4 },
91 { 27000000, P_PXO, 1, 0, 0 },
92 { }
93};
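
/*
 * Two frequency plans: the _492 tables assume PLL4 at 491.52 MHz
 * (512000 * 4 * 240), the _393 tables PLL4 at 393.216 MHz
 * (512000 * 4 * 192); lcc_msm8960_probe() below selects between them
 * from the PLL4 L value.
 */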
94
95static struct clk_rcg mi2s_osr_src = {
96 .ns_reg = 0x48,
97 .md_reg = 0x4c,
98 .mn = {
99 .mnctr_en_bit = 8,
100 .mnctr_reset_bit = 7,
101 .mnctr_mode_shift = 5,
102 .n_val_shift = 24,
103 .m_val_shift = 8,
104 .width = 8,
105 },
106 .p = {
107 .pre_div_shift = 3,
108 .pre_div_width = 2,
109 },
110 .s = {
111 .src_sel_shift = 0,
112 .parent_map = lcc_pxo_pll4_map,
113 },
114 .freq_tbl = clk_tbl_aif_osr_393,
115 .clkr = {
116 .enable_reg = 0x48,
117 .enable_mask = BIT(9),
118 .hw.init = &(struct clk_init_data){
119 .name = "mi2s_osr_src",
120 .parent_names = lcc_pxo_pll4,
121 .num_parents = 2,
122 .ops = &clk_rcg_ops,
123 .flags = CLK_SET_RATE_GATE,
124 },
125 },
126};
127
128static const char *lcc_mi2s_parents[] = {
129 "mi2s_osr_src",
130};
131
132static struct clk_branch mi2s_osr_clk = {
133 .halt_reg = 0x50,
134 .halt_bit = 1,
135 .halt_check = BRANCH_HALT_ENABLE,
136 .clkr = {
137 .enable_reg = 0x48,
138 .enable_mask = BIT(17),
139 .hw.init = &(struct clk_init_data){
140 .name = "mi2s_osr_clk",
141 .parent_names = lcc_mi2s_parents,
142 .num_parents = 1,
143 .ops = &clk_branch_ops,
144 .flags = CLK_SET_RATE_PARENT,
145 },
146 },
147};
148
149static struct clk_regmap_div mi2s_div_clk = {
150 .reg = 0x48,
151 .shift = 10,
152 .width = 4,
153 .clkr = {
154 .enable_reg = 0x48,
155 .enable_mask = BIT(15),
156 .hw.init = &(struct clk_init_data){
157 .name = "mi2s_div_clk",
158 .parent_names = lcc_mi2s_parents,
159 .num_parents = 1,
160 .ops = &clk_regmap_div_ops,
161 },
162 },
163};
164
165static struct clk_branch mi2s_bit_div_clk = {
166 .halt_reg = 0x50,
167 .halt_bit = 0,
168 .halt_check = BRANCH_HALT_ENABLE,
169 .clkr = {
170 .enable_reg = 0x48,
171 .enable_mask = BIT(15),
172 .hw.init = &(struct clk_init_data){
173 .name = "mi2s_bit_div_clk",
174 .parent_names = (const char *[]){ "mi2s_div_clk" },
175 .num_parents = 1,
176 .ops = &clk_branch_ops,
177 .flags = CLK_SET_RATE_PARENT,
178 },
179 },
180};
181
182static struct clk_regmap_mux mi2s_bit_clk = {
183 .reg = 0x48,
184 .shift = 14,
185 .width = 1,
186 .clkr = {
187 .hw.init = &(struct clk_init_data){
188 .name = "mi2s_bit_clk",
189 .parent_names = (const char *[]){
190 "mi2s_bit_div_clk",
191 "mi2s_codec_clk",
192 },
193 .num_parents = 2,
194 .ops = &clk_regmap_mux_closest_ops,
195 .flags = CLK_SET_RATE_PARENT,
196 },
197 },
198};
199
200#define CLK_AIF_OSR_DIV(prefix, _ns, _md, hr) \
201static struct clk_rcg prefix##_osr_src = { \
202 .ns_reg = _ns, \
203 .md_reg = _md, \
204 .mn = { \
205 .mnctr_en_bit = 8, \
206 .mnctr_reset_bit = 7, \
207 .mnctr_mode_shift = 5, \
208 .n_val_shift = 24, \
209 .m_val_shift = 8, \
210 .width = 8, \
211 }, \
212 .p = { \
213 .pre_div_shift = 3, \
214 .pre_div_width = 2, \
215 }, \
216 .s = { \
217 .src_sel_shift = 0, \
218 .parent_map = lcc_pxo_pll4_map, \
219 }, \
220 .freq_tbl = clk_tbl_aif_osr_393, \
221 .clkr = { \
222 .enable_reg = _ns, \
223 .enable_mask = BIT(9), \
224 .hw.init = &(struct clk_init_data){ \
225 .name = #prefix "_osr_src", \
226 .parent_names = lcc_pxo_pll4, \
227 .num_parents = 2, \
228 .ops = &clk_rcg_ops, \
229 .flags = CLK_SET_RATE_GATE, \
230 }, \
231 }, \
232}; \
233 \
234static const char *lcc_##prefix##_parents[] = { \
235 #prefix "_osr_src", \
236}; \
237 \
238static struct clk_branch prefix##_osr_clk = { \
239 .halt_reg = hr, \
240 .halt_bit = 1, \
241 .halt_check = BRANCH_HALT_ENABLE, \
242 .clkr = { \
243 .enable_reg = _ns, \
244 .enable_mask = BIT(21), \
245 .hw.init = &(struct clk_init_data){ \
246 .name = #prefix "_osr_clk", \
247 .parent_names = lcc_##prefix##_parents, \
248 .num_parents = 1, \
249 .ops = &clk_branch_ops, \
250 .flags = CLK_SET_RATE_PARENT, \
251 }, \
252 }, \
253}; \
254 \
255static struct clk_regmap_div prefix##_div_clk = { \
256 .reg = _ns, \
257 .shift = 10, \
258 .width = 8, \
259 .clkr = { \
260 .hw.init = &(struct clk_init_data){ \
261 .name = #prefix "_div_clk", \
262 .parent_names = lcc_##prefix##_parents, \
263 .num_parents = 1, \
264 .ops = &clk_regmap_div_ops, \
265 }, \
266 }, \
267}; \
268 \
269static struct clk_branch prefix##_bit_div_clk = { \
270 .halt_reg = hr, \
271 .halt_bit = 0, \
272 .halt_check = BRANCH_HALT_ENABLE, \
273 .clkr = { \
274 .enable_reg = _ns, \
275 .enable_mask = BIT(19), \
276 .hw.init = &(struct clk_init_data){ \
277 .name = #prefix "_bit_div_clk", \
278 .parent_names = (const char *[]){ \
279 #prefix "_div_clk" \
280 }, \
281 .num_parents = 1, \
282 .ops = &clk_branch_ops, \
283 .flags = CLK_SET_RATE_PARENT, \
284 }, \
285 }, \
286}; \
287 \
288static struct clk_regmap_mux prefix##_bit_clk = { \
289 .reg = _ns, \
290 .shift = 18, \
291 .width = 1, \
292 .clkr = { \
293 .hw.init = &(struct clk_init_data){ \
294 .name = #prefix "_bit_clk", \
295 .parent_names = (const char *[]){ \
296 #prefix "_bit_div_clk", \
297 #prefix "_codec_clk", \
298 }, \
299 .num_parents = 2, \
300 .ops = &clk_regmap_mux_closest_ops, \
301 .flags = CLK_SET_RATE_PARENT, \
302 }, \
303 }, \
304}
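
/*
 * Example expansion: CLK_AIF_OSR_DIV(codec_i2s_mic, 0x60, 0x64, 0x68)
 * stamps out codec_i2s_mic_osr_src/_osr_clk/_div_clk/_bit_div_clk/
 * _bit_clk, all controlled through the NS register at 0x60.
 */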
305
306CLK_AIF_OSR_DIV(codec_i2s_mic, 0x60, 0x64, 0x68);
307CLK_AIF_OSR_DIV(spare_i2s_mic, 0x78, 0x7c, 0x80);
308CLK_AIF_OSR_DIV(codec_i2s_spkr, 0x6c, 0x70, 0x74);
309CLK_AIF_OSR_DIV(spare_i2s_spkr, 0x84, 0x88, 0x8c);
310
311static struct freq_tbl clk_tbl_pcm_492[] = {
312 { 256000, P_PLL4, 4, 1, 480 },
313 { 512000, P_PLL4, 4, 1, 240 },
314 { 768000, P_PLL4, 4, 1, 160 },
315 { 1024000, P_PLL4, 4, 1, 120 },
316 { 1536000, P_PLL4, 4, 1, 80 },
317 { 2048000, P_PLL4, 4, 1, 60 },
318 { 3072000, P_PLL4, 4, 1, 40 },
319 { 4096000, P_PLL4, 4, 1, 30 },
320 { 6144000, P_PLL4, 4, 1, 20 },
321 { 8192000, P_PLL4, 4, 1, 15 },
322 { 12288000, P_PLL4, 4, 1, 10 },
323 { 24576000, P_PLL4, 4, 1, 5 },
324 { 27000000, P_PXO, 1, 0, 0 },
325 { }
326};
327
328static struct freq_tbl clk_tbl_pcm_393[] = {
329 { 256000, P_PLL4, 4, 1, 384 },
330 { 512000, P_PLL4, 4, 1, 192 },
331 { 768000, P_PLL4, 4, 1, 128 },
332 { 1024000, P_PLL4, 4, 1, 96 },
333 { 1536000, P_PLL4, 4, 1, 64 },
334 { 2048000, P_PLL4, 4, 1, 48 },
335 { 3072000, P_PLL4, 4, 1, 32 },
336 { 4096000, P_PLL4, 4, 1, 24 },
337 { 6144000, P_PLL4, 4, 1, 16 },
338 { 8192000, P_PLL4, 4, 1, 12 },
339 { 12288000, P_PLL4, 4, 1, 8 },
340 { 24576000, P_PLL4, 4, 1, 4 },
341 { 27000000, P_PXO, 1, 0, 0 },
342 { }
343};
344
345static struct clk_rcg pcm_src = {
346 .ns_reg = 0x54,
347 .md_reg = 0x58,
348 .mn = {
349 .mnctr_en_bit = 8,
350 .mnctr_reset_bit = 7,
351 .mnctr_mode_shift = 5,
352 .n_val_shift = 16,
353 .m_val_shift = 16,
354 .width = 16,
355 },
356 .p = {
357 .pre_div_shift = 3,
358 .pre_div_width = 2,
359 },
360 .s = {
361 .src_sel_shift = 0,
362 .parent_map = lcc_pxo_pll4_map,
363 },
364 .freq_tbl = clk_tbl_pcm_393,
365 .clkr = {
366 .enable_reg = 0x54,
367 .enable_mask = BIT(9),
368 .hw.init = &(struct clk_init_data){
369 .name = "pcm_src",
370 .parent_names = lcc_pxo_pll4,
371 .num_parents = 2,
372 .ops = &clk_rcg_ops,
373 .flags = CLK_SET_RATE_GATE,
374 },
375 },
376};
377
378static struct clk_branch pcm_clk_out = {
379 .halt_reg = 0x5c,
380 .halt_bit = 0,
381 .halt_check = BRANCH_HALT_ENABLE,
382 .clkr = {
383 .enable_reg = 0x54,
384 .enable_mask = BIT(11),
385 .hw.init = &(struct clk_init_data){
386 .name = "pcm_clk_out",
387 .parent_names = (const char *[]){ "pcm_src" },
388 .num_parents = 1,
389 .ops = &clk_branch_ops,
390 .flags = CLK_SET_RATE_PARENT,
391 },
392 },
393};
394
395static struct clk_regmap_mux pcm_clk = {
396 .reg = 0x54,
397 .shift = 10,
398 .width = 1,
399 .clkr = {
400 .hw.init = &(struct clk_init_data){
401 .name = "pcm_clk",
402 .parent_names = (const char *[]){
403 "pcm_clk_out",
404 "pcm_codec_clk",
405 },
406 .num_parents = 2,
407 .ops = &clk_regmap_mux_closest_ops,
408 .flags = CLK_SET_RATE_PARENT,
409 },
410 },
411};
412
413static struct clk_rcg slimbus_src = {
414 .ns_reg = 0xcc,
415 .md_reg = 0xd0,
416 .mn = {
417 .mnctr_en_bit = 8,
418 .mnctr_reset_bit = 7,
419 .mnctr_mode_shift = 5,
420 .n_val_shift = 16,
421 .m_val_shift = 16,
422 .width = 8,
423 },
424 .p = {
425 .pre_div_shift = 3,
426 .pre_div_width = 2,
427 },
428 .s = {
429 .src_sel_shift = 0,
430 .parent_map = lcc_pxo_pll4_map,
431 },
432 .freq_tbl = clk_tbl_aif_osr_393,
433 .clkr = {
434 .enable_reg = 0xcc,
435 .enable_mask = BIT(9),
436 .hw.init = &(struct clk_init_data){
437 .name = "slimbus_src",
438 .parent_names = lcc_pxo_pll4,
439 .num_parents = 2,
440 .ops = &clk_rcg_ops,
441 .flags = CLK_SET_RATE_GATE,
442 },
443 },
444};
445
446static const char *lcc_slimbus_parents[] = {
447 "slimbus_src",
448};
449
450static struct clk_branch audio_slimbus_clk = {
451 .halt_reg = 0xd4,
452 .halt_bit = 0,
453 .halt_check = BRANCH_HALT_ENABLE,
454 .clkr = {
455 .enable_reg = 0xcc,
456 .enable_mask = BIT(10),
457 .hw.init = &(struct clk_init_data){
458 .name = "audio_slimbus_clk",
459 .parent_names = lcc_slimbus_parents,
460 .num_parents = 1,
461 .ops = &clk_branch_ops,
462 .flags = CLK_SET_RATE_PARENT,
463 },
464 },
465};
466
467static struct clk_branch sps_slimbus_clk = {
468 .halt_reg = 0xd4,
469 .halt_bit = 1,
470 .halt_check = BRANCH_HALT_ENABLE,
471 .clkr = {
472 .enable_reg = 0xcc,
473 .enable_mask = BIT(12),
474 .hw.init = &(struct clk_init_data){
475 .name = "sps_slimbus_clk",
476 .parent_names = lcc_slimbus_parents,
477 .num_parents = 1,
478 .ops = &clk_branch_ops,
479 .flags = CLK_SET_RATE_PARENT,
480 },
481 },
482};
483
484static struct clk_regmap *lcc_msm8960_clks[] = {
485 [PLL4] = &pll4.clkr,
486 [MI2S_OSR_SRC] = &mi2s_osr_src.clkr,
487 [MI2S_OSR_CLK] = &mi2s_osr_clk.clkr,
488 [MI2S_DIV_CLK] = &mi2s_div_clk.clkr,
489 [MI2S_BIT_DIV_CLK] = &mi2s_bit_div_clk.clkr,
490 [MI2S_BIT_CLK] = &mi2s_bit_clk.clkr,
491 [PCM_SRC] = &pcm_src.clkr,
492 [PCM_CLK_OUT] = &pcm_clk_out.clkr,
493 [PCM_CLK] = &pcm_clk.clkr,
494 [SLIMBUS_SRC] = &slimbus_src.clkr,
495 [AUDIO_SLIMBUS_CLK] = &audio_slimbus_clk.clkr,
496 [SPS_SLIMBUS_CLK] = &sps_slimbus_clk.clkr,
497 [CODEC_I2S_MIC_OSR_SRC] = &codec_i2s_mic_osr_src.clkr,
498 [CODEC_I2S_MIC_OSR_CLK] = &codec_i2s_mic_osr_clk.clkr,
499 [CODEC_I2S_MIC_DIV_CLK] = &codec_i2s_mic_div_clk.clkr,
500 [CODEC_I2S_MIC_BIT_DIV_CLK] = &codec_i2s_mic_bit_div_clk.clkr,
501 [CODEC_I2S_MIC_BIT_CLK] = &codec_i2s_mic_bit_clk.clkr,
502 [SPARE_I2S_MIC_OSR_SRC] = &spare_i2s_mic_osr_src.clkr,
503 [SPARE_I2S_MIC_OSR_CLK] = &spare_i2s_mic_osr_clk.clkr,
504 [SPARE_I2S_MIC_DIV_CLK] = &spare_i2s_mic_div_clk.clkr,
505 [SPARE_I2S_MIC_BIT_DIV_CLK] = &spare_i2s_mic_bit_div_clk.clkr,
506 [SPARE_I2S_MIC_BIT_CLK] = &spare_i2s_mic_bit_clk.clkr,
507 [CODEC_I2S_SPKR_OSR_SRC] = &codec_i2s_spkr_osr_src.clkr,
508 [CODEC_I2S_SPKR_OSR_CLK] = &codec_i2s_spkr_osr_clk.clkr,
509 [CODEC_I2S_SPKR_DIV_CLK] = &codec_i2s_spkr_div_clk.clkr,
510 [CODEC_I2S_SPKR_BIT_DIV_CLK] = &codec_i2s_spkr_bit_div_clk.clkr,
511 [CODEC_I2S_SPKR_BIT_CLK] = &codec_i2s_spkr_bit_clk.clkr,
512 [SPARE_I2S_SPKR_OSR_SRC] = &spare_i2s_spkr_osr_src.clkr,
513 [SPARE_I2S_SPKR_OSR_CLK] = &spare_i2s_spkr_osr_clk.clkr,
514 [SPARE_I2S_SPKR_DIV_CLK] = &spare_i2s_spkr_div_clk.clkr,
515 [SPARE_I2S_SPKR_BIT_DIV_CLK] = &spare_i2s_spkr_bit_div_clk.clkr,
516 [SPARE_I2S_SPKR_BIT_CLK] = &spare_i2s_spkr_bit_clk.clkr,
517};
518
519static const struct regmap_config lcc_msm8960_regmap_config = {
520 .reg_bits = 32,
521 .reg_stride = 4,
522 .val_bits = 32,
523 .max_register = 0xfc,
524 .fast_io = true,
525};
526
527static const struct qcom_cc_desc lcc_msm8960_desc = {
528 .config = &lcc_msm8960_regmap_config,
529 .clks = lcc_msm8960_clks,
530 .num_clks = ARRAY_SIZE(lcc_msm8960_clks),
531};
532
533static const struct of_device_id lcc_msm8960_match_table[] = {
534 { .compatible = "qcom,lcc-msm8960" },
535 { .compatible = "qcom,lcc-apq8064" },
536 { }
537};
538MODULE_DEVICE_TABLE(of, lcc_msm8960_match_table);
539
540static int lcc_msm8960_probe(struct platform_device *pdev)
541{
542 u32 val;
543 struct regmap *regmap;
544
545 regmap = qcom_cc_map(pdev, &lcc_msm8960_desc);
546 if (IS_ERR(regmap))
547 return PTR_ERR(regmap);
548
549 /* Use the correct frequency plan depending on speed of PLL4 */
550 regmap_read(regmap, 0x4, &val);
551 if (val == 0x12) {
552 slimbus_src.freq_tbl = clk_tbl_aif_osr_492;
553 mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492;
554 codec_i2s_mic_osr_src.freq_tbl = clk_tbl_aif_osr_492;
555 spare_i2s_mic_osr_src.freq_tbl = clk_tbl_aif_osr_492;
556 codec_i2s_spkr_osr_src.freq_tbl = clk_tbl_aif_osr_492;
557 spare_i2s_spkr_osr_src.freq_tbl = clk_tbl_aif_osr_492;
558 pcm_src.freq_tbl = clk_tbl_pcm_492;
559 }
560 /* Enable PLL4 source on the LPASS Primary PLL Mux */
561 regmap_write(regmap, 0xc4, 0x1);
562
563 return qcom_cc_really_probe(pdev, &lcc_msm8960_desc, regmap);
564}
565
566static int lcc_msm8960_remove(struct platform_device *pdev)
567{
568 qcom_cc_remove(pdev);
569 return 0;
570}
571
572static struct platform_driver lcc_msm8960_driver = {
573 .probe = lcc_msm8960_probe,
574 .remove = lcc_msm8960_remove,
575 .driver = {
576 .name = "lcc-msm8960",
577 .owner = THIS_MODULE,
578 .of_match_table = lcc_msm8960_match_table,
579 },
580};
581module_platform_driver(lcc_msm8960_driver);
582
583MODULE_DESCRIPTION("QCOM LCC MSM8960 Driver");
584MODULE_LICENSE("GPL v2");
585MODULE_ALIAS("platform:lcc-msm8960");
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index cbcddcc02475..05d7a0bc0599 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -535,44 +535,44 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
535 COMPOSITE(0, "uart0_src", mux_pll_src_cpll_gll_usb_npll_p, 0, 535 COMPOSITE(0, "uart0_src", mux_pll_src_cpll_gll_usb_npll_p, 0,
536 RK3288_CLKSEL_CON(13), 13, 2, MFLAGS, 0, 7, DFLAGS, 536 RK3288_CLKSEL_CON(13), 13, 2, MFLAGS, 0, 7, DFLAGS,
537 RK3288_CLKGATE_CON(1), 8, GFLAGS), 537 RK3288_CLKGATE_CON(1), 8, GFLAGS),
538 COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", 0, 538 COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
539 RK3288_CLKSEL_CON(17), 0, 539 RK3288_CLKSEL_CON(17), 0,
540 RK3288_CLKGATE_CON(1), 9, GFLAGS), 540 RK3288_CLKGATE_CON(1), 9, GFLAGS),
541 MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, 0, 541 MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT,
542 RK3288_CLKSEL_CON(13), 8, 2, MFLAGS), 542 RK3288_CLKSEL_CON(13), 8, 2, MFLAGS),
543 MUX(0, "uart_src", mux_pll_src_cpll_gpll_p, 0, 543 MUX(0, "uart_src", mux_pll_src_cpll_gpll_p, 0,
544 RK3288_CLKSEL_CON(13), 15, 1, MFLAGS), 544 RK3288_CLKSEL_CON(13), 15, 1, MFLAGS),
545 COMPOSITE_NOMUX(0, "uart1_src", "uart_src", 0, 545 COMPOSITE_NOMUX(0, "uart1_src", "uart_src", 0,
546 RK3288_CLKSEL_CON(14), 0, 7, DFLAGS, 546 RK3288_CLKSEL_CON(14), 0, 7, DFLAGS,
547 RK3288_CLKGATE_CON(1), 10, GFLAGS), 547 RK3288_CLKGATE_CON(1), 10, GFLAGS),
548 COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", 0, 548 COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT,
549 RK3288_CLKSEL_CON(18), 0, 549 RK3288_CLKSEL_CON(18), 0,
550 RK3288_CLKGATE_CON(1), 11, GFLAGS), 550 RK3288_CLKGATE_CON(1), 11, GFLAGS),
551 MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, 0, 551 MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT,
552 RK3288_CLKSEL_CON(14), 8, 2, MFLAGS), 552 RK3288_CLKSEL_CON(14), 8, 2, MFLAGS),
553 COMPOSITE_NOMUX(0, "uart2_src", "uart_src", 0, 553 COMPOSITE_NOMUX(0, "uart2_src", "uart_src", 0,
554 RK3288_CLKSEL_CON(15), 0, 7, DFLAGS, 554 RK3288_CLKSEL_CON(15), 0, 7, DFLAGS,
555 RK3288_CLKGATE_CON(1), 12, GFLAGS), 555 RK3288_CLKGATE_CON(1), 12, GFLAGS),
556 COMPOSITE_FRAC(0, "uart2_frac", "uart2_src", 0, 556 COMPOSITE_FRAC(0, "uart2_frac", "uart2_src", CLK_SET_RATE_PARENT,
557 RK3288_CLKSEL_CON(19), 0, 557 RK3288_CLKSEL_CON(19), 0,
558 RK3288_CLKGATE_CON(1), 13, GFLAGS), 558 RK3288_CLKGATE_CON(1), 13, GFLAGS),
559 MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, 0, 559 MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT,
560 RK3288_CLKSEL_CON(15), 8, 2, MFLAGS), 560 RK3288_CLKSEL_CON(15), 8, 2, MFLAGS),
561 COMPOSITE_NOMUX(0, "uart3_src", "uart_src", 0, 561 COMPOSITE_NOMUX(0, "uart3_src", "uart_src", 0,
562 RK3288_CLKSEL_CON(16), 0, 7, DFLAGS, 562 RK3288_CLKSEL_CON(16), 0, 7, DFLAGS,
563 RK3288_CLKGATE_CON(1), 14, GFLAGS), 563 RK3288_CLKGATE_CON(1), 14, GFLAGS),
564 COMPOSITE_FRAC(0, "uart3_frac", "uart3_src", 0, 564 COMPOSITE_FRAC(0, "uart3_frac", "uart3_src", CLK_SET_RATE_PARENT,
565 RK3288_CLKSEL_CON(20), 0, 565 RK3288_CLKSEL_CON(20), 0,
566 RK3288_CLKGATE_CON(1), 15, GFLAGS), 566 RK3288_CLKGATE_CON(1), 15, GFLAGS),
567 MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, 0, 567 MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, CLK_SET_RATE_PARENT,
568 RK3288_CLKSEL_CON(16), 8, 2, MFLAGS), 568 RK3288_CLKSEL_CON(16), 8, 2, MFLAGS),
569 COMPOSITE_NOMUX(0, "uart4_src", "uart_src", 0, 569 COMPOSITE_NOMUX(0, "uart4_src", "uart_src", 0,
570 RK3288_CLKSEL_CON(3), 0, 7, DFLAGS, 570 RK3288_CLKSEL_CON(3), 0, 7, DFLAGS,
571 RK3288_CLKGATE_CON(2), 12, GFLAGS), 571 RK3288_CLKGATE_CON(2), 12, GFLAGS),
572 COMPOSITE_FRAC(0, "uart4_frac", "uart4_src", 0, 572 COMPOSITE_FRAC(0, "uart4_frac", "uart4_src", CLK_SET_RATE_PARENT,
573 RK3288_CLKSEL_CON(7), 0, 573 RK3288_CLKSEL_CON(7), 0,
574 RK3288_CLKGATE_CON(2), 13, GFLAGS), 574 RK3288_CLKGATE_CON(2), 13, GFLAGS),
575 MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, 0, 575 MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, CLK_SET_RATE_PARENT,
576 RK3288_CLKSEL_CON(3), 8, 2, MFLAGS), 576 RK3288_CLKSEL_CON(3), 8, 2, MFLAGS),
577 577
578 COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0, 578 COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0,
@@ -598,7 +598,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
598 GATE(0, "jtag", "ext_jtag", 0, 598 GATE(0, "jtag", "ext_jtag", 0,
599 RK3288_CLKGATE_CON(4), 14, GFLAGS), 599 RK3288_CLKGATE_CON(4), 14, GFLAGS),
600 600
601 COMPOSITE_NODIV(0, "usbphy480m_src", mux_usbphy480m_p, 0, 601 COMPOSITE_NODIV(SCLK_USBPHY480M_SRC, "usbphy480m_src", mux_usbphy480m_p, 0,
602 RK3288_CLKSEL_CON(13), 11, 2, MFLAGS, 602 RK3288_CLKSEL_CON(13), 11, 2, MFLAGS,
603 RK3288_CLKGATE_CON(5), 14, GFLAGS), 603 RK3288_CLKGATE_CON(5), 14, GFLAGS),
604 COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0, 604 COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
@@ -704,8 +704,8 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
704 704
705 GATE(SCLK_LCDC_PWM0, "sclk_lcdc_pwm0", "xin24m", 0, RK3288_CLKGATE_CON(13), 10, GFLAGS), 705 GATE(SCLK_LCDC_PWM0, "sclk_lcdc_pwm0", "xin24m", 0, RK3288_CLKGATE_CON(13), 10, GFLAGS),
706 GATE(SCLK_LCDC_PWM1, "sclk_lcdc_pwm1", "xin24m", 0, RK3288_CLKGATE_CON(13), 11, GFLAGS), 706 GATE(SCLK_LCDC_PWM1, "sclk_lcdc_pwm1", "xin24m", 0, RK3288_CLKGATE_CON(13), 11, GFLAGS),
707 GATE(0, "sclk_pvtm_core", "xin24m", 0, RK3288_CLKGATE_CON(5), 9, GFLAGS), 707 GATE(SCLK_PVTM_CORE, "sclk_pvtm_core", "xin24m", 0, RK3288_CLKGATE_CON(5), 9, GFLAGS),
708 GATE(0, "sclk_pvtm_gpu", "xin24m", 0, RK3288_CLKGATE_CON(5), 10, GFLAGS), 708 GATE(SCLK_PVTM_GPU, "sclk_pvtm_gpu", "xin24m", 0, RK3288_CLKGATE_CON(5), 10, GFLAGS),
709 GATE(0, "sclk_mipidsi_24m", "xin24m", 0, RK3288_CLKGATE_CON(5), 15, GFLAGS), 709 GATE(0, "sclk_mipidsi_24m", "xin24m", 0, RK3288_CLKGATE_CON(5), 15, GFLAGS),
710 710
711 /* sclk_gpu gates */ 711 /* sclk_gpu gates */
@@ -805,6 +805,20 @@ static int rk3288_clk_suspend(void)
805 rk3288_saved_cru_regs[i] = 805 rk3288_saved_cru_regs[i] =
806 readl_relaxed(rk3288_cru_base + reg_id); 806 readl_relaxed(rk3288_cru_base + reg_id);
807 } 807 }
808
809 /*
810 * Switch PLLs other than DPLL (for SDRAM) to slow mode to
811 * avoid crashes on resume. The Mask ROM on the system will
812 * put APLL, CPLL, and GPLL into slow mode at resume time
813 * anyway (which is why we restore them), but we might not
814 * even make it to the Mask ROM if this isn't done at suspend
815 * time.
816 *
817 * NOTE: only APLL truly matters here, but we'll do them all.
818 */
819
820 writel_relaxed(0xf3030000, rk3288_cru_base + RK3288_MODE_CON);
821
808 return 0; 822 return 0;
809} 823}
810 824
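The constant written to RK3288_MODE_CON above is a Rockchip hiword-mask update: bits [31:16] select which of bits [15:0] take effect. A small host-side check of how 0xf3030000 decodes, assuming the MODE_CON field layout (APLL [1:0], DPLL [5:4], CPLL [9:8], GPLL [13:12], NPLL [15:14], mode 00 = slow 24 MHz), which comes from the RK3288 TRM rather than from this patch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* mode-field LSB positions: APLL, DPLL, CPLL, GPLL, NPLL */
	static const unsigned int shift[] = { 0, 4, 8, 12, 14 };
	uint32_t mask = 0;
	unsigned int i;

	for (i = 0; i < sizeof(shift) / sizeof(shift[0]); i++) {
		if (shift[i] == 4)	/* DPLL clocks SDRAM: leave it alone */
			continue;
		mask |= 0x3u << shift[i];
	}

	/* hiword-mask write: mask in [31:16], new field values
	 * (all zeroes, i.e. slow mode) in [15:0] */
	assert(((mask << 16) | 0x0) == 0xf3030000u);
	return 0;
}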
@@ -866,6 +880,14 @@ static void __init rk3288_clk_init(struct device_node *np)
866 pr_warn("%s: could not register clock hclk_vcodec_pre: %ld\n", 880 pr_warn("%s: could not register clock hclk_vcodec_pre: %ld\n",
867 __func__, PTR_ERR(clk)); 881 __func__, PTR_ERR(clk));
868 882
883 /* Watchdog pclk is controlled by RK3288_SGRF_SOC_CON0[1]. */
884 clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1);
885 if (IS_ERR(clk))
886 pr_warn("%s: could not register clock pclk_wdt: %ld\n",
887 __func__, PTR_ERR(clk));
888 else
889 rockchip_clk_add_lookup(clk, PCLK_WDT);
890
869 rockchip_clk_register_plls(rk3288_pll_clks, 891 rockchip_clk_register_plls(rk3288_pll_clks,
870 ARRAY_SIZE(rk3288_pll_clks), 892 ARRAY_SIZE(rk3288_pll_clks),
871 RK3288_GRF_SOC_STATUS1); 893 RK3288_GRF_SOC_STATUS1);
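The pclk_wdt registration above uses a 1/1 fixed factor because, per the comment, the watchdog's gate bit lives in SGRF_SOC_CON0 rather than in the CRU; from the clock tree's point of view the clock simply tracks its parent. A sketch of what the generic fixed-factor recalc reduces to for mult = div = 1 (not the driver's actual code):

static unsigned long pclk_wdt_recalc_sketch(unsigned long parent_rate)
{
	const unsigned int mult = 1, div = 1;

	return parent_rate * mult / div;	/* tracks pclk_pd_alive 1:1 */
}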
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index f2c2ccce49bb..454b02ae486a 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -82,6 +82,26 @@ static const struct of_device_id exynos_audss_clk_of_match[] = {
82 {}, 82 {},
83}; 83};
84 84
85static void exynos_audss_clk_teardown(void)
86{
87 int i;
88
89 for (i = EXYNOS_MOUT_AUDSS; i < EXYNOS_DOUT_SRP; i++) {
90 if (!IS_ERR(clk_table[i]))
91 clk_unregister_mux(clk_table[i]);
92 }
93
94 for (; i < EXYNOS_SRP_CLK; i++) {
95 if (!IS_ERR(clk_table[i]))
96 clk_unregister_divider(clk_table[i]);
97 }
98
99 for (; i < clk_data.clk_num; i++) {
100 if (!IS_ERR(clk_table[i]))
101 clk_unregister_gate(clk_table[i]);
102 }
103}
104
85/* register exynos_audss clocks */ 105/* register exynos_audss clocks */
86static int exynos_audss_clk_probe(struct platform_device *pdev) 106static int exynos_audss_clk_probe(struct platform_device *pdev)
87{ 107{
@@ -219,10 +239,7 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
219 return 0; 239 return 0;
220 240
221unregister: 241unregister:
222 for (i = 0; i < clk_data.clk_num; i++) { 242 exynos_audss_clk_teardown();
223 if (!IS_ERR(clk_table[i]))
224 clk_unregister(clk_table[i]);
225 }
226 243
227 if (!IS_ERR(epll)) 244 if (!IS_ERR(epll))
228 clk_disable_unprepare(epll); 245 clk_disable_unprepare(epll);
@@ -232,18 +249,13 @@ unregister:
232 249
233static int exynos_audss_clk_remove(struct platform_device *pdev) 250static int exynos_audss_clk_remove(struct platform_device *pdev)
234{ 251{
235 int i;
236
237#ifdef CONFIG_PM_SLEEP 252#ifdef CONFIG_PM_SLEEP
238 unregister_syscore_ops(&exynos_audss_clk_syscore_ops); 253 unregister_syscore_ops(&exynos_audss_clk_syscore_ops);
239#endif 254#endif
240 255
241 of_clk_del_provider(pdev->dev.of_node); 256 of_clk_del_provider(pdev->dev.of_node);
242 257
243 for (i = 0; i < clk_data.clk_num; i++) { 258 exynos_audss_clk_teardown();
244 if (!IS_ERR(clk_table[i]))
245 clk_unregister(clk_table[i]);
246 }
247 259
248 if (!IS_ERR(epll)) 260 if (!IS_ERR(epll))
249 clk_disable_unprepare(epll); 261 clk_disable_unprepare(epll);
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index 6e6cca392082..cc4c348d8a24 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -104,27 +104,6 @@
104#define PWR_CTRL1_USE_CORE1_WFI (1 << 1) 104#define PWR_CTRL1_USE_CORE1_WFI (1 << 1)
105#define PWR_CTRL1_USE_CORE0_WFI (1 << 0) 105#define PWR_CTRL1_USE_CORE0_WFI (1 << 0)
106 106
107/* list of PLLs to be registered */
108enum exynos3250_plls {
109 apll, mpll, vpll, upll,
110 nr_plls
111};
112
113/* list of PLLs in DMC block to be registered */
114enum exynos3250_dmc_plls {
115 bpll, epll,
116 nr_dmc_plls
117};
118
119static void __iomem *reg_base;
120static void __iomem *dmc_reg_base;
121
122/*
123 * Support for CMU save/restore across system suspends
124 */
125#ifdef CONFIG_PM_SLEEP
126static struct samsung_clk_reg_dump *exynos3250_clk_regs;
127
128static unsigned long exynos3250_cmu_clk_regs[] __initdata = { 107static unsigned long exynos3250_cmu_clk_regs[] __initdata = {
129 SRC_LEFTBUS, 108 SRC_LEFTBUS,
130 DIV_LEFTBUS, 109 DIV_LEFTBUS,
@@ -195,43 +174,6 @@ static unsigned long exynos3250_cmu_clk_regs[] __initdata = {
195 PWR_CTRL2, 174 PWR_CTRL2,
196}; 175};
197 176
198static int exynos3250_clk_suspend(void)
199{
200 samsung_clk_save(reg_base, exynos3250_clk_regs,
201 ARRAY_SIZE(exynos3250_cmu_clk_regs));
202 return 0;
203}
204
205static void exynos3250_clk_resume(void)
206{
207 samsung_clk_restore(reg_base, exynos3250_clk_regs,
208 ARRAY_SIZE(exynos3250_cmu_clk_regs));
209}
210
211static struct syscore_ops exynos3250_clk_syscore_ops = {
212 .suspend = exynos3250_clk_suspend,
213 .resume = exynos3250_clk_resume,
214};
215
216static void exynos3250_clk_sleep_init(void)
217{
218 exynos3250_clk_regs =
219 samsung_clk_alloc_reg_dump(exynos3250_cmu_clk_regs,
220 ARRAY_SIZE(exynos3250_cmu_clk_regs));
221 if (!exynos3250_clk_regs) {
222 pr_warn("%s: Failed to allocate sleep save data\n", __func__);
223 goto err;
224 }
225
226 register_syscore_ops(&exynos3250_clk_syscore_ops);
227 return;
228err:
229 kfree(exynos3250_clk_regs);
230}
231#else
232static inline void exynos3250_clk_sleep_init(void) { }
233#endif
234
235/* list of all parent clock list */ 177/* list of all parent clock list */
236PNAME(mout_vpllsrc_p) = { "fin_pll", }; 178PNAME(mout_vpllsrc_p) = { "fin_pll", };
237 179
@@ -782,18 +724,18 @@ static struct samsung_pll_rate_table exynos3250_vpll_rates[] = {
782 { /* sentinel */ } 724 { /* sentinel */ }
783}; 725};
784 726
785static struct samsung_pll_clock exynos3250_plls[nr_plls] __initdata = { 727static struct samsung_pll_clock exynos3250_plls[] __initdata = {
786 [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll", 728 PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
787 APLL_LOCK, APLL_CON0, NULL), 729 APLL_LOCK, APLL_CON0, exynos3250_pll_rates),
788 [mpll] = PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll", 730 PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
789 MPLL_LOCK, MPLL_CON0, NULL), 731 MPLL_LOCK, MPLL_CON0, exynos3250_pll_rates),
790 [vpll] = PLL(pll_36xx, CLK_FOUT_VPLL, "fout_vpll", "fin_pll", 732 PLL(pll_36xx, CLK_FOUT_VPLL, "fout_vpll", "fin_pll",
791 VPLL_LOCK, VPLL_CON0, NULL), 733 VPLL_LOCK, VPLL_CON0, exynos3250_vpll_rates),
792 [upll] = PLL(pll_35xx, CLK_FOUT_UPLL, "fout_upll", "fin_pll", 734 PLL(pll_35xx, CLK_FOUT_UPLL, "fout_upll", "fin_pll",
793 UPLL_LOCK, UPLL_CON0, NULL), 735 UPLL_LOCK, UPLL_CON0, exynos3250_pll_rates),
794}; 736};
795 737
796static void __init exynos3_core_down_clock(void) 738static void __init exynos3_core_down_clock(void __iomem *reg_base)
797{ 739{
798 unsigned int tmp; 740 unsigned int tmp;
799 741
@@ -814,38 +756,31 @@ static void __init exynos3_core_down_clock(void)
814 __raw_writel(0x0, reg_base + PWR_CTRL2); 756 __raw_writel(0x0, reg_base + PWR_CTRL2);
815} 757}
816 758
759static struct samsung_cmu_info cmu_info __initdata = {
760 .pll_clks = exynos3250_plls,
761 .nr_pll_clks = ARRAY_SIZE(exynos3250_plls),
762 .mux_clks = mux_clks,
763 .nr_mux_clks = ARRAY_SIZE(mux_clks),
764 .div_clks = div_clks,
765 .nr_div_clks = ARRAY_SIZE(div_clks),
766 .gate_clks = gate_clks,
767 .nr_gate_clks = ARRAY_SIZE(gate_clks),
768 .fixed_factor_clks = fixed_factor_clks,
769 .nr_fixed_factor_clks = ARRAY_SIZE(fixed_factor_clks),
770 .nr_clk_ids = CLK_NR_CLKS,
771 .clk_regs = exynos3250_cmu_clk_regs,
772 .nr_clk_regs = ARRAY_SIZE(exynos3250_cmu_clk_regs),
773};
774
817static void __init exynos3250_cmu_init(struct device_node *np) 775static void __init exynos3250_cmu_init(struct device_node *np)
818{ 776{
819 struct samsung_clk_provider *ctx; 777 struct samsung_clk_provider *ctx;
820 778
821 reg_base = of_iomap(np, 0); 779 ctx = samsung_cmu_register_one(np, &cmu_info);
822 if (!reg_base)
823 panic("%s: failed to map registers\n", __func__);
824
825 ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
826 if (!ctx) 780 if (!ctx)
827 panic("%s: unable to allocate context.\n", __func__); 781 return;
828
829 samsung_clk_register_fixed_factor(ctx, fixed_factor_clks,
830 ARRAY_SIZE(fixed_factor_clks));
831
832 exynos3250_plls[apll].rate_table = exynos3250_pll_rates;
833 exynos3250_plls[mpll].rate_table = exynos3250_pll_rates;
834 exynos3250_plls[vpll].rate_table = exynos3250_vpll_rates;
835 exynos3250_plls[upll].rate_table = exynos3250_pll_rates;
836
837 samsung_clk_register_pll(ctx, exynos3250_plls,
838 ARRAY_SIZE(exynos3250_plls), reg_base);
839
840 samsung_clk_register_mux(ctx, mux_clks, ARRAY_SIZE(mux_clks));
841 samsung_clk_register_div(ctx, div_clks, ARRAY_SIZE(div_clks));
842 samsung_clk_register_gate(ctx, gate_clks, ARRAY_SIZE(gate_clks));
843
844 exynos3_core_down_clock();
845 782
846 exynos3250_clk_sleep_init(); 783 exynos3_core_down_clock(ctx->reg_base);
847
848 samsung_clk_of_add_provider(np, ctx);
849} 784}
850CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init); 785CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init);
851 786
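Both exynos3250 init paths now funnel through samsung_cmu_register_one() driven by a samsung_cmu_info descriptor. The outline below is reconstructed purely from the open-coded sequence these hunks delete, as a reading aid; the real helper lives in drivers/clk/samsung/clk.c and additionally wires the clk_regs list into suspend/resume save and restore:

static struct samsung_clk_provider * __init
cmu_register_one_sketch(struct device_node *np, struct samsung_cmu_info *cmu)
{
	struct samsung_clk_provider *ctx;
	void __iomem *reg_base = of_iomap(np, 0);

	if (!reg_base)
		panic("%s: failed to map registers\n", __func__);

	ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
	if (!ctx)
		panic("%s: unable to allocate context.\n", __func__);

	/* each table is optional: only registered when provided */
	if (cmu->pll_clks)
		samsung_clk_register_pll(ctx, cmu->pll_clks,
					 cmu->nr_pll_clks, reg_base);
	if (cmu->mux_clks)
		samsung_clk_register_mux(ctx, cmu->mux_clks,
					 cmu->nr_mux_clks);
	if (cmu->div_clks)
		samsung_clk_register_div(ctx, cmu->div_clks,
					 cmu->nr_div_clks);
	if (cmu->gate_clks)
		samsung_clk_register_gate(ctx, cmu->gate_clks,
					  cmu->nr_gate_clks);
	if (cmu->fixed_factor_clks)
		samsung_clk_register_fixed_factor(ctx, cmu->fixed_factor_clks,
						  cmu->nr_fixed_factor_clks);
	/* ... plus clk_regs save/restore registration for suspend ... */

	samsung_clk_of_add_provider(np, ctx);
	return ctx;
}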
@@ -872,12 +807,6 @@ CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init);
872#define EPLL_CON2 0x111c 807#define EPLL_CON2 0x111c
873#define SRC_EPLL 0x1120 808#define SRC_EPLL 0x1120
874 809
875/*
876 * Support for CMU save/restore across system suspends
877 */
878#ifdef CONFIG_PM_SLEEP
879static struct samsung_clk_reg_dump *exynos3250_dmc_clk_regs;
880
881static unsigned long exynos3250_cmu_dmc_clk_regs[] __initdata = { 810static unsigned long exynos3250_cmu_dmc_clk_regs[] __initdata = {
882 BPLL_LOCK, 811 BPLL_LOCK,
883 BPLL_CON0, 812 BPLL_CON0,
@@ -899,43 +828,6 @@ static unsigned long exynos3250_cmu_dmc_clk_regs[] __initdata = {
899 SRC_EPLL, 828 SRC_EPLL,
900}; 829};
901 830
902static int exynos3250_dmc_clk_suspend(void)
903{
904 samsung_clk_save(dmc_reg_base, exynos3250_dmc_clk_regs,
905 ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs));
906 return 0;
907}
908
909static void exynos3250_dmc_clk_resume(void)
910{
911 samsung_clk_restore(dmc_reg_base, exynos3250_dmc_clk_regs,
912 ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs));
913}
914
915static struct syscore_ops exynos3250_dmc_clk_syscore_ops = {
916 .suspend = exynos3250_dmc_clk_suspend,
917 .resume = exynos3250_dmc_clk_resume,
918};
919
920static void exynos3250_dmc_clk_sleep_init(void)
921{
922 exynos3250_dmc_clk_regs =
923 samsung_clk_alloc_reg_dump(exynos3250_cmu_dmc_clk_regs,
924 ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs));
925 if (!exynos3250_dmc_clk_regs) {
926 pr_warn("%s: Failed to allocate sleep save data\n", __func__);
927 goto err;
928 }
929
930 register_syscore_ops(&exynos3250_dmc_clk_syscore_ops);
931 return;
932err:
933 kfree(exynos3250_dmc_clk_regs);
934}
935#else
936static inline void exynos3250_dmc_clk_sleep_init(void) { }
937#endif
938
939PNAME(mout_epll_p) = { "fin_pll", "fout_epll", }; 831PNAME(mout_epll_p) = { "fin_pll", "fout_epll", };
940PNAME(mout_bpll_p) = { "fin_pll", "fout_bpll", }; 832PNAME(mout_bpll_p) = { "fin_pll", "fout_bpll", };
941PNAME(mout_mpll_mif_p) = { "fin_pll", "sclk_mpll_mif", }; 833PNAME(mout_mpll_mif_p) = { "fin_pll", "sclk_mpll_mif", };
@@ -977,43 +869,28 @@ static struct samsung_div_clock dmc_div_clks[] __initdata = {
977 DIV(CLK_DIV_DMCD, "div_dmcd", "div_dmc", DIV_DMC1, 11, 3), 869 DIV(CLK_DIV_DMCD, "div_dmcd", "div_dmc", DIV_DMC1, 11, 3),
978}; 870};
979 871
980static struct samsung_pll_clock exynos3250_dmc_plls[nr_dmc_plls] __initdata = { 872static struct samsung_pll_clock exynos3250_dmc_plls[] __initdata = {
981 [bpll] = PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll", 873 PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll",
982 BPLL_LOCK, BPLL_CON0, NULL), 874 BPLL_LOCK, BPLL_CON0, exynos3250_pll_rates),
983 [epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll", 875 PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
984 EPLL_LOCK, EPLL_CON0, NULL), 876 EPLL_LOCK, EPLL_CON0, exynos3250_epll_rates),
877};
878
879static struct samsung_cmu_info dmc_cmu_info __initdata = {
880 .pll_clks = exynos3250_dmc_plls,
881 .nr_pll_clks = ARRAY_SIZE(exynos3250_dmc_plls),
882 .mux_clks = dmc_mux_clks,
883 .nr_mux_clks = ARRAY_SIZE(dmc_mux_clks),
884 .div_clks = dmc_div_clks,
885 .nr_div_clks = ARRAY_SIZE(dmc_div_clks),
886 .nr_clk_ids = NR_CLKS_DMC,
887 .clk_regs = exynos3250_cmu_dmc_clk_regs,
888 .nr_clk_regs = ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs),
985}; 889};
986 890
987static void __init exynos3250_cmu_dmc_init(struct device_node *np) 891static void __init exynos3250_cmu_dmc_init(struct device_node *np)
988{ 892{
989 struct samsung_clk_provider *ctx; 893 samsung_cmu_register_one(np, &dmc_cmu_info);
990
991 dmc_reg_base = of_iomap(np, 0);
992 if (!dmc_reg_base)
993 panic("%s: failed to map registers\n", __func__);
994
995 ctx = samsung_clk_init(np, dmc_reg_base, NR_CLKS_DMC);
996 if (!ctx)
997 panic("%s: unable to allocate context.\n", __func__);
998
999 exynos3250_dmc_plls[bpll].rate_table = exynos3250_pll_rates;
1000 exynos3250_dmc_plls[epll].rate_table = exynos3250_epll_rates;
1001
1002 pr_err("CLK registering epll bpll: %d, %d, %d, %d\n",
1003 exynos3250_dmc_plls[bpll].rate_table[0].rate,
1004 exynos3250_dmc_plls[bpll].rate_table[0].mdiv,
1005 exynos3250_dmc_plls[bpll].rate_table[0].pdiv,
1006 exynos3250_dmc_plls[bpll].rate_table[0].sdiv
1007 );
1008 samsung_clk_register_pll(ctx, exynos3250_dmc_plls,
1009 ARRAY_SIZE(exynos3250_dmc_plls), dmc_reg_base);
1010
1011 samsung_clk_register_mux(ctx, dmc_mux_clks, ARRAY_SIZE(dmc_mux_clks));
1012 samsung_clk_register_div(ctx, dmc_div_clks, ARRAY_SIZE(dmc_div_clks));
1013
1014 exynos3250_dmc_clk_sleep_init();
1015
1016 samsung_clk_of_add_provider(np, ctx);
1017} 894}
1018CLK_OF_DECLARE(exynos3250_cmu_dmc, "samsung,exynos3250-cmu-dmc", 895CLK_OF_DECLARE(exynos3250_cmu_dmc, "samsung,exynos3250-cmu-dmc",
1019 exynos3250_cmu_dmc_init); 896 exynos3250_cmu_dmc_init);
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 88e8c6bbd77f..51462e85675f 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -703,12 +703,12 @@ static struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
703 703
704/* list of divider clocks supported in all exynos4 soc's */ 704/* list of divider clocks supported in all exynos4 soc's */
705static struct samsung_div_clock exynos4_div_clks[] __initdata = { 705static struct samsung_div_clock exynos4_div_clks[] __initdata = {
706 DIV(0, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 3), 706 DIV(CLK_DIV_GDL, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 3),
707 DIV(0, "div_gpl", "div_gdl", DIV_LEFTBUS, 4, 3), 707 DIV(0, "div_gpl", "div_gdl", DIV_LEFTBUS, 4, 3),
708 DIV(0, "div_clkout_leftbus", "mout_clkout_leftbus", 708 DIV(0, "div_clkout_leftbus", "mout_clkout_leftbus",
709 CLKOUT_CMU_LEFTBUS, 8, 6), 709 CLKOUT_CMU_LEFTBUS, 8, 6),
710 710
711 DIV(0, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 3), 711 DIV(CLK_DIV_GDR, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 3),
712 DIV(0, "div_gpr", "div_gdr", DIV_RIGHTBUS, 4, 3), 712 DIV(0, "div_gpr", "div_gdr", DIV_RIGHTBUS, 4, 3),
713 DIV(0, "div_clkout_rightbus", "mout_clkout_rightbus", 713 DIV(0, "div_clkout_rightbus", "mout_clkout_rightbus",
714 CLKOUT_CMU_RIGHTBUS, 8, 6), 714 CLKOUT_CMU_RIGHTBUS, 8, 6),
@@ -781,10 +781,10 @@ static struct samsung_div_clock exynos4_div_clks[] __initdata = {
781 CLK_SET_RATE_PARENT, 0), 781 CLK_SET_RATE_PARENT, 0),
782 DIV(0, "div_clkout_top", "mout_clkout_top", CLKOUT_CMU_TOP, 8, 6), 782 DIV(0, "div_clkout_top", "mout_clkout_top", CLKOUT_CMU_TOP, 8, 6),
783 783
784 DIV(0, "div_acp", "mout_dmc_bus", DIV_DMC0, 0, 3), 784 DIV(CLK_DIV_ACP, "div_acp", "mout_dmc_bus", DIV_DMC0, 0, 3),
785 DIV(0, "div_acp_pclk", "div_acp", DIV_DMC0, 4, 3), 785 DIV(0, "div_acp_pclk", "div_acp", DIV_DMC0, 4, 3),
786 DIV(0, "div_dphy", "mout_dphy", DIV_DMC0, 8, 3), 786 DIV(0, "div_dphy", "mout_dphy", DIV_DMC0, 8, 3),
787 DIV(0, "div_dmc", "mout_dmc_bus", DIV_DMC0, 12, 3), 787 DIV(CLK_DIV_DMC, "div_dmc", "mout_dmc_bus", DIV_DMC0, 12, 3),
788 DIV(0, "div_dmcd", "div_dmc", DIV_DMC0, 16, 3), 788 DIV(0, "div_dmcd", "div_dmc", DIV_DMC0, 16, 3),
789 DIV(0, "div_dmcp", "div_dmcd", DIV_DMC0, 20, 3), 789 DIV(0, "div_dmcp", "div_dmcd", DIV_DMC0, 20, 3),
790 DIV(0, "div_pwi", "mout_pwi", DIV_DMC1, 8, 4), 790 DIV(0, "div_pwi", "mout_pwi", DIV_DMC1, 8, 4),
@@ -829,7 +829,7 @@ static struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
829 DIV_F(CLK_DIV_MCUISP1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, 829 DIV_F(CLK_DIV_MCUISP1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
830 8, 3, CLK_GET_RATE_NOCACHE, 0), 830 8, 3, CLK_GET_RATE_NOCACHE, 0),
831 DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4), 831 DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
832 DIV(0, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3), 832 DIV(CLK_DIV_C2C, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3),
833 DIV(0, "div_c2c_aclk", "div_c2c", DIV_DMC1, 12, 3), 833 DIV(0, "div_c2c_aclk", "div_c2c", DIV_DMC1, 12, 3),
834}; 834};
835 835
diff --git a/drivers/clk/samsung/clk-exynos4415.c b/drivers/clk/samsung/clk-exynos4415.c
index 2123fc251e0f..6c78b09c829f 100644
--- a/drivers/clk/samsung/clk-exynos4415.c
+++ b/drivers/clk/samsung/clk-exynos4415.c
@@ -113,19 +113,6 @@
113#define DIV_CPU0 0x14500 113#define DIV_CPU0 0x14500
114#define DIV_CPU1 0x14504 114#define DIV_CPU1 0x14504
115 115
116enum exynos4415_plls {
117 apll, epll, g3d_pll, isp_pll, disp_pll,
118 nr_plls,
119};
120
121static struct samsung_clk_provider *exynos4415_ctx;
122
123/*
124 * Support for CMU save/restore across system suspends
125 */
126#ifdef CONFIG_PM_SLEEP
127static struct samsung_clk_reg_dump *exynos4415_clk_regs;
128
129static unsigned long exynos4415_cmu_clk_regs[] __initdata = { 116static unsigned long exynos4415_cmu_clk_regs[] __initdata = {
130 SRC_LEFTBUS, 117 SRC_LEFTBUS,
131 DIV_LEFTBUS, 118 DIV_LEFTBUS,
@@ -219,41 +206,6 @@ static unsigned long exynos4415_cmu_clk_regs[] __initdata = {
219 DIV_CPU1, 206 DIV_CPU1,
220}; 207};
221 208
222static int exynos4415_clk_suspend(void)
223{
224 samsung_clk_save(exynos4415_ctx->reg_base, exynos4415_clk_regs,
225 ARRAY_SIZE(exynos4415_cmu_clk_regs));
226
227 return 0;
228}
229
230static void exynos4415_clk_resume(void)
231{
232 samsung_clk_restore(exynos4415_ctx->reg_base, exynos4415_clk_regs,
233 ARRAY_SIZE(exynos4415_cmu_clk_regs));
234}
235
236static struct syscore_ops exynos4415_clk_syscore_ops = {
237 .suspend = exynos4415_clk_suspend,
238 .resume = exynos4415_clk_resume,
239};
240
241static void exynos4415_clk_sleep_init(void)
242{
243 exynos4415_clk_regs =
244 samsung_clk_alloc_reg_dump(exynos4415_cmu_clk_regs,
245 ARRAY_SIZE(exynos4415_cmu_clk_regs));
246 if (!exynos4415_clk_regs) {
247 pr_warn("%s: Failed to allocate sleep save data\n", __func__);
248 return;
249 }
250
251 register_syscore_ops(&exynos4415_clk_syscore_ops);
252}
253#else
254static inline void exynos4415_clk_sleep_init(void) { }
255#endif
256
257/* list of all parent clock list */ 209/* list of all parent clock list */
258PNAME(mout_g3d_pllsrc_p) = { "fin_pll", }; 210PNAME(mout_g3d_pllsrc_p) = { "fin_pll", };
259 211
@@ -959,56 +911,40 @@ static struct samsung_pll_rate_table exynos4415_epll_rates[] = {
959 { /* sentinel */ } 911 { /* sentinel */ }
960}; 912};
961 913
962static struct samsung_pll_clock exynos4415_plls[nr_plls] __initdata = { 914static struct samsung_pll_clock exynos4415_plls[] __initdata = {
963 [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll", 915 PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
964 APLL_LOCK, APLL_CON0, NULL), 916 APLL_LOCK, APLL_CON0, exynos4415_pll_rates),
965 [epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll", 917 PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
966 EPLL_LOCK, EPLL_CON0, NULL), 918 EPLL_LOCK, EPLL_CON0, exynos4415_epll_rates),
967 [g3d_pll] = PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll", 919 PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll", "mout_g3d_pllsrc",
968 "mout_g3d_pllsrc", G3D_PLL_LOCK, G3D_PLL_CON0, NULL), 920 G3D_PLL_LOCK, G3D_PLL_CON0, exynos4415_pll_rates),
969 [isp_pll] = PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "fin_pll", 921 PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "fin_pll",
970 ISP_PLL_LOCK, ISP_PLL_CON0, NULL), 922 ISP_PLL_LOCK, ISP_PLL_CON0, exynos4415_pll_rates),
971 [disp_pll] = PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll", 923 PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll",
972 "fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, NULL), 924 "fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, exynos4415_pll_rates),
925};
926
927static struct samsung_cmu_info cmu_info __initdata = {
928 .pll_clks = exynos4415_plls,
929 .nr_pll_clks = ARRAY_SIZE(exynos4415_plls),
930 .mux_clks = exynos4415_mux_clks,
931 .nr_mux_clks = ARRAY_SIZE(exynos4415_mux_clks),
932 .div_clks = exynos4415_div_clks,
933 .nr_div_clks = ARRAY_SIZE(exynos4415_div_clks),
934 .gate_clks = exynos4415_gate_clks,
935 .nr_gate_clks = ARRAY_SIZE(exynos4415_gate_clks),
936 .fixed_clks = exynos4415_fixed_rate_clks,
937 .nr_fixed_clks = ARRAY_SIZE(exynos4415_fixed_rate_clks),
938 .fixed_factor_clks = exynos4415_fixed_factor_clks,
939 .nr_fixed_factor_clks = ARRAY_SIZE(exynos4415_fixed_factor_clks),
940 .nr_clk_ids = CLK_NR_CLKS,
941 .clk_regs = exynos4415_cmu_clk_regs,
942 .nr_clk_regs = ARRAY_SIZE(exynos4415_cmu_clk_regs),
973}; 943};
974 944
975static void __init exynos4415_cmu_init(struct device_node *np) 945static void __init exynos4415_cmu_init(struct device_node *np)
976{ 946{
977 void __iomem *reg_base; 947 samsung_cmu_register_one(np, &cmu_info);
978
979 reg_base = of_iomap(np, 0);
980 if (!reg_base)
981 panic("%s: failed to map registers\n", __func__);
982
983 exynos4415_ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
984 if (!exynos4415_ctx)
985 panic("%s: unable to allocate context.\n", __func__);
986
987 exynos4415_plls[apll].rate_table = exynos4415_pll_rates;
988 exynos4415_plls[epll].rate_table = exynos4415_epll_rates;
989 exynos4415_plls[g3d_pll].rate_table = exynos4415_pll_rates;
990 exynos4415_plls[isp_pll].rate_table = exynos4415_pll_rates;
991 exynos4415_plls[disp_pll].rate_table = exynos4415_pll_rates;
992
993 samsung_clk_register_fixed_factor(exynos4415_ctx,
994 exynos4415_fixed_factor_clks,
995 ARRAY_SIZE(exynos4415_fixed_factor_clks));
996 samsung_clk_register_fixed_rate(exynos4415_ctx,
997 exynos4415_fixed_rate_clks,
998 ARRAY_SIZE(exynos4415_fixed_rate_clks));
999
1000 samsung_clk_register_pll(exynos4415_ctx, exynos4415_plls,
1001 ARRAY_SIZE(exynos4415_plls), reg_base);
1002 samsung_clk_register_mux(exynos4415_ctx, exynos4415_mux_clks,
1003 ARRAY_SIZE(exynos4415_mux_clks));
1004 samsung_clk_register_div(exynos4415_ctx, exynos4415_div_clks,
1005 ARRAY_SIZE(exynos4415_div_clks));
1006 samsung_clk_register_gate(exynos4415_ctx, exynos4415_gate_clks,
1007 ARRAY_SIZE(exynos4415_gate_clks));
1008
1009 exynos4415_clk_sleep_init();
1010
1011 samsung_clk_of_add_provider(np, exynos4415_ctx);
1012} 948}
1013CLK_OF_DECLARE(exynos4415_cmu, "samsung,exynos4415-cmu", exynos4415_cmu_init); 949CLK_OF_DECLARE(exynos4415_cmu, "samsung,exynos4415-cmu", exynos4415_cmu_init);
1014 950
@@ -1027,16 +963,6 @@ CLK_OF_DECLARE(exynos4415_cmu, "samsung,exynos4415-cmu", exynos4415_cmu_init);
1027#define SRC_DMC 0x300 963#define SRC_DMC 0x300
1028#define DIV_DMC1 0x504 964#define DIV_DMC1 0x504
1029 965
1030enum exynos4415_dmc_plls {
1031 mpll, bpll,
1032 nr_dmc_plls,
1033};
1034
1035static struct samsung_clk_provider *exynos4415_dmc_ctx;
1036
1037#ifdef CONFIG_PM_SLEEP
1038static struct samsung_clk_reg_dump *exynos4415_dmc_clk_regs;
1039
1040static unsigned long exynos4415_cmu_dmc_clk_regs[] __initdata = { 966static unsigned long exynos4415_cmu_dmc_clk_regs[] __initdata = {
1041 MPLL_LOCK, 967 MPLL_LOCK,
1042 MPLL_CON0, 968 MPLL_CON0,
@@ -1050,42 +976,6 @@ static unsigned long exynos4415_cmu_dmc_clk_regs[] __initdata = {
1050 DIV_DMC1, 976 DIV_DMC1,
1051}; 977};
1052 978
1053static int exynos4415_dmc_clk_suspend(void)
1054{
1055 samsung_clk_save(exynos4415_dmc_ctx->reg_base,
1056 exynos4415_dmc_clk_regs,
1057 ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
1058 return 0;
1059}
1060
1061static void exynos4415_dmc_clk_resume(void)
1062{
1063 samsung_clk_restore(exynos4415_dmc_ctx->reg_base,
1064 exynos4415_dmc_clk_regs,
1065 ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
1066}
1067
1068static struct syscore_ops exynos4415_dmc_clk_syscore_ops = {
1069 .suspend = exynos4415_dmc_clk_suspend,
1070 .resume = exynos4415_dmc_clk_resume,
1071};
1072
1073static void exynos4415_dmc_clk_sleep_init(void)
1074{
1075 exynos4415_dmc_clk_regs =
1076 samsung_clk_alloc_reg_dump(exynos4415_cmu_dmc_clk_regs,
1077 ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
1078 if (!exynos4415_dmc_clk_regs) {
1079 pr_warn("%s: Failed to allocate sleep save data\n", __func__);
1080 return;
1081 }
1082
1083 register_syscore_ops(&exynos4415_dmc_clk_syscore_ops);
1084}
1085#else
1086static inline void exynos4415_dmc_clk_sleep_init(void) { }
1087#endif /* CONFIG_PM_SLEEP */
1088
1089PNAME(mout_mpll_p) = { "fin_pll", "fout_mpll", }; 979PNAME(mout_mpll_p) = { "fin_pll", "fout_mpll", };
1090PNAME(mout_bpll_p) = { "fin_pll", "fout_bpll", }; 980PNAME(mout_bpll_p) = { "fin_pll", "fout_bpll", };
1091PNAME(mbpll_p) = { "mout_mpll", "mout_bpll", }; 981PNAME(mbpll_p) = { "mout_mpll", "mout_bpll", };
@@ -1107,38 +997,28 @@ static struct samsung_div_clock exynos4415_dmc_div_clks[] __initdata = {
1107 DIV(CLK_DMC_DIV_MPLL_PRE, "div_mpll_pre", "mout_mpll", DIV_DMC1, 8, 2), 997 DIV(CLK_DMC_DIV_MPLL_PRE, "div_mpll_pre", "mout_mpll", DIV_DMC1, 8, 2),
1108}; 998};
1109 999
1110static struct samsung_pll_clock exynos4415_dmc_plls[nr_dmc_plls] __initdata = { 1000static struct samsung_pll_clock exynos4415_dmc_plls[] __initdata = {
1111 [mpll] = PLL(pll_35xx, CLK_DMC_FOUT_MPLL, "fout_mpll", "fin_pll", 1001 PLL(pll_35xx, CLK_DMC_FOUT_MPLL, "fout_mpll", "fin_pll",
1112 MPLL_LOCK, MPLL_CON0, NULL), 1002 MPLL_LOCK, MPLL_CON0, exynos4415_pll_rates),
1113 [bpll] = PLL(pll_35xx, CLK_DMC_FOUT_BPLL, "fout_bpll", "fin_pll", 1003 PLL(pll_35xx, CLK_DMC_FOUT_BPLL, "fout_bpll", "fin_pll",
1114 BPLL_LOCK, BPLL_CON0, NULL), 1004 BPLL_LOCK, BPLL_CON0, exynos4415_pll_rates),
1005};
1006
1007static struct samsung_cmu_info cmu_dmc_info __initdata = {
1008 .pll_clks = exynos4415_dmc_plls,
1009 .nr_pll_clks = ARRAY_SIZE(exynos4415_dmc_plls),
1010 .mux_clks = exynos4415_dmc_mux_clks,
1011 .nr_mux_clks = ARRAY_SIZE(exynos4415_dmc_mux_clks),
1012 .div_clks = exynos4415_dmc_div_clks,
1013 .nr_div_clks = ARRAY_SIZE(exynos4415_dmc_div_clks),
1014 .nr_clk_ids = NR_CLKS_DMC,
1015 .clk_regs = exynos4415_cmu_dmc_clk_regs,
1016 .nr_clk_regs = ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs),
1115}; 1017};
1116 1018
1117static void __init exynos4415_cmu_dmc_init(struct device_node *np) 1019static void __init exynos4415_cmu_dmc_init(struct device_node *np)
1118{ 1020{
1119 void __iomem *reg_base; 1021 samsung_cmu_register_one(np, &cmu_dmc_info);
1120
1121 reg_base = of_iomap(np, 0);
1122 if (!reg_base)
1123 panic("%s: failed to map registers\n", __func__);
1124
1125 exynos4415_dmc_ctx = samsung_clk_init(np, reg_base, NR_CLKS_DMC);
1126 if (!exynos4415_dmc_ctx)
1127 panic("%s: unable to allocate context.\n", __func__);
1128
1129 exynos4415_dmc_plls[mpll].rate_table = exynos4415_pll_rates;
1130 exynos4415_dmc_plls[bpll].rate_table = exynos4415_pll_rates;
1131
1132 samsung_clk_register_pll(exynos4415_dmc_ctx, exynos4415_dmc_plls,
1133 ARRAY_SIZE(exynos4415_dmc_plls), reg_base);
1134 samsung_clk_register_mux(exynos4415_dmc_ctx, exynos4415_dmc_mux_clks,
1135 ARRAY_SIZE(exynos4415_dmc_mux_clks));
1136 samsung_clk_register_div(exynos4415_dmc_ctx, exynos4415_dmc_div_clks,
1137 ARRAY_SIZE(exynos4415_dmc_div_clks));
1138
1139 exynos4415_dmc_clk_sleep_init();
1140
1141 samsung_clk_of_add_provider(np, exynos4415_dmc_ctx);
1142} 1022}
1143CLK_OF_DECLARE(exynos4415_cmu_dmc, "samsung,exynos4415-cmu-dmc", 1023CLK_OF_DECLARE(exynos4415_cmu_dmc, "samsung,exynos4415-cmu-dmc",
1144 exynos4415_cmu_dmc_init); 1024 exynos4415_cmu_dmc_init);
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
index ea4483b8d62e..03d36e847b78 100644
--- a/drivers/clk/samsung/clk-exynos7.c
+++ b/drivers/clk/samsung/clk-exynos7.c
@@ -34,6 +34,7 @@
34#define DIV_TOPC0 0x0600 34#define DIV_TOPC0 0x0600
35#define DIV_TOPC1 0x0604 35#define DIV_TOPC1 0x0604
36#define DIV_TOPC3 0x060C 36#define DIV_TOPC3 0x060C
37#define ENABLE_ACLK_TOPC1 0x0804
37 38
38static struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initdata = { 39static struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initdata = {
39 FFACTOR(0, "ffac_topc_bus0_pll_div2", "mout_bus0_pll_ctrl", 1, 2, 0), 40 FFACTOR(0, "ffac_topc_bus0_pll_div2", "mout_bus0_pll_ctrl", 1, 2, 0),
@@ -45,6 +46,7 @@ static struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initdata = {
45}; 46};
46 47
47/* List of parent clocks for Muxes in CMU_TOPC */ 48/* List of parent clocks for Muxes in CMU_TOPC */
49PNAME(mout_aud_pll_ctrl_p) = { "fin_pll", "fout_aud_pll" };
48PNAME(mout_bus0_pll_ctrl_p) = { "fin_pll", "fout_bus0_pll" }; 50PNAME(mout_bus0_pll_ctrl_p) = { "fin_pll", "fout_bus0_pll" };
49PNAME(mout_bus1_pll_ctrl_p) = { "fin_pll", "fout_bus1_pll" }; 51PNAME(mout_bus1_pll_ctrl_p) = { "fin_pll", "fout_bus1_pll" };
50PNAME(mout_cc_pll_ctrl_p) = { "fin_pll", "fout_cc_pll" }; 52PNAME(mout_cc_pll_ctrl_p) = { "fin_pll", "fout_cc_pll" };
@@ -104,9 +106,11 @@ static struct samsung_mux_clock topc_mux_clks[] __initdata = {
104 106
105 MUX(0, "mout_sclk_bus0_pll_out", mout_sclk_bus0_pll_out_p, 107 MUX(0, "mout_sclk_bus0_pll_out", mout_sclk_bus0_pll_out_p,
106 MUX_SEL_TOPC1, 16, 1), 108 MUX_SEL_TOPC1, 16, 1),
109 MUX(0, "mout_aud_pll_ctrl", mout_aud_pll_ctrl_p, MUX_SEL_TOPC1, 0, 1),
107 110
108 MUX(0, "mout_aclk_ccore_133", mout_topc_group2, MUX_SEL_TOPC2, 4, 2), 111 MUX(0, "mout_aclk_ccore_133", mout_topc_group2, MUX_SEL_TOPC2, 4, 2),
109 112
113 MUX(0, "mout_aclk_mscl_532", mout_topc_group2, MUX_SEL_TOPC3, 20, 2),
110 MUX(0, "mout_aclk_peris_66", mout_topc_group2, MUX_SEL_TOPC3, 24, 2), 114 MUX(0, "mout_aclk_peris_66", mout_topc_group2, MUX_SEL_TOPC3, 24, 2),
111}; 115};
112 116
@@ -114,6 +118,8 @@ static struct samsung_div_clock topc_div_clks[] __initdata = {
114 DIV(DOUT_ACLK_CCORE_133, "dout_aclk_ccore_133", "mout_aclk_ccore_133", 118 DIV(DOUT_ACLK_CCORE_133, "dout_aclk_ccore_133", "mout_aclk_ccore_133",
115 DIV_TOPC0, 4, 4), 119 DIV_TOPC0, 4, 4),
116 120
121 DIV(DOUT_ACLK_MSCL_532, "dout_aclk_mscl_532", "mout_aclk_mscl_532",
122 DIV_TOPC1, 20, 4),
117 DIV(DOUT_ACLK_PERIS, "dout_aclk_peris_66", "mout_aclk_peris_66", 123 DIV(DOUT_ACLK_PERIS, "dout_aclk_peris_66", "mout_aclk_peris_66",
118 DIV_TOPC1, 24, 4), 124 DIV_TOPC1, 24, 4),
119 125
@@ -125,6 +131,18 @@ static struct samsung_div_clock topc_div_clks[] __initdata = {
125 DIV_TOPC3, 12, 3), 131 DIV_TOPC3, 12, 3),
126 DIV(DOUT_SCLK_MFC_PLL, "dout_sclk_mfc_pll", "mout_mfc_pll_ctrl", 132 DIV(DOUT_SCLK_MFC_PLL, "dout_sclk_mfc_pll", "mout_mfc_pll_ctrl",
127 DIV_TOPC3, 16, 3), 133 DIV_TOPC3, 16, 3),
134 DIV(DOUT_SCLK_AUD_PLL, "dout_sclk_aud_pll", "mout_aud_pll_ctrl",
135 DIV_TOPC3, 28, 3),
136};
137
138static struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initdata = {
139 PLL_36XX_RATE(491520000, 20, 1, 0, 31457),
140 {},
141};
142
143static struct samsung_gate_clock topc_gate_clks[] __initdata = {
144 GATE(ACLK_MSCL_532, "aclk_mscl_532", "dout_aclk_mscl_532",
145 ENABLE_ACLK_TOPC1, 20, 0, 0),
128}; 146};
129 147
130static struct samsung_pll_clock topc_pll_clks[] __initdata = { 148static struct samsung_pll_clock topc_pll_clks[] __initdata = {
@@ -136,8 +154,8 @@ static struct samsung_pll_clock topc_pll_clks[] __initdata = {
136 BUS1_DPLL_CON0, NULL), 154 BUS1_DPLL_CON0, NULL),
137 PLL(pll_1452x, 0, "fout_mfc_pll", "fin_pll", MFC_PLL_LOCK, 155 PLL(pll_1452x, 0, "fout_mfc_pll", "fin_pll", MFC_PLL_LOCK,
138 MFC_PLL_CON0, NULL), 156 MFC_PLL_CON0, NULL),
139 PLL(pll_1460x, 0, "fout_aud_pll", "fin_pll", AUD_PLL_LOCK, 157 PLL(pll_1460x, FOUT_AUD_PLL, "fout_aud_pll", "fin_pll", AUD_PLL_LOCK,
140 AUD_PLL_CON0, NULL), 158 AUD_PLL_CON0, pll1460x_24mhz_tbl),
141}; 159};
142 160
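The single entry in pll1460x_24mhz_tbl can be sanity-checked against the usual Samsung fractional-PLL relation, assuming pll_1460x follows fout = (m + k/65536) * fin / (p * 2^s) with fin = 24 MHz:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t fin = 24000000;			/* fin_pll */
	const uint64_t m = 20, p = 1, s = 0, k = 31457;	/* table entry */
	uint64_t fout = (m * 65536 + k) * fin / (65536 * (p << s));

	/* prints 491519897: ~491.52 MHz, the nominal rate in the table.
	 * k = 31457 is 0.48 * 65536 rounded down, hence the last ~100 Hz. */
	printf("%llu\n", (unsigned long long)fout);
	return 0;
}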
143static struct samsung_cmu_info topc_cmu_info __initdata = { 161static struct samsung_cmu_info topc_cmu_info __initdata = {
@@ -147,6 +165,8 @@ static struct samsung_cmu_info topc_cmu_info __initdata = {
147 .nr_mux_clks = ARRAY_SIZE(topc_mux_clks), 165 .nr_mux_clks = ARRAY_SIZE(topc_mux_clks),
148 .div_clks = topc_div_clks, 166 .div_clks = topc_div_clks,
149 .nr_div_clks = ARRAY_SIZE(topc_div_clks), 167 .nr_div_clks = ARRAY_SIZE(topc_div_clks),
168 .gate_clks = topc_gate_clks,
169 .nr_gate_clks = ARRAY_SIZE(topc_gate_clks),
150 .fixed_factor_clks = topc_fixed_factor_clks, 170 .fixed_factor_clks = topc_fixed_factor_clks,
151 .nr_fixed_factor_clks = ARRAY_SIZE(topc_fixed_factor_clks), 171 .nr_fixed_factor_clks = ARRAY_SIZE(topc_fixed_factor_clks),
152 .nr_clk_ids = TOPC_NR_CLK, 172 .nr_clk_ids = TOPC_NR_CLK,
@@ -166,9 +186,18 @@ CLK_OF_DECLARE(exynos7_clk_topc, "samsung,exynos7-clock-topc",
166#define MUX_SEL_TOP00 0x0200 186#define MUX_SEL_TOP00 0x0200
167#define MUX_SEL_TOP01 0x0204 187#define MUX_SEL_TOP01 0x0204
168#define MUX_SEL_TOP03 0x020C 188#define MUX_SEL_TOP03 0x020C
189#define MUX_SEL_TOP0_PERIC0 0x0230
190#define MUX_SEL_TOP0_PERIC1 0x0234
191#define MUX_SEL_TOP0_PERIC2 0x0238
169#define MUX_SEL_TOP0_PERIC3 0x023C 192#define MUX_SEL_TOP0_PERIC3 0x023C
170#define DIV_TOP03 0x060C 193#define DIV_TOP03 0x060C
194#define DIV_TOP0_PERIC0 0x0630
195#define DIV_TOP0_PERIC1 0x0634
196#define DIV_TOP0_PERIC2 0x0638
171#define DIV_TOP0_PERIC3 0x063C 197#define DIV_TOP0_PERIC3 0x063C
198#define ENABLE_SCLK_TOP0_PERIC0 0x0A30
199#define ENABLE_SCLK_TOP0_PERIC1 0x0A34
200#define ENABLE_SCLK_TOP0_PERIC2 0x0A38
172#define ENABLE_SCLK_TOP0_PERIC3 0x0A3C 201#define ENABLE_SCLK_TOP0_PERIC3 0x0A3C
173 202
174/* List of parent clocks for Muxes in CMU_TOP0 */ 203/* List of parent clocks for Muxes in CMU_TOP0 */
@@ -176,6 +205,7 @@ PNAME(mout_bus0_pll_p) = { "fin_pll", "dout_sclk_bus0_pll" };
176PNAME(mout_bus1_pll_p) = { "fin_pll", "dout_sclk_bus1_pll" }; 205PNAME(mout_bus1_pll_p) = { "fin_pll", "dout_sclk_bus1_pll" };
177PNAME(mout_cc_pll_p) = { "fin_pll", "dout_sclk_cc_pll" }; 206PNAME(mout_cc_pll_p) = { "fin_pll", "dout_sclk_cc_pll" };
178PNAME(mout_mfc_pll_p) = { "fin_pll", "dout_sclk_mfc_pll" }; 207PNAME(mout_mfc_pll_p) = { "fin_pll", "dout_sclk_mfc_pll" };
208PNAME(mout_aud_pll_p) = { "fin_pll", "dout_sclk_aud_pll" };
179 209
180PNAME(mout_top0_half_bus0_pll_p) = {"mout_top0_bus0_pll", 210PNAME(mout_top0_half_bus0_pll_p) = {"mout_top0_bus0_pll",
181 "ffac_top0_bus0_pll_div2"}; 211 "ffac_top0_bus0_pll_div2"};
@@ -189,18 +219,34 @@ PNAME(mout_top0_half_mfc_pll_p) = {"mout_top0_mfc_pll",
189PNAME(mout_top0_group1) = {"mout_top0_half_bus0_pll", 219PNAME(mout_top0_group1) = {"mout_top0_half_bus0_pll",
190 "mout_top0_half_bus1_pll", "mout_top0_half_cc_pll", 220 "mout_top0_half_bus1_pll", "mout_top0_half_cc_pll",
191 "mout_top0_half_mfc_pll"}; 221 "mout_top0_half_mfc_pll"};
222PNAME(mout_top0_group3) = {"ioclk_audiocdclk0",
223 "ioclk_audiocdclk1", "ioclk_spdif_extclk",
224 "mout_top0_aud_pll", "mout_top0_half_bus0_pll",
225 "mout_top0_half_bus1_pll"};
226PNAME(mout_top0_group4) = {"ioclk_audiocdclk1", "mout_top0_aud_pll",
227 "mout_top0_half_bus0_pll", "mout_top0_half_bus1_pll"};
192 228
193static unsigned long top0_clk_regs[] __initdata = { 229static unsigned long top0_clk_regs[] __initdata = {
194 MUX_SEL_TOP00, 230 MUX_SEL_TOP00,
195 MUX_SEL_TOP01, 231 MUX_SEL_TOP01,
196 MUX_SEL_TOP03, 232 MUX_SEL_TOP03,
233 MUX_SEL_TOP0_PERIC0,
234 MUX_SEL_TOP0_PERIC1,
235 MUX_SEL_TOP0_PERIC2,
197 MUX_SEL_TOP0_PERIC3, 236 MUX_SEL_TOP0_PERIC3,
198 DIV_TOP03, 237 DIV_TOP03,
238 DIV_TOP0_PERIC0,
239 DIV_TOP0_PERIC1,
240 DIV_TOP0_PERIC2,
199 DIV_TOP0_PERIC3, 241 DIV_TOP0_PERIC3,
242 ENABLE_SCLK_TOP0_PERIC0,
243 ENABLE_SCLK_TOP0_PERIC1,
244 ENABLE_SCLK_TOP0_PERIC2,
200 ENABLE_SCLK_TOP0_PERIC3, 245 ENABLE_SCLK_TOP0_PERIC3,
201}; 246};
202 247
203static struct samsung_mux_clock top0_mux_clks[] __initdata = { 248static struct samsung_mux_clock top0_mux_clks[] __initdata = {
249 MUX(0, "mout_top0_aud_pll", mout_aud_pll_p, MUX_SEL_TOP00, 0, 1),
204 MUX(0, "mout_top0_mfc_pll", mout_mfc_pll_p, MUX_SEL_TOP00, 4, 1), 250 MUX(0, "mout_top0_mfc_pll", mout_mfc_pll_p, MUX_SEL_TOP00, 4, 1),
205 MUX(0, "mout_top0_cc_pll", mout_cc_pll_p, MUX_SEL_TOP00, 8, 1), 251 MUX(0, "mout_top0_cc_pll", mout_cc_pll_p, MUX_SEL_TOP00, 8, 1),
206 MUX(0, "mout_top0_bus1_pll", mout_bus1_pll_p, MUX_SEL_TOP00, 12, 1), 252 MUX(0, "mout_top0_bus1_pll", mout_bus1_pll_p, MUX_SEL_TOP00, 12, 1),
@@ -218,10 +264,20 @@ static struct samsung_mux_clock top0_mux_clks[] __initdata = {
218 MUX(0, "mout_aclk_peric1_66", mout_top0_group1, MUX_SEL_TOP03, 12, 2), 264 MUX(0, "mout_aclk_peric1_66", mout_top0_group1, MUX_SEL_TOP03, 12, 2),
219 MUX(0, "mout_aclk_peric0_66", mout_top0_group1, MUX_SEL_TOP03, 20, 2), 265 MUX(0, "mout_aclk_peric0_66", mout_top0_group1, MUX_SEL_TOP03, 20, 2),
220 266
267 MUX(0, "mout_sclk_spdif", mout_top0_group3, MUX_SEL_TOP0_PERIC0, 4, 3),
268 MUX(0, "mout_sclk_pcm1", mout_top0_group4, MUX_SEL_TOP0_PERIC0, 8, 2),
269 MUX(0, "mout_sclk_i2s1", mout_top0_group4, MUX_SEL_TOP0_PERIC0, 20, 2),
270
271 MUX(0, "mout_sclk_spi1", mout_top0_group1, MUX_SEL_TOP0_PERIC1, 8, 2),
272 MUX(0, "mout_sclk_spi0", mout_top0_group1, MUX_SEL_TOP0_PERIC1, 20, 2),
273
274 MUX(0, "mout_sclk_spi3", mout_top0_group1, MUX_SEL_TOP0_PERIC2, 8, 2),
275 MUX(0, "mout_sclk_spi2", mout_top0_group1, MUX_SEL_TOP0_PERIC2, 20, 2),
221 MUX(0, "mout_sclk_uart3", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 4, 2), 276 MUX(0, "mout_sclk_uart3", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 4, 2),
222 MUX(0, "mout_sclk_uart2", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 8, 2), 277 MUX(0, "mout_sclk_uart2", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 8, 2),
223 MUX(0, "mout_sclk_uart1", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 12, 2), 278 MUX(0, "mout_sclk_uart1", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 12, 2),
224 MUX(0, "mout_sclk_uart0", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 16, 2), 279 MUX(0, "mout_sclk_uart0", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 16, 2),
280 MUX(0, "mout_sclk_spi4", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 20, 2),
225}; 281};
226 282
227static struct samsung_div_clock top0_div_clks[] __initdata = { 283static struct samsung_div_clock top0_div_clks[] __initdata = {
@@ -230,13 +286,40 @@ static struct samsung_div_clock top0_div_clks[] __initdata = {
230 DIV(DOUT_ACLK_PERIC0, "dout_aclk_peric0_66", "mout_aclk_peric0_66", 286 DIV(DOUT_ACLK_PERIC0, "dout_aclk_peric0_66", "mout_aclk_peric0_66",
231 DIV_TOP03, 20, 6), 287 DIV_TOP03, 20, 6),
232 288
289 DIV(0, "dout_sclk_spdif", "mout_sclk_spdif", DIV_TOP0_PERIC0, 4, 4),
290 DIV(0, "dout_sclk_pcm1", "mout_sclk_pcm1", DIV_TOP0_PERIC0, 8, 12),
291 DIV(0, "dout_sclk_i2s1", "mout_sclk_i2s1", DIV_TOP0_PERIC0, 20, 10),
292
293 DIV(0, "dout_sclk_spi1", "mout_sclk_spi1", DIV_TOP0_PERIC1, 8, 12),
294 DIV(0, "dout_sclk_spi0", "mout_sclk_spi0", DIV_TOP0_PERIC1, 20, 12),
295
296 DIV(0, "dout_sclk_spi3", "mout_sclk_spi3", DIV_TOP0_PERIC2, 8, 12),
297 DIV(0, "dout_sclk_spi2", "mout_sclk_spi2", DIV_TOP0_PERIC2, 20, 12),
298
233 DIV(0, "dout_sclk_uart3", "mout_sclk_uart3", DIV_TOP0_PERIC3, 4, 4), 299 DIV(0, "dout_sclk_uart3", "mout_sclk_uart3", DIV_TOP0_PERIC3, 4, 4),
234 DIV(0, "dout_sclk_uart2", "mout_sclk_uart2", DIV_TOP0_PERIC3, 8, 4), 300 DIV(0, "dout_sclk_uart2", "mout_sclk_uart2", DIV_TOP0_PERIC3, 8, 4),
235 DIV(0, "dout_sclk_uart1", "mout_sclk_uart1", DIV_TOP0_PERIC3, 12, 4), 301 DIV(0, "dout_sclk_uart1", "mout_sclk_uart1", DIV_TOP0_PERIC3, 12, 4),
236 DIV(0, "dout_sclk_uart0", "mout_sclk_uart0", DIV_TOP0_PERIC3, 16, 4), 302 DIV(0, "dout_sclk_uart0", "mout_sclk_uart0", DIV_TOP0_PERIC3, 16, 4),
303 DIV(0, "dout_sclk_spi4", "mout_sclk_spi4", DIV_TOP0_PERIC3, 20, 12),
237}; 304};
238 305
239static struct samsung_gate_clock top0_gate_clks[] __initdata = { 306static struct samsung_gate_clock top0_gate_clks[] __initdata = {
307 GATE(CLK_SCLK_SPDIF, "sclk_spdif", "dout_sclk_spdif",
308 ENABLE_SCLK_TOP0_PERIC0, 4, CLK_SET_RATE_PARENT, 0),
309 GATE(CLK_SCLK_PCM1, "sclk_pcm1", "dout_sclk_pcm1",
310 ENABLE_SCLK_TOP0_PERIC0, 8, CLK_SET_RATE_PARENT, 0),
311 GATE(CLK_SCLK_I2S1, "sclk_i2s1", "dout_sclk_i2s1",
312 ENABLE_SCLK_TOP0_PERIC0, 20, CLK_SET_RATE_PARENT, 0),
313
314 GATE(CLK_SCLK_SPI1, "sclk_spi1", "dout_sclk_spi1",
315 ENABLE_SCLK_TOP0_PERIC1, 8, CLK_SET_RATE_PARENT, 0),
316 GATE(CLK_SCLK_SPI0, "sclk_spi0", "dout_sclk_spi0",
317 ENABLE_SCLK_TOP0_PERIC1, 20, CLK_SET_RATE_PARENT, 0),
318
319 GATE(CLK_SCLK_SPI3, "sclk_spi3", "dout_sclk_spi3",
320 ENABLE_SCLK_TOP0_PERIC2, 8, CLK_SET_RATE_PARENT, 0),
321 GATE(CLK_SCLK_SPI2, "sclk_spi2", "dout_sclk_spi2",
322 ENABLE_SCLK_TOP0_PERIC2, 20, CLK_SET_RATE_PARENT, 0),
240 GATE(CLK_SCLK_UART3, "sclk_uart3", "dout_sclk_uart3", 323 GATE(CLK_SCLK_UART3, "sclk_uart3", "dout_sclk_uart3",
241 ENABLE_SCLK_TOP0_PERIC3, 4, 0, 0), 324 ENABLE_SCLK_TOP0_PERIC3, 4, 0, 0),
242 GATE(CLK_SCLK_UART2, "sclk_uart2", "dout_sclk_uart2", 325 GATE(CLK_SCLK_UART2, "sclk_uart2", "dout_sclk_uart2",
@@ -245,6 +328,8 @@ static struct samsung_gate_clock top0_gate_clks[] __initdata = {
245 ENABLE_SCLK_TOP0_PERIC3, 12, 0, 0), 328 ENABLE_SCLK_TOP0_PERIC3, 12, 0, 0),
246 GATE(CLK_SCLK_UART0, "sclk_uart0", "dout_sclk_uart0", 329 GATE(CLK_SCLK_UART0, "sclk_uart0", "dout_sclk_uart0",
247 ENABLE_SCLK_TOP0_PERIC3, 16, 0, 0), 330 ENABLE_SCLK_TOP0_PERIC3, 16, 0, 0),
331 GATE(CLK_SCLK_SPI4, "sclk_spi4", "dout_sclk_spi4",
332 ENABLE_SCLK_TOP0_PERIC3, 20, CLK_SET_RATE_PARENT, 0),
248}; 333};
249 334
250static struct samsung_fixed_factor_clock top0_fixed_factor_clks[] __initdata = { 335static struct samsung_fixed_factor_clock top0_fixed_factor_clks[] __initdata = {
@@ -343,6 +428,8 @@ static struct samsung_mux_clock top1_mux_clks[] __initdata = {
343 MUX(0, "mout_aclk_fsys0_200", mout_top1_group1, MUX_SEL_TOP13, 28, 2), 428 MUX(0, "mout_aclk_fsys0_200", mout_top1_group1, MUX_SEL_TOP13, 28, 2),
344 429
345 MUX(0, "mout_sclk_mmc2", mout_top1_group1, MUX_SEL_TOP1_FSYS0, 24, 2), 430 MUX(0, "mout_sclk_mmc2", mout_top1_group1, MUX_SEL_TOP1_FSYS0, 24, 2),
431 MUX(0, "mout_sclk_usbdrd300", mout_top1_group1,
432 MUX_SEL_TOP1_FSYS0, 28, 2),
346 433
347 MUX(0, "mout_sclk_mmc1", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 24, 2), 434 MUX(0, "mout_sclk_mmc1", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 24, 2),
348 MUX(0, "mout_sclk_mmc0", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 28, 2), 435 MUX(0, "mout_sclk_mmc0", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 28, 2),
@@ -356,6 +443,8 @@ static struct samsung_div_clock top1_div_clks[] __initdata = {
356 443
357 DIV(DOUT_SCLK_MMC2, "dout_sclk_mmc2", "mout_sclk_mmc2", 444 DIV(DOUT_SCLK_MMC2, "dout_sclk_mmc2", "mout_sclk_mmc2",
358 DIV_TOP1_FSYS0, 24, 4), 445 DIV_TOP1_FSYS0, 24, 4),
446 DIV(0, "dout_sclk_usbdrd300", "mout_sclk_usbdrd300",
447 DIV_TOP1_FSYS0, 28, 4),
359 448
360 DIV(DOUT_SCLK_MMC1, "dout_sclk_mmc1", "mout_sclk_mmc1", 449 DIV(DOUT_SCLK_MMC1, "dout_sclk_mmc1", "mout_sclk_mmc1",
361 DIV_TOP1_FSYS1, 24, 4), 450 DIV_TOP1_FSYS1, 24, 4),
@@ -366,6 +455,8 @@ static struct samsung_div_clock top1_div_clks[] __initdata = {
366static struct samsung_gate_clock top1_gate_clks[] __initdata = { 455static struct samsung_gate_clock top1_gate_clks[] __initdata = {
367 GATE(CLK_SCLK_MMC2, "sclk_mmc2", "dout_sclk_mmc2", 456 GATE(CLK_SCLK_MMC2, "sclk_mmc2", "dout_sclk_mmc2",
368 ENABLE_SCLK_TOP1_FSYS0, 24, CLK_SET_RATE_PARENT, 0), 457 ENABLE_SCLK_TOP1_FSYS0, 24, CLK_SET_RATE_PARENT, 0),
458 GATE(0, "sclk_usbdrd300", "dout_sclk_usbdrd300",
459 ENABLE_SCLK_TOP1_FSYS0, 28, 0, 0),
369 460
370 GATE(CLK_SCLK_MMC1, "sclk_mmc1", "dout_sclk_mmc1", 461 GATE(CLK_SCLK_MMC1, "sclk_mmc1", "dout_sclk_mmc1",
371 ENABLE_SCLK_TOP1_FSYS1, 24, CLK_SET_RATE_PARENT, 0), 462 ENABLE_SCLK_TOP1_FSYS1, 24, CLK_SET_RATE_PARENT, 0),
@@ -514,6 +605,7 @@ static void __init exynos7_clk_peric0_init(struct device_node *np)
514/* Register Offset definitions for CMU_PERIC1 (0x14C80000) */ 605/* Register Offset definitions for CMU_PERIC1 (0x14C80000) */
515#define MUX_SEL_PERIC10 0x0200 606#define MUX_SEL_PERIC10 0x0200
516#define MUX_SEL_PERIC11 0x0204 607#define MUX_SEL_PERIC11 0x0204
608#define MUX_SEL_PERIC12 0x0208
517#define ENABLE_PCLK_PERIC1 0x0900 609#define ENABLE_PCLK_PERIC1 0x0900
518#define ENABLE_SCLK_PERIC10 0x0A00 610#define ENABLE_SCLK_PERIC10 0x0A00
519 611
@@ -525,10 +617,16 @@ PNAME(mout_aclk_peric1_66_p) = { "fin_pll", "dout_aclk_peric1_66" };
525PNAME(mout_sclk_uart1_p) = { "fin_pll", "sclk_uart1" }; 617PNAME(mout_sclk_uart1_p) = { "fin_pll", "sclk_uart1" };
526PNAME(mout_sclk_uart2_p) = { "fin_pll", "sclk_uart2" }; 618PNAME(mout_sclk_uart2_p) = { "fin_pll", "sclk_uart2" };
527PNAME(mout_sclk_uart3_p) = { "fin_pll", "sclk_uart3" }; 619PNAME(mout_sclk_uart3_p) = { "fin_pll", "sclk_uart3" };
620PNAME(mout_sclk_spi0_p) = { "fin_pll", "sclk_spi0" };
621PNAME(mout_sclk_spi1_p) = { "fin_pll", "sclk_spi1" };
622PNAME(mout_sclk_spi2_p) = { "fin_pll", "sclk_spi2" };
623PNAME(mout_sclk_spi3_p) = { "fin_pll", "sclk_spi3" };
624PNAME(mout_sclk_spi4_p) = { "fin_pll", "sclk_spi4" };
528 625
529static unsigned long peric1_clk_regs[] __initdata = { 626static unsigned long peric1_clk_regs[] __initdata = {
530 MUX_SEL_PERIC10, 627 MUX_SEL_PERIC10,
531 MUX_SEL_PERIC11, 628 MUX_SEL_PERIC11,
629 MUX_SEL_PERIC12,
532 ENABLE_PCLK_PERIC1, 630 ENABLE_PCLK_PERIC1,
533 ENABLE_SCLK_PERIC10, 631 ENABLE_SCLK_PERIC10,
534}; 632};
@@ -537,6 +635,16 @@ static struct samsung_mux_clock peric1_mux_clks[] __initdata = {
537 MUX(0, "mout_aclk_peric1_66_user", mout_aclk_peric1_66_p, 635 MUX(0, "mout_aclk_peric1_66_user", mout_aclk_peric1_66_p,
538 MUX_SEL_PERIC10, 0, 1), 636 MUX_SEL_PERIC10, 0, 1),
539 637
638 MUX_F(0, "mout_sclk_spi0_user", mout_sclk_spi0_p,
639 MUX_SEL_PERIC11, 0, 1, CLK_SET_RATE_PARENT, 0),
640 MUX_F(0, "mout_sclk_spi1_user", mout_sclk_spi1_p,
641 MUX_SEL_PERIC11, 4, 1, CLK_SET_RATE_PARENT, 0),
642 MUX_F(0, "mout_sclk_spi2_user", mout_sclk_spi2_p,
643 MUX_SEL_PERIC11, 8, 1, CLK_SET_RATE_PARENT, 0),
644 MUX_F(0, "mout_sclk_spi3_user", mout_sclk_spi3_p,
645 MUX_SEL_PERIC11, 12, 1, CLK_SET_RATE_PARENT, 0),
646 MUX_F(0, "mout_sclk_spi4_user", mout_sclk_spi4_p,
647 MUX_SEL_PERIC11, 16, 1, CLK_SET_RATE_PARENT, 0),
540 MUX(0, "mout_sclk_uart1_user", mout_sclk_uart1_p, 648 MUX(0, "mout_sclk_uart1_user", mout_sclk_uart1_p,
541 MUX_SEL_PERIC11, 20, 1), 649 MUX_SEL_PERIC11, 20, 1),
542 MUX(0, "mout_sclk_uart2_user", mout_sclk_uart2_p, 650 MUX(0, "mout_sclk_uart2_user", mout_sclk_uart2_p,
@@ -562,6 +670,22 @@ static struct samsung_gate_clock peric1_gate_clks[] __initdata = {
562 ENABLE_PCLK_PERIC1, 10, 0, 0), 670 ENABLE_PCLK_PERIC1, 10, 0, 0),
563 GATE(PCLK_UART3, "pclk_uart3", "mout_aclk_peric1_66_user", 671 GATE(PCLK_UART3, "pclk_uart3", "mout_aclk_peric1_66_user",
564 ENABLE_PCLK_PERIC1, 11, 0, 0), 672 ENABLE_PCLK_PERIC1, 11, 0, 0),
673 GATE(PCLK_SPI0, "pclk_spi0", "mout_aclk_peric1_66_user",
674 ENABLE_PCLK_PERIC1, 12, 0, 0),
675 GATE(PCLK_SPI1, "pclk_spi1", "mout_aclk_peric1_66_user",
676 ENABLE_PCLK_PERIC1, 13, 0, 0),
677 GATE(PCLK_SPI2, "pclk_spi2", "mout_aclk_peric1_66_user",
678 ENABLE_PCLK_PERIC1, 14, 0, 0),
679 GATE(PCLK_SPI3, "pclk_spi3", "mout_aclk_peric1_66_user",
680 ENABLE_PCLK_PERIC1, 15, 0, 0),
681 GATE(PCLK_SPI4, "pclk_spi4", "mout_aclk_peric1_66_user",
682 ENABLE_PCLK_PERIC1, 16, 0, 0),
683 GATE(PCLK_I2S1, "pclk_i2s1", "mout_aclk_peric1_66_user",
684 ENABLE_PCLK_PERIC1, 17, CLK_SET_RATE_PARENT, 0),
685 GATE(PCLK_PCM1, "pclk_pcm1", "mout_aclk_peric1_66_user",
686 ENABLE_PCLK_PERIC1, 18, 0, 0),
687 GATE(PCLK_SPDIF, "pclk_spdif", "mout_aclk_peric1_66_user",
688 ENABLE_PCLK_PERIC1, 19, 0, 0),
565 689
566 GATE(SCLK_UART1, "sclk_uart1_user", "mout_sclk_uart1_user", 690 GATE(SCLK_UART1, "sclk_uart1_user", "mout_sclk_uart1_user",
567 ENABLE_SCLK_PERIC10, 9, 0, 0), 691 ENABLE_SCLK_PERIC10, 9, 0, 0),
@@ -569,6 +693,22 @@ static struct samsung_gate_clock peric1_gate_clks[] __initdata = {
569 ENABLE_SCLK_PERIC10, 10, 0, 0), 693 ENABLE_SCLK_PERIC10, 10, 0, 0),
570 GATE(SCLK_UART3, "sclk_uart3_user", "mout_sclk_uart3_user", 694 GATE(SCLK_UART3, "sclk_uart3_user", "mout_sclk_uart3_user",
571 ENABLE_SCLK_PERIC10, 11, 0, 0), 695 ENABLE_SCLK_PERIC10, 11, 0, 0),
696 GATE(SCLK_SPI0, "sclk_spi0_user", "mout_sclk_spi0_user",
697 ENABLE_SCLK_PERIC10, 12, CLK_SET_RATE_PARENT, 0),
698 GATE(SCLK_SPI1, "sclk_spi1_user", "mout_sclk_spi1_user",
699 ENABLE_SCLK_PERIC10, 13, CLK_SET_RATE_PARENT, 0),
700 GATE(SCLK_SPI2, "sclk_spi2_user", "mout_sclk_spi2_user",
701 ENABLE_SCLK_PERIC10, 14, CLK_SET_RATE_PARENT, 0),
702 GATE(SCLK_SPI3, "sclk_spi3_user", "mout_sclk_spi3_user",
703 ENABLE_SCLK_PERIC10, 15, CLK_SET_RATE_PARENT, 0),
704 GATE(SCLK_SPI4, "sclk_spi4_user", "mout_sclk_spi4_user",
705 ENABLE_SCLK_PERIC10, 16, CLK_SET_RATE_PARENT, 0),
706 GATE(SCLK_I2S1, "sclk_i2s1_user", "sclk_i2s1",
707 ENABLE_SCLK_PERIC10, 17, CLK_SET_RATE_PARENT, 0),
708 GATE(SCLK_PCM1, "sclk_pcm1_user", "sclk_pcm1",
709 ENABLE_SCLK_PERIC10, 18, CLK_SET_RATE_PARENT, 0),
710 GATE(SCLK_SPDIF, "sclk_spdif_user", "sclk_spdif",
711 ENABLE_SCLK_PERIC10, 19, CLK_SET_RATE_PARENT, 0),
572}; 712};
573 713
574static struct samsung_cmu_info peric1_cmu_info __initdata = { 714static struct samsung_cmu_info peric1_cmu_info __initdata = {
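All of the new PERIC1 SPI user-muxes and gates carry CLK_SET_RATE_PARENT, so the leaf clock a driver holds can re-rate the divider back in CMU_TOP0, two CMUs upstream. A hypothetical consumer-side call (the device and clock-lookup names are assumptions, not taken from this patch):

static int spi_probe_sketch(struct platform_device *pdev)
{
	struct clk *sclk = devm_clk_get(&pdev->dev, "spi_busclk0");

	if (IS_ERR(sclk))
		return PTR_ERR(sclk);

	/* walks sclk_spi0_user -> mout_sclk_spi0_user -> sclk_spi0 ->
	 * dout_sclk_spi0, re-programming the 12-bit divider in CMU_TOP0 */
	return clk_set_rate(sclk, 100 * 1000 * 1000);
}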
@@ -647,7 +787,12 @@ CLK_OF_DECLARE(exynos7_clk_peris, "samsung,exynos7-clock-peris",
647/* Register Offset definitions for CMU_FSYS0 (0x10E90000) */ 787/* Register Offset definitions for CMU_FSYS0 (0x10E90000) */
648#define MUX_SEL_FSYS00 0x0200 788#define MUX_SEL_FSYS00 0x0200
649#define MUX_SEL_FSYS01 0x0204 789#define MUX_SEL_FSYS01 0x0204
790#define MUX_SEL_FSYS02 0x0208
791#define ENABLE_ACLK_FSYS00 0x0800
650#define ENABLE_ACLK_FSYS01 0x0804 792#define ENABLE_ACLK_FSYS01 0x0804
793#define ENABLE_SCLK_FSYS01 0x0A04
794#define ENABLE_SCLK_FSYS02 0x0A08
795#define ENABLE_SCLK_FSYS04 0x0A10
651 796
652/* 797/*
653 * List of parent clocks for Muxes in CMU_FSYS0 798 * List of parent clocks for Muxes in CMU_FSYS0
@@ -655,10 +800,29 @@ CLK_OF_DECLARE(exynos7_clk_peris, "samsung,exynos7-clock-peris",
655PNAME(mout_aclk_fsys0_200_p) = { "fin_pll", "dout_aclk_fsys0_200" }; 800PNAME(mout_aclk_fsys0_200_p) = { "fin_pll", "dout_aclk_fsys0_200" };
656PNAME(mout_sclk_mmc2_p) = { "fin_pll", "sclk_mmc2" }; 801PNAME(mout_sclk_mmc2_p) = { "fin_pll", "sclk_mmc2" };
657 802
803PNAME(mout_sclk_usbdrd300_p) = { "fin_pll", "sclk_usbdrd300" };
804PNAME(mout_phyclk_usbdrd300_udrd30_phyclk_p) = { "fin_pll",
805 "phyclk_usbdrd300_udrd30_phyclock" };
806PNAME(mout_phyclk_usbdrd300_udrd30_pipe_pclk_p) = { "fin_pll",
807 "phyclk_usbdrd300_udrd30_pipe_pclk" };
808
809/* fixed rate clocks used in the FSYS0 block */
810struct samsung_fixed_rate_clock fixed_rate_clks_fsys0[] __initdata = {
811 FRATE(0, "phyclk_usbdrd300_udrd30_phyclock", NULL,
812 CLK_IS_ROOT, 60000000),
813 FRATE(0, "phyclk_usbdrd300_udrd30_pipe_pclk", NULL,
814 CLK_IS_ROOT, 125000000),
815};
816
658static unsigned long fsys0_clk_regs[] __initdata = { 817static unsigned long fsys0_clk_regs[] __initdata = {
659 MUX_SEL_FSYS00, 818 MUX_SEL_FSYS00,
660 MUX_SEL_FSYS01, 819 MUX_SEL_FSYS01,
820 MUX_SEL_FSYS02,
821 ENABLE_ACLK_FSYS00,
661 ENABLE_ACLK_FSYS01, 822 ENABLE_ACLK_FSYS01,
823 ENABLE_SCLK_FSYS01,
824 ENABLE_SCLK_FSYS02,
825 ENABLE_SCLK_FSYS04,
662}; 826};
663 827
664static struct samsung_mux_clock fsys0_mux_clks[] __initdata = { 828static struct samsung_mux_clock fsys0_mux_clks[] __initdata = {
@@ -666,11 +830,49 @@ static struct samsung_mux_clock fsys0_mux_clks[] __initdata = {
666 MUX_SEL_FSYS00, 24, 1), 830 MUX_SEL_FSYS00, 24, 1),
667 831
668 MUX(0, "mout_sclk_mmc2_user", mout_sclk_mmc2_p, MUX_SEL_FSYS01, 24, 1), 832 MUX(0, "mout_sclk_mmc2_user", mout_sclk_mmc2_p, MUX_SEL_FSYS01, 24, 1),
833 MUX(0, "mout_sclk_usbdrd300_user", mout_sclk_usbdrd300_p,
834 MUX_SEL_FSYS01, 28, 1),
835
836 MUX(0, "mout_phyclk_usbdrd300_udrd30_pipe_pclk_user",
837 mout_phyclk_usbdrd300_udrd30_pipe_pclk_p,
838 MUX_SEL_FSYS02, 24, 1),
839 MUX(0, "mout_phyclk_usbdrd300_udrd30_phyclk_user",
840 mout_phyclk_usbdrd300_udrd30_phyclk_p,
841 MUX_SEL_FSYS02, 28, 1),
669}; 842};
670 843
671static struct samsung_gate_clock fsys0_gate_clks[] __initdata = { 844static struct samsung_gate_clock fsys0_gate_clks[] __initdata = {
845 GATE(ACLK_AXIUS_USBDRD30X_FSYS0X, "aclk_axius_usbdrd30x_fsys0x",
846 "mout_aclk_fsys0_200_user",
847 ENABLE_ACLK_FSYS00, 19, 0, 0),
848 GATE(ACLK_PDMA1, "aclk_pdma1", "mout_aclk_fsys0_200_user",
849 ENABLE_ACLK_FSYS00, 3, 0, 0),
850 GATE(ACLK_PDMA0, "aclk_pdma0", "mout_aclk_fsys0_200_user",
851 ENABLE_ACLK_FSYS00, 4, 0, 0),
852
853 GATE(ACLK_USBDRD300, "aclk_usbdrd300", "mout_aclk_fsys0_200_user",
854 ENABLE_ACLK_FSYS01, 29, 0, 0),
672 GATE(ACLK_MMC2, "aclk_mmc2", "mout_aclk_fsys0_200_user", 855 GATE(ACLK_MMC2, "aclk_mmc2", "mout_aclk_fsys0_200_user",
673 ENABLE_ACLK_FSYS01, 31, 0, 0), 856 ENABLE_ACLK_FSYS01, 31, 0, 0),
857
858 GATE(SCLK_USBDRD300_SUSPENDCLK, "sclk_usbdrd300_suspendclk",
859 "mout_sclk_usbdrd300_user",
860 ENABLE_SCLK_FSYS01, 4, 0, 0),
861 GATE(SCLK_USBDRD300_REFCLK, "sclk_usbdrd300_refclk", "fin_pll",
862 ENABLE_SCLK_FSYS01, 8, 0, 0),
863
864 GATE(PHYCLK_USBDRD300_UDRD30_PIPE_PCLK_USER,
865 "phyclk_usbdrd300_udrd30_pipe_pclk_user",
866 "mout_phyclk_usbdrd300_udrd30_pipe_pclk_user",
867 ENABLE_SCLK_FSYS02, 24, 0, 0),
868 GATE(PHYCLK_USBDRD300_UDRD30_PHYCLK_USER,
869 "phyclk_usbdrd300_udrd30_phyclk_user",
870 "mout_phyclk_usbdrd300_udrd30_phyclk_user",
871 ENABLE_SCLK_FSYS02, 28, 0, 0),
872
873 GATE(OSCCLK_PHY_CLKOUT_USB30_PHY, "oscclk_phy_clkout_usb30_phy",
874 "fin_pll",
875 ENABLE_SCLK_FSYS04, 28, 0, 0),
674}; 876};
675 877
676static struct samsung_cmu_info fsys0_cmu_info __initdata = { 878static struct samsung_cmu_info fsys0_cmu_info __initdata = {
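The two FRATE() entries above model clocks generated inside the USB 3.0 PHY and fed back into the CMU; registering them as fixed-rate roots gives the phyclk user-muxes a parent with a plausible nominal rate before the PHY driver is up. Roughly what one entry expands to in generic CCF terms (a sketch; the Samsung driver actually routes this through its own samsung_clk_register_fixed_rate() helper):

/* no parent (a root clock); CLK_IS_ROOT still exists in this era,
 * as the FRATE() entries above show; nominal 60 MHz from the PHY */
struct clk *phyclk = clk_register_fixed_rate(NULL,
		"phyclk_usbdrd300_udrd30_phyclock", NULL,
		CLK_IS_ROOT, 60000000);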
@@ -741,3 +943,205 @@ static void __init exynos7_clk_fsys1_init(struct device_node *np)
741 943
742CLK_OF_DECLARE(exynos7_clk_fsys1, "samsung,exynos7-clock-fsys1", 944CLK_OF_DECLARE(exynos7_clk_fsys1, "samsung,exynos7-clock-fsys1",
743 exynos7_clk_fsys1_init); 945 exynos7_clk_fsys1_init);
946
947#define MUX_SEL_MSCL 0x0200
948#define DIV_MSCL 0x0600
949#define ENABLE_ACLK_MSCL 0x0800
950#define ENABLE_PCLK_MSCL 0x0900
951
952/* List of parent clocks for Muxes in CMU_MSCL */
953PNAME(mout_aclk_mscl_532_user_p) = { "fin_pll", "aclk_mscl_532" };
954
955static unsigned long mscl_clk_regs[] __initdata = {
956 MUX_SEL_MSCL,
957 DIV_MSCL,
958 ENABLE_ACLK_MSCL,
959 ENABLE_PCLK_MSCL,
960};
961
962static struct samsung_mux_clock mscl_mux_clks[] __initdata = {
963 MUX(USERMUX_ACLK_MSCL_532, "usermux_aclk_mscl_532",
964 mout_aclk_mscl_532_user_p, MUX_SEL_MSCL, 0, 1),
965};
966static struct samsung_div_clock mscl_div_clks[] __initdata = {
967 DIV(DOUT_PCLK_MSCL, "dout_pclk_mscl", "usermux_aclk_mscl_532",
968 DIV_MSCL, 0, 3),
969};
970static struct samsung_gate_clock mscl_gate_clks[] __initdata = {
971
972 GATE(ACLK_MSCL_0, "aclk_mscl_0", "usermux_aclk_mscl_532",
973 ENABLE_ACLK_MSCL, 31, 0, 0),
974 GATE(ACLK_MSCL_1, "aclk_mscl_1", "usermux_aclk_mscl_532",
975 ENABLE_ACLK_MSCL, 30, 0, 0),
976 GATE(ACLK_JPEG, "aclk_jpeg", "usermux_aclk_mscl_532",
977 ENABLE_ACLK_MSCL, 29, 0, 0),
978 GATE(ACLK_G2D, "aclk_g2d", "usermux_aclk_mscl_532",
979 ENABLE_ACLK_MSCL, 28, 0, 0),
980 GATE(ACLK_LH_ASYNC_SI_MSCL_0, "aclk_lh_async_si_mscl_0",
981 "usermux_aclk_mscl_532",
982 ENABLE_ACLK_MSCL, 27, 0, 0),
983 GATE(ACLK_LH_ASYNC_SI_MSCL_1, "aclk_lh_async_si_mscl_1",
984 "usermux_aclk_mscl_532",
985 ENABLE_ACLK_MSCL, 26, 0, 0),
986 GATE(ACLK_XIU_MSCLX_0, "aclk_xiu_msclx_0", "usermux_aclk_mscl_532",
987 ENABLE_ACLK_MSCL, 25, 0, 0),
988 GATE(ACLK_XIU_MSCLX_1, "aclk_xiu_msclx_1", "usermux_aclk_mscl_532",
989 ENABLE_ACLK_MSCL, 24, 0, 0),
990 GATE(ACLK_AXI2ACEL_BRIDGE, "aclk_axi2acel_bridge",
991 "usermux_aclk_mscl_532",
992 ENABLE_ACLK_MSCL, 23, 0, 0),
993 GATE(ACLK_QE_MSCL_0, "aclk_qe_mscl_0", "usermux_aclk_mscl_532",
994 ENABLE_ACLK_MSCL, 22, 0, 0),
995 GATE(ACLK_QE_MSCL_1, "aclk_qe_mscl_1", "usermux_aclk_mscl_532",
996 ENABLE_ACLK_MSCL, 21, 0, 0),
997 GATE(ACLK_QE_JPEG, "aclk_qe_jpeg", "usermux_aclk_mscl_532",
998 ENABLE_ACLK_MSCL, 20, 0, 0),
999 GATE(ACLK_QE_G2D, "aclk_qe_g2d", "usermux_aclk_mscl_532",
1000 ENABLE_ACLK_MSCL, 19, 0, 0),
1001 GATE(ACLK_PPMU_MSCL_0, "aclk_ppmu_mscl_0", "usermux_aclk_mscl_532",
1002 ENABLE_ACLK_MSCL, 18, 0, 0),
1003 GATE(ACLK_PPMU_MSCL_1, "aclk_ppmu_mscl_1", "usermux_aclk_mscl_532",
1004 ENABLE_ACLK_MSCL, 17, 0, 0),
1005 GATE(ACLK_MSCLNP_133, "aclk_msclnp_133", "usermux_aclk_mscl_532",
1006 ENABLE_ACLK_MSCL, 16, 0, 0),
1007 GATE(ACLK_AHB2APB_MSCL0P, "aclk_ahb2apb_mscl0p",
1008 "usermux_aclk_mscl_532",
1009 ENABLE_ACLK_MSCL, 15, 0, 0),
1010 GATE(ACLK_AHB2APB_MSCL1P, "aclk_ahb2apb_mscl1p",
1011 "usermux_aclk_mscl_532",
1012 ENABLE_ACLK_MSCL, 14, 0, 0),
1013
1014 GATE(PCLK_MSCL_0, "pclk_mscl_0", "dout_pclk_mscl",
1015 ENABLE_PCLK_MSCL, 31, 0, 0),
1016 GATE(PCLK_MSCL_1, "pclk_mscl_1", "dout_pclk_mscl",
1017 ENABLE_PCLK_MSCL, 30, 0, 0),
1018 GATE(PCLK_JPEG, "pclk_jpeg", "dout_pclk_mscl",
1019 ENABLE_PCLK_MSCL, 29, 0, 0),
1020 GATE(PCLK_G2D, "pclk_g2d", "dout_pclk_mscl",
1021 ENABLE_PCLK_MSCL, 28, 0, 0),
1022 GATE(PCLK_QE_MSCL_0, "pclk_qe_mscl_0", "dout_pclk_mscl",
1023 ENABLE_PCLK_MSCL, 27, 0, 0),
1024 GATE(PCLK_QE_MSCL_1, "pclk_qe_mscl_1", "dout_pclk_mscl",
1025 ENABLE_PCLK_MSCL, 26, 0, 0),
1026 GATE(PCLK_QE_JPEG, "pclk_qe_jpeg", "dout_pclk_mscl",
1027 ENABLE_PCLK_MSCL, 25, 0, 0),
1028 GATE(PCLK_QE_G2D, "pclk_qe_g2d", "dout_pclk_mscl",
1029 ENABLE_PCLK_MSCL, 24, 0, 0),
1030 GATE(PCLK_PPMU_MSCL_0, "pclk_ppmu_mscl_0", "dout_pclk_mscl",
1031 ENABLE_PCLK_MSCL, 23, 0, 0),
1032 GATE(PCLK_PPMU_MSCL_1, "pclk_ppmu_mscl_1", "dout_pclk_mscl",
1033 ENABLE_PCLK_MSCL, 22, 0, 0),
1034 GATE(PCLK_AXI2ACEL_BRIDGE, "pclk_axi2acel_bridge", "dout_pclk_mscl",
1035 ENABLE_PCLK_MSCL, 21, 0, 0),
1036 GATE(PCLK_PMU_MSCL, "pclk_pmu_mscl", "dout_pclk_mscl",
1037 ENABLE_PCLK_MSCL, 20, 0, 0),
1038};
1039
1040static struct samsung_cmu_info mscl_cmu_info __initdata = {
1041 .mux_clks = mscl_mux_clks,
1042 .nr_mux_clks = ARRAY_SIZE(mscl_mux_clks),
1043 .div_clks = mscl_div_clks,
1044 .nr_div_clks = ARRAY_SIZE(mscl_div_clks),
1045 .gate_clks = mscl_gate_clks,
1046 .nr_gate_clks = ARRAY_SIZE(mscl_gate_clks),
1047 .nr_clk_ids = MSCL_NR_CLK,
1048 .clk_regs = mscl_clk_regs,
1049 .nr_clk_regs = ARRAY_SIZE(mscl_clk_regs),
1050};
1051
1052static void __init exynos7_clk_mscl_init(struct device_node *np)
1053{
1054 samsung_cmu_register_one(np, &mscl_cmu_info);
1055}
1056
1057CLK_OF_DECLARE(exynos7_clk_mscl, "samsung,exynos7-clock-mscl",
1058 exynos7_clk_mscl_init);
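The GATE() rows in these CMU tables each reduce to a single bit in one of the ENABLE_* registers listed in mscl_clk_regs. A standalone sketch of how the ACLK_MSCL_0 gate (ENABLE_ACLK_MSCL, bit 31) would be decoded; the register value here is invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define ENABLE_ACLK_MSCL 0x0800		/* CMU-relative offset, from the table above */

int main(void)
{
	uint32_t regval = 0x80000000;	/* hypothetical readl() of the enable register */
	unsigned int bit = 31;		/* ACLK_MSCL_0's bit, per its GATE() row */

	printf("aclk_mscl_0 is %s\n",
	       (regval & (1u << bit)) ? "running" : "gated");
	return 0;
}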
1059
1060/* Register Offset definitions for CMU_AUD (0x114C0000) */
1061#define MUX_SEL_AUD 0x0200
1062#define DIV_AUD0 0x0600
1063#define DIV_AUD1 0x0604
1064#define ENABLE_ACLK_AUD 0x0800
1065#define ENABLE_PCLK_AUD 0x0900
1066#define ENABLE_SCLK_AUD 0x0A00
1067
1068/*
1069 * List of parent clocks for Muxes in CMU_AUD
1070 */
1071PNAME(mout_aud_pll_user_p) = { "fin_pll", "fout_aud_pll" };
1072PNAME(mout_aud_group_p) = { "dout_aud_cdclk", "ioclk_audiocdclk0" };
1073
1074static unsigned long aud_clk_regs[] __initdata = {
1075 MUX_SEL_AUD,
1076 DIV_AUD0,
1077 DIV_AUD1,
1078 ENABLE_ACLK_AUD,
1079 ENABLE_PCLK_AUD,
1080 ENABLE_SCLK_AUD,
1081};
1082
1083static struct samsung_mux_clock aud_mux_clks[] __initdata = {
1084 MUX(0, "mout_sclk_i2s", mout_aud_group_p, MUX_SEL_AUD, 12, 1),
1085 MUX(0, "mout_sclk_pcm", mout_aud_group_p, MUX_SEL_AUD, 16, 1),
1086 MUX(0, "mout_aud_pll_user", mout_aud_pll_user_p, MUX_SEL_AUD, 20, 1),
1087};
1088
1089static struct samsung_div_clock aud_div_clks[] __initdata = {
1090 DIV(0, "dout_aud_ca5", "mout_aud_pll_user", DIV_AUD0, 0, 4),
1091 DIV(0, "dout_aclk_aud", "dout_aud_ca5", DIV_AUD0, 4, 4),
1092 DIV(0, "dout_aud_pclk_dbg", "dout_aud_ca5", DIV_AUD0, 8, 4),
1093
1094 DIV(0, "dout_sclk_i2s", "mout_sclk_i2s", DIV_AUD1, 0, 4),
1095 DIV(0, "dout_sclk_pcm", "mout_sclk_pcm", DIV_AUD1, 4, 8),
1096 DIV(0, "dout_sclk_uart", "dout_aud_cdclk", DIV_AUD1, 12, 4),
1097 DIV(0, "dout_sclk_slimbus", "dout_aud_cdclk", DIV_AUD1, 16, 5),
1098 DIV(0, "dout_aud_cdclk", "mout_aud_pll_user", DIV_AUD1, 24, 4),
1099};
1100
1101static struct samsung_gate_clock aud_gate_clks[] __initdata = {
1102 GATE(SCLK_PCM, "sclk_pcm", "dout_sclk_pcm",
1103 ENABLE_SCLK_AUD, 27, CLK_SET_RATE_PARENT, 0),
1104 GATE(SCLK_I2S, "sclk_i2s", "dout_sclk_i2s",
1105 ENABLE_SCLK_AUD, 28, CLK_SET_RATE_PARENT, 0),
1106 GATE(0, "sclk_uart", "dout_sclk_uart", ENABLE_SCLK_AUD, 29, 0, 0),
1107 GATE(0, "sclk_slimbus", "dout_sclk_slimbus",
1108 ENABLE_SCLK_AUD, 30, 0, 0),
1109
1110 GATE(0, "pclk_dbg_aud", "dout_aud_pclk_dbg", ENABLE_PCLK_AUD, 19, 0, 0),
1111 GATE(0, "pclk_gpio_aud", "dout_aclk_aud", ENABLE_PCLK_AUD, 20, 0, 0),
1112 GATE(0, "pclk_wdt1", "dout_aclk_aud", ENABLE_PCLK_AUD, 22, 0, 0),
1113 GATE(0, "pclk_wdt0", "dout_aclk_aud", ENABLE_PCLK_AUD, 23, 0, 0),
1114 GATE(0, "pclk_slimbus", "dout_aclk_aud", ENABLE_PCLK_AUD, 24, 0, 0),
1115 GATE(0, "pclk_uart", "dout_aclk_aud", ENABLE_PCLK_AUD, 25, 0, 0),
1116 GATE(PCLK_PCM, "pclk_pcm", "dout_aclk_aud",
1117 ENABLE_PCLK_AUD, 26, CLK_SET_RATE_PARENT, 0),
1118 GATE(PCLK_I2S, "pclk_i2s", "dout_aclk_aud",
1119 ENABLE_PCLK_AUD, 27, CLK_SET_RATE_PARENT, 0),
1120 GATE(0, "pclk_timer", "dout_aclk_aud", ENABLE_PCLK_AUD, 28, 0, 0),
1121 GATE(0, "pclk_smmu_aud", "dout_aclk_aud", ENABLE_PCLK_AUD, 31, 0, 0),
1122
1123 GATE(0, "aclk_smmu_aud", "dout_aclk_aud", ENABLE_ACLK_AUD, 27, 0, 0),
1124 GATE(0, "aclk_acel_lh_async_si_top", "dout_aclk_aud",
1125 ENABLE_ACLK_AUD, 28, 0, 0),
1126 GATE(ACLK_ADMA, "aclk_dmac", "dout_aclk_aud", ENABLE_ACLK_AUD, 31, 0, 0),
1127};
1128
1129static struct samsung_cmu_info aud_cmu_info __initdata = {
1130 .mux_clks = aud_mux_clks,
1131 .nr_mux_clks = ARRAY_SIZE(aud_mux_clks),
1132 .div_clks = aud_div_clks,
1133 .nr_div_clks = ARRAY_SIZE(aud_div_clks),
1134 .gate_clks = aud_gate_clks,
1135 .nr_gate_clks = ARRAY_SIZE(aud_gate_clks),
1136 .nr_clk_ids = AUD_NR_CLK,
1137 .clk_regs = aud_clk_regs,
1138 .nr_clk_regs = ARRAY_SIZE(aud_clk_regs),
1139};
1140
1141static void __init exynos7_clk_aud_init(struct device_node *np)
1142{
1143 samsung_cmu_register_one(np, &aud_cmu_info);
1144}
1145
1146CLK_OF_DECLARE(exynos7_clk_aud, "samsung,exynos7-clock-aud",
1147 exynos7_clk_aud_init);
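CLK_SET_RATE_PARENT on sclk_pcm and sclk_i2s lets a rate request on the leaf gate propagate to the dout_* divider that actually owns the rate. A hedged consumer-side sketch; the clock lookup name and rate are made up, and only the standard <linux/clk.h> API is assumed:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_i2s_clk_setup(struct device *dev)
{
	struct clk *sclk = devm_clk_get(dev, "i2s_opclk");	/* illustrative name */
	int ret;

	if (IS_ERR(sclk))
		return PTR_ERR(sclk);

	/* CLK_SET_RATE_PARENT forwards this to dout_sclk_i2s's divider */
	ret = clk_set_rate(sclk, 12288000);
	if (ret)
		return ret;

	return clk_prepare_enable(sclk);
}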
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index 4bda54095a16..9e1f88c04fd4 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -374,19 +374,24 @@ static void samsung_clk_sleep_init(void __iomem *reg_base,
374 * Common function which registers plls, muxes, dividers and gates 374 * Common function which registers plls, muxes, dividers and gates
375 * for each CMU. It also adds the CMU register list to the register cache. 375 * for each CMU. It also adds the CMU register list to the register cache.
376 */ 376 */
377void __init samsung_cmu_register_one(struct device_node *np, 377struct samsung_clk_provider * __init samsung_cmu_register_one(
378 struct device_node *np,
378 struct samsung_cmu_info *cmu) 379 struct samsung_cmu_info *cmu)
379{ 380{
380 void __iomem *reg_base; 381 void __iomem *reg_base;
381 struct samsung_clk_provider *ctx; 382 struct samsung_clk_provider *ctx;
382 383
383 reg_base = of_iomap(np, 0); 384 reg_base = of_iomap(np, 0);
384 if (!reg_base) 385 if (!reg_base) {
385 panic("%s: failed to map registers\n", __func__); 386 panic("%s: failed to map registers\n", __func__);
387 return NULL;
388 }
386 389
387 ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids); 390 ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
388 if (!ctx) 391 if (!ctx) {
389 panic("%s: unable to allocate ctx\n", __func__); 392 panic("%s: unable to allocate ctx\n", __func__);
393 return ctx;
394 }
390 395
391 if (cmu->pll_clks) 396 if (cmu->pll_clks)
392 samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks, 397 samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks,
@@ -410,4 +415,6 @@ void __init samsung_cmu_register_one(struct device_node *np,
410 cmu->nr_clk_regs); 415 cmu->nr_clk_regs);
411 416
412 samsung_clk_of_add_provider(np, ctx); 417 samsung_clk_of_add_provider(np, ctx);
418
419 return ctx;
413} 420}
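With samsung_cmu_register_one() returning the provider context, an SoC-specific init can retain it for later use (for instance, suspend/resume handling). A minimal sketch under that assumption; example_cmu_info is a hypothetical samsung_cmu_info like the ones above:

static struct samsung_cmu_info example_cmu_info __initdata = {
	/* mux/div/gate tables elided */
};

static struct samsung_clk_provider *example_ctx;

static void __init example_cmu_init(struct device_node *np)
{
	example_ctx = samsung_cmu_register_one(np, &example_cmu_info);
	if (!example_ctx)
		pr_err("%s: CMU registration failed\n", __func__);
}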
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index 8acabe1f32c4..e4c75383cea7 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -392,7 +392,8 @@ extern void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
392 struct samsung_pll_clock *pll_list, 392 struct samsung_pll_clock *pll_list,
393 unsigned int nr_clk, void __iomem *base); 393 unsigned int nr_clk, void __iomem *base);
394 394
395extern void __init samsung_cmu_register_one(struct device_node *, 395extern struct samsung_clk_provider __init *samsung_cmu_register_one(
396 struct device_node *,
396 struct samsung_cmu_info *); 397 struct samsung_cmu_info *);
397 398
398extern unsigned long _get_rate(const char *clk_name); 399extern unsigned long _get_rate(const char *clk_name);
diff --git a/drivers/clk/shmobile/Makefile b/drivers/clk/shmobile/Makefile
index f83980f2b956..0689d7fb2666 100644
--- a/drivers/clk/shmobile/Makefile
+++ b/drivers/clk/shmobile/Makefile
@@ -1,9 +1,11 @@
1obj-$(CONFIG_ARCH_EMEV2) += clk-emev2.o 1obj-$(CONFIG_ARCH_EMEV2) += clk-emev2.o
2obj-$(CONFIG_ARCH_R7S72100) += clk-rz.o 2obj-$(CONFIG_ARCH_R7S72100) += clk-rz.o
3obj-$(CONFIG_ARCH_R8A73A4) += clk-r8a73a4.o
3obj-$(CONFIG_ARCH_R8A7740) += clk-r8a7740.o 4obj-$(CONFIG_ARCH_R8A7740) += clk-r8a7740.o
4obj-$(CONFIG_ARCH_R8A7779) += clk-r8a7779.o 5obj-$(CONFIG_ARCH_R8A7779) += clk-r8a7779.o
5obj-$(CONFIG_ARCH_R8A7790) += clk-rcar-gen2.o 6obj-$(CONFIG_ARCH_R8A7790) += clk-rcar-gen2.o
6obj-$(CONFIG_ARCH_R8A7791) += clk-rcar-gen2.o 7obj-$(CONFIG_ARCH_R8A7791) += clk-rcar-gen2.o
8obj-$(CONFIG_ARCH_R8A7793) += clk-rcar-gen2.o
7obj-$(CONFIG_ARCH_R8A7794) += clk-rcar-gen2.o 9obj-$(CONFIG_ARCH_R8A7794) += clk-rcar-gen2.o
8obj-$(CONFIG_ARCH_SH73A0) += clk-sh73a0.o 10obj-$(CONFIG_ARCH_SH73A0) += clk-sh73a0.o
9obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += clk-div6.o 11obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += clk-div6.o
diff --git a/drivers/clk/shmobile/clk-div6.c b/drivers/clk/shmobile/clk-div6.c
index 639241e31e03..036a692c7219 100644
--- a/drivers/clk/shmobile/clk-div6.c
+++ b/drivers/clk/shmobile/clk-div6.c
@@ -54,12 +54,19 @@ static int cpg_div6_clock_enable(struct clk_hw *hw)
54static void cpg_div6_clock_disable(struct clk_hw *hw) 54static void cpg_div6_clock_disable(struct clk_hw *hw)
55{ 55{
56 struct div6_clock *clock = to_div6_clock(hw); 56 struct div6_clock *clock = to_div6_clock(hw);
57 u32 val;
57 58
58 /* DIV6 clocks require the divisor field to be non-zero when stopping 59 val = clk_readl(clock->reg);
59 * the clock. 60 val |= CPG_DIV6_CKSTP;
61 /*
62 * DIV6 clocks require the divisor field to be non-zero when stopping
63 * the clock. However, some clocks (e.g. ZB on sh73a0) fail to be
64 * re-enabled later if the divisor field is changed when stopping the
65 * clock
60 */ 66 */
61 clk_writel(clk_readl(clock->reg) | CPG_DIV6_CKSTP | CPG_DIV6_DIV_MASK, 67 if (!(val & CPG_DIV6_DIV_MASK))
62 clock->reg); 68 val |= CPG_DIV6_DIV_MASK;
69 clk_writel(val, clock->reg);
63} 70}
64 71
65static int cpg_div6_clock_is_enabled(struct clk_hw *hw) 72static int cpg_div6_clock_is_enabled(struct clk_hw *hw)
@@ -83,6 +90,9 @@ static unsigned int cpg_div6_clock_calc_div(unsigned long rate,
83{ 90{
84 unsigned int div; 91 unsigned int div;
85 92
93 if (!rate)
94 rate = 1;
95
86 div = DIV_ROUND_CLOSEST(parent_rate, rate); 96 div = DIV_ROUND_CLOSEST(parent_rate, rate);
87 return clamp_t(unsigned int, div, 1, 64); 97 return clamp_t(unsigned int, div, 1, 64);
88} 98}
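The rewritten disable path stops the clock but only forces the divisor field when it is currently zero, so a clock such as ZB on sh73a0 keeps its divisor across a stop/start cycle. A standalone sketch of the mask logic, with the CKSTP bit and divisor mask values assumed for illustration:

#include <stdint.h>
#include <stdio.h>

#define CPG_DIV6_CKSTP    (1u << 8)	/* assumed bit position */
#define CPG_DIV6_DIV_MASK 0x3fu		/* assumed 6-bit divisor field */

static uint32_t div6_disable(uint32_t val)
{
	val |= CPG_DIV6_CKSTP;			/* stop the clock */
	if (!(val & CPG_DIV6_DIV_MASK))
		val |= CPG_DIV6_DIV_MASK;	/* keep the field non-zero */
	return val;
}

int main(void)
{
	printf("0x%08x\n", div6_disable(0x00000000));	/* -> 0x0000013f */
	printf("0x%08x\n", div6_disable(0x00000007));	/* divisor 7 preserved */
	return 0;
}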
diff --git a/drivers/clk/shmobile/clk-r8a73a4.c b/drivers/clk/shmobile/clk-r8a73a4.c
new file mode 100644
index 000000000000..29b9a0b0012a
--- /dev/null
+++ b/drivers/clk/shmobile/clk-r8a73a4.c
@@ -0,0 +1,241 @@
1/*
2 * r8a73a4 Core CPG Clocks
3 *
4 * Copyright (C) 2014 Ulrich Hecht
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 */
10
11#include <linux/clk-provider.h>
12#include <linux/clkdev.h>
13#include <linux/clk/shmobile.h>
14#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/of.h>
17#include <linux/of_address.h>
18#include <linux/spinlock.h>
19
20struct r8a73a4_cpg {
21 struct clk_onecell_data data;
22 spinlock_t lock;
23 void __iomem *reg;
24};
25
26#define CPG_CKSCR 0xc0
27#define CPG_FRQCRA 0x00
28#define CPG_FRQCRB 0x04
29#define CPG_FRQCRC 0xe0
30#define CPG_PLL0CR 0xd8
31#define CPG_PLL1CR 0x28
32#define CPG_PLL2CR 0x2c
33#define CPG_PLL2HCR 0xe4
34#define CPG_PLL2SCR 0xf4
35
36#define CLK_ENABLE_ON_INIT BIT(0)
37
38struct div4_clk {
39 const char *name;
40 unsigned int reg;
41 unsigned int shift;
42};
43
44static struct div4_clk div4_clks[] = {
45 { "i", CPG_FRQCRA, 20 },
46 { "m3", CPG_FRQCRA, 12 },
47 { "b", CPG_FRQCRA, 8 },
48 { "m1", CPG_FRQCRA, 4 },
49 { "m2", CPG_FRQCRA, 0 },
50 { "zx", CPG_FRQCRB, 12 },
51 { "zs", CPG_FRQCRB, 8 },
52 { "hp", CPG_FRQCRB, 4 },
53 { NULL, 0, 0 },
54};
55
56static const struct clk_div_table div4_div_table[] = {
57 { 0, 2 }, { 1, 3 }, { 2, 4 }, { 3, 6 }, { 4, 8 }, { 5, 12 },
58 { 6, 16 }, { 7, 18 }, { 8, 24 }, { 10, 36 }, { 11, 48 },
59 { 12, 10 }, { 0, 0 }
60};
61
62static struct clk * __init
63r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
64 const char *name)
65{
66 const struct clk_div_table *table = NULL;
67 const char *parent_name;
68 unsigned int shift, reg;
69 unsigned int mult = 1;
70 unsigned int div = 1;
71
72
73 if (!strcmp(name, "main")) {
74 u32 ckscr = clk_readl(cpg->reg + CPG_CKSCR);
75
76 switch ((ckscr >> 28) & 3) {
77 case 0: /* extal1 */
78 parent_name = of_clk_get_parent_name(np, 0);
79 break;
80 case 1: /* extal1 / 2 */
81 parent_name = of_clk_get_parent_name(np, 0);
82 div = 2;
83 break;
84 case 2: /* extal2 */
85 parent_name = of_clk_get_parent_name(np, 1);
86 break;
87 case 3: /* extal2 / 2 */
88 parent_name = of_clk_get_parent_name(np, 1);
89 div = 2;
90 break;
91 }
92 } else if (!strcmp(name, "pll0")) {
93 /* PLL0/1 are configurable multiplier clocks. Register them as
94 * fixed factor clocks for now as there's no generic multiplier
95 * clock implementation and we currently have no need to change
96 * the multiplier value.
97 */
98 u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
99
100 parent_name = "main";
101 mult = ((value >> 24) & 0x7f) + 1;
102 if (value & BIT(20))
103 div = 2;
104 } else if (!strcmp(name, "pll1")) {
105 u32 value = clk_readl(cpg->reg + CPG_PLL1CR);
106
107 parent_name = "main";
108 /* XXX: enable bit? */
109 mult = ((value >> 24) & 0x7f) + 1;
110 if (value & BIT(7))
111 div = 2;
112 } else if (!strncmp(name, "pll2", 4)) {
113 u32 value, cr;
114
115 switch (name[4]) {
116 case 0:
117 cr = CPG_PLL2CR;
118 break;
119 case 's':
120 cr = CPG_PLL2SCR;
121 break;
122 case 'h':
123 cr = CPG_PLL2HCR;
124 break;
125 default:
126 return ERR_PTR(-EINVAL);
127 }
128 value = clk_readl(cpg->reg + cr);
129 switch ((value >> 5) & 7) {
130 case 0:
131 parent_name = "main";
132 div = 2;
133 break;
134 case 1:
135 parent_name = "extal2";
136 div = 2;
137 break;
138 case 3:
139 parent_name = "extal2";
140 div = 4;
141 break;
142 case 4:
143 parent_name = "main";
144 break;
145 case 5:
146 parent_name = "extal2";
147 break;
148 default:
149 pr_warn("%s: unexpected parent of %s\n", __func__,
150 name);
151 return ERR_PTR(-EINVAL);
152 }
153 /* XXX: enable bit? */
154 mult = ((value >> 24) & 0x7f) + 1;
155 } else if (!strcmp(name, "z") || !strcmp(name, "z2")) {
156 u32 shift = 8;
157
158 parent_name = "pll0";
159 if (name[1] == '2') {
160 div = 2;
161 shift = 0;
162 }
163 div *= 32;
164 mult = 0x20 - ((clk_readl(cpg->reg + CPG_FRQCRC) >> shift)
165 & 0x1f);
166 } else {
167 struct div4_clk *c;
168
169 for (c = div4_clks; c->name; c++) {
170 if (!strcmp(name, c->name))
171 break;
172 }
173 if (!c->name)
174 return ERR_PTR(-EINVAL);
175
176 parent_name = "pll1";
177 table = div4_div_table;
178 reg = c->reg;
179 shift = c->shift;
180 }
181
182 if (!table) {
183 return clk_register_fixed_factor(NULL, name, parent_name, 0,
184 mult, div);
185 } else {
186 return clk_register_divider_table(NULL, name, parent_name, 0,
187 cpg->reg + reg, shift, 4, 0,
188 table, &cpg->lock);
189 }
190}
191
192static void __init r8a73a4_cpg_clocks_init(struct device_node *np)
193{
194 struct r8a73a4_cpg *cpg;
195 struct clk **clks;
196 unsigned int i;
197 int num_clks;
198
199 num_clks = of_property_count_strings(np, "clock-output-names");
200 if (num_clks < 0) {
201 pr_err("%s: failed to count clocks\n", __func__);
202 return;
203 }
204
205 cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
206 clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
207 if (cpg == NULL || clks == NULL) {
208 /* We're leaking memory on purpose, there's no point in cleaning
209 * up as the system won't boot anyway.
210 */
211 return;
212 }
213
214 spin_lock_init(&cpg->lock);
215
216 cpg->data.clks = clks;
217 cpg->data.clk_num = num_clks;
218
219 cpg->reg = of_iomap(np, 0);
220 if (WARN_ON(cpg->reg == NULL))
221 return;
222
223 for (i = 0; i < num_clks; ++i) {
224 const char *name;
225 struct clk *clk;
226
227 of_property_read_string_index(np, "clock-output-names", i,
228 &name);
229
230 clk = r8a73a4_cpg_register_clock(np, cpg, name);
231 if (IS_ERR(clk))
232 pr_err("%s: failed to register %s %s clock (%ld)\n",
233 __func__, np->name, name, PTR_ERR(clk));
234 else
235 cpg->data.clks[i] = clk;
236 }
237
238 of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
239}
240CLK_OF_DECLARE(r8a73a4_cpg_clks, "renesas,r8a73a4-cpg-clocks",
241 r8a73a4_cpg_clocks_init);
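For the r8a73a4 "z" and "z2" clocks above, the 5-bit FRQCRC field encodes 32 minus the multiplier, giving rate = pll0 * (0x20 - field) / 32 (with an extra /2 for z2). A runnable sketch of that arithmetic; the register value and parent rate are invented:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t frqcrc = 0x00000800;		/* hypothetical: z field = 8 */
	unsigned int field = (frqcrc >> 8) & 0x1f;
	unsigned long pll0 = 1500000000UL;	/* example parent rate */

	printf("z = %lu Hz\n", pll0 / 32 * (0x20 - field));
	return 0;
}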
diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/shmobile/clk-rcar-gen2.c
index e996425d06a9..acfb6d7dbd6b 100644
--- a/drivers/clk/shmobile/clk-rcar-gen2.c
+++ b/drivers/clk/shmobile/clk-rcar-gen2.c
@@ -33,6 +33,8 @@ struct rcar_gen2_cpg {
33#define CPG_FRQCRC 0x000000e0 33#define CPG_FRQCRC 0x000000e0
34#define CPG_FRQCRC_ZFC_MASK (0x1f << 8) 34#define CPG_FRQCRC_ZFC_MASK (0x1f << 8)
35#define CPG_FRQCRC_ZFC_SHIFT 8 35#define CPG_FRQCRC_ZFC_SHIFT 8
36#define CPG_ADSPCKCR 0x0000025c
37#define CPG_RCANCKCR 0x00000270
36 38
37/* ----------------------------------------------------------------------------- 39/* -----------------------------------------------------------------------------
38 * Z Clock 40 * Z Clock
@@ -161,6 +163,88 @@ static struct clk * __init cpg_z_clk_register(struct rcar_gen2_cpg *cpg)
161 return clk; 163 return clk;
162} 164}
163 165
166static struct clk * __init cpg_rcan_clk_register(struct rcar_gen2_cpg *cpg,
167 struct device_node *np)
168{
169 const char *parent_name = of_clk_get_parent_name(np, 1);
170 struct clk_fixed_factor *fixed;
171 struct clk_gate *gate;
172 struct clk *clk;
173
174 fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
175 if (!fixed)
176 return ERR_PTR(-ENOMEM);
177
178 fixed->mult = 1;
179 fixed->div = 6;
180
181 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
182 if (!gate) {
183 kfree(fixed);
184 return ERR_PTR(-ENOMEM);
185 }
186
187 gate->reg = cpg->reg + CPG_RCANCKCR;
188 gate->bit_idx = 8;
189 gate->flags = CLK_GATE_SET_TO_DISABLE;
190 gate->lock = &cpg->lock;
191
192 clk = clk_register_composite(NULL, "rcan", &parent_name, 1, NULL, NULL,
193 &fixed->hw, &clk_fixed_factor_ops,
194 &gate->hw, &clk_gate_ops, 0);
195 if (IS_ERR(clk)) {
196 kfree(gate);
197 kfree(fixed);
198 }
199
200 return clk;
201}
202
203/* ADSP divisors */
204static const struct clk_div_table cpg_adsp_div_table[] = {
205 { 1, 3 }, { 2, 4 }, { 3, 6 }, { 4, 8 },
206 { 5, 12 }, { 6, 16 }, { 7, 18 }, { 8, 24 },
207 { 10, 36 }, { 11, 48 }, { 0, 0 },
208};
209
210static struct clk * __init cpg_adsp_clk_register(struct rcar_gen2_cpg *cpg)
211{
212 const char *parent_name = "pll1";
213 struct clk_divider *div;
214 struct clk_gate *gate;
215 struct clk *clk;
216
217 div = kzalloc(sizeof(*div), GFP_KERNEL);
218 if (!div)
219 return ERR_PTR(-ENOMEM);
220
221 div->reg = cpg->reg + CPG_ADSPCKCR;
222 div->width = 4;
223 div->table = cpg_adsp_div_table;
224 div->lock = &cpg->lock;
225
226 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
227 if (!gate) {
228 kfree(div);
229 return ERR_PTR(-ENOMEM);
230 }
231
232 gate->reg = cpg->reg + CPG_ADSPCKCR;
233 gate->bit_idx = 8;
234 gate->flags = CLK_GATE_SET_TO_DISABLE;
235 gate->lock = &cpg->lock;
236
237 clk = clk_register_composite(NULL, "adsp", &parent_name, 1, NULL, NULL,
238 &div->hw, &clk_divider_ops,
239 &gate->hw, &clk_gate_ops, 0);
240 if (IS_ERR(clk)) {
241 kfree(gate);
242 kfree(div);
243 }
244
245 return clk;
246}
247
164/* ----------------------------------------------------------------------------- 248/* -----------------------------------------------------------------------------
165 * CPG Clock Data 249 * CPG Clock Data
166 */ 250 */
@@ -263,6 +347,10 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
263 shift = 0; 347 shift = 0;
264 } else if (!strcmp(name, "z")) { 348 } else if (!strcmp(name, "z")) {
265 return cpg_z_clk_register(cpg); 349 return cpg_z_clk_register(cpg);
350 } else if (!strcmp(name, "rcan")) {
351 return cpg_rcan_clk_register(cpg, np);
352 } else if (!strcmp(name, "adsp")) {
353 return cpg_adsp_clk_register(cpg);
266 } else { 354 } else {
267 return ERR_PTR(-EINVAL); 355 return ERR_PTR(-EINVAL);
268 } 356 }
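The new "rcan" clock pairs a fixed /6 stage with a gate at RCANCKCR bit 8, where CLK_GATE_SET_TO_DISABLE means a set bit stops the clock. A runnable sketch of how its rate and state fall out; the register value and parent rate are made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rcanckcr = 0;			/* hypothetical: bit 8 clear */
	unsigned long parent = 48000000UL;	/* example rate of DT parent 1 */
	int stopped = (rcanckcr >> 8) & 1;

	printf("rcan: %lu Hz (%s)\n", parent / 6,
	       stopped ? "gated" : "running");
	return 0;
}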
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index 2282cef9f2ff..bf12a25eb3a2 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -37,8 +37,8 @@ static int flexgen_enable(struct clk_hw *hw)
37 struct clk_hw *pgate_hw = &flexgen->pgate.hw; 37 struct clk_hw *pgate_hw = &flexgen->pgate.hw;
38 struct clk_hw *fgate_hw = &flexgen->fgate.hw; 38 struct clk_hw *fgate_hw = &flexgen->fgate.hw;
39 39
40 pgate_hw->clk = hw->clk; 40 __clk_hw_set_clk(pgate_hw, hw);
41 fgate_hw->clk = hw->clk; 41 __clk_hw_set_clk(fgate_hw, hw);
42 42
43 clk_gate_ops.enable(pgate_hw); 43 clk_gate_ops.enable(pgate_hw);
44 44
@@ -54,7 +54,7 @@ static void flexgen_disable(struct clk_hw *hw)
54 struct clk_hw *fgate_hw = &flexgen->fgate.hw; 54 struct clk_hw *fgate_hw = &flexgen->fgate.hw;
55 55
56 /* disable only the final gate */ 56 /* disable only the final gate */
57 fgate_hw->clk = hw->clk; 57 __clk_hw_set_clk(fgate_hw, hw);
58 58
59 clk_gate_ops.disable(fgate_hw); 59 clk_gate_ops.disable(fgate_hw);
60 60
@@ -66,7 +66,7 @@ static int flexgen_is_enabled(struct clk_hw *hw)
66 struct flexgen *flexgen = to_flexgen(hw); 66 struct flexgen *flexgen = to_flexgen(hw);
67 struct clk_hw *fgate_hw = &flexgen->fgate.hw; 67 struct clk_hw *fgate_hw = &flexgen->fgate.hw;
68 68
69 fgate_hw->clk = hw->clk; 69 __clk_hw_set_clk(fgate_hw, hw);
70 70
71 if (!clk_gate_ops.is_enabled(fgate_hw)) 71 if (!clk_gate_ops.is_enabled(fgate_hw))
72 return 0; 72 return 0;
@@ -79,7 +79,7 @@ static u8 flexgen_get_parent(struct clk_hw *hw)
79 struct flexgen *flexgen = to_flexgen(hw); 79 struct flexgen *flexgen = to_flexgen(hw);
80 struct clk_hw *mux_hw = &flexgen->mux.hw; 80 struct clk_hw *mux_hw = &flexgen->mux.hw;
81 81
82 mux_hw->clk = hw->clk; 82 __clk_hw_set_clk(mux_hw, hw);
83 83
84 return clk_mux_ops.get_parent(mux_hw); 84 return clk_mux_ops.get_parent(mux_hw);
85} 85}
@@ -89,7 +89,7 @@ static int flexgen_set_parent(struct clk_hw *hw, u8 index)
89 struct flexgen *flexgen = to_flexgen(hw); 89 struct flexgen *flexgen = to_flexgen(hw);
90 struct clk_hw *mux_hw = &flexgen->mux.hw; 90 struct clk_hw *mux_hw = &flexgen->mux.hw;
91 91
92 mux_hw->clk = hw->clk; 92 __clk_hw_set_clk(mux_hw, hw);
93 93
94 return clk_mux_ops.set_parent(mux_hw, index); 94 return clk_mux_ops.set_parent(mux_hw, index);
95} 95}
@@ -124,8 +124,8 @@ unsigned long flexgen_recalc_rate(struct clk_hw *hw,
124 struct clk_hw *fdiv_hw = &flexgen->fdiv.hw; 124 struct clk_hw *fdiv_hw = &flexgen->fdiv.hw;
125 unsigned long mid_rate; 125 unsigned long mid_rate;
126 126
127 pdiv_hw->clk = hw->clk; 127 __clk_hw_set_clk(pdiv_hw, hw);
128 fdiv_hw->clk = hw->clk; 128 __clk_hw_set_clk(fdiv_hw, hw);
129 129
130 mid_rate = clk_divider_ops.recalc_rate(pdiv_hw, parent_rate); 130 mid_rate = clk_divider_ops.recalc_rate(pdiv_hw, parent_rate);
131 131
@@ -138,16 +138,27 @@ static int flexgen_set_rate(struct clk_hw *hw, unsigned long rate,
138 struct flexgen *flexgen = to_flexgen(hw); 138 struct flexgen *flexgen = to_flexgen(hw);
139 struct clk_hw *pdiv_hw = &flexgen->pdiv.hw; 139 struct clk_hw *pdiv_hw = &flexgen->pdiv.hw;
140 struct clk_hw *fdiv_hw = &flexgen->fdiv.hw; 140 struct clk_hw *fdiv_hw = &flexgen->fdiv.hw;
141 unsigned long primary_div = 0; 141 unsigned long div = 0;
142 int ret = 0; 142 int ret = 0;
143 143
144 pdiv_hw->clk = hw->clk; 144 __clk_hw_set_clk(pdiv_hw, hw);
145 fdiv_hw->clk = hw->clk; 145 __clk_hw_set_clk(fdiv_hw, hw);
146 146
147 primary_div = clk_best_div(parent_rate, rate); 147 div = clk_best_div(parent_rate, rate);
148 148
149 clk_divider_ops.set_rate(fdiv_hw, parent_rate, parent_rate); 149 /*
150 ret = clk_divider_ops.set_rate(pdiv_hw, rate, rate * primary_div); 150 * pdiv is mainly targeted for low freq results, while fdiv
151 * should be used for div <= 64. The other way round can
152 * lead to 'duty cycle' issues.
153 */
154
155 if (div <= 64) {
156 clk_divider_ops.set_rate(pdiv_hw, parent_rate, parent_rate);
157 ret = clk_divider_ops.set_rate(fdiv_hw, rate, rate * div);
158 } else {
159 clk_divider_ops.set_rate(fdiv_hw, parent_rate, parent_rate);
160 ret = clk_divider_ops.set_rate(pdiv_hw, rate, rate * div);
161 }
151 162
152 return ret; 163 return ret;
153} 164}
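The flexgen set_rate change programs the whole divisor on the final divider when it fits (div <= 64) and on the pre-divider otherwise, per the duty-cycle note in the diff. A standalone sketch of that decision; best_div() is a stand-in for clk_best_div(), assumed to round to the closest divisor:

#include <stdio.h>

static unsigned long best_div(unsigned long parent, unsigned long rate)
{
	return (parent + rate / 2) / rate;	/* stand-in for clk_best_div() */
}

int main(void)
{
	unsigned long parent = 600000000UL;
	unsigned long div;

	div = best_div(parent, 30000000);	/* 20: programmed on fdiv */
	printf("30 MHz -> div %lu -> %s\n", div, div <= 64 ? "fdiv" : "pdiv");

	div = best_div(parent, 100000);		/* 6000: programmed on pdiv */
	printf("100 kHz -> div %lu -> %s\n", div, div <= 64 ? "fdiv" : "pdiv");
	return 0;
}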
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index 79dc40b5cc68..9a15ec344a85 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -94,7 +94,7 @@ static int clkgena_divmux_enable(struct clk_hw *hw)
94 unsigned long timeout; 94 unsigned long timeout;
95 int ret = 0; 95 int ret = 0;
96 96
97 mux_hw->clk = hw->clk; 97 __clk_hw_set_clk(mux_hw, hw);
98 98
99 ret = clk_mux_ops.set_parent(mux_hw, genamux->muxsel); 99 ret = clk_mux_ops.set_parent(mux_hw, genamux->muxsel);
100 if (ret) 100 if (ret)
@@ -116,7 +116,7 @@ static void clkgena_divmux_disable(struct clk_hw *hw)
116 struct clkgena_divmux *genamux = to_clkgena_divmux(hw); 116 struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
117 struct clk_hw *mux_hw = &genamux->mux.hw; 117 struct clk_hw *mux_hw = &genamux->mux.hw;
118 118
119 mux_hw->clk = hw->clk; 119 __clk_hw_set_clk(mux_hw, hw);
120 120
121 clk_mux_ops.set_parent(mux_hw, CKGAX_CLKOPSRC_SWITCH_OFF); 121 clk_mux_ops.set_parent(mux_hw, CKGAX_CLKOPSRC_SWITCH_OFF);
122} 122}
@@ -126,7 +126,7 @@ static int clkgena_divmux_is_enabled(struct clk_hw *hw)
126 struct clkgena_divmux *genamux = to_clkgena_divmux(hw); 126 struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
127 struct clk_hw *mux_hw = &genamux->mux.hw; 127 struct clk_hw *mux_hw = &genamux->mux.hw;
128 128
129 mux_hw->clk = hw->clk; 129 __clk_hw_set_clk(mux_hw, hw);
130 130
131 return (s8)clk_mux_ops.get_parent(mux_hw) > 0; 131 return (s8)clk_mux_ops.get_parent(mux_hw) > 0;
132} 132}
@@ -136,7 +136,7 @@ u8 clkgena_divmux_get_parent(struct clk_hw *hw)
136 struct clkgena_divmux *genamux = to_clkgena_divmux(hw); 136 struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
137 struct clk_hw *mux_hw = &genamux->mux.hw; 137 struct clk_hw *mux_hw = &genamux->mux.hw;
138 138
139 mux_hw->clk = hw->clk; 139 __clk_hw_set_clk(mux_hw, hw);
140 140
141 genamux->muxsel = clk_mux_ops.get_parent(mux_hw); 141 genamux->muxsel = clk_mux_ops.get_parent(mux_hw);
142 if ((s8)genamux->muxsel < 0) { 142 if ((s8)genamux->muxsel < 0) {
@@ -174,7 +174,7 @@ unsigned long clkgena_divmux_recalc_rate(struct clk_hw *hw,
174 struct clkgena_divmux *genamux = to_clkgena_divmux(hw); 174 struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
175 struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw; 175 struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
176 176
177 div_hw->clk = hw->clk; 177 __clk_hw_set_clk(div_hw, hw);
178 178
179 return clk_divider_ops.recalc_rate(div_hw, parent_rate); 179 return clk_divider_ops.recalc_rate(div_hw, parent_rate);
180} 180}
@@ -185,7 +185,7 @@ static int clkgena_divmux_set_rate(struct clk_hw *hw, unsigned long rate,
185 struct clkgena_divmux *genamux = to_clkgena_divmux(hw); 185 struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
186 struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw; 186 struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
187 187
188 div_hw->clk = hw->clk; 188 __clk_hw_set_clk(div_hw, hw);
189 189
190 return clk_divider_ops.set_rate(div_hw, rate, parent_rate); 190 return clk_divider_ops.set_rate(div_hw, rate, parent_rate);
191} 191}
@@ -196,7 +196,7 @@ static long clkgena_divmux_round_rate(struct clk_hw *hw, unsigned long rate,
196 struct clkgena_divmux *genamux = to_clkgena_divmux(hw); 196 struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
197 struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw; 197 struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
198 198
199 div_hw->clk = hw->clk; 199 __clk_hw_set_clk(div_hw, hw);
200 200
201 return clk_divider_ops.round_rate(div_hw, rate, prate); 201 return clk_divider_ops.round_rate(div_hw, rate, prate);
202} 202}
diff --git a/drivers/clk/sunxi/Makefile b/drivers/clk/sunxi/Makefile
index a66953c0f430..3a5292e3fcf8 100644
--- a/drivers/clk/sunxi/Makefile
+++ b/drivers/clk/sunxi/Makefile
@@ -8,6 +8,7 @@ obj-y += clk-a20-gmac.o
8obj-y += clk-mod0.o 8obj-y += clk-mod0.o
9obj-y += clk-sun8i-mbus.o 9obj-y += clk-sun8i-mbus.o
10obj-y += clk-sun9i-core.o 10obj-y += clk-sun9i-core.o
11obj-y += clk-sun9i-mmc.o
11 12
12obj-$(CONFIG_MFD_SUN6I_PRCM) += \ 13obj-$(CONFIG_MFD_SUN6I_PRCM) += \
13 clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \ 14 clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \
diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c
index 62e08fb58554..8c20190a3e9f 100644
--- a/drivers/clk/sunxi/clk-factors.c
+++ b/drivers/clk/sunxi/clk-factors.c
@@ -80,6 +80,8 @@ static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
80} 80}
81 81
82static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate, 82static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate,
83 unsigned long min_rate,
84 unsigned long max_rate,
83 unsigned long *best_parent_rate, 85 unsigned long *best_parent_rate,
84 struct clk_hw **best_parent_p) 86 struct clk_hw **best_parent_p)
85{ 87{
@@ -156,9 +158,10 @@ static const struct clk_ops clk_factors_ops = {
156 .set_rate = clk_factors_set_rate, 158 .set_rate = clk_factors_set_rate,
157}; 159};
158 160
159struct clk * __init sunxi_factors_register(struct device_node *node, 161struct clk *sunxi_factors_register(struct device_node *node,
160 const struct factors_data *data, 162 const struct factors_data *data,
161 spinlock_t *lock) 163 spinlock_t *lock,
164 void __iomem *reg)
162{ 165{
163 struct clk *clk; 166 struct clk *clk;
164 struct clk_factors *factors; 167 struct clk_factors *factors;
@@ -168,11 +171,8 @@ struct clk * __init sunxi_factors_register(struct device_node *node,
168 struct clk_hw *mux_hw = NULL; 171 struct clk_hw *mux_hw = NULL;
169 const char *clk_name = node->name; 172 const char *clk_name = node->name;
170 const char *parents[FACTORS_MAX_PARENTS]; 173 const char *parents[FACTORS_MAX_PARENTS];
171 void __iomem *reg;
172 int i = 0; 174 int i = 0;
173 175
174 reg = of_iomap(node, 0);
175
176 /* if we have a mux, we will have >1 parents */ 176 /* if we have a mux, we will have >1 parents */
177 while (i < FACTORS_MAX_PARENTS && 177 while (i < FACTORS_MAX_PARENTS &&
178 (parents[i] = of_clk_get_parent_name(node, i)) != NULL) 178 (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
diff --git a/drivers/clk/sunxi/clk-factors.h b/drivers/clk/sunxi/clk-factors.h
index 912238fde132..171085ab5513 100644
--- a/drivers/clk/sunxi/clk-factors.h
+++ b/drivers/clk/sunxi/clk-factors.h
@@ -36,8 +36,9 @@ struct clk_factors {
36 spinlock_t *lock; 36 spinlock_t *lock;
37}; 37};
38 38
39struct clk * __init sunxi_factors_register(struct device_node *node, 39struct clk *sunxi_factors_register(struct device_node *node,
40 const struct factors_data *data, 40 const struct factors_data *data,
41 spinlock_t *lock); 41 spinlock_t *lock,
42 void __iomem *reg);
42 43
43#endif 44#endif
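Under the new signature, callers own the register mapping: of_iomap() in CLK_OF_DECLARE setup paths, devm_ioremap_resource() from platform drivers, with the cookie then passed in, as the clk-mod0.c changes below demonstrate. A minimal sketch of the boot-time pattern; example_data is a hypothetical factors_data:

#include <linux/of_address.h>
#include <linux/spinlock.h>
#include "clk-factors.h"

static const struct factors_data example_data = {
	/* enable/mux/table/getter as in the real tables */
};
static DEFINE_SPINLOCK(example_lock);

static void __init example_factors_setup(struct device_node *node)
{
	void __iomem *reg = of_iomap(node, 0);

	if (!reg)
		return;	/* e.g. MFD-instantiated node, resources not ready */

	sunxi_factors_register(node, &example_data, &example_lock, reg);
}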
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index da0524eaee94..ec8f5a1fca09 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -17,6 +17,7 @@
17#include <linux/clk-provider.h> 17#include <linux/clk-provider.h>
18#include <linux/clkdev.h> 18#include <linux/clkdev.h>
19#include <linux/of_address.h> 19#include <linux/of_address.h>
20#include <linux/platform_device.h>
20 21
21#include "clk-factors.h" 22#include "clk-factors.h"
22 23
@@ -67,7 +68,7 @@ static struct clk_factors_config sun4i_a10_mod0_config = {
67 .pwidth = 2, 68 .pwidth = 2,
68}; 69};
69 70
70static const struct factors_data sun4i_a10_mod0_data __initconst = { 71static const struct factors_data sun4i_a10_mod0_data = {
71 .enable = 31, 72 .enable = 31,
72 .mux = 24, 73 .mux = 24,
73 .muxmask = BIT(1) | BIT(0), 74 .muxmask = BIT(1) | BIT(0),
@@ -79,15 +80,95 @@ static DEFINE_SPINLOCK(sun4i_a10_mod0_lock);
79 80
80static void __init sun4i_a10_mod0_setup(struct device_node *node) 81static void __init sun4i_a10_mod0_setup(struct device_node *node)
81{ 82{
82 sunxi_factors_register(node, &sun4i_a10_mod0_data, &sun4i_a10_mod0_lock); 83 void __iomem *reg;
84
85 reg = of_iomap(node, 0);
86 if (!reg) {
87 /*
88 * This happens with mod0 clk nodes instantiated through
89 * mfd, as those do not have their resources assigned at
90 * CLK_OF_DECLARE time yet, so do not print an error.
91 */
92 return;
93 }
94
95 sunxi_factors_register(node, &sun4i_a10_mod0_data,
96 &sun4i_a10_mod0_lock, reg);
83} 97}
84CLK_OF_DECLARE(sun4i_a10_mod0, "allwinner,sun4i-a10-mod0-clk", sun4i_a10_mod0_setup); 98CLK_OF_DECLARE(sun4i_a10_mod0, "allwinner,sun4i-a10-mod0-clk", sun4i_a10_mod0_setup);
85 99
100static int sun4i_a10_mod0_clk_probe(struct platform_device *pdev)
101{
102 struct device_node *np = pdev->dev.of_node;
103 struct resource *r;
104 void __iomem *reg;
105
106 if (!np)
107 return -ENODEV;
108
109 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
110 reg = devm_ioremap_resource(&pdev->dev, r);
111 if (IS_ERR(reg))
112 return PTR_ERR(reg);
113
114 sunxi_factors_register(np, &sun4i_a10_mod0_data,
115 &sun4i_a10_mod0_lock, reg);
116 return 0;
117}
118
119static const struct of_device_id sun4i_a10_mod0_clk_dt_ids[] = {
120 { .compatible = "allwinner,sun4i-a10-mod0-clk" },
121 { /* sentinel */ }
122};
123
124static struct platform_driver sun4i_a10_mod0_clk_driver = {
125 .driver = {
126 .name = "sun4i-a10-mod0-clk",
127 .of_match_table = sun4i_a10_mod0_clk_dt_ids,
128 },
129 .probe = sun4i_a10_mod0_clk_probe,
130};
131module_platform_driver(sun4i_a10_mod0_clk_driver);
132
133static const struct factors_data sun9i_a80_mod0_data __initconst = {
134 .enable = 31,
135 .mux = 24,
136 .muxmask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
137 .table = &sun4i_a10_mod0_config,
138 .getter = sun4i_a10_get_mod0_factors,
139};
140
141static void __init sun9i_a80_mod0_setup(struct device_node *node)
142{
143 void __iomem *reg;
144
145 reg = of_io_request_and_map(node, 0, of_node_full_name(node));
146 if (IS_ERR(reg)) {
147 pr_err("Could not get registers for mod0-clk: %s\n",
148 node->name);
149 return;
150 }
151
152 sunxi_factors_register(node, &sun9i_a80_mod0_data,
153 &sun4i_a10_mod0_lock, reg);
154}
155CLK_OF_DECLARE(sun9i_a80_mod0, "allwinner,sun9i-a80-mod0-clk", sun9i_a80_mod0_setup);
156
86static DEFINE_SPINLOCK(sun5i_a13_mbus_lock); 157static DEFINE_SPINLOCK(sun5i_a13_mbus_lock);
87 158
88static void __init sun5i_a13_mbus_setup(struct device_node *node) 159static void __init sun5i_a13_mbus_setup(struct device_node *node)
89{ 160{
90 struct clk *mbus = sunxi_factors_register(node, &sun4i_a10_mod0_data, &sun5i_a13_mbus_lock); 161 struct clk *mbus;
162 void __iomem *reg;
163
164 reg = of_iomap(node, 0);
165 if (!reg) {
166 pr_err("Could not get registers for a13-mbus-clk\n");
167 return;
168 }
169
170 mbus = sunxi_factors_register(node, &sun4i_a10_mod0_data,
171 &sun5i_a13_mbus_lock, reg);
91 172
92 /* The MBUS clock needs to be always enabled */ 173 /* The MBUS clock needs to be always enabled */
93 __clk_get(mbus); 174 __clk_get(mbus);
@@ -95,14 +176,10 @@ static void __init sun5i_a13_mbus_setup(struct device_node *node)
95} 176}
96CLK_OF_DECLARE(sun5i_a13_mbus, "allwinner,sun5i-a13-mbus-clk", sun5i_a13_mbus_setup); 177CLK_OF_DECLARE(sun5i_a13_mbus, "allwinner,sun5i-a13-mbus-clk", sun5i_a13_mbus_setup);
97 178
98struct mmc_phase_data {
99 u8 offset;
100};
101
102struct mmc_phase { 179struct mmc_phase {
103 struct clk_hw hw; 180 struct clk_hw hw;
181 u8 offset;
104 void __iomem *reg; 182 void __iomem *reg;
105 struct mmc_phase_data *data;
106 spinlock_t *lock; 183 spinlock_t *lock;
107}; 184};
108 185
@@ -118,7 +195,7 @@ static int mmc_get_phase(struct clk_hw *hw)
118 u8 delay; 195 u8 delay;
119 196
120 value = readl(phase->reg); 197 value = readl(phase->reg);
121 delay = (value >> phase->data->offset) & 0x3; 198 delay = (value >> phase->offset) & 0x3;
122 199
123 if (!delay) 200 if (!delay)
124 return 180; 201 return 180;
@@ -206,8 +283,8 @@ static int mmc_set_phase(struct clk_hw *hw, int degrees)
206 283
207 spin_lock_irqsave(phase->lock, flags); 284 spin_lock_irqsave(phase->lock, flags);
208 value = readl(phase->reg); 285 value = readl(phase->reg);
209 value &= ~GENMASK(phase->data->offset + 3, phase->data->offset); 286 value &= ~GENMASK(phase->offset + 3, phase->offset);
210 value |= delay << phase->data->offset; 287 value |= delay << phase->offset;
211 writel(value, phase->reg); 288 writel(value, phase->reg);
212 spin_unlock_irqrestore(phase->lock, flags); 289 spin_unlock_irqrestore(phase->lock, flags);
213 290
@@ -219,66 +296,97 @@ static const struct clk_ops mmc_clk_ops = {
219 .set_phase = mmc_set_phase, 296 .set_phase = mmc_set_phase,
220}; 297};
221 298
222static void __init sun4i_a10_mmc_phase_setup(struct device_node *node, 299/*
223 struct mmc_phase_data *data) 300 * sunxi_mmc_setup - Common setup function for mmc module clocks
301 *
302 * The only difference between module clocks on different platforms is the
303 * width of the mux register bits and the valid values, which are passed in
304 * through struct factors_data. The phase clock parts are identical.
305 */
306static void __init sunxi_mmc_setup(struct device_node *node,
307 const struct factors_data *data,
308 spinlock_t *lock)
224{ 309{
225 const char *parent_names[1] = { of_clk_get_parent_name(node, 0) }; 310 struct clk_onecell_data *clk_data;
226 struct clk_init_data init = { 311 const char *parent;
227 .num_parents = 1, 312 void __iomem *reg;
228 .parent_names = parent_names, 313 int i;
229 .ops = &mmc_clk_ops, 314
230 }; 315 reg = of_io_request_and_map(node, 0, of_node_full_name(node));
231 316 if (IS_ERR(reg)) {
232 struct mmc_phase *phase; 317 pr_err("Couldn't map the %s clock registers\n", node->name);
233 struct clk *clk;
234
235 phase = kmalloc(sizeof(*phase), GFP_KERNEL);
236 if (!phase)
237 return; 318 return;
319 }
238 320
239 phase->hw.init = &init; 321 clk_data = kmalloc(sizeof(*clk_data), GFP_KERNEL);
240 322 if (!clk_data)
241 phase->reg = of_iomap(node, 0); 323 return;
242 if (!phase->reg)
243 goto err_free;
244
245 phase->data = data;
246 phase->lock = &sun4i_a10_mod0_lock;
247
248 if (of_property_read_string(node, "clock-output-names", &init.name))
249 init.name = node->name;
250 324
251 clk = clk_register(NULL, &phase->hw); 325 clk_data->clks = kcalloc(3, sizeof(*clk_data->clks), GFP_KERNEL);
252 if (IS_ERR(clk)) 326 if (!clk_data->clks)
253 goto err_unmap; 327 goto err_free_data;
328
329 clk_data->clk_num = 3;
330 clk_data->clks[0] = sunxi_factors_register(node, data, lock, reg);
331 if (!clk_data->clks[0])
332 goto err_free_clks;
333
334 parent = __clk_get_name(clk_data->clks[0]);
335
336 for (i = 1; i < 3; i++) {
337 struct clk_init_data init = {
338 .num_parents = 1,
339 .parent_names = &parent,
340 .ops = &mmc_clk_ops,
341 };
342 struct mmc_phase *phase;
343
344 phase = kmalloc(sizeof(*phase), GFP_KERNEL);
345 if (!phase)
346 continue;
347
348 phase->hw.init = &init;
349 phase->reg = reg;
350 phase->lock = lock;
351
352 if (i == 1)
353 phase->offset = 8;
354 else
355 phase->offset = 20;
356
357 if (of_property_read_string_index(node, "clock-output-names",
358 i, &init.name))
359 init.name = node->name;
360
361 clk_data->clks[i] = clk_register(NULL, &phase->hw);
362 if (IS_ERR(clk_data->clks[i])) {
363 kfree(phase);
364 continue;
365 }
366 }
254 367
255 of_clk_add_provider(node, of_clk_src_simple_get, clk); 368 of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
256 369
257 return; 370 return;
258 371
259err_unmap: 372err_free_clks:
260 iounmap(phase->reg); 373 kfree(clk_data->clks);
261err_free: 374err_free_data:
262 kfree(phase); 375 kfree(clk_data);
263} 376}
264 377
378static DEFINE_SPINLOCK(sun4i_a10_mmc_lock);
265 379
266static struct mmc_phase_data mmc_output_clk = { 380static void __init sun4i_a10_mmc_setup(struct device_node *node)
267 .offset = 8,
268};
269
270static struct mmc_phase_data mmc_sample_clk = {
271 .offset = 20,
272};
273
274static void __init sun4i_a10_mmc_output_setup(struct device_node *node)
275{ 381{
276 sun4i_a10_mmc_phase_setup(node, &mmc_output_clk); 382 sunxi_mmc_setup(node, &sun4i_a10_mod0_data, &sun4i_a10_mmc_lock);
277} 383}
278CLK_OF_DECLARE(sun4i_a10_mmc_output, "allwinner,sun4i-a10-mmc-output-clk", sun4i_a10_mmc_output_setup); 384CLK_OF_DECLARE(sun4i_a10_mmc, "allwinner,sun4i-a10-mmc-clk", sun4i_a10_mmc_setup);
385
386static DEFINE_SPINLOCK(sun9i_a80_mmc_lock);
279 387
280static void __init sun4i_a10_mmc_sample_setup(struct device_node *node) 388static void __init sun9i_a80_mmc_setup(struct device_node *node)
281{ 389{
282 sun4i_a10_mmc_phase_setup(node, &mmc_sample_clk); 390 sunxi_mmc_setup(node, &sun9i_a80_mod0_data, &sun9i_a80_mmc_lock);
283} 391}
284CLK_OF_DECLARE(sun4i_a10_mmc_sample, "allwinner,sun4i-a10-mmc-sample-clk", sun4i_a10_mmc_sample_setup); 392CLK_OF_DECLARE(sun9i_a80_mmc, "allwinner,sun9i-a80-mmc-clk", sun9i_a80_mmc_setup);
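After the consolidation, one mmc clock node exports three clocks from a single register: the module clock itself plus two 2-bit phase fields, at bit 8 (output) and bit 20 (sample), as clocks 1 and 2 of the onecell provider. A runnable sketch of the field extraction; the register value is invented:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reg = (2u << 8) | (1u << 20);	/* hypothetical readl() */

	printf("output phase field: %u\n", (reg >> 8) & 0x3);
	printf("sample phase field: %u\n", (reg >> 20) & 0x3);
	return 0;
}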
diff --git a/drivers/clk/sunxi/clk-sun6i-ar100.c b/drivers/clk/sunxi/clk-sun6i-ar100.c
index 3d282fb8f85c..63cf149195ae 100644
--- a/drivers/clk/sunxi/clk-sun6i-ar100.c
+++ b/drivers/clk/sunxi/clk-sun6i-ar100.c
@@ -45,6 +45,8 @@ static unsigned long ar100_recalc_rate(struct clk_hw *hw,
45} 45}
46 46
47static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate, 47static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate,
48 unsigned long min_rate,
49 unsigned long max_rate,
48 unsigned long *best_parent_rate, 50 unsigned long *best_parent_rate,
49 struct clk_hw **best_parent_clk) 51 struct clk_hw **best_parent_clk)
50{ 52{
diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c
index ef49786eefd3..14cd026064bf 100644
--- a/drivers/clk/sunxi/clk-sun8i-mbus.c
+++ b/drivers/clk/sunxi/clk-sun8i-mbus.c
@@ -69,8 +69,17 @@ static DEFINE_SPINLOCK(sun8i_a23_mbus_lock);
69 69
70static void __init sun8i_a23_mbus_setup(struct device_node *node) 70static void __init sun8i_a23_mbus_setup(struct device_node *node)
71{ 71{
72 struct clk *mbus = sunxi_factors_register(node, &sun8i_a23_mbus_data, 72 struct clk *mbus;
73 &sun8i_a23_mbus_lock); 73 void __iomem *reg;
74
75 reg = of_iomap(node, 0);
76 if (!reg) {
77 pr_err("Could not get registers for a23-mbus-clk\n");
78 return;
79 }
80
81 mbus = sunxi_factors_register(node, &sun8i_a23_mbus_data,
82 &sun8i_a23_mbus_lock, reg);
74 83
75 /* The MBUS clock needs to be always enabled */ 84 /* The MBUS clock needs to be always enabled */
76 __clk_get(mbus); 85 __clk_get(mbus);
diff --git a/drivers/clk/sunxi/clk-sun9i-core.c b/drivers/clk/sunxi/clk-sun9i-core.c
index 3cb9036d91bb..d8da77d72861 100644
--- a/drivers/clk/sunxi/clk-sun9i-core.c
+++ b/drivers/clk/sunxi/clk-sun9i-core.c
@@ -24,50 +24,51 @@
24 24
25 25
26/** 26/**
27 * sun9i_a80_get_pll4_factors() - calculates n, p, m factors for PLL1 27 * sun9i_a80_get_pll4_factors() - calculates n, p, m factors for PLL4
28 * PLL4 rate is calculated as follows 28 * PLL4 rate is calculated as follows
29 * rate = (parent_rate * n >> p) / (m + 1); 29 * rate = (parent_rate * n >> p) / (m + 1);
30 * parent_rate is always 24Mhz 30 * parent_rate is always 24MHz
31 * 31 *
32 * p and m are named div1 and div2 in Allwinner's SDK 32 * p and m are named div1 and div2 in Allwinner's SDK
33 */ 33 */
34 34
35static void sun9i_a80_get_pll4_factors(u32 *freq, u32 parent_rate, 35static void sun9i_a80_get_pll4_factors(u32 *freq, u32 parent_rate,
36 u8 *n, u8 *k, u8 *m, u8 *p) 36 u8 *n_ret, u8 *k, u8 *m_ret, u8 *p_ret)
37{ 37{
38 int div; 38 int n;
39 int m = 1;
40 int p = 1;
39 41
40 /* Normalize value to a 6M multiple */ 42 /* Normalize value to a 6 MHz multiple (24 MHz / 4) */
41 div = DIV_ROUND_UP(*freq, 6000000); 43 n = DIV_ROUND_UP(*freq, 6000000);
42 44
43 /* divs above 256 cannot be odd */ 45 /* If n is too large switch to steps of 12 MHz */
44 if (div > 256) 46 if (n > 255) {
45 div = round_up(div, 2); 47 m = 0;
48 n = (n + 1) / 2;
49 }
50
51 /* If n is still too large switch to steps of 24 MHz */
52 if (n > 255) {
53 p = 0;
54 n = (n + 1) / 2;
55 }
46 56
47 /* divs above 512 must be a multiple of 4 */ 57 /* n must be between 12 and 255 */
48 if (div > 512) 58 if (n > 255)
49 div = round_up(div, 4); 59 n = 255;
60 else if (n < 12)
61 n = 12;
50 62
51 *freq = 6000000 * div; 63 *freq = ((24000000 * n) >> p) / (m + 1);
52 64
53 /* we were called to round the frequency, we can now return */ 65 /* we were called to round the frequency, we can now return */
54 if (n == NULL) 66 if (n_ret == NULL)
55 return; 67 return;
56 68
57 /* p will be 1 for divs under 512 */ 69 *n_ret = n;
58 if (div < 512) 70 *m_ret = m;
59 *p = 1; 71 *p_ret = p;
60 else
61 *p = 0;
62
63 /* m will be 1 if div is odd */
64 if (div & 1)
65 *m = 1;
66 else
67 *m = 0;
68
69 /* calculate a suitable n based on m and p */
70 *n = div / (*p + 1) / (*m + 1);
71} 72}
72 73
73static struct clk_factors_config sun9i_a80_pll4_config = { 74static struct clk_factors_config sun9i_a80_pll4_config = {
@@ -89,7 +90,17 @@ static DEFINE_SPINLOCK(sun9i_a80_pll4_lock);
89 90
90static void __init sun9i_a80_pll4_setup(struct device_node *node) 91static void __init sun9i_a80_pll4_setup(struct device_node *node)
91{ 92{
92 sunxi_factors_register(node, &sun9i_a80_pll4_data, &sun9i_a80_pll4_lock); 93 void __iomem *reg;
94
95 reg = of_io_request_and_map(node, 0, of_node_full_name(node));
96 if (IS_ERR(reg)) {
97 pr_err("Could not get registers for a80-pll4-clk: %s\n",
98 node->name);
99 return;
100 }
101
102 sunxi_factors_register(node, &sun9i_a80_pll4_data,
103 &sun9i_a80_pll4_lock, reg);
93} 104}
94CLK_OF_DECLARE(sun9i_a80_pll4, "allwinner,sun9i-a80-pll4-clk", sun9i_a80_pll4_setup); 105CLK_OF_DECLARE(sun9i_a80_pll4, "allwinner,sun9i-a80-pll4-clk", sun9i_a80_pll4_setup);
95 106
@@ -139,8 +150,18 @@ static DEFINE_SPINLOCK(sun9i_a80_gt_lock);
139 150
140static void __init sun9i_a80_gt_setup(struct device_node *node) 151static void __init sun9i_a80_gt_setup(struct device_node *node)
141{ 152{
142 struct clk *gt = sunxi_factors_register(node, &sun9i_a80_gt_data, 153 void __iomem *reg;
143 &sun9i_a80_gt_lock); 154 struct clk *gt;
155
156 reg = of_io_request_and_map(node, 0, of_node_full_name(node));
157 if (IS_ERR(reg)) {
158 pr_err("Could not get registers for a80-gt-clk: %s\n",
159 node->name);
160 return;
161 }
162
163 gt = sunxi_factors_register(node, &sun9i_a80_gt_data,
164 &sun9i_a80_gt_lock, reg);
144 165
145 /* The GT bus clock needs to be always enabled */ 166 /* The GT bus clock needs to be always enabled */
146 __clk_get(gt); 167 __clk_get(gt);
@@ -194,7 +215,17 @@ static DEFINE_SPINLOCK(sun9i_a80_ahb_lock);
194 215
195static void __init sun9i_a80_ahb_setup(struct device_node *node) 216static void __init sun9i_a80_ahb_setup(struct device_node *node)
196{ 217{
197 sunxi_factors_register(node, &sun9i_a80_ahb_data, &sun9i_a80_ahb_lock); 218 void __iomem *reg;
219
220 reg = of_io_request_and_map(node, 0, of_node_full_name(node));
221 if (!reg) {
222 pr_err("Could not get registers for a80-ahb-clk: %s\n",
223 node->name);
224 return;
225 }
226
227 sunxi_factors_register(node, &sun9i_a80_ahb_data,
228 &sun9i_a80_ahb_lock, reg);
198} 229}
199CLK_OF_DECLARE(sun9i_a80_ahb, "allwinner,sun9i-a80-ahb-clk", sun9i_a80_ahb_setup); 230CLK_OF_DECLARE(sun9i_a80_ahb, "allwinner,sun9i-a80-ahb-clk", sun9i_a80_ahb_setup);
200 231
@@ -210,7 +241,17 @@ static DEFINE_SPINLOCK(sun9i_a80_apb0_lock);
210 241
211static void __init sun9i_a80_apb0_setup(struct device_node *node) 242static void __init sun9i_a80_apb0_setup(struct device_node *node)
212{ 243{
213 sunxi_factors_register(node, &sun9i_a80_apb0_data, &sun9i_a80_apb0_lock); 244 void __iomem *reg;
245
246 reg = of_io_request_and_map(node, 0, of_node_full_name(node));
247 if (IS_ERR(reg)) {
248 pr_err("Could not get registers for a80-apb0-clk: %s\n",
249 node->name);
250 return;
251 }
252
253 sunxi_factors_register(node, &sun9i_a80_apb0_data,
254 &sun9i_a80_apb0_lock, reg);
214} 255}
215CLK_OF_DECLARE(sun9i_a80_apb0, "allwinner,sun9i-a80-apb0-clk", sun9i_a80_apb0_setup); 256CLK_OF_DECLARE(sun9i_a80_apb0, "allwinner,sun9i-a80-apb0-clk", sun9i_a80_apb0_setup);
216 257
@@ -266,6 +307,16 @@ static DEFINE_SPINLOCK(sun9i_a80_apb1_lock);
266 307
267static void __init sun9i_a80_apb1_setup(struct device_node *node) 308static void __init sun9i_a80_apb1_setup(struct device_node *node)
268{ 309{
269 sunxi_factors_register(node, &sun9i_a80_apb1_data, &sun9i_a80_apb1_lock); 310 void __iomem *reg;
311
312 reg = of_io_request_and_map(node, 0, of_node_full_name(node));
313 if (IS_ERR(reg)) {
314 pr_err("Could not get registers for a80-apb1-clk: %s\n",
315 node->name);
316 return;
317 }
318
319 sunxi_factors_register(node, &sun9i_a80_apb1_data,
320 &sun9i_a80_apb1_lock, reg);
270} 321}
271CLK_OF_DECLARE(sun9i_a80_apb1, "allwinner,sun9i-a80-apb1-clk", sun9i_a80_apb1_setup); 322CLK_OF_DECLARE(sun9i_a80_apb1, "allwinner,sun9i-a80-apb1-clk", sun9i_a80_apb1_setup);
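The rewritten PLL4 factor selection computes rate = (24 MHz * n >> p) / (m + 1), starting in 6 MHz steps (m = 1, p = 1) and dropping to 12 MHz (m = 0) and then 24 MHz (p = 0) steps when n would overflow 8 bits. A runnable worked example of the rounding:

#include <stdio.h>

static unsigned long long pll4_round(unsigned long long freq)
{
	int m = 1, p = 1;
	unsigned long long n = (freq + 5999999) / 6000000;	/* DIV_ROUND_UP */

	if (n > 255) { m = 0; n = (n + 1) / 2; }	/* 12 MHz steps */
	if (n > 255) { p = 0; n = (n + 1) / 2; }	/* 24 MHz steps */
	if (n > 255)
		n = 255;
	else if (n < 12)
		n = 12;

	return ((24000000ULL * n) >> p) / (m + 1);
}

int main(void)
{
	printf("%llu\n", pll4_round(960000000ULL));	/* n = 160, exact */
	printf("%llu\n", pll4_round(2400000000ULL));	/* via 12 MHz steps */
	return 0;
}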
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
new file mode 100644
index 000000000000..710c273648d7
--- /dev/null
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -0,0 +1,219 @@
1/*
2 * Copyright 2015 Chen-Yu Tsai
3 *
4 * Chen-Yu Tsai <wens@csie.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/clk-provider.h>
18#include <linux/clkdev.h>
19#include <linux/module.h>
20#include <linux/of.h>
21#include <linux/of_device.h>
22#include <linux/reset.h>
23#include <linux/platform_device.h>
24#include <linux/reset-controller.h>
25#include <linux/spinlock.h>
26
27#define SUN9I_MMC_WIDTH 4
28
29#define SUN9I_MMC_GATE_BIT 16
30#define SUN9I_MMC_RESET_BIT 18
31
32struct sun9i_mmc_clk_data {
33 spinlock_t lock;
34 void __iomem *membase;
35 struct clk *clk;
36 struct reset_control *reset;
37 struct clk_onecell_data clk_data;
38 struct reset_controller_dev rcdev;
39};
40
41static int sun9i_mmc_reset_assert(struct reset_controller_dev *rcdev,
42 unsigned long id)
43{
44 struct sun9i_mmc_clk_data *data = container_of(rcdev,
45 struct sun9i_mmc_clk_data,
46 rcdev);
47 unsigned long flags;
48 void __iomem *reg = data->membase + SUN9I_MMC_WIDTH * id;
49 u32 val;
50
51 clk_prepare_enable(data->clk);
52 spin_lock_irqsave(&data->lock, flags);
53
54 val = readl(reg);
55 writel(val & ~BIT(SUN9I_MMC_RESET_BIT), reg);
56
57 spin_unlock_irqrestore(&data->lock, flags);
58 clk_disable_unprepare(data->clk);
59
60 return 0;
61}
62
63static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev,
64 unsigned long id)
65{
66 struct sun9i_mmc_clk_data *data = container_of(rcdev,
67 struct sun9i_mmc_clk_data,
68 rcdev);
69 unsigned long flags;
70 void __iomem *reg = data->membase + SUN9I_MMC_WIDTH * id;
71 u32 val;
72
73 clk_prepare_enable(data->clk);
74 spin_lock_irqsave(&data->lock, flags);
75
76 val = readl(reg);
77 writel(val | BIT(SUN9I_MMC_RESET_BIT), reg);
78
79 spin_unlock_irqrestore(&data->lock, flags);
80 clk_disable_unprepare(data->clk);
81
82 return 0;
83}
84
85static struct reset_control_ops sun9i_mmc_reset_ops = {
86 .assert = sun9i_mmc_reset_assert,
87 .deassert = sun9i_mmc_reset_deassert,
88};
89
90static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)
91{
92 struct device_node *np = pdev->dev.of_node;
93 struct sun9i_mmc_clk_data *data;
94 struct clk_onecell_data *clk_data;
95 const char *clk_name = np->name;
96 const char *clk_parent;
97 struct resource *r;
98 int count, i, ret;
99
100 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
101 if (!data)
102 return -ENOMEM;
103
104 spin_lock_init(&data->lock);
105
106 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
107 /* one clock/reset pair per word */
108 count = DIV_ROUND_UP((r->end - r->start + 1), SUN9I_MMC_WIDTH);
109 data->membase = devm_ioremap_resource(&pdev->dev, r);
110 if (IS_ERR(data->membase))
111 return PTR_ERR(data->membase);
112
113 clk_data = &data->clk_data;
114 clk_data->clk_num = count;
115 clk_data->clks = devm_kcalloc(&pdev->dev, count, sizeof(struct clk *),
116 GFP_KERNEL);
117 if (!clk_data->clks)
118 return -ENOMEM;
119
120 data->clk = devm_clk_get(&pdev->dev, NULL);
121 if (IS_ERR(data->clk)) {
122 dev_err(&pdev->dev, "Could not get clock\n");
123 return PTR_ERR(data->clk);
124 }
125
126 data->reset = devm_reset_control_get(&pdev->dev, NULL);
127 if (IS_ERR(data->reset)) {
128 dev_err(&pdev->dev, "Could not get reset control\n");
129 return PTR_ERR(data->reset);
130 }
131
132 ret = reset_control_deassert(data->reset);
133 if (ret) {
134 dev_err(&pdev->dev, "Reset deassert err %d\n", ret);
135 return ret;
136 }
137
138 clk_parent = __clk_get_name(data->clk);
139 for (i = 0; i < count; i++) {
140 of_property_read_string_index(np, "clock-output-names",
141 i, &clk_name);
142
143 clk_data->clks[i] = clk_register_gate(&pdev->dev, clk_name,
144 clk_parent, 0,
145 data->membase + SUN9I_MMC_WIDTH * i,
146 SUN9I_MMC_GATE_BIT, 0,
147 &data->lock);
148
149 if (IS_ERR(clk_data->clks[i])) {
150 ret = PTR_ERR(clk_data->clks[i]);
151 goto err_clk_register;
152 }
153 }
154
155 ret = of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
156 if (ret)
157 goto err_clk_provider;
158
159 data->rcdev.owner = THIS_MODULE;
160 data->rcdev.nr_resets = count;
161 data->rcdev.ops = &sun9i_mmc_reset_ops;
162 data->rcdev.of_node = pdev->dev.of_node;
163
164 ret = reset_controller_register(&data->rcdev);
165 if (ret)
166 goto err_rc_reg;
167
168 platform_set_drvdata(pdev, data);
169
170 return 0;
171
172err_rc_reg:
173 of_clk_del_provider(np);
174
175err_clk_provider:
176 for (i = 0; i < count; i++)
177 clk_unregister(clk_data->clks[i]);
178
179err_clk_register:
180 reset_control_assert(data->reset);
181
182 return ret;
183}
184
185static int sun9i_a80_mmc_config_clk_remove(struct platform_device *pdev)
186{
187 struct device_node *np = pdev->dev.of_node;
188 struct sun9i_mmc_clk_data *data = platform_get_drvdata(pdev);
189 struct clk_onecell_data *clk_data = &data->clk_data;
190 int i;
191
192 reset_controller_unregister(&data->rcdev);
193 of_clk_del_provider(np);
194 for (i = 0; i < clk_data->clk_num; i++)
195 clk_unregister(clk_data->clks[i]);
196
197 reset_control_assert(data->reset);
198
199 return 0;
200}
201
202static const struct of_device_id sun9i_a80_mmc_config_clk_dt_ids[] = {
203 { .compatible = "allwinner,sun9i-a80-mmc-config-clk" },
204 { /* sentinel */ }
205};
206
207static struct platform_driver sun9i_a80_mmc_config_clk_driver = {
208 .driver = {
209 .name = "sun9i-a80-mmc-config-clk",
210 .of_match_table = sun9i_a80_mmc_config_clk_dt_ids,
211 },
212 .probe = sun9i_a80_mmc_config_clk_probe,
213 .remove = sun9i_a80_mmc_config_clk_remove,
214};
215module_platform_driver(sun9i_a80_mmc_config_clk_driver);
216
217MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
218MODULE_DESCRIPTION("Allwinner A80 MMC clock/reset Driver");
219MODULE_LICENSE("GPL v2");
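An illustrative consumer sketch (not part of this merge; the "config" names
and the probe function are hypothetical): each MMC port gets one gate clock
and one reset line from the provider above, consumed through the standard
clk and reset APIs:

static int example_mmc_port_probe(struct platform_device *pdev)
{
	struct clk *cfg;
	struct reset_control *rst;
	int ret;

	cfg = devm_clk_get(&pdev->dev, "config");	/* hypothetical con_id */
	if (IS_ERR(cfg))
		return PTR_ERR(cfg);

	rst = devm_reset_control_get(&pdev->dev, "config");	/* hypothetical id */
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ret = reset_control_deassert(rst);	/* lands in sun9i_mmc_reset_deassert() */
	if (ret)
		return ret;

	return clk_prepare_enable(cfg);		/* toggles SUN9I_MMC_GATE_BIT */
}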
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 1818f404538d..379324eb5486 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -20,11 +20,221 @@
20#include <linux/of_address.h> 20#include <linux/of_address.h>
21#include <linux/reset-controller.h> 21#include <linux/reset-controller.h>
22#include <linux/spinlock.h> 22#include <linux/spinlock.h>
23#include <linux/log2.h>
23 24
24#include "clk-factors.h" 25#include "clk-factors.h"
25 26
26static DEFINE_SPINLOCK(clk_lock); 27static DEFINE_SPINLOCK(clk_lock);
27 28
29/**
30 * sun6i_a31_ahb1_clk_setup() - Setup function for a31 ahb1 composite clk
31 */
32
33#define SUN6I_AHB1_MAX_PARENTS 4
34#define SUN6I_AHB1_MUX_PARENT_PLL6 3
35#define SUN6I_AHB1_MUX_SHIFT 12
36/* the un-shifted mask is what the clk_mux helper expects */
37#define SUN6I_AHB1_MUX_MASK 0x3
38#define SUN6I_AHB1_MUX_GET_PARENT(reg) ((reg >> SUN6I_AHB1_MUX_SHIFT) & \
39 SUN6I_AHB1_MUX_MASK)
40
41#define SUN6I_AHB1_DIV_SHIFT 4
42#define SUN6I_AHB1_DIV_MASK (0x3 << SUN6I_AHB1_DIV_SHIFT)
43#define SUN6I_AHB1_DIV_GET(reg) ((reg & SUN6I_AHB1_DIV_MASK) >> \
44 SUN6I_AHB1_DIV_SHIFT)
45#define SUN6I_AHB1_DIV_SET(reg, div) ((reg & ~SUN6I_AHB1_DIV_MASK) | \
46 (div << SUN6I_AHB1_DIV_SHIFT))
47#define SUN6I_AHB1_PLL6_DIV_SHIFT 6
48#define SUN6I_AHB1_PLL6_DIV_MASK (0x3 << SUN6I_AHB1_PLL6_DIV_SHIFT)
49#define SUN6I_AHB1_PLL6_DIV_GET(reg) ((reg & SUN6I_AHB1_PLL6_DIV_MASK) >> \
50 SUN6I_AHB1_PLL6_DIV_SHIFT)
51#define SUN6I_AHB1_PLL6_DIV_SET(reg, div) ((reg & ~SUN6I_AHB1_PLL6_DIV_MASK) | \
52 (div << SUN6I_AHB1_PLL6_DIV_SHIFT))
53
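/*
 * Worked illustration (not part of the patch): round-tripping the
 * divider field through the accessors above. With reg = 0x3020, the
 * mux field selects parent 3 (pll6) and the AHB1 divider field in
 * bits [5:4] holds 2, i.e. a divide-by-4 post-divider:
 *
 *	SUN6I_AHB1_MUX_GET_PARENT(0x3020)	-> 3
 *	SUN6I_AHB1_DIV_GET(0x3020)		-> 2
 *	SUN6I_AHB1_DIV_SET(0x3020, 3)		-> 0x3030 (now a /8)
 */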
54struct sun6i_ahb1_clk {
55 struct clk_hw hw;
56 void __iomem *reg;
57};
58
59#define to_sun6i_ahb1_clk(_hw) container_of(_hw, struct sun6i_ahb1_clk, hw)
60
61static unsigned long sun6i_ahb1_clk_recalc_rate(struct clk_hw *hw,
62 unsigned long parent_rate)
63{
64 struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw);
65 unsigned long rate;
66 u32 reg;
67
68 /* Fetch the register value */
69 reg = readl(ahb1->reg);
70
71 /* apply pre-divider first if parent is pll6 */
72 if (SUN6I_AHB1_MUX_GET_PARENT(reg) == SUN6I_AHB1_MUX_PARENT_PLL6)
73 parent_rate /= SUN6I_AHB1_PLL6_DIV_GET(reg) + 1;
74
75 /* clk divider */
76 rate = parent_rate >> SUN6I_AHB1_DIV_GET(reg);
77
78 return rate;
79}
80
81static long sun6i_ahb1_clk_round(unsigned long rate, u8 *divp, u8 *pre_divp,
82 u8 parent, unsigned long parent_rate)
83{
84 u8 div, calcp, calcm = 1;
85
86 /*
87 * clock can only divide, so we will never be able to achieve
88 * frequencies higher than the parent frequency
89 */
90 if (parent_rate && rate > parent_rate)
91 rate = parent_rate;
92
93 div = DIV_ROUND_UP(parent_rate, rate);
94
95 /* calculate pre-divider if parent is pll6 */
96 if (parent == SUN6I_AHB1_MUX_PARENT_PLL6) {
97 if (div < 4)
98 calcp = 0;
99 else if (div / 2 < 4)
100 calcp = 1;
101 else if (div / 4 < 4)
102 calcp = 2;
103 else
104 calcp = 3;
105
106 calcm = DIV_ROUND_UP(div, 1 << calcp);
107 } else {
108 calcp = __roundup_pow_of_two(div);
109 calcp = calcp > 3 ? 3 : calcp;
110 }
111
112 /* we were asked to pass back divider values */
113 if (divp) {
114 *divp = calcp;
115 *pre_divp = calcm - 1;
116 }
117
118 return (parent_rate / calcm) >> calcp;
119}
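/*
 * Worked example (illustrative only): pll6 parent at 600 MHz,
 * requested rate 200 MHz.
 *
 *	div   = DIV_ROUND_UP(600000000, 200000000) = 3
 *	calcp = 0 (div < 4), calcm = DIV_ROUND_UP(3, 1 << 0) = 3
 *	return (600000000 / 3) >> 0 = 200000000
 *
 * Callers that pass divp/pre_divp back then get *divp = 0 and
 * *pre_divp = calcm - 1 = 2 for programming the register fields.
 */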
120
121static long sun6i_ahb1_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
122 unsigned long min_rate,
123 unsigned long max_rate,
124 unsigned long *best_parent_rate,
125 struct clk_hw **best_parent_clk)
126{
127 struct clk *clk = hw->clk, *parent, *best_parent = NULL;
128 int i, num_parents;
129 unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
130
131 /* find the parent that can help provide the fastest rate <= rate */
132 num_parents = __clk_get_num_parents(clk);
133 for (i = 0; i < num_parents; i++) {
134 parent = clk_get_parent_by_index(clk, i);
135 if (!parent)
136 continue;
137 if (__clk_get_flags(clk) & CLK_SET_RATE_PARENT)
138 parent_rate = __clk_round_rate(parent, rate);
139 else
140 parent_rate = __clk_get_rate(parent);
141
142 child_rate = sun6i_ahb1_clk_round(rate, NULL, NULL, i,
143 parent_rate);
144
145 if (child_rate <= rate && child_rate > best_child_rate) {
146 best_parent = parent;
147 best = parent_rate;
148 best_child_rate = child_rate;
149 }
150 }
151
152 if (best_parent)
153 *best_parent_clk = __clk_get_hw(best_parent);
154 *best_parent_rate = best;
155
156 return best_child_rate;
157}
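/*
 * Illustrative only: for a 220 MHz request with a 600 MHz pll6 among
 * the parents, sun6i_ahb1_clk_round() yields 200 MHz via pll6
 * (pre-divider 3, no shift). If no other parent gets closer without
 * overshooting, pll6 wins and *best_parent_rate is set to 600000000.
 */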
158
159static int sun6i_ahb1_clk_set_rate(struct clk_hw *hw, unsigned long rate,
160 unsigned long parent_rate)
161{
162 struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw);
163 unsigned long flags;
164 u8 div, pre_div, parent;
165 u32 reg;
166
167 spin_lock_irqsave(&clk_lock, flags);
168
169 reg = readl(ahb1->reg);
170
171 /* need to know which parent is used to apply pre-divider */
172 parent = SUN6I_AHB1_MUX_GET_PARENT(reg);
173 sun6i_ahb1_clk_round(rate, &div, &pre_div, parent, parent_rate);
174
175 reg = SUN6I_AHB1_DIV_SET(reg, div);
176 reg = SUN6I_AHB1_PLL6_DIV_SET(reg, pre_div);
177 writel(reg, ahb1->reg);
178
179 spin_unlock_irqrestore(&clk_lock, flags);
180
181 return 0;
182}
183
184static const struct clk_ops sun6i_ahb1_clk_ops = {
185 .determine_rate = sun6i_ahb1_clk_determine_rate,
186 .recalc_rate = sun6i_ahb1_clk_recalc_rate,
187 .set_rate = sun6i_ahb1_clk_set_rate,
188};
189
190static void __init sun6i_ahb1_clk_setup(struct device_node *node)
191{
192 struct clk *clk;
193 struct sun6i_ahb1_clk *ahb1;
194 struct clk_mux *mux;
195 const char *clk_name = node->name;
196 const char *parents[SUN6I_AHB1_MAX_PARENTS];
197 void __iomem *reg;
198 int i = 0;
199
200 reg = of_io_request_and_map(node, 0, of_node_full_name(node));
201
202 /* we have a mux, so there will be more than one parent */
203 while (i < SUN6I_AHB1_MAX_PARENTS &&
204 (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
205 i++;
206
207 of_property_read_string(node, "clock-output-names", &clk_name);
208
209 ahb1 = kzalloc(sizeof(struct sun6i_ahb1_clk), GFP_KERNEL);
210 if (!ahb1)
211 return;
212
213 mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
214 if (!mux) {
215 kfree(ahb1);
216 return;
217 }
218
219 /* set up clock properties */
220 mux->reg = reg;
221 mux->shift = SUN6I_AHB1_MUX_SHIFT;
222 mux->mask = SUN6I_AHB1_MUX_MASK;
223 mux->lock = &clk_lock;
224 ahb1->reg = reg;
225
226 clk = clk_register_composite(NULL, clk_name, parents, i,
227 &mux->hw, &clk_mux_ops,
228 &ahb1->hw, &sun6i_ahb1_clk_ops,
229 NULL, NULL, 0);
230
231 if (!IS_ERR(clk)) {
232 of_clk_add_provider(node, of_clk_src_simple_get, clk);
233 clk_register_clkdev(clk, clk_name, NULL);
234 }
235}
236CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-clk", sun6i_ahb1_clk_setup);
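/*
 * Consumer sketch (assumed names, not from this patch): with the node's
 * clock-output-names set to "ahb1", the clkdev lookup registered above
 * lets any code exercise the determine_rate/set_rate callbacks through
 * the common framework:
 *
 *	struct clk *ahb1 = clk_get(NULL, "ahb1");
 *	if (!IS_ERR(ahb1))
 *		clk_set_rate(ahb1, 200000000);
 */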
237
28/* Maximum number of parents our clocks have */ 238/* Maximum number of parents our clocks have */
29#define SUNXI_MAX_PARENTS 5 239#define SUNXI_MAX_PARENTS 5
30 240
@@ -355,43 +565,6 @@ static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate,
355} 565}
356 566
357/** 567/**
358 * clk_sunxi_mmc_phase_control() - configures MMC clock phase control
359 */
360
361void clk_sunxi_mmc_phase_control(struct clk *clk, u8 sample, u8 output)
362{
363 #define to_clk_composite(_hw) container_of(_hw, struct clk_composite, hw)
364 #define to_clk_factors(_hw) container_of(_hw, struct clk_factors, hw)
365
366 struct clk_hw *hw = __clk_get_hw(clk);
367 struct clk_composite *composite = to_clk_composite(hw);
368 struct clk_hw *rate_hw = composite->rate_hw;
369 struct clk_factors *factors = to_clk_factors(rate_hw);
370 unsigned long flags = 0;
371 u32 reg;
372
373 if (factors->lock)
374 spin_lock_irqsave(factors->lock, flags);
375
376 reg = readl(factors->reg);
377
378 /* set sample clock phase control */
379 reg &= ~(0x7 << 20);
380 reg |= ((sample & 0x7) << 20);
381
382 /* set output clock phase control */
383 reg &= ~(0x7 << 8);
384 reg |= ((output & 0x7) << 8);
385
386 writel(reg, factors->reg);
387
388 if (factors->lock)
389 spin_unlock_irqrestore(factors->lock, flags);
390}
391EXPORT_SYMBOL(clk_sunxi_mmc_phase_control);
392
393
394/**
395 * sunxi_factors_clk_setup() - Setup function for factor clocks 568 * sunxi_factors_clk_setup() - Setup function for factor clocks
396 */ 569 */
397 570
@@ -413,6 +586,7 @@ static struct clk_factors_config sun6i_a31_pll1_config = {
413 .kwidth = 2, 586 .kwidth = 2,
414 .mshift = 0, 587 .mshift = 0,
415 .mwidth = 2, 588 .mwidth = 2,
589 .n_start = 1,
416}; 590};
417 591
418static struct clk_factors_config sun8i_a23_pll1_config = { 592static struct clk_factors_config sun8i_a23_pll1_config = {
@@ -520,7 +694,16 @@ static const struct factors_data sun7i_a20_out_data __initconst = {
520static struct clk * __init sunxi_factors_clk_setup(struct device_node *node, 694static struct clk * __init sunxi_factors_clk_setup(struct device_node *node,
521 const struct factors_data *data) 695 const struct factors_data *data)
522{ 696{
523 return sunxi_factors_register(node, data, &clk_lock); 697 void __iomem *reg;
698
699 reg = of_iomap(node, 0);
700 if (!reg) {
701 pr_err("Could not get registers for factors-clk: %s\n",
702 node->name);
703 return NULL;
704 }
705
706 return sunxi_factors_register(node, data, &clk_lock, reg);
524} 707}
525 708
526 709
@@ -561,7 +744,7 @@ static void __init sunxi_mux_clk_setup(struct device_node *node,
561 of_property_read_string(node, "clock-output-names", &clk_name); 744 of_property_read_string(node, "clock-output-names", &clk_name);
562 745
563 clk = clk_register_mux(NULL, clk_name, parents, i, 746 clk = clk_register_mux(NULL, clk_name, parents, i,
564 CLK_SET_RATE_NO_REPARENT, reg, 747 CLK_SET_RATE_PARENT, reg,
565 data->shift, SUNXI_MUX_GATE_WIDTH, 748 data->shift, SUNXI_MUX_GATE_WIDTH,
566 0, &clk_lock); 749 0, &clk_lock);
567 750
@@ -1217,7 +1400,6 @@ CLK_OF_DECLARE(sun7i_a20_clk_init, "allwinner,sun7i-a20", sun5i_init_clocks);
1217 1400
1218static const char *sun6i_critical_clocks[] __initdata = { 1401static const char *sun6i_critical_clocks[] __initdata = {
1219 "cpu", 1402 "cpu",
1220 "ahb1_sdram",
1221}; 1403};
1222 1404
1223static void __init sun6i_init_clocks(struct device_node *node) 1405static void __init sun6i_init_clocks(struct device_node *node)
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index f7dfb72884a4..edb8358fa6ce 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o
15obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o 15obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o
16obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o 16obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o
17obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124.o 17obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124.o
18obj-$(CONFIG_ARCH_TEGRA_132_SOC) += clk-tegra124.o
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index 0011d547a9f7..60738cc954cb 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -64,10 +64,8 @@ enum clk_id {
64 tegra_clk_disp2, 64 tegra_clk_disp2,
65 tegra_clk_dp2, 65 tegra_clk_dp2,
66 tegra_clk_dpaux, 66 tegra_clk_dpaux,
67 tegra_clk_dsia,
68 tegra_clk_dsialp, 67 tegra_clk_dsialp,
69 tegra_clk_dsia_mux, 68 tegra_clk_dsia_mux,
70 tegra_clk_dsib,
71 tegra_clk_dsiblp, 69 tegra_clk_dsiblp,
72 tegra_clk_dsib_mux, 70 tegra_clk_dsib_mux,
73 tegra_clk_dtv, 71 tegra_clk_dtv,
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
index 9e899c18af86..d84ae49d0e05 100644
--- a/drivers/clk/tegra/clk-periph.c
+++ b/drivers/clk/tegra/clk-periph.c
@@ -28,7 +28,7 @@ static u8 clk_periph_get_parent(struct clk_hw *hw)
28 const struct clk_ops *mux_ops = periph->mux_ops; 28 const struct clk_ops *mux_ops = periph->mux_ops;
29 struct clk_hw *mux_hw = &periph->mux.hw; 29 struct clk_hw *mux_hw = &periph->mux.hw;
30 30
31 mux_hw->clk = hw->clk; 31 __clk_hw_set_clk(mux_hw, hw);
32 32
33 return mux_ops->get_parent(mux_hw); 33 return mux_ops->get_parent(mux_hw);
34} 34}
@@ -39,7 +39,7 @@ static int clk_periph_set_parent(struct clk_hw *hw, u8 index)
39 const struct clk_ops *mux_ops = periph->mux_ops; 39 const struct clk_ops *mux_ops = periph->mux_ops;
40 struct clk_hw *mux_hw = &periph->mux.hw; 40 struct clk_hw *mux_hw = &periph->mux.hw;
41 41
42 mux_hw->clk = hw->clk; 42 __clk_hw_set_clk(mux_hw, hw);
43 43
44 return mux_ops->set_parent(mux_hw, index); 44 return mux_ops->set_parent(mux_hw, index);
45} 45}
@@ -51,7 +51,7 @@ static unsigned long clk_periph_recalc_rate(struct clk_hw *hw,
51 const struct clk_ops *div_ops = periph->div_ops; 51 const struct clk_ops *div_ops = periph->div_ops;
52 struct clk_hw *div_hw = &periph->divider.hw; 52 struct clk_hw *div_hw = &periph->divider.hw;
53 53
54 div_hw->clk = hw->clk; 54 __clk_hw_set_clk(div_hw, hw);
55 55
56 return div_ops->recalc_rate(div_hw, parent_rate); 56 return div_ops->recalc_rate(div_hw, parent_rate);
57} 57}
@@ -63,7 +63,7 @@ static long clk_periph_round_rate(struct clk_hw *hw, unsigned long rate,
63 const struct clk_ops *div_ops = periph->div_ops; 63 const struct clk_ops *div_ops = periph->div_ops;
64 struct clk_hw *div_hw = &periph->divider.hw; 64 struct clk_hw *div_hw = &periph->divider.hw;
65 65
66 div_hw->clk = hw->clk; 66 __clk_hw_set_clk(div_hw, hw);
67 67
68 return div_ops->round_rate(div_hw, rate, prate); 68 return div_ops->round_rate(div_hw, rate, prate);
69} 69}
@@ -75,7 +75,7 @@ static int clk_periph_set_rate(struct clk_hw *hw, unsigned long rate,
75 const struct clk_ops *div_ops = periph->div_ops; 75 const struct clk_ops *div_ops = periph->div_ops;
76 struct clk_hw *div_hw = &periph->divider.hw; 76 struct clk_hw *div_hw = &periph->divider.hw;
77 77
78 div_hw->clk = hw->clk; 78 __clk_hw_set_clk(div_hw, hw);
79 79
80 return div_ops->set_rate(div_hw, rate, parent_rate); 80 return div_ops->set_rate(div_hw, rate, parent_rate);
81} 81}
@@ -86,7 +86,7 @@ static int clk_periph_is_enabled(struct clk_hw *hw)
86 const struct clk_ops *gate_ops = periph->gate_ops; 86 const struct clk_ops *gate_ops = periph->gate_ops;
87 struct clk_hw *gate_hw = &periph->gate.hw; 87 struct clk_hw *gate_hw = &periph->gate.hw;
88 88
89 gate_hw->clk = hw->clk; 89 __clk_hw_set_clk(gate_hw, hw);
90 90
91 return gate_ops->is_enabled(gate_hw); 91 return gate_ops->is_enabled(gate_hw);
92} 92}
@@ -97,7 +97,7 @@ static int clk_periph_enable(struct clk_hw *hw)
97 const struct clk_ops *gate_ops = periph->gate_ops; 97 const struct clk_ops *gate_ops = periph->gate_ops;
98 struct clk_hw *gate_hw = &periph->gate.hw; 98 struct clk_hw *gate_hw = &periph->gate.hw;
99 99
100 gate_hw->clk = hw->clk; 100 __clk_hw_set_clk(gate_hw, hw);
101 101
102 return gate_ops->enable(gate_hw); 102 return gate_ops->enable(gate_hw);
103} 103}
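Every hunk in this file makes the same substitution: the open-coded
"sub_hw->clk = hw->clk" no longer forwards enough state now that the
framework has split struct clk into a per-user handle and a shared
clk_core, so the composite callbacks use the __clk_hw_set_clk() helper
instead. A sketch of that helper as of this merge window (paraphrased
from the provider headers; treat the exact header location as an
implementation detail):

static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
{
	/* forward both the per-user clk handle and the shared core */
	dst->clk = src->clk;
	dst->core = src->core;
}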
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index c7c6d8fb32fb..bfef9abdf232 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -816,7 +816,9 @@ const struct clk_ops tegra_clk_plle_ops = {
816 .enable = clk_plle_enable, 816 .enable = clk_plle_enable,
817}; 817};
818 818
819#if defined(CONFIG_ARCH_TEGRA_114_SOC) || defined(CONFIG_ARCH_TEGRA_124_SOC) 819#if defined(CONFIG_ARCH_TEGRA_114_SOC) || \
820 defined(CONFIG_ARCH_TEGRA_124_SOC) || \
821 defined(CONFIG_ARCH_TEGRA_132_SOC)
820 822
821static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params, 823static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params,
822 unsigned long parent_rate) 824 unsigned long parent_rate)
@@ -1505,7 +1507,9 @@ struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
1505 return clk; 1507 return clk;
1506} 1508}
1507 1509
1508#if defined(CONFIG_ARCH_TEGRA_114_SOC) || defined(CONFIG_ARCH_TEGRA_124_SOC) 1510#if defined(CONFIG_ARCH_TEGRA_114_SOC) || \
1511 defined(CONFIG_ARCH_TEGRA_124_SOC) || \
1512 defined(CONFIG_ARCH_TEGRA_132_SOC)
1509static const struct clk_ops tegra_clk_pllxc_ops = { 1513static const struct clk_ops tegra_clk_pllxc_ops = {
1510 .is_enabled = clk_pll_is_enabled, 1514 .is_enabled = clk_pll_is_enabled,
1511 .enable = clk_pll_iddq_enable, 1515 .enable = clk_pll_iddq_enable,
@@ -1565,7 +1569,7 @@ struct clk *tegra_clk_register_pllxc(const char *name, const char *parent_name,
1565 parent = __clk_lookup(parent_name); 1569 parent = __clk_lookup(parent_name);
1566 if (!parent) { 1570 if (!parent) {
1567 WARN(1, "parent clk %s of %s must be registered first\n", 1571 WARN(1, "parent clk %s of %s must be registered first\n",
1568 name, parent_name); 1572 parent_name, name);
1569 return ERR_PTR(-EINVAL); 1573 return ERR_PTR(-EINVAL);
1570 } 1574 }
1571 1575
@@ -1665,7 +1669,7 @@ struct clk *tegra_clk_register_pllm(const char *name, const char *parent_name,
1665 parent = __clk_lookup(parent_name); 1669 parent = __clk_lookup(parent_name);
1666 if (!parent) { 1670 if (!parent) {
1667 WARN(1, "parent clk %s of %s must be registered first\n", 1671 WARN(1, "parent clk %s of %s must be registered first\n",
1668 name, parent_name); 1672 parent_name, name);
1669 return ERR_PTR(-EINVAL); 1673 return ERR_PTR(-EINVAL);
1670 } 1674 }
1671 1675
@@ -1706,7 +1710,7 @@ struct clk *tegra_clk_register_pllc(const char *name, const char *parent_name,
1706 parent = __clk_lookup(parent_name); 1710 parent = __clk_lookup(parent_name);
1707 if (!parent) { 1711 if (!parent) {
1708 WARN(1, "parent clk %s of %s must be registered first\n", 1712 WARN(1, "parent clk %s of %s must be registered first\n",
1709 name, parent_name); 1713 parent_name, name);
1710 return ERR_PTR(-EINVAL); 1714 return ERR_PTR(-EINVAL);
1711 } 1715 }
1712 1716
@@ -1802,7 +1806,7 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name,
1802} 1806}
1803#endif 1807#endif
1804 1808
1805#ifdef CONFIG_ARCH_TEGRA_124_SOC 1809#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
1806static const struct clk_ops tegra_clk_pllss_ops = { 1810static const struct clk_ops tegra_clk_pllss_ops = {
1807 .is_enabled = clk_pll_is_enabled, 1811 .is_enabled = clk_pll_is_enabled,
1808 .enable = clk_pll_iddq_enable, 1812 .enable = clk_pll_iddq_enable,
@@ -1830,7 +1834,7 @@ struct clk *tegra_clk_register_pllss(const char *name, const char *parent_name,
1830 parent = __clk_lookup(parent_name); 1834 parent = __clk_lookup(parent_name);
1831 if (!parent) { 1835 if (!parent) {
1832 WARN(1, "parent clk %s of %s must be registered first\n", 1836 WARN(1, "parent clk %s of %s must be registered first\n",
1833 name, parent_name); 1837 parent_name, name);
1834 return ERR_PTR(-EINVAL); 1838 return ERR_PTR(-EINVAL);
1835 } 1839 }
1836 1840
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 37f32c49674e..cef0727b9eec 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -434,10 +434,10 @@ static struct tegra_periph_init_data periph_clks[] = {
434 MUX("hda", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA, 125, TEGRA_PERIPH_ON_APB, tegra_clk_hda), 434 MUX("hda", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA, 125, TEGRA_PERIPH_ON_APB, tegra_clk_hda),
435 MUX("hda2codec_2x", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA2CODEC_2X, 111, TEGRA_PERIPH_ON_APB, tegra_clk_hda2codec_2x), 435 MUX("hda2codec_2x", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA2CODEC_2X, 111, TEGRA_PERIPH_ON_APB, tegra_clk_hda2codec_2x),
436 MUX("vfir", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_VFIR, 7, TEGRA_PERIPH_ON_APB, tegra_clk_vfir), 436 MUX("vfir", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_VFIR, 7, TEGRA_PERIPH_ON_APB, tegra_clk_vfir),
437 MUX("sdmmc1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1), 437 MUX("sdmmc1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC1, 14, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc1),
438 MUX("sdmmc2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2), 438 MUX("sdmmc2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC2, 9, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc2),
439 MUX("sdmmc3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3), 439 MUX("sdmmc3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc3),
440 MUX("sdmmc4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4), 440 MUX("sdmmc4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc4),
441 MUX("la", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_LA, 76, TEGRA_PERIPH_ON_APB, tegra_clk_la), 441 MUX("la", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_LA, 76, TEGRA_PERIPH_ON_APB, tegra_clk_la),
442 MUX("trace", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_TRACE, 77, TEGRA_PERIPH_ON_APB, tegra_clk_trace), 442 MUX("trace", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_TRACE, 77, TEGRA_PERIPH_ON_APB, tegra_clk_trace),
443 MUX("owr", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_OWR, 71, TEGRA_PERIPH_ON_APB, tegra_clk_owr), 443 MUX("owr", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_OWR, 71, TEGRA_PERIPH_ON_APB, tegra_clk_owr),
@@ -470,10 +470,10 @@ static struct tegra_periph_init_data periph_clks[] = {
470 MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1), 470 MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1),
471 MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1), 471 MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1),
472 MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 165, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2), 472 MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 165, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2),
473 MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1_8), 473 MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc1_8),
474 MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2_8), 474 MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc2_8),
475 MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3_8), 475 MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc3_8),
476 MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4_8), 476 MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc4_8),
477 MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8), 477 MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8),
478 MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8), 478 MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8),
479 MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8), 479 MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8),
@@ -537,8 +537,6 @@ static struct tegra_periph_init_data gate_clks[] = {
537 GATE("xusb_host", "xusb_host_src", 89, 0, tegra_clk_xusb_host, 0), 537 GATE("xusb_host", "xusb_host_src", 89, 0, tegra_clk_xusb_host, 0),
538 GATE("xusb_ss", "xusb_ss_src", 156, 0, tegra_clk_xusb_ss, 0), 538 GATE("xusb_ss", "xusb_ss_src", 156, 0, tegra_clk_xusb_ss, 0),
539 GATE("xusb_dev", "xusb_dev_src", 95, 0, tegra_clk_xusb_dev, 0), 539 GATE("xusb_dev", "xusb_dev_src", 95, 0, tegra_clk_xusb_dev, 0),
540 GATE("dsia", "dsia_mux", 48, 0, tegra_clk_dsia, 0),
541 GATE("dsib", "dsib_mux", 82, 0, tegra_clk_dsib, 0),
542 GATE("emc", "emc_mux", 57, 0, tegra_clk_emc, CLK_IGNORE_UNUSED), 540 GATE("emc", "emc_mux", 57, 0, tegra_clk_emc, CLK_IGNORE_UNUSED),
543 GATE("sata_cold", "clk_m", 129, TEGRA_PERIPH_ON_APB, tegra_clk_sata_cold, 0), 541 GATE("sata_cold", "clk_m", 129, TEGRA_PERIPH_ON_APB, tegra_clk_sata_cold, 0),
544 GATE("ispb", "clk_m", 3, 0, tegra_clk_ispb, 0), 542 GATE("ispb", "clk_m", 3, 0, tegra_clk_ispb, 0),
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 0b03d2cf7264..d0766423a5d6 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -715,7 +715,6 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
715 [tegra_clk_sbc2_8] = { .dt_id = TEGRA114_CLK_SBC2, .present = true }, 715 [tegra_clk_sbc2_8] = { .dt_id = TEGRA114_CLK_SBC2, .present = true },
716 [tegra_clk_sbc3_8] = { .dt_id = TEGRA114_CLK_SBC3, .present = true }, 716 [tegra_clk_sbc3_8] = { .dt_id = TEGRA114_CLK_SBC3, .present = true },
717 [tegra_clk_i2c5] = { .dt_id = TEGRA114_CLK_I2C5, .present = true }, 717 [tegra_clk_i2c5] = { .dt_id = TEGRA114_CLK_I2C5, .present = true },
718 [tegra_clk_dsia] = { .dt_id = TEGRA114_CLK_DSIA, .present = true },
719 [tegra_clk_mipi] = { .dt_id = TEGRA114_CLK_MIPI, .present = true }, 718 [tegra_clk_mipi] = { .dt_id = TEGRA114_CLK_MIPI, .present = true },
720 [tegra_clk_hdmi] = { .dt_id = TEGRA114_CLK_HDMI, .present = true }, 719 [tegra_clk_hdmi] = { .dt_id = TEGRA114_CLK_HDMI, .present = true },
721 [tegra_clk_csi] = { .dt_id = TEGRA114_CLK_CSI, .present = true }, 720 [tegra_clk_csi] = { .dt_id = TEGRA114_CLK_CSI, .present = true },
@@ -739,7 +738,6 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
739 [tegra_clk_dtv] = { .dt_id = TEGRA114_CLK_DTV, .present = true }, 738 [tegra_clk_dtv] = { .dt_id = TEGRA114_CLK_DTV, .present = true },
740 [tegra_clk_ndspeed] = { .dt_id = TEGRA114_CLK_NDSPEED, .present = true }, 739 [tegra_clk_ndspeed] = { .dt_id = TEGRA114_CLK_NDSPEED, .present = true },
741 [tegra_clk_i2cslow] = { .dt_id = TEGRA114_CLK_I2CSLOW, .present = true }, 740 [tegra_clk_i2cslow] = { .dt_id = TEGRA114_CLK_I2CSLOW, .present = true },
742 [tegra_clk_dsib] = { .dt_id = TEGRA114_CLK_DSIB, .present = true },
743 [tegra_clk_tsec] = { .dt_id = TEGRA114_CLK_TSEC, .present = true }, 741 [tegra_clk_tsec] = { .dt_id = TEGRA114_CLK_TSEC, .present = true },
744 [tegra_clk_xusb_host] = { .dt_id = TEGRA114_CLK_XUSB_HOST, .present = true }, 742 [tegra_clk_xusb_host] = { .dt_id = TEGRA114_CLK_XUSB_HOST, .present = true },
745 [tegra_clk_msenc] = { .dt_id = TEGRA114_CLK_MSENC, .present = true }, 743 [tegra_clk_msenc] = { .dt_id = TEGRA114_CLK_MSENC, .present = true },
@@ -1224,6 +1222,14 @@ static __init void tegra114_periph_clk_init(void __iomem *clk_base,
1224 clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock); 1222 clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock);
1225 clks[TEGRA114_CLK_DSIB_MUX] = clk; 1223 clks[TEGRA114_CLK_DSIB_MUX] = clk;
1226 1224
1225 clk = tegra_clk_register_periph_gate("dsia", "dsia_mux", 0, clk_base,
1226 0, 48, periph_clk_enb_refcnt);
1227 clks[TEGRA114_CLK_DSIA] = clk;
1228
1229 clk = tegra_clk_register_periph_gate("dsib", "dsib_mux", 0, clk_base,
1230 0, 82, periph_clk_enb_refcnt);
1231 clks[TEGRA114_CLK_DSIB] = clk;
1232
1227 /* emc mux */ 1233 /* emc mux */
1228 clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm, 1234 clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
1229 ARRAY_SIZE(mux_pllmcp_clkm), 1235 ARRAY_SIZE(mux_pllmcp_clkm),
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index f5f9baca7bb6..9a893f2fe8e9 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2012, 2013, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2012-2014 NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License, 5 * under the terms and conditions of the GNU General Public License,
@@ -28,6 +28,14 @@
28#include "clk.h" 28#include "clk.h"
29#include "clk-id.h" 29#include "clk-id.h"
30 30
31/*
32 * TEGRA124_CAR_BANK_COUNT: the number of peripheral clock register
33 * banks present in the Tegra124/132 CAR IP block. The banks are
34 * identified by single letters, e.g.: L, H, U, V, W, X. See
35 * periph_regs[] in drivers/clk/tegra/clk.c
36 */
37#define TEGRA124_CAR_BANK_COUNT 6
38
31#define CLK_SOURCE_CSITE 0x1d4 39#define CLK_SOURCE_CSITE 0x1d4
32#define CLK_SOURCE_EMC 0x19c 40#define CLK_SOURCE_EMC 0x19c
33 41
@@ -128,7 +136,6 @@ static unsigned long osc_freq;
128static unsigned long pll_ref_freq; 136static unsigned long pll_ref_freq;
129 137
130static DEFINE_SPINLOCK(pll_d_lock); 138static DEFINE_SPINLOCK(pll_d_lock);
131static DEFINE_SPINLOCK(pll_d2_lock);
132static DEFINE_SPINLOCK(pll_e_lock); 139static DEFINE_SPINLOCK(pll_e_lock);
133static DEFINE_SPINLOCK(pll_re_lock); 140static DEFINE_SPINLOCK(pll_re_lock);
134static DEFINE_SPINLOCK(pll_u_lock); 141static DEFINE_SPINLOCK(pll_u_lock);
@@ -145,11 +152,6 @@ static unsigned long tegra124_input_freq[] = {
145 [12] = 260000000, 152 [12] = 260000000,
146}; 153};
147 154
148static const char *mux_plld_out0_plld2_out0[] = {
149 "pll_d_out0", "pll_d2_out0",
150};
151#define mux_plld_out0_plld2_out0_idx NULL
152
153static const char *mux_pllmcp_clkm[] = { 155static const char *mux_pllmcp_clkm[] = {
154 "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3", 156 "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3",
155}; 157};
@@ -783,7 +785,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
783 [tegra_clk_sbc2] = { .dt_id = TEGRA124_CLK_SBC2, .present = true }, 785 [tegra_clk_sbc2] = { .dt_id = TEGRA124_CLK_SBC2, .present = true },
784 [tegra_clk_sbc3] = { .dt_id = TEGRA124_CLK_SBC3, .present = true }, 786 [tegra_clk_sbc3] = { .dt_id = TEGRA124_CLK_SBC3, .present = true },
785 [tegra_clk_i2c5] = { .dt_id = TEGRA124_CLK_I2C5, .present = true }, 787 [tegra_clk_i2c5] = { .dt_id = TEGRA124_CLK_I2C5, .present = true },
786 [tegra_clk_dsia] = { .dt_id = TEGRA124_CLK_DSIA, .present = true },
787 [tegra_clk_mipi] = { .dt_id = TEGRA124_CLK_MIPI, .present = true }, 788 [tegra_clk_mipi] = { .dt_id = TEGRA124_CLK_MIPI, .present = true },
788 [tegra_clk_hdmi] = { .dt_id = TEGRA124_CLK_HDMI, .present = true }, 789 [tegra_clk_hdmi] = { .dt_id = TEGRA124_CLK_HDMI, .present = true },
789 [tegra_clk_csi] = { .dt_id = TEGRA124_CLK_CSI, .present = true }, 790 [tegra_clk_csi] = { .dt_id = TEGRA124_CLK_CSI, .present = true },
@@ -809,7 +810,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
809 [tegra_clk_soc_therm] = { .dt_id = TEGRA124_CLK_SOC_THERM, .present = true }, 810 [tegra_clk_soc_therm] = { .dt_id = TEGRA124_CLK_SOC_THERM, .present = true },
810 [tegra_clk_dtv] = { .dt_id = TEGRA124_CLK_DTV, .present = true }, 811 [tegra_clk_dtv] = { .dt_id = TEGRA124_CLK_DTV, .present = true },
811 [tegra_clk_i2cslow] = { .dt_id = TEGRA124_CLK_I2CSLOW, .present = true }, 812 [tegra_clk_i2cslow] = { .dt_id = TEGRA124_CLK_I2CSLOW, .present = true },
812 [tegra_clk_dsib] = { .dt_id = TEGRA124_CLK_DSIB, .present = true },
813 [tegra_clk_tsec] = { .dt_id = TEGRA124_CLK_TSEC, .present = true }, 813 [tegra_clk_tsec] = { .dt_id = TEGRA124_CLK_TSEC, .present = true },
814 [tegra_clk_xusb_host] = { .dt_id = TEGRA124_CLK_XUSB_HOST, .present = true }, 814 [tegra_clk_xusb_host] = { .dt_id = TEGRA124_CLK_XUSB_HOST, .present = true },
815 [tegra_clk_msenc] = { .dt_id = TEGRA124_CLK_MSENC, .present = true }, 815 [tegra_clk_msenc] = { .dt_id = TEGRA124_CLK_MSENC, .present = true },
@@ -949,8 +949,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
949 [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_1_MUX, .present = true }, 949 [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_1_MUX, .present = true },
950 [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_2_MUX, .present = true }, 950 [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_2_MUX, .present = true },
951 [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true }, 951 [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true },
952 [tegra_clk_dsia_mux] = { .dt_id = TEGRA124_CLK_DSIA_MUX, .present = true },
953 [tegra_clk_dsib_mux] = { .dt_id = TEGRA124_CLK_DSIB_MUX, .present = true },
954}; 952};
955 953
956static struct tegra_devclk devclks[] __initdata = { 954static struct tegra_devclk devclks[] __initdata = {
@@ -1112,17 +1110,17 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base,
1112 1, 2); 1110 1, 2);
1113 clks[TEGRA124_CLK_XUSB_SS_DIV2] = clk; 1111 clks[TEGRA124_CLK_XUSB_SS_DIV2] = clk;
1114 1112
1115 /* dsia mux */ 1113 clk = clk_register_gate(NULL, "plld_dsi", "plld_out0", 0,
1116 clk = clk_register_mux(NULL, "dsia_mux", mux_plld_out0_plld2_out0, 1114 clk_base + PLLD_MISC, 30, 0, &pll_d_lock);
1117 ARRAY_SIZE(mux_plld_out0_plld2_out0), 0, 1115 clks[TEGRA124_CLK_PLLD_DSI] = clk;
1118 clk_base + PLLD_BASE, 25, 1, 0, &pll_d_lock); 1116
1119 clks[TEGRA124_CLK_DSIA_MUX] = clk; 1117 clk = tegra_clk_register_periph_gate("dsia", "plld_dsi", 0, clk_base,
1118 0, 48, periph_clk_enb_refcnt);
1119 clks[TEGRA124_CLK_DSIA] = clk;
1120 1120
1121 /* dsib mux */ 1121 clk = tegra_clk_register_periph_gate("dsib", "plld_dsi", 0, clk_base,
1122 clk = clk_register_mux(NULL, "dsib_mux", mux_plld_out0_plld2_out0, 1122 0, 82, periph_clk_enb_refcnt);
1123 ARRAY_SIZE(mux_plld_out0_plld2_out0), 0, 1123 clks[TEGRA124_CLK_DSIB] = clk;
1124 clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock);
1125 clks[TEGRA124_CLK_DSIB_MUX] = clk;
1126 1124
1127 /* emc mux */ 1125 /* emc mux */
1128 clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm, 1126 clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
@@ -1351,7 +1349,7 @@ static const struct of_device_id pmc_match[] __initconst = {
1351 {}, 1349 {},
1352}; 1350};
1353 1351
1354static struct tegra_clk_init_table init_table[] __initdata = { 1352static struct tegra_clk_init_table common_init_table[] __initdata = {
1355 {TEGRA124_CLK_UARTA, TEGRA124_CLK_PLL_P, 408000000, 0}, 1353 {TEGRA124_CLK_UARTA, TEGRA124_CLK_PLL_P, 408000000, 0},
1356 {TEGRA124_CLK_UARTB, TEGRA124_CLK_PLL_P, 408000000, 0}, 1354 {TEGRA124_CLK_UARTB, TEGRA124_CLK_PLL_P, 408000000, 0},
1357 {TEGRA124_CLK_UARTC, TEGRA124_CLK_PLL_P, 408000000, 0}, 1355 {TEGRA124_CLK_UARTC, TEGRA124_CLK_PLL_P, 408000000, 0},
@@ -1368,6 +1366,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
1368 {TEGRA124_CLK_I2S4, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0}, 1366 {TEGRA124_CLK_I2S4, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0},
1369 {TEGRA124_CLK_VDE, TEGRA124_CLK_PLL_P, 0, 0}, 1367 {TEGRA124_CLK_VDE, TEGRA124_CLK_PLL_P, 0, 0},
1370 {TEGRA124_CLK_HOST1X, TEGRA124_CLK_PLL_P, 136000000, 1}, 1368 {TEGRA124_CLK_HOST1X, TEGRA124_CLK_PLL_P, 136000000, 1},
1369 {TEGRA124_CLK_DSIALP, TEGRA124_CLK_PLL_P, 68000000, 0},
1370 {TEGRA124_CLK_DSIBLP, TEGRA124_CLK_PLL_P, 68000000, 0},
1371 {TEGRA124_CLK_SCLK, TEGRA124_CLK_PLL_P_OUT2, 102000000, 1}, 1371 {TEGRA124_CLK_SCLK, TEGRA124_CLK_PLL_P_OUT2, 102000000, 1},
1372 {TEGRA124_CLK_DFLL_SOC, TEGRA124_CLK_PLL_P, 51000000, 1}, 1372 {TEGRA124_CLK_DFLL_SOC, TEGRA124_CLK_PLL_P, 51000000, 1},
1373 {TEGRA124_CLK_DFLL_REF, TEGRA124_CLK_PLL_P, 51000000, 1}, 1373 {TEGRA124_CLK_DFLL_REF, TEGRA124_CLK_PLL_P, 51000000, 1},
@@ -1385,27 +1385,73 @@ static struct tegra_clk_init_table init_table[] __initdata = {
1385 {TEGRA124_CLK_SATA, TEGRA124_CLK_PLL_P, 104000000, 0}, 1385 {TEGRA124_CLK_SATA, TEGRA124_CLK_PLL_P, 104000000, 0},
1386 {TEGRA124_CLK_SATA_OOB, TEGRA124_CLK_PLL_P, 204000000, 0}, 1386 {TEGRA124_CLK_SATA_OOB, TEGRA124_CLK_PLL_P, 204000000, 0},
1387 {TEGRA124_CLK_EMC, TEGRA124_CLK_CLK_MAX, 0, 1}, 1387 {TEGRA124_CLK_EMC, TEGRA124_CLK_CLK_MAX, 0, 1},
1388 {TEGRA124_CLK_CCLK_G, TEGRA124_CLK_CLK_MAX, 0, 1},
1389 {TEGRA124_CLK_MSELECT, TEGRA124_CLK_CLK_MAX, 0, 1}, 1388 {TEGRA124_CLK_MSELECT, TEGRA124_CLK_CLK_MAX, 0, 1},
1390 {TEGRA124_CLK_CSITE, TEGRA124_CLK_CLK_MAX, 0, 1}, 1389 {TEGRA124_CLK_CSITE, TEGRA124_CLK_CLK_MAX, 0, 1},
1391 {TEGRA124_CLK_TSENSOR, TEGRA124_CLK_CLK_M, 400000, 0}, 1390 {TEGRA124_CLK_TSENSOR, TEGRA124_CLK_CLK_M, 400000, 0},
1391 /* This MUST be the last entry. */
1392 {TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
1393};
1394
1395static struct tegra_clk_init_table tegra124_init_table[] __initdata = {
1392 {TEGRA124_CLK_SOC_THERM, TEGRA124_CLK_PLL_P, 51000000, 0}, 1396 {TEGRA124_CLK_SOC_THERM, TEGRA124_CLK_PLL_P, 51000000, 0},
1397 {TEGRA124_CLK_CCLK_G, TEGRA124_CLK_CLK_MAX, 0, 1},
1398 /* This MUST be the last entry. */
1399 {TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
1400};
1401
1402/* Tegra132 requires the SOC_THERM clock to remain active */
1403static struct tegra_clk_init_table tegra132_init_table[] __initdata = {
1404 {TEGRA124_CLK_SOC_THERM, TEGRA124_CLK_PLL_P, 51000000, 1},
1393 /* This MUST be the last entry. */ 1405 /* This MUST be the last entry. */
1394 {TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0}, 1406 {TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
1395}; 1407};
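/*
 * A note on the split (an inference, not text from the patch): the
 * "This MUST be the last entry" sentinel rows work because the table
 * walker stops at the first entry whose clk_id reaches the clk_max
 * argument, so common entries live in common_init_table and each SoC
 * appends its own table with a second tegra_init_from_table() call.
 */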
1396 1408
1409/**
1410 * tegra124_clock_apply_init_table - initialize clocks on Tegra124 SoCs
1411 *
1412 * Program initial clock rates and enable or disable clocks needed
1413 * by the rest of the kernel, for Tegra124 SoCs. It is hooked up by
1414 * assigning its address to tegra_clk_apply_init_table, which is then
1415 * invoked from an arch_initcall. No return value.
1416 */
1397static void __init tegra124_clock_apply_init_table(void) 1417static void __init tegra124_clock_apply_init_table(void)
1398{ 1418{
1399 tegra_init_from_table(init_table, clks, TEGRA124_CLK_CLK_MAX); 1419 tegra_init_from_table(common_init_table, clks, TEGRA124_CLK_CLK_MAX);
1420 tegra_init_from_table(tegra124_init_table, clks, TEGRA124_CLK_CLK_MAX);
1400} 1421}
1401 1422
1402static void __init tegra124_clock_init(struct device_node *np) 1423/**
1424 * tegra132_clock_apply_init_table - initialize clocks on Tegra132 SoCs
1425 *
1426 * Program initial clock rates and enable or disable clocks needed
1427 * by the rest of the kernel, for Tegra132 SoCs. It is hooked up by
1428 * assigning its address to tegra_clk_apply_init_table, which is then
1429 * invoked from an arch_initcall. No return value.
1430 */
1431static void __init tegra132_clock_apply_init_table(void)
1432{
1433 tegra_init_from_table(common_init_table, clks, TEGRA124_CLK_CLK_MAX);
1434 tegra_init_from_table(tegra132_init_table, clks, TEGRA124_CLK_CLK_MAX);
1435}
1436
1437/**
1438 * tegra124_132_clock_init_pre - clock initialization preamble for T124/T132
1439 * @np: struct device_node * of the DT node for the SoC CAR IP block
1440 *
1441 * Register most of the clocks controlled by the CAR IP block, along
1442 * with a few clocks controlled by the PMC IP block. Everything in
1443 * this function should be common to Tegra124 and Tegra132. XXX The
1444 * PMC clock initialization should probably be moved to PMC-specific
1445 * driver code. No return value.
1446 */
1447static void __init tegra124_132_clock_init_pre(struct device_node *np)
1403{ 1448{
1404 struct device_node *node; 1449 struct device_node *node;
1450 u32 plld_base;
1405 1451
1406 clk_base = of_iomap(np, 0); 1452 clk_base = of_iomap(np, 0);
1407 if (!clk_base) { 1453 if (!clk_base) {
1408 pr_err("ioremap tegra124 CAR failed\n"); 1454 pr_err("ioremap tegra124/tegra132 CAR failed\n");
1409 return; 1455 return;
1410 } 1456 }
1411 1457
@@ -1423,7 +1469,8 @@ static void __init tegra124_clock_init(struct device_node *np)
1423 return; 1469 return;
1424 } 1470 }
1425 1471
1426 clks = tegra_clk_init(clk_base, TEGRA124_CLK_CLK_MAX, 6); 1472 clks = tegra_clk_init(clk_base, TEGRA124_CLK_CLK_MAX,
1473 TEGRA124_CAR_BANK_COUNT);
1427 if (!clks) 1474 if (!clks)
1428 return; 1475 return;
1429 1476
@@ -1437,13 +1484,76 @@ static void __init tegra124_clock_init(struct device_node *np)
1437 tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks, &pll_a_params); 1484 tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks, &pll_a_params);
1438 tegra_pmc_clk_init(pmc_base, tegra124_clks); 1485 tegra_pmc_clk_init(pmc_base, tegra124_clks);
1439 1486
1487 /* For Tegra124 & Tegra132, PLLD is the only source for DSIA & DSIB */
1488 plld_base = clk_readl(clk_base + PLLD_BASE);
1489 plld_base &= ~BIT(25);
1490 clk_writel(plld_base, clk_base + PLLD_BASE);
1491}
1492
1493/**
1494 * tegra124_132_clock_init_post - clock initialization postamble for T124/T132
1495 * @np: struct device_node * of the DT node for the SoC CAR IP block
1496 *
1497 * Register the remaining SoC clocks, along with a few clocks controlled
1498 * by the PMC IP block. Everything in this function should be common to Tegra124
1499 * and Tegra132. This function must be called after
1500 * tegra124_132_clock_init_pre(), otherwise clk_base and pmc_base will
1501 * not be set. No return value.
1502 */
1503static void __init tegra124_132_clock_init_post(struct device_node *np)
1504{
1440 tegra_super_clk_gen4_init(clk_base, pmc_base, tegra124_clks, 1505 tegra_super_clk_gen4_init(clk_base, pmc_base, tegra124_clks,
1441 &pll_x_params); 1506 &pll_x_params);
1442 tegra_add_of_provider(np); 1507 tegra_add_of_provider(np);
1443 tegra_register_devclks(devclks, ARRAY_SIZE(devclks)); 1508 tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
1444 1509
1510 tegra_cpu_car_ops = &tegra124_cpu_car_ops;
1511}
1512
1513/**
1514 * tegra124_clock_init - Tegra124-specific clock initialization
1515 * @np: struct device_node * of the DT node for the SoC CAR IP block
1516 *
1517 * Register most SoC clocks for the Tegra124 system-on-chip. Most of
1518 * this code is shared between the Tegra124 and Tegra132 SoCs,
1519 * although some of the initial clock settings and CPU clocks differ.
1520 * Intended to be called by the OF init code when a DT node with the
1521 * "nvidia,tegra124-car" string is encountered, and declared with
1522 * CLK_OF_DECLARE. No return value.
1523 */
1524static void __init tegra124_clock_init(struct device_node *np)
1525{
1526 tegra124_132_clock_init_pre(np);
1445 tegra_clk_apply_init_table = tegra124_clock_apply_init_table; 1527 tegra_clk_apply_init_table = tegra124_clock_apply_init_table;
1528 tegra124_132_clock_init_post(np);
1529}
1446 1530
1447 tegra_cpu_car_ops = &tegra124_cpu_car_ops; 1531/**
1532 * tegra132_clock_init - Tegra132-specific clock initialization
1533 * @np: struct device_node * of the DT node for the SoC CAR IP block
1534 *
1535 * Register most SoC clocks for the Tegra132 system-on-chip. Most of
1536 * this code is shared between the Tegra124 and Tegra132 SoCs,
1537 * although some of the initial clock settings and CPU clocks differ.
1538 * Intended to be called by the OF init code when a DT node with the
1539 * "nvidia,tegra132-car" string is encountered, and declared with
1540 * CLK_OF_DECLARE. No return value.
1541 */
1542static void __init tegra132_clock_init(struct device_node *np)
1543{
1544 tegra124_132_clock_init_pre(np);
1545
1546 /*
1547 * On Tegra132, these clocks are controlled by the
1548 * CLUSTER_clocks IP block, located in the CPU complex
1549 */
1550 tegra124_clks[tegra_clk_cclk_g].present = false;
1551 tegra124_clks[tegra_clk_cclk_lp].present = false;
1552 tegra124_clks[tegra_clk_pll_x].present = false;
1553 tegra124_clks[tegra_clk_pll_x_out0].present = false;
1554
1555 tegra_clk_apply_init_table = tegra132_clock_apply_init_table;
1556 tegra124_132_clock_init_post(np);
1448} 1557}
1449CLK_OF_DECLARE(tegra124, "nvidia,tegra124-car", tegra124_clock_init); 1558CLK_OF_DECLARE(tegra124, "nvidia,tegra124-car", tegra124_clock_init);
1559CLK_OF_DECLARE(tegra132, "nvidia,tegra132-car", tegra132_clock_init);
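Both compatibles now run the shared pre/post helpers and differ only in
their init tables and in the clocks masked out for the Tegra132
CLUSTER_clocks block. As a reminder of the mechanics (names below are
hypothetical), CLK_OF_DECLARE maps a compatible string to a callback that
of_clk_init() invokes when it walks the device tree:

static void __init my_car_clock_init(struct device_node *np)
{
	/* ioremap registers from np, register clocks, add the OF provider */
}
CLK_OF_DECLARE(my_car, "vendor,my-soc-car", my_car_clock_init);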
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index 97dc8595c3cd..9ddb7547cb43 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -302,10 +302,13 @@ struct clk ** __init tegra_lookup_dt_id(int clk_id,
302 302
303tegra_clk_apply_init_table_func tegra_clk_apply_init_table; 303tegra_clk_apply_init_table_func tegra_clk_apply_init_table;
304 304
305void __init tegra_clocks_apply_init_table(void) 305static int __init tegra_clocks_apply_init_table(void)
306{ 306{
307 if (!tegra_clk_apply_init_table) 307 if (!tegra_clk_apply_init_table)
308 return; 308 return 0;
309 309
310 tegra_clk_apply_init_table(); 310 tegra_clk_apply_init_table();
311
312 return 0;
311} 313}
314arch_initcall(tegra_clocks_apply_init_table);
diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile
index ed4d0aaf8916..105ffd0f5e79 100644
--- a/drivers/clk/ti/Makefile
+++ b/drivers/clk/ti/Makefile
@@ -1,13 +1,17 @@
1ifneq ($(CONFIG_OF),)
2obj-y += clk.o autoidle.o clockdomain.o 1obj-y += clk.o autoidle.o clockdomain.o
3clk-common = dpll.o composite.o divider.o gate.o \ 2clk-common = dpll.o composite.o divider.o gate.o \
4 fixed-factor.o mux.o apll.o 3 fixed-factor.o mux.o apll.o
5obj-$(CONFIG_SOC_AM33XX) += $(clk-common) clk-33xx.o 4obj-$(CONFIG_SOC_AM33XX) += $(clk-common) clk-33xx.o
5obj-$(CONFIG_SOC_TI81XX) += $(clk-common) fapll.o clk-816x.o
6obj-$(CONFIG_ARCH_OMAP2) += $(clk-common) interface.o clk-2xxx.o 6obj-$(CONFIG_ARCH_OMAP2) += $(clk-common) interface.o clk-2xxx.o
7obj-$(CONFIG_ARCH_OMAP3) += $(clk-common) interface.o clk-3xxx.o 7obj-$(CONFIG_ARCH_OMAP3) += $(clk-common) interface.o \
8 clk-3xxx.o
8obj-$(CONFIG_ARCH_OMAP4) += $(clk-common) clk-44xx.o 9obj-$(CONFIG_ARCH_OMAP4) += $(clk-common) clk-44xx.o
9obj-$(CONFIG_SOC_OMAP5) += $(clk-common) clk-54xx.o 10obj-$(CONFIG_SOC_OMAP5) += $(clk-common) clk-54xx.o
10obj-$(CONFIG_SOC_DRA7XX) += $(clk-common) clk-7xx.o \ 11obj-$(CONFIG_SOC_DRA7XX) += $(clk-common) clk-7xx.o \
11 clk-dra7-atl.o 12 clk-dra7-atl.o
12obj-$(CONFIG_SOC_AM43XX) += $(clk-common) clk-43xx.o 13obj-$(CONFIG_SOC_AM43XX) += $(clk-common) clk-43xx.o
14
15ifdef CONFIG_ATAGS
16obj-$(CONFIG_ARCH_OMAP3) += clk-3xxx-legacy.o
13endif 17endif
diff --git a/drivers/clk/ti/clk-3xxx-legacy.c b/drivers/clk/ti/clk-3xxx-legacy.c
new file mode 100644
index 000000000000..e0732a4c8f26
--- /dev/null
+++ b/drivers/clk/ti/clk-3xxx-legacy.c
@@ -0,0 +1,4653 @@
1/*
2 * OMAP3 Legacy clock data
3 *
4 * Copyright (C) 2014 Texas Instruments, Inc
5 * Tero Kristo (t-kristo@ti.com)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation version 2.
10 *
11 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
12 * kind, whether express or implied; without even the implied warranty
13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/kernel.h>
18#include <linux/clk-provider.h>
19#include <linux/clk/ti.h>
20
21#include "clock.h"
22
23static struct ti_clk_fixed virt_12m_ck_data = {
24 .frequency = 12000000,
25};
26
27static struct ti_clk virt_12m_ck = {
28 .name = "virt_12m_ck",
29 .type = TI_CLK_FIXED,
30 .data = &virt_12m_ck_data,
31};
32
33static struct ti_clk_fixed virt_13m_ck_data = {
34 .frequency = 13000000,
35};
36
37static struct ti_clk virt_13m_ck = {
38 .name = "virt_13m_ck",
39 .type = TI_CLK_FIXED,
40 .data = &virt_13m_ck_data,
41};
42
43static struct ti_clk_fixed virt_19200000_ck_data = {
44 .frequency = 19200000,
45};
46
47static struct ti_clk virt_19200000_ck = {
48 .name = "virt_19200000_ck",
49 .type = TI_CLK_FIXED,
50 .data = &virt_19200000_ck_data,
51};
52
53static struct ti_clk_fixed virt_26000000_ck_data = {
54 .frequency = 26000000,
55};
56
57static struct ti_clk virt_26000000_ck = {
58 .name = "virt_26000000_ck",
59 .type = TI_CLK_FIXED,
60 .data = &virt_26000000_ck_data,
61};
62
63static struct ti_clk_fixed virt_38_4m_ck_data = {
64 .frequency = 38400000,
65};
66
67static struct ti_clk virt_38_4m_ck = {
68 .name = "virt_38_4m_ck",
69 .type = TI_CLK_FIXED,
70 .data = &virt_38_4m_ck_data,
71};
72
73static struct ti_clk_fixed virt_16_8m_ck_data = {
74 .frequency = 16800000,
75};
76
77static struct ti_clk virt_16_8m_ck = {
78 .name = "virt_16_8m_ck",
79 .type = TI_CLK_FIXED,
80 .data = &virt_16_8m_ck_data,
81};
82
83static const char *osc_sys_ck_parents[] = {
84 "virt_12m_ck",
85 "virt_13m_ck",
86 "virt_19200000_ck",
87 "virt_26000000_ck",
88 "virt_38_4m_ck",
89 "virt_16_8m_ck",
90};
91
92static struct ti_clk_mux osc_sys_ck_data = {
93 .num_parents = ARRAY_SIZE(osc_sys_ck_parents),
94 .reg = 0xd40,
95 .module = TI_CLKM_PRM,
96 .parents = osc_sys_ck_parents,
97};
98
99static struct ti_clk osc_sys_ck = {
100 .name = "osc_sys_ck",
101 .type = TI_CLK_MUX,
102 .data = &osc_sys_ck_data,
103};
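/*
 * A reading of the data above (not comment text from the patch): the
 * virt_*_ck fixed clocks model the possible board oscillator rates,
 * and osc_sys_ck muxes between them via the PRM register at offset
 * 0xd40, so exactly one virtual parent is meaningful on a given board.
 */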
104
105static struct ti_clk_divider sys_ck_data = {
106 .parent = "osc_sys_ck",
107 .bit_shift = 6,
108 .max_div = 3,
109 .reg = 0x1270,
110 .module = TI_CLKM_PRM,
111 .flags = CLKF_INDEX_STARTS_AT_ONE,
112};
113
114static struct ti_clk sys_ck = {
115 .name = "sys_ck",
116 .type = TI_CLK_DIVIDER,
117 .data = &sys_ck_data,
118};
119
120static const char *dpll3_ck_parents[] = {
121 "sys_ck",
122 "sys_ck",
123};
124
125static struct ti_clk_dpll dpll3_ck_data = {
126 .num_parents = ARRAY_SIZE(dpll3_ck_parents),
127 .control_reg = 0xd00,
128 .idlest_reg = 0xd20,
129 .mult_div1_reg = 0xd40,
130 .autoidle_reg = 0xd30,
131 .module = TI_CLKM_CM,
132 .parents = dpll3_ck_parents,
133 .flags = CLKF_CORE,
134 .freqsel_mask = 0xf0,
135 .div1_mask = 0x7f00,
136 .idlest_mask = 0x1,
137 .auto_recal_bit = 0x3,
138 .max_divider = 0x80,
139 .min_divider = 0x1,
140 .recal_en_bit = 0x5,
141 .max_multiplier = 0x7ff,
142 .enable_mask = 0x7,
143 .mult_mask = 0x7ff0000,
144 .recal_st_bit = 0x5,
145 .autoidle_mask = 0x7,
146};
147
148static struct ti_clk dpll3_ck = {
149 .name = "dpll3_ck",
150 .clkdm_name = "dpll3_clkdm",
151 .type = TI_CLK_DPLL,
152 .data = &dpll3_ck_data,
153};
154
155static struct ti_clk_divider dpll3_m2_ck_data = {
156 .parent = "dpll3_ck",
157 .bit_shift = 27,
158 .max_div = 31,
159 .reg = 0xd40,
160 .module = TI_CLKM_CM,
161 .flags = CLKF_INDEX_STARTS_AT_ONE,
162};
163
164static struct ti_clk dpll3_m2_ck = {
165 .name = "dpll3_m2_ck",
166 .type = TI_CLK_DIVIDER,
167 .data = &dpll3_m2_ck_data,
168};
169
170static struct ti_clk_fixed_factor core_ck_data = {
171 .parent = "dpll3_m2_ck",
172 .div = 1,
173 .mult = 1,
174};
175
176static struct ti_clk core_ck = {
177 .name = "core_ck",
178 .type = TI_CLK_FIXED_FACTOR,
179 .data = &core_ck_data,
180};
181
182static struct ti_clk_divider l3_ick_data = {
183 .parent = "core_ck",
184 .max_div = 3,
185 .reg = 0xa40,
186 .module = TI_CLKM_CM,
187 .flags = CLKF_INDEX_STARTS_AT_ONE,
188};
189
190static struct ti_clk l3_ick = {
191 .name = "l3_ick",
192 .type = TI_CLK_DIVIDER,
193 .data = &l3_ick_data,
194};
195
196static struct ti_clk_fixed_factor security_l3_ick_data = {
197 .parent = "l3_ick",
198 .div = 1,
199 .mult = 1,
200};
201
202static struct ti_clk security_l3_ick = {
203 .name = "security_l3_ick",
204 .type = TI_CLK_FIXED_FACTOR,
205 .data = &security_l3_ick_data,
206};
207
208static struct ti_clk_fixed_factor wkup_l4_ick_data = {
209 .parent = "sys_ck",
210 .div = 1,
211 .mult = 1,
212};
213
214static struct ti_clk wkup_l4_ick = {
215 .name = "wkup_l4_ick",
216 .type = TI_CLK_FIXED_FACTOR,
217 .data = &wkup_l4_ick_data,
218};
219
220static struct ti_clk_gate usim_ick_data = {
221 .parent = "wkup_l4_ick",
222 .bit_shift = 9,
223 .reg = 0xc10,
224 .module = TI_CLKM_CM,
225 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
226};
227
228static struct ti_clk usim_ick = {
229 .name = "usim_ick",
230 .clkdm_name = "wkup_clkdm",
231 .type = TI_CLK_GATE,
232 .data = &usim_ick_data,
233};
234
235static struct ti_clk_gate dss2_alwon_fck_data = {
236 .parent = "sys_ck",
237 .bit_shift = 1,
238 .reg = 0xe00,
239 .module = TI_CLKM_CM,
240};
241
242static struct ti_clk dss2_alwon_fck = {
243 .name = "dss2_alwon_fck",
244 .clkdm_name = "dss_clkdm",
245 .type = TI_CLK_GATE,
246 .data = &dss2_alwon_fck_data,
247};
248
249static struct ti_clk_divider l4_ick_data = {
250 .parent = "l3_ick",
251 .bit_shift = 2,
252 .max_div = 3,
253 .reg = 0xa40,
254 .module = TI_CLKM_CM,
255 .flags = CLKF_INDEX_STARTS_AT_ONE,
256};
257
258static struct ti_clk l4_ick = {
259 .name = "l4_ick",
260 .type = TI_CLK_DIVIDER,
261 .data = &l4_ick_data,
262};
263
264static struct ti_clk_fixed_factor core_l4_ick_data = {
265 .parent = "l4_ick",
266 .div = 1,
267 .mult = 1,
268};
269
270static struct ti_clk core_l4_ick = {
271 .name = "core_l4_ick",
272 .type = TI_CLK_FIXED_FACTOR,
273 .data = &core_l4_ick_data,
274};
275
276static struct ti_clk_gate mmchs2_ick_data = {
277 .parent = "core_l4_ick",
278 .bit_shift = 25,
279 .reg = 0xa10,
280 .module = TI_CLKM_CM,
281 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
282};
283
284static struct ti_clk mmchs2_ick = {
285 .name = "mmchs2_ick",
286 .clkdm_name = "core_l4_clkdm",
287 .type = TI_CLK_GATE,
288 .data = &mmchs2_ick_data,
289};
290
291static const char *dpll4_ck_parents[] = {
292 "sys_ck",
293 "sys_ck",
294};
295
296static struct ti_clk_dpll dpll4_ck_data = {
297 .num_parents = ARRAY_SIZE(dpll4_ck_parents),
298 .control_reg = 0xd00,
299 .idlest_reg = 0xd20,
300 .mult_div1_reg = 0xd44,
301 .autoidle_reg = 0xd30,
302 .module = TI_CLKM_CM,
303 .parents = dpll4_ck_parents,
304 .flags = CLKF_PER,
305 .freqsel_mask = 0xf00000,
306 .modes = 0x82,
307 .div1_mask = 0x7f,
308 .idlest_mask = 0x2,
309 .auto_recal_bit = 0x13,
310 .max_divider = 0x80,
311 .min_divider = 0x1,
312 .recal_en_bit = 0x6,
313 .max_multiplier = 0x7ff,
314 .enable_mask = 0x70000,
315 .mult_mask = 0x7ff00,
316 .recal_st_bit = 0x6,
317 .autoidle_mask = 0x38,
318};
319
320static struct ti_clk dpll4_ck = {
321 .name = "dpll4_ck",
322 .clkdm_name = "dpll4_clkdm",
323 .type = TI_CLK_DPLL,
324 .data = &dpll4_ck_data,
325};
326
327static struct ti_clk_divider dpll4_m2_ck_data = {
328 .parent = "dpll4_ck",
329 .max_div = 63,
330 .reg = 0xd48,
331 .module = TI_CLKM_CM,
332 .flags = CLKF_INDEX_STARTS_AT_ONE,
333};
334
335static struct ti_clk dpll4_m2_ck = {
336 .name = "dpll4_m2_ck",
337 .type = TI_CLK_DIVIDER,
338 .data = &dpll4_m2_ck_data,
339};
340
341static struct ti_clk_fixed_factor dpll4_m2x2_mul_ck_data = {
342 .parent = "dpll4_m2_ck",
343 .div = 1,
344 .mult = 2,
345};
346
347static struct ti_clk dpll4_m2x2_mul_ck = {
348 .name = "dpll4_m2x2_mul_ck",
349 .type = TI_CLK_FIXED_FACTOR,
350 .data = &dpll4_m2x2_mul_ck_data,
351};
352
353static struct ti_clk_gate dpll4_m2x2_ck_data = {
354 .parent = "dpll4_m2x2_mul_ck",
355 .bit_shift = 0x1b,
356 .reg = 0xd00,
357 .module = TI_CLKM_CM,
358 .flags = CLKF_SET_BIT_TO_DISABLE,
359};
360
361static struct ti_clk dpll4_m2x2_ck = {
362 .name = "dpll4_m2x2_ck",
363 .type = TI_CLK_GATE,
364 .data = &dpll4_m2x2_ck_data,
365};
366
static struct ti_clk_fixed_factor omap_96m_alwon_fck_data = {
	.parent = "dpll4_m2x2_ck",
	.div = 1,
	.mult = 1,
};

static struct ti_clk omap_96m_alwon_fck = {
	.name = "omap_96m_alwon_fck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &omap_96m_alwon_fck_data,
};

static struct ti_clk_fixed_factor cm_96m_fck_data = {
	.parent = "omap_96m_alwon_fck",
	.div = 1,
	.mult = 1,
};

static struct ti_clk cm_96m_fck = {
	.name = "cm_96m_fck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &cm_96m_fck_data,
};

static const char *omap_96m_fck_parents[] = {
	"cm_96m_fck",
	"sys_ck",
};

static struct ti_clk_mux omap_96m_fck_data = {
	.bit_shift = 6,
	.num_parents = ARRAY_SIZE(omap_96m_fck_parents),
	.reg = 0xd40,
	.module = TI_CLKM_CM,
	.parents = omap_96m_fck_parents,
};

static struct ti_clk omap_96m_fck = {
	.name = "omap_96m_fck",
	.type = TI_CLK_MUX,
	.data = &omap_96m_fck_data,
};

static struct ti_clk_fixed_factor core_96m_fck_data = {
	.parent = "omap_96m_fck",
	.div = 1,
	.mult = 1,
};

static struct ti_clk core_96m_fck = {
	.name = "core_96m_fck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &core_96m_fck_data,
};

static struct ti_clk_gate mspro_fck_data = {
	.parent = "core_96m_fck",
	.bit_shift = 23,
	.reg = 0xa00,
	.module = TI_CLKM_CM,
	.flags = CLKF_WAIT,
};

static struct ti_clk mspro_fck = {
	.name = "mspro_fck",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &mspro_fck_data,
};

static struct ti_clk_gate dss_ick_3430es2_data = {
	.parent = "l4_ick",
	.bit_shift = 0,
	.reg = 0xe10,
	.module = TI_CLKM_CM,
	.flags = CLKF_DSS | CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk dss_ick_3430es2 = {
	.name = "dss_ick",
	.clkdm_name = "dss_clkdm",
	.type = TI_CLK_GATE,
	.data = &dss_ick_3430es2_data,
};

static struct ti_clk_gate uart4_ick_am35xx_data = {
	.parent = "core_l4_ick",
	.bit_shift = 23,
	.reg = 0xa10,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk uart4_ick_am35xx = {
	.name = "uart4_ick_am35xx",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &uart4_ick_am35xx_data,
};

static struct ti_clk_fixed_factor security_l4_ick2_data = {
	.parent = "l4_ick",
	.div = 1,
	.mult = 1,
};

static struct ti_clk security_l4_ick2 = {
	.name = "security_l4_ick2",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &security_l4_ick2_data,
};

static struct ti_clk_gate aes1_ick_data = {
	.parent = "security_l4_ick2",
	.bit_shift = 3,
	.reg = 0xa14,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk aes1_ick = {
	.name = "aes1_ick",
	.type = TI_CLK_GATE,
	.data = &aes1_ick_data,
};

static const char *dpll5_ck_parents[] = {
	"sys_ck",
	"sys_ck",
};

static struct ti_clk_dpll dpll5_ck_data = {
	.num_parents = ARRAY_SIZE(dpll5_ck_parents),
	.control_reg = 0xd04,
	.idlest_reg = 0xd24,
	.mult_div1_reg = 0xd4c,
	.autoidle_reg = 0xd34,
	.module = TI_CLKM_CM,
	.parents = dpll5_ck_parents,
	.freqsel_mask = 0xf0,
	.modes = 0x82,
	.div1_mask = 0x7f,
	.idlest_mask = 0x1,
	.auto_recal_bit = 0x3,
	.max_divider = 0x80,
	.min_divider = 0x1,
	.recal_en_bit = 0x19,
	.max_multiplier = 0x7ff,
	.enable_mask = 0x7,
	.mult_mask = 0x7ff00,
	.recal_st_bit = 0x19,
	.autoidle_mask = 0x7,
};

static struct ti_clk dpll5_ck = {
	.name = "dpll5_ck",
	.clkdm_name = "dpll5_clkdm",
	.type = TI_CLK_DPLL,
	.data = &dpll5_ck_data,
};

static struct ti_clk_divider dpll5_m2_ck_data = {
	.parent = "dpll5_ck",
	.max_div = 31,
	.reg = 0xd50,
	.module = TI_CLKM_CM,
	.flags = CLKF_INDEX_STARTS_AT_ONE,
};

static struct ti_clk dpll5_m2_ck = {
	.name = "dpll5_m2_ck",
	.type = TI_CLK_DIVIDER,
	.data = &dpll5_m2_ck_data,
};

static struct ti_clk_gate usbhost_120m_fck_data = {
	.parent = "dpll5_m2_ck",
	.bit_shift = 1,
	.reg = 0x1400,
	.module = TI_CLKM_CM,
};

static struct ti_clk usbhost_120m_fck = {
	.name = "usbhost_120m_fck",
	.clkdm_name = "usbhost_clkdm",
	.type = TI_CLK_GATE,
	.data = &usbhost_120m_fck_data,
};

static struct ti_clk_fixed_factor cm_96m_d2_fck_data = {
	.parent = "cm_96m_fck",
	.div = 2,
	.mult = 1,
};

static struct ti_clk cm_96m_d2_fck = {
	.name = "cm_96m_d2_fck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &cm_96m_d2_fck_data,
};

static struct ti_clk_fixed sys_altclk_data = {
	.frequency = 0x0,
};

static struct ti_clk sys_altclk = {
	.name = "sys_altclk",
	.type = TI_CLK_FIXED,
	.data = &sys_altclk_data,
};

static const char *omap_48m_fck_parents[] = {
	"cm_96m_d2_fck",
	"sys_altclk",
};

static struct ti_clk_mux omap_48m_fck_data = {
	.bit_shift = 3,
	.num_parents = ARRAY_SIZE(omap_48m_fck_parents),
	.reg = 0xd40,
	.module = TI_CLKM_CM,
	.parents = omap_48m_fck_parents,
};

static struct ti_clk omap_48m_fck = {
	.name = "omap_48m_fck",
	.type = TI_CLK_MUX,
	.data = &omap_48m_fck_data,
};

static struct ti_clk_fixed_factor core_48m_fck_data = {
	.parent = "omap_48m_fck",
	.div = 1,
	.mult = 1,
};

static struct ti_clk core_48m_fck = {
	.name = "core_48m_fck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &core_48m_fck_data,
};

static struct ti_clk_fixed mcbsp_clks_data = {
	.frequency = 0x0,
};

static struct ti_clk mcbsp_clks = {
	.name = "mcbsp_clks",
	.type = TI_CLK_FIXED,
	.data = &mcbsp_clks_data,
};

static struct ti_clk_gate mcbsp2_gate_fck_data = {
	.parent = "mcbsp_clks",
	.bit_shift = 0,
	.reg = 0x1000,
	.module = TI_CLKM_CM,
};

static struct ti_clk_fixed_factor per_96m_fck_data = {
	.parent = "omap_96m_alwon_fck",
	.div = 1,
	.mult = 1,
};

static struct ti_clk per_96m_fck = {
	.name = "per_96m_fck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &per_96m_fck_data,
};

static const char *mcbsp2_mux_fck_parents[] = {
	"per_96m_fck",
	"mcbsp_clks",
};

static struct ti_clk_mux mcbsp2_mux_fck_data = {
	.bit_shift = 6,
	.num_parents = ARRAY_SIZE(mcbsp2_mux_fck_parents),
	.reg = 0x274,
	.module = TI_CLKM_SCRM,
	.parents = mcbsp2_mux_fck_parents,
};

static struct ti_clk_composite mcbsp2_fck_data = {
	.mux = &mcbsp2_mux_fck_data,
	.gate = &mcbsp2_gate_fck_data,
};

static struct ti_clk mcbsp2_fck = {
	.name = "mcbsp2_fck",
	.type = TI_CLK_COMPOSITE,
	.data = &mcbsp2_fck_data,
};

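/*
 * TI_CLK_COMPOSITE entries such as mcbsp2_fck bundle component
 * descriptors (.mux, .gate and, where present, .divider) into a single
 * clock; the registration code is expected to stitch them together in
 * mux -> divider -> gate order, so only the composite itself carries a
 * clock name.
 */
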
static struct ti_clk_fixed_factor dpll3_m2x2_ck_data = {
	.parent = "dpll3_m2_ck",
	.div = 1,
	.mult = 2,
};

static struct ti_clk dpll3_m2x2_ck = {
	.name = "dpll3_m2x2_ck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &dpll3_m2x2_ck_data,
};

static struct ti_clk_fixed_factor corex2_fck_data = {
	.parent = "dpll3_m2x2_ck",
	.div = 1,
	.mult = 1,
};

static struct ti_clk corex2_fck = {
	.name = "corex2_fck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &corex2_fck_data,
};

static struct ti_clk_gate ssi_ssr_gate_fck_3430es1_data = {
	.parent = "corex2_fck",
	.bit_shift = 0,
	.reg = 0xa00,
	.module = TI_CLKM_CM,
	.flags = CLKF_NO_WAIT,
};

static int ssi_ssr_div_fck_3430es1_divs[] = {
	0,
	1,
	2,
	3,
	4,
	0,
	6,
	0,
	8,
};

static struct ti_clk_divider ssi_ssr_div_fck_3430es1_data = {
	.num_dividers = ARRAY_SIZE(ssi_ssr_div_fck_3430es1_divs),
	.parent = "corex2_fck",
	.bit_shift = 8,
	.dividers = ssi_ssr_div_fck_3430es1_divs,
	.reg = 0xa40,
	.module = TI_CLKM_CM,
};

static struct ti_clk_composite ssi_ssr_fck_3430es1_data = {
	.gate = &ssi_ssr_gate_fck_3430es1_data,
	.divider = &ssi_ssr_div_fck_3430es1_data,
};

static struct ti_clk ssi_ssr_fck_3430es1 = {
	.name = "ssi_ssr_fck",
	.type = TI_CLK_COMPOSITE,
	.data = &ssi_ssr_fck_3430es1_data,
};

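/*
 * Divider tables like ssi_ssr_div_fck_3430es1_divs are indexed by the
 * register field value; the zero entries presumably mark field values
 * that are invalid on this SoC, so only /1, /2, /3, /4, /6 and /8 are
 * selectable here.
 */
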
static struct ti_clk_fixed_factor ssi_sst_fck_3430es1_data = {
	.parent = "ssi_ssr_fck",
	.div = 2,
	.mult = 1,
};

static struct ti_clk ssi_sst_fck_3430es1 = {
	.name = "ssi_sst_fck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &ssi_sst_fck_3430es1_data,
};

static struct ti_clk_fixed omap_32k_fck_data = {
	.frequency = 32768,
};

static struct ti_clk omap_32k_fck = {
	.name = "omap_32k_fck",
	.type = TI_CLK_FIXED,
	.data = &omap_32k_fck_data,
};

static struct ti_clk_fixed_factor per_32k_alwon_fck_data = {
	.parent = "omap_32k_fck",
	.div = 1,
	.mult = 1,
};

static struct ti_clk per_32k_alwon_fck = {
	.name = "per_32k_alwon_fck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &per_32k_alwon_fck_data,
};

static struct ti_clk_gate gpio5_dbck_data = {
	.parent = "per_32k_alwon_fck",
	.bit_shift = 16,
	.reg = 0x1000,
	.module = TI_CLKM_CM,
};

static struct ti_clk gpio5_dbck = {
	.name = "gpio5_dbck",
	.clkdm_name = "per_clkdm",
	.type = TI_CLK_GATE,
	.data = &gpio5_dbck_data,
};

static struct ti_clk_gate gpt1_ick_data = {
	.parent = "wkup_l4_ick",
	.bit_shift = 0,
	.reg = 0xc10,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk gpt1_ick = {
	.name = "gpt1_ick",
	.clkdm_name = "wkup_clkdm",
	.type = TI_CLK_GATE,
	.data = &gpt1_ick_data,
};

static struct ti_clk_gate mcspi3_fck_data = {
	.parent = "core_48m_fck",
	.bit_shift = 20,
	.reg = 0xa00,
	.module = TI_CLKM_CM,
	.flags = CLKF_WAIT,
};

static struct ti_clk mcspi3_fck = {
	.name = "mcspi3_fck",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &mcspi3_fck_data,
};

static struct ti_clk_gate gpt2_gate_fck_data = {
	.parent = "sys_ck",
	.bit_shift = 3,
	.reg = 0x1000,
	.module = TI_CLKM_CM,
};

static const char *gpt2_mux_fck_parents[] = {
	"omap_32k_fck",
	"sys_ck",
};

static struct ti_clk_mux gpt2_mux_fck_data = {
	.num_parents = ARRAY_SIZE(gpt2_mux_fck_parents),
	.reg = 0x1040,
	.module = TI_CLKM_CM,
	.parents = gpt2_mux_fck_parents,
};

static struct ti_clk_composite gpt2_fck_data = {
	.mux = &gpt2_mux_fck_data,
	.gate = &gpt2_gate_fck_data,
};

static struct ti_clk gpt2_fck = {
	.name = "gpt2_fck",
	.type = TI_CLK_COMPOSITE,
	.data = &gpt2_fck_data,
};

static struct ti_clk_gate gpt10_ick_data = {
	.parent = "core_l4_ick",
	.bit_shift = 11,
	.reg = 0xa10,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk gpt10_ick = {
	.name = "gpt10_ick",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &gpt10_ick_data,
};

static struct ti_clk_gate uart2_fck_data = {
	.parent = "core_48m_fck",
	.bit_shift = 14,
	.reg = 0xa00,
	.module = TI_CLKM_CM,
	.flags = CLKF_WAIT,
};

static struct ti_clk uart2_fck = {
	.name = "uart2_fck",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &uart2_fck_data,
};

static struct ti_clk_fixed_factor sr_l4_ick_data = {
	.parent = "l4_ick",
	.div = 1,
	.mult = 1,
};

static struct ti_clk sr_l4_ick = {
	.name = "sr_l4_ick",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &sr_l4_ick_data,
};

static struct ti_clk_fixed_factor omap_96m_d8_fck_data = {
	.parent = "omap_96m_fck",
	.div = 8,
	.mult = 1,
};

static struct ti_clk omap_96m_d8_fck = {
	.name = "omap_96m_d8_fck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &omap_96m_d8_fck_data,
};

static struct ti_clk_divider dpll4_m5_ck_data = {
	.parent = "dpll4_ck",
	.max_div = 63,
	.reg = 0xf40,
	.module = TI_CLKM_CM,
	.flags = CLKF_INDEX_STARTS_AT_ONE,
};

static struct ti_clk dpll4_m5_ck = {
	.name = "dpll4_m5_ck",
	.type = TI_CLK_DIVIDER,
	.data = &dpll4_m5_ck_data,
};

static struct ti_clk_fixed_factor dpll4_m5x2_mul_ck_data = {
	.parent = "dpll4_m5_ck",
	.div = 1,
	.mult = 2,
	.flags = CLKF_SET_RATE_PARENT,
};

static struct ti_clk dpll4_m5x2_mul_ck = {
	.name = "dpll4_m5x2_mul_ck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &dpll4_m5x2_mul_ck_data,
};

static struct ti_clk_gate dpll4_m5x2_ck_data = {
	.parent = "dpll4_m5x2_mul_ck",
	.bit_shift = 0x1e,
	.reg = 0xd00,
	.module = TI_CLKM_CM,
	.flags = CLKF_SET_BIT_TO_DISABLE,
};

static struct ti_clk dpll4_m5x2_ck = {
	.name = "dpll4_m5x2_ck",
	.type = TI_CLK_GATE,
	.data = &dpll4_m5x2_ck_data,
};

static struct ti_clk_gate cam_mclk_data = {
	.parent = "dpll4_m5x2_ck",
	.bit_shift = 0,
	.reg = 0xf00,
	.module = TI_CLKM_CM,
	.flags = CLKF_SET_RATE_PARENT,
};

static struct ti_clk cam_mclk = {
	.name = "cam_mclk",
	.type = TI_CLK_GATE,
	.data = &cam_mclk_data,
};

static struct ti_clk_gate mcbsp3_gate_fck_data = {
	.parent = "mcbsp_clks",
	.bit_shift = 1,
	.reg = 0x1000,
	.module = TI_CLKM_CM,
};

static const char *mcbsp3_mux_fck_parents[] = {
	"per_96m_fck",
	"mcbsp_clks",
};

static struct ti_clk_mux mcbsp3_mux_fck_data = {
	.num_parents = ARRAY_SIZE(mcbsp3_mux_fck_parents),
	.reg = 0x2d8,
	.module = TI_CLKM_SCRM,
	.parents = mcbsp3_mux_fck_parents,
};

static struct ti_clk_composite mcbsp3_fck_data = {
	.mux = &mcbsp3_mux_fck_data,
	.gate = &mcbsp3_gate_fck_data,
};

static struct ti_clk mcbsp3_fck = {
	.name = "mcbsp3_fck",
	.type = TI_CLK_COMPOSITE,
	.data = &mcbsp3_fck_data,
};

static struct ti_clk_gate csi2_96m_fck_data = {
	.parent = "core_96m_fck",
	.bit_shift = 1,
	.reg = 0xf00,
	.module = TI_CLKM_CM,
};

static struct ti_clk csi2_96m_fck = {
	.name = "csi2_96m_fck",
	.clkdm_name = "cam_clkdm",
	.type = TI_CLK_GATE,
	.data = &csi2_96m_fck_data,
};

static struct ti_clk_gate gpt9_gate_fck_data = {
	.parent = "sys_ck",
	.bit_shift = 10,
	.reg = 0x1000,
	.module = TI_CLKM_CM,
};

static const char *gpt9_mux_fck_parents[] = {
	"omap_32k_fck",
	"sys_ck",
};

static struct ti_clk_mux gpt9_mux_fck_data = {
	.bit_shift = 7,
	.num_parents = ARRAY_SIZE(gpt9_mux_fck_parents),
	.reg = 0x1040,
	.module = TI_CLKM_CM,
	.parents = gpt9_mux_fck_parents,
};

static struct ti_clk_composite gpt9_fck_data = {
	.mux = &gpt9_mux_fck_data,
	.gate = &gpt9_gate_fck_data,
};

static struct ti_clk gpt9_fck = {
	.name = "gpt9_fck",
	.type = TI_CLK_COMPOSITE,
	.data = &gpt9_fck_data,
};

static struct ti_clk_divider dpll3_m3_ck_data = {
	.parent = "dpll3_ck",
	.bit_shift = 16,
	.max_div = 31,
	.reg = 0x1140,
	.module = TI_CLKM_CM,
	.flags = CLKF_INDEX_STARTS_AT_ONE,
};

static struct ti_clk dpll3_m3_ck = {
	.name = "dpll3_m3_ck",
	.type = TI_CLK_DIVIDER,
	.data = &dpll3_m3_ck_data,
};

static struct ti_clk_fixed_factor dpll3_m3x2_mul_ck_data = {
	.parent = "dpll3_m3_ck",
	.div = 1,
	.mult = 2,
};

static struct ti_clk dpll3_m3x2_mul_ck = {
	.name = "dpll3_m3x2_mul_ck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &dpll3_m3x2_mul_ck_data,
};

static struct ti_clk_gate sr2_fck_data = {
	.parent = "sys_ck",
	.bit_shift = 7,
	.reg = 0xc00,
	.module = TI_CLKM_CM,
	.flags = CLKF_WAIT,
};

static struct ti_clk sr2_fck = {
	.name = "sr2_fck",
	.clkdm_name = "wkup_clkdm",
	.type = TI_CLK_GATE,
	.data = &sr2_fck_data,
};

static struct ti_clk_fixed pclk_ck_data = {
	.frequency = 27000000,
};

static struct ti_clk pclk_ck = {
	.name = "pclk_ck",
	.type = TI_CLK_FIXED,
	.data = &pclk_ck_data,
};

static struct ti_clk_gate wdt2_ick_data = {
	.parent = "wkup_l4_ick",
	.bit_shift = 5,
	.reg = 0xc10,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk wdt2_ick = {
	.name = "wdt2_ick",
	.clkdm_name = "wkup_clkdm",
	.type = TI_CLK_GATE,
	.data = &wdt2_ick_data,
};

static struct ti_clk_fixed_factor core_l3_ick_data = {
	.parent = "l3_ick",
	.div = 1,
	.mult = 1,
};

static struct ti_clk core_l3_ick = {
	.name = "core_l3_ick",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &core_l3_ick_data,
};

static struct ti_clk_gate mcspi4_fck_data = {
	.parent = "core_48m_fck",
	.bit_shift = 21,
	.reg = 0xa00,
	.module = TI_CLKM_CM,
	.flags = CLKF_WAIT,
};

static struct ti_clk mcspi4_fck = {
	.name = "mcspi4_fck",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &mcspi4_fck_data,
};

static struct ti_clk_fixed_factor per_48m_fck_data = {
	.parent = "omap_48m_fck",
	.div = 1,
	.mult = 1,
};

static struct ti_clk per_48m_fck = {
	.name = "per_48m_fck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &per_48m_fck_data,
};

static struct ti_clk_gate uart4_fck_data = {
	.parent = "per_48m_fck",
	.bit_shift = 18,
	.reg = 0x1000,
	.module = TI_CLKM_CM,
	.flags = CLKF_WAIT,
};

static struct ti_clk uart4_fck = {
	.name = "uart4_fck",
	.clkdm_name = "per_clkdm",
	.type = TI_CLK_GATE,
	.data = &uart4_fck_data,
};

static struct ti_clk_fixed_factor omap_96m_d10_fck_data = {
	.parent = "omap_96m_fck",
	.div = 10,
	.mult = 1,
};

static struct ti_clk omap_96m_d10_fck = {
	.name = "omap_96m_d10_fck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &omap_96m_d10_fck_data,
};

static struct ti_clk_gate usim_gate_fck_data = {
	.parent = "omap_96m_fck",
	.bit_shift = 9,
	.reg = 0xc00,
	.module = TI_CLKM_CM,
};

static struct ti_clk_fixed_factor per_l4_ick_data = {
	.parent = "l4_ick",
	.div = 1,
	.mult = 1,
};

static struct ti_clk per_l4_ick = {
	.name = "per_l4_ick",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &per_l4_ick_data,
};

static struct ti_clk_gate gpt5_ick_data = {
	.parent = "per_l4_ick",
	.bit_shift = 6,
	.reg = 0x1010,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk gpt5_ick = {
	.name = "gpt5_ick",
	.clkdm_name = "per_clkdm",
	.type = TI_CLK_GATE,
	.data = &gpt5_ick_data,
};

static struct ti_clk_gate mcspi2_ick_data = {
	.parent = "core_l4_ick",
	.bit_shift = 19,
	.reg = 0xa10,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk mcspi2_ick = {
	.name = "mcspi2_ick",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &mcspi2_ick_data,
};

static struct ti_clk_fixed_factor ssi_l4_ick_data = {
	.parent = "l4_ick",
	.div = 1,
	.mult = 1,
};

static struct ti_clk ssi_l4_ick = {
	.name = "ssi_l4_ick",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &ssi_l4_ick_data,
};

static struct ti_clk_gate ssi_ick_3430es1_data = {
	.parent = "ssi_l4_ick",
	.bit_shift = 0,
	.reg = 0xa10,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
};

static struct ti_clk ssi_ick_3430es1 = {
	.name = "ssi_ick",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &ssi_ick_3430es1_data,
};

static struct ti_clk_gate i2c2_fck_data = {
	.parent = "core_96m_fck",
	.bit_shift = 16,
	.reg = 0xa00,
	.module = TI_CLKM_CM,
	.flags = CLKF_WAIT,
};

static struct ti_clk i2c2_fck = {
	.name = "i2c2_fck",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &i2c2_fck_data,
};

static struct ti_clk_divider dpll1_fck_data = {
	.parent = "core_ck",
	.bit_shift = 19,
	.max_div = 7,
	.reg = 0x940,
	.module = TI_CLKM_CM,
	.flags = CLKF_INDEX_STARTS_AT_ONE,
};

static struct ti_clk dpll1_fck = {
	.name = "dpll1_fck",
	.type = TI_CLK_DIVIDER,
	.data = &dpll1_fck_data,
};

static const char *dpll1_ck_parents[] = {
	"sys_ck",
	"dpll1_fck",
};

static struct ti_clk_dpll dpll1_ck_data = {
	.num_parents = ARRAY_SIZE(dpll1_ck_parents),
	.control_reg = 0x904,
	.idlest_reg = 0x924,
	.mult_div1_reg = 0x940,
	.autoidle_reg = 0x934,
	.module = TI_CLKM_CM,
	.parents = dpll1_ck_parents,
	.freqsel_mask = 0xf0,
	.modes = 0xa0,
	.div1_mask = 0x7f,
	.idlest_mask = 0x1,
	.auto_recal_bit = 0x3,
	.max_divider = 0x80,
	.min_divider = 0x1,
	.recal_en_bit = 0x7,
	.max_multiplier = 0x7ff,
	.enable_mask = 0x7,
	.mult_mask = 0x7ff00,
	.recal_st_bit = 0x7,
	.autoidle_mask = 0x7,
};

static struct ti_clk dpll1_ck = {
	.name = "dpll1_ck",
	.clkdm_name = "dpll1_clkdm",
	.type = TI_CLK_DPLL,
	.data = &dpll1_ck_data,
};

static struct ti_clk_fixed secure_32k_fck_data = {
	.frequency = 32768,
};

static struct ti_clk secure_32k_fck = {
	.name = "secure_32k_fck",
	.type = TI_CLK_FIXED,
	.data = &secure_32k_fck_data,
};

static struct ti_clk_gate gpio5_ick_data = {
	.parent = "per_l4_ick",
	.bit_shift = 16,
	.reg = 0x1010,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk gpio5_ick = {
	.name = "gpio5_ick",
	.clkdm_name = "per_clkdm",
	.type = TI_CLK_GATE,
	.data = &gpio5_ick_data,
};

static struct ti_clk_divider dpll4_m4_ck_data = {
	.parent = "dpll4_ck",
	.max_div = 32,
	.reg = 0xe40,
	.module = TI_CLKM_CM,
	.flags = CLKF_INDEX_STARTS_AT_ONE,
};

static struct ti_clk dpll4_m4_ck = {
	.name = "dpll4_m4_ck",
	.type = TI_CLK_DIVIDER,
	.data = &dpll4_m4_ck_data,
};

static struct ti_clk_fixed_factor dpll4_m4x2_mul_ck_data = {
	.parent = "dpll4_m4_ck",
	.div = 1,
	.mult = 2,
	.flags = CLKF_SET_RATE_PARENT,
};

static struct ti_clk dpll4_m4x2_mul_ck = {
	.name = "dpll4_m4x2_mul_ck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &dpll4_m4x2_mul_ck_data,
};

static struct ti_clk_gate dpll4_m4x2_ck_data = {
	.parent = "dpll4_m4x2_mul_ck",
	.bit_shift = 0x1d,
	.reg = 0xd00,
	.module = TI_CLKM_CM,
	.flags = CLKF_SET_RATE_PARENT | CLKF_SET_BIT_TO_DISABLE,
};

static struct ti_clk dpll4_m4x2_ck = {
	.name = "dpll4_m4x2_ck",
	.type = TI_CLK_GATE,
	.data = &dpll4_m4x2_ck_data,
};

static struct ti_clk_gate dss1_alwon_fck_3430es2_data = {
	.parent = "dpll4_m4x2_ck",
	.bit_shift = 0,
	.reg = 0xe00,
	.module = TI_CLKM_CM,
	.flags = CLKF_DSS | CLKF_SET_RATE_PARENT,
};

static struct ti_clk dss1_alwon_fck_3430es2 = {
	.name = "dss1_alwon_fck",
	.clkdm_name = "dss_clkdm",
	.type = TI_CLK_GATE,
	.data = &dss1_alwon_fck_3430es2_data,
};

static struct ti_clk_gate uart3_ick_data = {
	.parent = "per_l4_ick",
	.bit_shift = 11,
	.reg = 0x1010,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk uart3_ick = {
	.name = "uart3_ick",
	.clkdm_name = "per_clkdm",
	.type = TI_CLK_GATE,
	.data = &uart3_ick_data,
};

static struct ti_clk_divider dpll4_m3_ck_data = {
	.parent = "dpll4_ck",
	.bit_shift = 8,
	.max_div = 32,
	.reg = 0xe40,
	.module = TI_CLKM_CM,
	.flags = CLKF_INDEX_STARTS_AT_ONE,
};

static struct ti_clk dpll4_m3_ck = {
	.name = "dpll4_m3_ck",
	.type = TI_CLK_DIVIDER,
	.data = &dpll4_m3_ck_data,
};

static struct ti_clk_gate mcbsp3_ick_data = {
	.parent = "per_l4_ick",
	.bit_shift = 1,
	.reg = 0x1010,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk mcbsp3_ick = {
	.name = "mcbsp3_ick",
	.clkdm_name = "per_clkdm",
	.type = TI_CLK_GATE,
	.data = &mcbsp3_ick_data,
};

static struct ti_clk_gate gpio3_dbck_data = {
	.parent = "per_32k_alwon_fck",
	.bit_shift = 14,
	.reg = 0x1000,
	.module = TI_CLKM_CM,
};

static struct ti_clk gpio3_dbck = {
	.name = "gpio3_dbck",
	.clkdm_name = "per_clkdm",
	.type = TI_CLK_GATE,
	.data = &gpio3_dbck_data,
};

static struct ti_clk_gate fac_ick_data = {
	.parent = "core_l4_ick",
	.bit_shift = 8,
	.reg = 0xa10,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk fac_ick = {
	.name = "fac_ick",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &fac_ick_data,
};

static struct ti_clk_gate clkout2_src_gate_ck_data = {
	.parent = "core_ck",
	.bit_shift = 7,
	.reg = 0xd70,
	.module = TI_CLKM_CM,
	.flags = CLKF_NO_WAIT,
};

static struct ti_clk_fixed_factor dpll4_m3x2_mul_ck_data = {
	.parent = "dpll4_m3_ck",
	.div = 1,
	.mult = 2,
};

static struct ti_clk dpll4_m3x2_mul_ck = {
	.name = "dpll4_m3x2_mul_ck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &dpll4_m3x2_mul_ck_data,
};

static struct ti_clk_gate dpll4_m3x2_ck_data = {
	.parent = "dpll4_m3x2_mul_ck",
	.bit_shift = 0x1c,
	.reg = 0xd00,
	.module = TI_CLKM_CM,
	.flags = CLKF_SET_BIT_TO_DISABLE,
};

static struct ti_clk dpll4_m3x2_ck = {
	.name = "dpll4_m3x2_ck",
	.type = TI_CLK_GATE,
	.data = &dpll4_m3x2_ck_data,
};

static const char *omap_54m_fck_parents[] = {
	"dpll4_m3x2_ck",
	"sys_altclk",
};

static struct ti_clk_mux omap_54m_fck_data = {
	.bit_shift = 5,
	.num_parents = ARRAY_SIZE(omap_54m_fck_parents),
	.reg = 0xd40,
	.module = TI_CLKM_CM,
	.parents = omap_54m_fck_parents,
};

static struct ti_clk omap_54m_fck = {
	.name = "omap_54m_fck",
	.type = TI_CLK_MUX,
	.data = &omap_54m_fck_data,
};

static const char *clkout2_src_mux_ck_parents[] = {
	"core_ck",
	"sys_ck",
	"cm_96m_fck",
	"omap_54m_fck",
};

static struct ti_clk_mux clkout2_src_mux_ck_data = {
	.num_parents = ARRAY_SIZE(clkout2_src_mux_ck_parents),
	.reg = 0xd70,
	.module = TI_CLKM_CM,
	.parents = clkout2_src_mux_ck_parents,
};

static struct ti_clk_composite clkout2_src_ck_data = {
	.mux = &clkout2_src_mux_ck_data,
	.gate = &clkout2_src_gate_ck_data,
};

static struct ti_clk clkout2_src_ck = {
	.name = "clkout2_src_ck",
	.type = TI_CLK_COMPOSITE,
	.data = &clkout2_src_ck_data,
};

static struct ti_clk_gate i2c1_fck_data = {
	.parent = "core_96m_fck",
	.bit_shift = 15,
	.reg = 0xa00,
	.module = TI_CLKM_CM,
	.flags = CLKF_WAIT,
};

static struct ti_clk i2c1_fck = {
	.name = "i2c1_fck",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &i2c1_fck_data,
};

static struct ti_clk_gate wdt3_fck_data = {
	.parent = "per_32k_alwon_fck",
	.bit_shift = 12,
	.reg = 0x1000,
	.module = TI_CLKM_CM,
	.flags = CLKF_WAIT,
};

static struct ti_clk wdt3_fck = {
	.name = "wdt3_fck",
	.clkdm_name = "per_clkdm",
	.type = TI_CLK_GATE,
	.data = &wdt3_fck_data,
};

static struct ti_clk_gate gpt7_gate_fck_data = {
	.parent = "sys_ck",
	.bit_shift = 8,
	.reg = 0x1000,
	.module = TI_CLKM_CM,
};

static const char *gpt7_mux_fck_parents[] = {
	"omap_32k_fck",
	"sys_ck",
};

static struct ti_clk_mux gpt7_mux_fck_data = {
	.bit_shift = 5,
	.num_parents = ARRAY_SIZE(gpt7_mux_fck_parents),
	.reg = 0x1040,
	.module = TI_CLKM_CM,
	.parents = gpt7_mux_fck_parents,
};

static struct ti_clk_composite gpt7_fck_data = {
	.mux = &gpt7_mux_fck_data,
	.gate = &gpt7_gate_fck_data,
};

static struct ti_clk gpt7_fck = {
	.name = "gpt7_fck",
	.type = TI_CLK_COMPOSITE,
	.data = &gpt7_fck_data,
};

static struct ti_clk_gate usb_l4_gate_ick_data = {
	.parent = "l4_ick",
	.bit_shift = 5,
	.reg = 0xa10,
	.module = TI_CLKM_CM,
	.flags = CLKF_INTERFACE,
};

static struct ti_clk_divider usb_l4_div_ick_data = {
	.parent = "l4_ick",
	.bit_shift = 4,
	.max_div = 1,
	.reg = 0xa40,
	.module = TI_CLKM_CM,
	.flags = CLKF_INDEX_STARTS_AT_ONE,
};

static struct ti_clk_composite usb_l4_ick_data = {
	.gate = &usb_l4_gate_ick_data,
	.divider = &usb_l4_div_ick_data,
};

static struct ti_clk usb_l4_ick = {
	.name = "usb_l4_ick",
	.type = TI_CLK_COMPOSITE,
	.data = &usb_l4_ick_data,
};

static struct ti_clk_gate uart4_ick_data = {
	.parent = "per_l4_ick",
	.bit_shift = 18,
	.reg = 0x1010,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk uart4_ick = {
	.name = "uart4_ick",
	.clkdm_name = "per_clkdm",
	.type = TI_CLK_GATE,
	.data = &uart4_ick_data,
};

static struct ti_clk_fixed dummy_ck_data = {
	.frequency = 0,
};

static struct ti_clk dummy_ck = {
	.name = "dummy_ck",
	.type = TI_CLK_FIXED,
	.data = &dummy_ck_data,
};

static const char *gpt3_mux_fck_parents[] = {
	"omap_32k_fck",
	"sys_ck",
};

static struct ti_clk_mux gpt3_mux_fck_data = {
	.bit_shift = 1,
	.num_parents = ARRAY_SIZE(gpt3_mux_fck_parents),
	.reg = 0x1040,
	.module = TI_CLKM_CM,
	.parents = gpt3_mux_fck_parents,
};

static struct ti_clk_gate gpt9_ick_data = {
	.parent = "per_l4_ick",
	.bit_shift = 10,
	.reg = 0x1010,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk gpt9_ick = {
	.name = "gpt9_ick",
	.clkdm_name = "per_clkdm",
	.type = TI_CLK_GATE,
	.data = &gpt9_ick_data,
};

static struct ti_clk_gate gpt10_gate_fck_data = {
	.parent = "sys_ck",
	.bit_shift = 11,
	.reg = 0xa00,
	.module = TI_CLKM_CM,
};

static struct ti_clk_gate dss_ick_3430es1_data = {
	.parent = "l4_ick",
	.bit_shift = 0,
	.reg = 0xe10,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
};

static struct ti_clk dss_ick_3430es1 = {
	.name = "dss_ick",
	.clkdm_name = "dss_clkdm",
	.type = TI_CLK_GATE,
	.data = &dss_ick_3430es1_data,
};

static struct ti_clk_gate gpt11_ick_data = {
	.parent = "core_l4_ick",
	.bit_shift = 12,
	.reg = 0xa10,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk gpt11_ick = {
	.name = "gpt11_ick",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &gpt11_ick_data,
};

static struct ti_clk_divider dpll2_fck_data = {
	.parent = "core_ck",
	.bit_shift = 19,
	.max_div = 7,
	.reg = 0x40,
	.module = TI_CLKM_CM,
	.flags = CLKF_INDEX_STARTS_AT_ONE,
};

static struct ti_clk dpll2_fck = {
	.name = "dpll2_fck",
	.type = TI_CLK_DIVIDER,
	.data = &dpll2_fck_data,
};

static struct ti_clk_gate uart1_fck_data = {
	.parent = "core_48m_fck",
	.bit_shift = 13,
	.reg = 0xa00,
	.module = TI_CLKM_CM,
	.flags = CLKF_WAIT,
};

static struct ti_clk uart1_fck = {
	.name = "uart1_fck",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &uart1_fck_data,
};

static struct ti_clk_gate hsotgusb_ick_3430es1_data = {
	.parent = "core_l3_ick",
	.bit_shift = 4,
	.reg = 0xa10,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
};

static struct ti_clk hsotgusb_ick_3430es1 = {
	.name = "hsotgusb_ick_3430es1",
	.clkdm_name = "core_l3_clkdm",
	.type = TI_CLK_GATE,
	.data = &hsotgusb_ick_3430es1_data,
};

static struct ti_clk_gate gpio2_ick_data = {
	.parent = "per_l4_ick",
	.bit_shift = 13,
	.reg = 0x1010,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk gpio2_ick = {
	.name = "gpio2_ick",
	.clkdm_name = "per_clkdm",
	.type = TI_CLK_GATE,
	.data = &gpio2_ick_data,
};

static struct ti_clk_gate mmchs1_ick_data = {
	.parent = "core_l4_ick",
	.bit_shift = 24,
	.reg = 0xa10,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk mmchs1_ick = {
	.name = "mmchs1_ick",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &mmchs1_ick_data,
};

static struct ti_clk_gate modem_fck_data = {
	.parent = "sys_ck",
	.bit_shift = 31,
	.reg = 0xa00,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk modem_fck = {
	.name = "modem_fck",
	.clkdm_name = "d2d_clkdm",
	.type = TI_CLK_GATE,
	.data = &modem_fck_data,
};

static struct ti_clk_gate mcbsp4_ick_data = {
	.parent = "per_l4_ick",
	.bit_shift = 2,
	.reg = 0x1010,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk mcbsp4_ick = {
	.name = "mcbsp4_ick",
	.clkdm_name = "per_clkdm",
	.type = TI_CLK_GATE,
	.data = &mcbsp4_ick_data,
};

static struct ti_clk_gate gpio1_ick_data = {
	.parent = "wkup_l4_ick",
	.bit_shift = 3,
	.reg = 0xc10,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk gpio1_ick = {
	.name = "gpio1_ick",
	.clkdm_name = "wkup_clkdm",
	.type = TI_CLK_GATE,
	.data = &gpio1_ick_data,
};

static const char *gpt6_mux_fck_parents[] = {
	"omap_32k_fck",
	"sys_ck",
};

static struct ti_clk_mux gpt6_mux_fck_data = {
	.bit_shift = 4,
	.num_parents = ARRAY_SIZE(gpt6_mux_fck_parents),
	.reg = 0x1040,
	.module = TI_CLKM_CM,
	.parents = gpt6_mux_fck_parents,
};

static struct ti_clk_fixed_factor dpll1_x2_ck_data = {
	.parent = "dpll1_ck",
	.div = 1,
	.mult = 2,
};

static struct ti_clk dpll1_x2_ck = {
	.name = "dpll1_x2_ck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &dpll1_x2_ck_data,
};

static struct ti_clk_divider dpll1_x2m2_ck_data = {
	.parent = "dpll1_x2_ck",
	.max_div = 31,
	.reg = 0x944,
	.module = TI_CLKM_CM,
	.flags = CLKF_INDEX_STARTS_AT_ONE,
};

static struct ti_clk dpll1_x2m2_ck = {
	.name = "dpll1_x2m2_ck",
	.type = TI_CLK_DIVIDER,
	.data = &dpll1_x2m2_ck_data,
};

static struct ti_clk_fixed_factor mpu_ck_data = {
	.parent = "dpll1_x2m2_ck",
	.div = 1,
	.mult = 1,
};

static struct ti_clk mpu_ck = {
	.name = "mpu_ck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &mpu_ck_data,
};

static struct ti_clk_divider arm_fck_data = {
	.parent = "mpu_ck",
	.max_div = 2,
	.reg = 0x924,
	.module = TI_CLKM_CM,
};

static struct ti_clk arm_fck = {
	.name = "arm_fck",
	.type = TI_CLK_DIVIDER,
	.data = &arm_fck_data,
};

static struct ti_clk_fixed_factor core_d3_ck_data = {
	.parent = "core_ck",
	.div = 3,
	.mult = 1,
};

static struct ti_clk core_d3_ck = {
	.name = "core_d3_ck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &core_d3_ck_data,
};

static struct ti_clk_gate gpt11_gate_fck_data = {
	.parent = "sys_ck",
	.bit_shift = 12,
	.reg = 0xa00,
	.module = TI_CLKM_CM,
};

static const char *gpt11_mux_fck_parents[] = {
	"omap_32k_fck",
	"sys_ck",
};

static struct ti_clk_mux gpt11_mux_fck_data = {
	.bit_shift = 7,
	.num_parents = ARRAY_SIZE(gpt11_mux_fck_parents),
	.reg = 0xa40,
	.module = TI_CLKM_CM,
	.parents = gpt11_mux_fck_parents,
};

static struct ti_clk_composite gpt11_fck_data = {
	.mux = &gpt11_mux_fck_data,
	.gate = &gpt11_gate_fck_data,
};

static struct ti_clk gpt11_fck = {
	.name = "gpt11_fck",
	.type = TI_CLK_COMPOSITE,
	.data = &gpt11_fck_data,
};

static struct ti_clk_fixed_factor core_d6_ck_data = {
	.parent = "core_ck",
	.div = 6,
	.mult = 1,
};

static struct ti_clk core_d6_ck = {
	.name = "core_d6_ck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &core_d6_ck_data,
};

static struct ti_clk_gate uart4_fck_am35xx_data = {
	.parent = "core_48m_fck",
	.bit_shift = 23,
	.reg = 0xa00,
	.module = TI_CLKM_CM,
	.flags = CLKF_WAIT,
};

static struct ti_clk uart4_fck_am35xx = {
	.name = "uart4_fck_am35xx",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &uart4_fck_am35xx_data,
};

static struct ti_clk_gate dpll3_m3x2_ck_data = {
	.parent = "dpll3_m3x2_mul_ck",
	.bit_shift = 0xc,
	.reg = 0xd00,
	.module = TI_CLKM_CM,
	.flags = CLKF_SET_BIT_TO_DISABLE,
};

static struct ti_clk dpll3_m3x2_ck = {
	.name = "dpll3_m3x2_ck",
	.type = TI_CLK_GATE,
	.data = &dpll3_m3x2_ck_data,
};

static struct ti_clk_fixed_factor emu_core_alwon_ck_data = {
	.parent = "dpll3_m3x2_ck",
	.div = 1,
	.mult = 1,
};

static struct ti_clk emu_core_alwon_ck = {
	.name = "emu_core_alwon_ck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &emu_core_alwon_ck_data,
};

static struct ti_clk_divider dpll4_m6_ck_data = {
	.parent = "dpll4_ck",
	.bit_shift = 24,
	.max_div = 63,
	.reg = 0x1140,
	.module = TI_CLKM_CM,
	.flags = CLKF_INDEX_STARTS_AT_ONE,
};

static struct ti_clk dpll4_m6_ck = {
	.name = "dpll4_m6_ck",
	.type = TI_CLK_DIVIDER,
	.data = &dpll4_m6_ck_data,
};

static struct ti_clk_fixed_factor dpll4_m6x2_mul_ck_data = {
	.parent = "dpll4_m6_ck",
	.div = 1,
	.mult = 2,
};

static struct ti_clk dpll4_m6x2_mul_ck = {
	.name = "dpll4_m6x2_mul_ck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &dpll4_m6x2_mul_ck_data,
};

static struct ti_clk_gate dpll4_m6x2_ck_data = {
	.parent = "dpll4_m6x2_mul_ck",
	.bit_shift = 0x1f,
	.reg = 0xd00,
	.module = TI_CLKM_CM,
	.flags = CLKF_SET_BIT_TO_DISABLE,
};

static struct ti_clk dpll4_m6x2_ck = {
	.name = "dpll4_m6x2_ck",
	.type = TI_CLK_GATE,
	.data = &dpll4_m6x2_ck_data,
};

static struct ti_clk_fixed_factor emu_per_alwon_ck_data = {
	.parent = "dpll4_m6x2_ck",
	.div = 1,
	.mult = 1,
};

static struct ti_clk emu_per_alwon_ck = {
	.name = "emu_per_alwon_ck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &emu_per_alwon_ck_data,
};

static struct ti_clk_fixed_factor emu_mpu_alwon_ck_data = {
	.parent = "mpu_ck",
	.div = 1,
	.mult = 1,
};

static struct ti_clk emu_mpu_alwon_ck = {
	.name = "emu_mpu_alwon_ck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &emu_mpu_alwon_ck_data,
};

static const char *emu_src_mux_ck_parents[] = {
	"sys_ck",
	"emu_core_alwon_ck",
	"emu_per_alwon_ck",
	"emu_mpu_alwon_ck",
};

static struct ti_clk_mux emu_src_mux_ck_data = {
	.num_parents = ARRAY_SIZE(emu_src_mux_ck_parents),
	.reg = 0x1140,
	.module = TI_CLKM_CM,
	.parents = emu_src_mux_ck_parents,
};

static struct ti_clk emu_src_mux_ck = {
	.name = "emu_src_mux_ck",
	.type = TI_CLK_MUX,
	.data = &emu_src_mux_ck_data,
};

static struct ti_clk_gate emu_src_ck_data = {
	.parent = "emu_src_mux_ck",
	.flags = CLKF_CLKDM,
};

static struct ti_clk emu_src_ck = {
	.name = "emu_src_ck",
	.clkdm_name = "emu_clkdm",
	.type = TI_CLK_GATE,
	.data = &emu_src_ck_data,
};

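/*
 * emu_src_ck_data has no .reg/.bit_shift; with CLKF_CLKDM the gating is
 * presumably handled through its "emu_clkdm" clockdomain rather than
 * through an individual CM enable bit.
 */
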
static struct ti_clk_divider atclk_fck_data = {
	.parent = "emu_src_ck",
	.bit_shift = 4,
	.max_div = 3,
	.reg = 0x1140,
	.module = TI_CLKM_CM,
	.flags = CLKF_INDEX_STARTS_AT_ONE,
};

static struct ti_clk atclk_fck = {
	.name = "atclk_fck",
	.type = TI_CLK_DIVIDER,
	.data = &atclk_fck_data,
};

static struct ti_clk_gate ipss_ick_data = {
	.parent = "core_l3_ick",
	.bit_shift = 4,
	.reg = 0xa10,
	.module = TI_CLKM_CM,
	.flags = CLKF_AM35XX | CLKF_INTERFACE,
};

static struct ti_clk ipss_ick = {
	.name = "ipss_ick",
	.clkdm_name = "core_l3_clkdm",
	.type = TI_CLK_GATE,
	.data = &ipss_ick_data,
};

static struct ti_clk_gate emac_ick_data = {
	.parent = "ipss_ick",
	.bit_shift = 1,
	.reg = 0x59c,
	.module = TI_CLKM_SCRM,
	.flags = CLKF_AM35XX,
};

static struct ti_clk emac_ick = {
	.name = "emac_ick",
	.clkdm_name = "core_l3_clkdm",
	.type = TI_CLK_GATE,
	.data = &emac_ick_data,
};

static struct ti_clk_gate vpfe_ick_data = {
	.parent = "ipss_ick",
	.bit_shift = 2,
	.reg = 0x59c,
	.module = TI_CLKM_SCRM,
	.flags = CLKF_AM35XX,
};

static struct ti_clk vpfe_ick = {
	.name = "vpfe_ick",
	.clkdm_name = "core_l3_clkdm",
	.type = TI_CLK_GATE,
	.data = &vpfe_ick_data,
};

static const char *dpll2_ck_parents[] = {
	"sys_ck",
	"dpll2_fck",
};

static struct ti_clk_dpll dpll2_ck_data = {
	.num_parents = ARRAY_SIZE(dpll2_ck_parents),
	.control_reg = 0x4,
	.idlest_reg = 0x24,
	.mult_div1_reg = 0x40,
	.autoidle_reg = 0x34,
	.module = TI_CLKM_CM,
	.parents = dpll2_ck_parents,
	.freqsel_mask = 0xf0,
	.modes = 0xa2,
	.div1_mask = 0x7f,
	.idlest_mask = 0x1,
	.auto_recal_bit = 0x3,
	.max_divider = 0x80,
	.min_divider = 0x1,
	.recal_en_bit = 0x8,
	.max_multiplier = 0x7ff,
	.enable_mask = 0x7,
	.mult_mask = 0x7ff00,
	.recal_st_bit = 0x8,
	.autoidle_mask = 0x7,
};

static struct ti_clk dpll2_ck = {
	.name = "dpll2_ck",
	.clkdm_name = "dpll2_clkdm",
	.type = TI_CLK_DPLL,
	.data = &dpll2_ck_data,
};

static struct ti_clk_divider dpll2_m2_ck_data = {
	.parent = "dpll2_ck",
	.max_div = 31,
	.reg = 0x44,
	.module = TI_CLKM_CM,
	.flags = CLKF_INDEX_STARTS_AT_ONE,
};

static struct ti_clk dpll2_m2_ck = {
	.name = "dpll2_m2_ck",
	.type = TI_CLK_DIVIDER,
	.data = &dpll2_m2_ck_data,
};

static const char *mcbsp4_mux_fck_parents[] = {
	"per_96m_fck",
	"mcbsp_clks",
};

static struct ti_clk_mux mcbsp4_mux_fck_data = {
	.bit_shift = 2,
	.num_parents = ARRAY_SIZE(mcbsp4_mux_fck_parents),
	.reg = 0x2d8,
	.module = TI_CLKM_SCRM,
	.parents = mcbsp4_mux_fck_parents,
};

static const char *mcbsp1_mux_fck_parents[] = {
	"core_96m_fck",
	"mcbsp_clks",
};

static struct ti_clk_mux mcbsp1_mux_fck_data = {
	.bit_shift = 2,
	.num_parents = ARRAY_SIZE(mcbsp1_mux_fck_parents),
	.reg = 0x274,
	.module = TI_CLKM_SCRM,
	.parents = mcbsp1_mux_fck_parents,
};

static struct ti_clk_gate gpt8_gate_fck_data = {
	.parent = "sys_ck",
	.bit_shift = 9,
	.reg = 0x1000,
	.module = TI_CLKM_CM,
};

static struct ti_clk_gate gpt8_ick_data = {
	.parent = "per_l4_ick",
	.bit_shift = 9,
	.reg = 0x1010,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk gpt8_ick = {
	.name = "gpt8_ick",
	.clkdm_name = "per_clkdm",
	.type = TI_CLK_GATE,
	.data = &gpt8_ick_data,
};

static const char *gpt10_mux_fck_parents[] = {
	"omap_32k_fck",
	"sys_ck",
};

static struct ti_clk_mux gpt10_mux_fck_data = {
	.bit_shift = 6,
	.num_parents = ARRAY_SIZE(gpt10_mux_fck_parents),
	.reg = 0xa40,
	.module = TI_CLKM_CM,
	.parents = gpt10_mux_fck_parents,
};

static struct ti_clk_gate mmchs3_ick_data = {
	.parent = "core_l4_ick",
	.bit_shift = 30,
	.reg = 0xa10,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk mmchs3_ick = {
	.name = "mmchs3_ick",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &mmchs3_ick_data,
};

static struct ti_clk_gate gpio3_ick_data = {
	.parent = "per_l4_ick",
	.bit_shift = 14,
	.reg = 0x1010,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk gpio3_ick = {
	.name = "gpio3_ick",
	.clkdm_name = "per_clkdm",
	.type = TI_CLK_GATE,
	.data = &gpio3_ick_data,
};

static const char *traceclk_src_fck_parents[] = {
	"sys_ck",
	"emu_core_alwon_ck",
	"emu_per_alwon_ck",
	"emu_mpu_alwon_ck",
};

static struct ti_clk_mux traceclk_src_fck_data = {
	.bit_shift = 2,
	.num_parents = ARRAY_SIZE(traceclk_src_fck_parents),
	.reg = 0x1140,
	.module = TI_CLKM_CM,
	.parents = traceclk_src_fck_parents,
};

static struct ti_clk traceclk_src_fck = {
	.name = "traceclk_src_fck",
	.type = TI_CLK_MUX,
	.data = &traceclk_src_fck_data,
};

static struct ti_clk_divider traceclk_fck_data = {
	.parent = "traceclk_src_fck",
	.bit_shift = 11,
	.max_div = 7,
	.reg = 0x1140,
	.module = TI_CLKM_CM,
	.flags = CLKF_INDEX_STARTS_AT_ONE,
};

static struct ti_clk traceclk_fck = {
	.name = "traceclk_fck",
	.type = TI_CLK_DIVIDER,
	.data = &traceclk_fck_data,
};

static struct ti_clk_gate mcbsp5_gate_fck_data = {
	.parent = "mcbsp_clks",
	.bit_shift = 10,
	.reg = 0xa00,
	.module = TI_CLKM_CM,
};

static struct ti_clk_gate sad2d_ick_data = {
	.parent = "l3_ick",
	.bit_shift = 3,
	.reg = 0xa10,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk sad2d_ick = {
	.name = "sad2d_ick",
	.clkdm_name = "d2d_clkdm",
	.type = TI_CLK_GATE,
	.data = &sad2d_ick_data,
};

static const char *gpt1_mux_fck_parents[] = {
	"omap_32k_fck",
	"sys_ck",
};

static struct ti_clk_mux gpt1_mux_fck_data = {
	.num_parents = ARRAY_SIZE(gpt1_mux_fck_parents),
	.reg = 0xc40,
	.module = TI_CLKM_CM,
	.parents = gpt1_mux_fck_parents,
};

static struct ti_clk_gate hecc_ck_data = {
	.parent = "sys_ck",
	.bit_shift = 3,
	.reg = 0x59c,
	.module = TI_CLKM_SCRM,
	.flags = CLKF_AM35XX,
};

static struct ti_clk hecc_ck = {
	.name = "hecc_ck",
	.clkdm_name = "core_l3_clkdm",
	.type = TI_CLK_GATE,
	.data = &hecc_ck_data,
};

static struct ti_clk_gate gpt1_gate_fck_data = {
	.parent = "sys_ck",
	.bit_shift = 0,
	.reg = 0xc00,
	.module = TI_CLKM_CM,
};

static struct ti_clk_composite gpt1_fck_data = {
	.mux = &gpt1_mux_fck_data,
	.gate = &gpt1_gate_fck_data,
};

static struct ti_clk gpt1_fck = {
	.name = "gpt1_fck",
	.type = TI_CLK_COMPOSITE,
	.data = &gpt1_fck_data,
};

static struct ti_clk_gate dpll4_m2x2_ck_omap36xx_data = {
	.parent = "dpll4_m2x2_mul_ck",
	.bit_shift = 0x1b,
	.reg = 0xd00,
	.module = TI_CLKM_CM,
	.flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
};

static struct ti_clk dpll4_m2x2_ck_omap36xx = {
	.name = "dpll4_m2x2_ck",
	.type = TI_CLK_GATE,
	.data = &dpll4_m2x2_ck_omap36xx_data,
	.patch = &dpll4_m2x2_ck,
};

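/*
 * The .patch pointer on the omap36xx variants (dpll4_m2x2_ck_omap36xx
 * above, dpll4_ck_omap36xx and dpll4_m5x2_ck_omap36xx below) appears to
 * name the base 3430 clock whose data gets replaced at init time on
 * 36xx-class SoCs, where DPLL4 is a J-type DPLL with a wider multiplier
 * field (.mult_mask 0xfff00 instead of 0x7ff00).
 */
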
static struct ti_clk_divider gfx_l3_fck_data = {
	.parent = "l3_ick",
	.max_div = 7,
	.reg = 0xb40,
	.module = TI_CLKM_CM,
	.flags = CLKF_INDEX_STARTS_AT_ONE,
};

static struct ti_clk gfx_l3_fck = {
	.name = "gfx_l3_fck",
	.type = TI_CLK_DIVIDER,
	.data = &gfx_l3_fck_data,
};

static struct ti_clk_gate gfx_cg1_ck_data = {
	.parent = "gfx_l3_fck",
	.bit_shift = 1,
	.reg = 0xb00,
	.module = TI_CLKM_CM,
	.flags = CLKF_WAIT,
};

static struct ti_clk gfx_cg1_ck = {
	.name = "gfx_cg1_ck",
	.clkdm_name = "gfx_3430es1_clkdm",
	.type = TI_CLK_GATE,
	.data = &gfx_cg1_ck_data,
};

static struct ti_clk_gate mailboxes_ick_data = {
	.parent = "core_l4_ick",
	.bit_shift = 7,
	.reg = 0xa10,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk mailboxes_ick = {
	.name = "mailboxes_ick",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &mailboxes_ick_data,
};

static struct ti_clk_gate sha11_ick_data = {
	.parent = "security_l4_ick2",
	.bit_shift = 1,
	.reg = 0xa14,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk sha11_ick = {
	.name = "sha11_ick",
	.type = TI_CLK_GATE,
	.data = &sha11_ick_data,
};

static struct ti_clk_gate hsotgusb_ick_am35xx_data = {
	.parent = "ipss_ick",
	.bit_shift = 0,
	.reg = 0x59c,
	.module = TI_CLKM_SCRM,
	.flags = CLKF_AM35XX,
};

static struct ti_clk hsotgusb_ick_am35xx = {
	.name = "hsotgusb_ick_am35xx",
	.clkdm_name = "core_l3_clkdm",
	.type = TI_CLK_GATE,
	.data = &hsotgusb_ick_am35xx_data,
};

static struct ti_clk_gate mmchs3_fck_data = {
	.parent = "core_96m_fck",
	.bit_shift = 30,
	.reg = 0xa00,
	.module = TI_CLKM_CM,
	.flags = CLKF_WAIT,
};

static struct ti_clk mmchs3_fck = {
	.name = "mmchs3_fck",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &mmchs3_fck_data,
};

static struct ti_clk_divider pclk_fck_data = {
	.parent = "emu_src_ck",
	.bit_shift = 8,
	.max_div = 7,
	.reg = 0x1140,
	.module = TI_CLKM_CM,
	.flags = CLKF_INDEX_STARTS_AT_ONE,
};

static struct ti_clk pclk_fck = {
	.name = "pclk_fck",
	.type = TI_CLK_DIVIDER,
	.data = &pclk_fck_data,
};

static const char *dpll4_ck_omap36xx_parents[] = {
	"sys_ck",
	"sys_ck",
};

static struct ti_clk_dpll dpll4_ck_omap36xx_data = {
	.num_parents = ARRAY_SIZE(dpll4_ck_omap36xx_parents),
	.control_reg = 0xd00,
	.idlest_reg = 0xd20,
	.mult_div1_reg = 0xd44,
	.autoidle_reg = 0xd30,
	.module = TI_CLKM_CM,
	.parents = dpll4_ck_omap36xx_parents,
	.modes = 0x82,
	.div1_mask = 0x7f,
	.idlest_mask = 0x2,
	.auto_recal_bit = 0x13,
	.max_divider = 0x80,
	.min_divider = 0x1,
	.recal_en_bit = 0x6,
	.max_multiplier = 0xfff,
	.enable_mask = 0x70000,
	.mult_mask = 0xfff00,
	.recal_st_bit = 0x6,
	.autoidle_mask = 0x38,
	.sddiv_mask = 0xff000000,
	.dco_mask = 0xe00000,
	.flags = CLKF_PER | CLKF_J_TYPE,
};

static struct ti_clk dpll4_ck_omap36xx = {
	.name = "dpll4_ck",
	.type = TI_CLK_DPLL,
	.data = &dpll4_ck_omap36xx_data,
	.patch = &dpll4_ck,
};

static struct ti_clk_gate uart3_fck_data = {
	.parent = "per_48m_fck",
	.bit_shift = 11,
	.reg = 0x1000,
	.module = TI_CLKM_CM,
	.flags = CLKF_WAIT,
};

static struct ti_clk uart3_fck = {
	.name = "uart3_fck",
	.clkdm_name = "per_clkdm",
	.type = TI_CLK_GATE,
	.data = &uart3_fck_data,
};

static struct ti_clk_fixed_factor wkup_32k_fck_data = {
	.parent = "omap_32k_fck",
	.div = 1,
	.mult = 1,
};

static struct ti_clk wkup_32k_fck = {
	.name = "wkup_32k_fck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &wkup_32k_fck_data,
};

static struct ti_clk_gate sys_clkout1_data = {
	.parent = "osc_sys_ck",
	.bit_shift = 7,
	.reg = 0xd70,
	.module = TI_CLKM_PRM,
};

static struct ti_clk sys_clkout1 = {
	.name = "sys_clkout1",
	.type = TI_CLK_GATE,
	.data = &sys_clkout1_data,
};

static struct ti_clk_fixed_factor gpmc_fck_data = {
	.parent = "core_l3_ick",
	.div = 1,
	.mult = 1,
};

static struct ti_clk gpmc_fck = {
	.name = "gpmc_fck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &gpmc_fck_data,
};

static struct ti_clk_fixed_factor dpll5_m2_d20_ck_data = {
	.parent = "dpll5_m2_ck",
	.div = 20,
	.mult = 1,
};

static struct ti_clk dpll5_m2_d20_ck = {
	.name = "dpll5_m2_d20_ck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &dpll5_m2_d20_ck_data,
};

static struct ti_clk_gate dpll4_m5x2_ck_omap36xx_data = {
	.parent = "dpll4_m5x2_mul_ck",
	.bit_shift = 0x1e,
	.reg = 0xd00,
	.module = TI_CLKM_CM,
	.flags = CLKF_HSDIV | CLKF_SET_RATE_PARENT | CLKF_SET_BIT_TO_DISABLE,
};

static struct ti_clk dpll4_m5x2_ck_omap36xx = {
	.name = "dpll4_m5x2_ck",
	.type = TI_CLK_GATE,
	.data = &dpll4_m5x2_ck_omap36xx_data,
	.patch = &dpll4_m5x2_ck,
};

static struct ti_clk_gate ssi_ssr_gate_fck_3430es2_data = {
	.parent = "corex2_fck",
	.bit_shift = 0,
	.reg = 0xa00,
	.module = TI_CLKM_CM,
	.flags = CLKF_NO_WAIT,
};

static struct ti_clk_gate uart1_ick_data = {
	.parent = "core_l4_ick",
	.bit_shift = 13,
	.reg = 0xa10,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk uart1_ick = {
	.name = "uart1_ick",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &uart1_ick_data,
};

static struct ti_clk_gate iva2_ck_data = {
	.parent = "dpll2_m2_ck",
	.bit_shift = 0,
	.reg = 0x0,
	.module = TI_CLKM_CM,
	.flags = CLKF_WAIT,
};

static struct ti_clk iva2_ck = {
	.name = "iva2_ck",
	.clkdm_name = "iva2_clkdm",
	.type = TI_CLK_GATE,
	.data = &iva2_ck_data,
};

static struct ti_clk_gate pka_ick_data = {
	.parent = "security_l3_ick",
	.bit_shift = 4,
	.reg = 0xa14,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk pka_ick = {
	.name = "pka_ick",
	.type = TI_CLK_GATE,
	.data = &pka_ick_data,
};

static struct ti_clk_gate gpt12_ick_data = {
	.parent = "wkup_l4_ick",
	.bit_shift = 1,
	.reg = 0xc10,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk gpt12_ick = {
	.name = "gpt12_ick",
	.clkdm_name = "wkup_clkdm",
	.type = TI_CLK_GATE,
	.data = &gpt12_ick_data,
};

static const char *mcbsp5_mux_fck_parents[] = {
	"core_96m_fck",
	"mcbsp_clks",
};

static struct ti_clk_mux mcbsp5_mux_fck_data = {
	.bit_shift = 4,
	.num_parents = ARRAY_SIZE(mcbsp5_mux_fck_parents),
	.reg = 0x2d8,
	.module = TI_CLKM_SCRM,
	.parents = mcbsp5_mux_fck_parents,
};

static struct ti_clk_composite mcbsp5_fck_data = {
	.mux = &mcbsp5_mux_fck_data,
	.gate = &mcbsp5_gate_fck_data,
};

static struct ti_clk mcbsp5_fck = {
	.name = "mcbsp5_fck",
	.type = TI_CLK_COMPOSITE,
	.data = &mcbsp5_fck_data,
};

static struct ti_clk_gate usbhost_48m_fck_data = {
	.parent = "omap_48m_fck",
	.bit_shift = 0,
	.reg = 0x1400,
	.module = TI_CLKM_CM,
	.flags = CLKF_DSS,
};

static struct ti_clk usbhost_48m_fck = {
	.name = "usbhost_48m_fck",
	.clkdm_name = "usbhost_clkdm",
	.type = TI_CLK_GATE,
	.data = &usbhost_48m_fck_data,
};

static struct ti_clk_gate des1_ick_data = {
	.parent = "security_l4_ick2",
	.bit_shift = 0,
	.reg = 0xa14,
	.module = TI_CLKM_CM,
	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk des1_ick = {
	.name = "des1_ick",
	.type = TI_CLK_GATE,
	.data = &des1_ick_data,
};

static struct ti_clk_gate sgx_gate_fck_data = {
	.parent = "core_ck",
	.bit_shift = 1,
	.reg = 0xb00,
	.module = TI_CLKM_CM,
};

static struct ti_clk_fixed_factor core_d4_ck_data = {
	.parent = "core_ck",
	.div = 4,
	.mult = 1,
};

static struct ti_clk core_d4_ck = {
	.name = "core_d4_ck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &core_d4_ck_data,
};

static struct ti_clk_fixed_factor omap_192m_alwon_fck_data = {
	.parent = "dpll4_m2x2_ck",
	.div = 1,
	.mult = 1,
};

static struct ti_clk omap_192m_alwon_fck = {
	.name = "omap_192m_alwon_fck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &omap_192m_alwon_fck_data,
};

static struct ti_clk_fixed_factor core_d2_ck_data = {
	.parent = "core_ck",
	.div = 2,
	.mult = 1,
};

static struct ti_clk core_d2_ck = {
	.name = "core_d2_ck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &core_d2_ck_data,
};

static struct ti_clk_fixed_factor corex2_d3_fck_data = {
	.parent = "corex2_fck",
	.div = 3,
	.mult = 1,
};

static struct ti_clk corex2_d3_fck = {
	.name = "corex2_d3_fck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &corex2_d3_fck_data,
};

static struct ti_clk_fixed_factor corex2_d5_fck_data = {
	.parent = "corex2_fck",
	.div = 5,
	.mult = 1,
};

static struct ti_clk corex2_d5_fck = {
	.name = "corex2_d5_fck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &corex2_d5_fck_data,
};

static const char *sgx_mux_fck_parents[] = {
	"core_d3_ck",
	"core_d4_ck",
	"core_d6_ck",
	"cm_96m_fck",
	"omap_192m_alwon_fck",
	"core_d2_ck",
	"corex2_d3_fck",
	"corex2_d5_fck",
};

static struct ti_clk_mux sgx_mux_fck_data = {
	.num_parents = ARRAY_SIZE(sgx_mux_fck_parents),
	.reg = 0xb40,
	.module = TI_CLKM_CM,
	.parents = sgx_mux_fck_parents,
};

static struct ti_clk_composite sgx_fck_data = {
	.mux = &sgx_mux_fck_data,
	.gate = &sgx_gate_fck_data,
};

static struct ti_clk sgx_fck = {
	.name = "sgx_fck",
	.type = TI_CLK_COMPOSITE,
	.data = &sgx_fck_data,
};

static struct ti_clk_gate mcspi1_fck_data = {
	.parent = "core_48m_fck",
	.bit_shift = 18,
	.reg = 0xa00,
	.module = TI_CLKM_CM,
	.flags = CLKF_WAIT,
};

static struct ti_clk mcspi1_fck = {
	.name = "mcspi1_fck",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &mcspi1_fck_data,
};

static struct ti_clk_gate mmchs2_fck_data = {
	.parent = "core_96m_fck",
	.bit_shift = 25,
	.reg = 0xa00,
	.module = TI_CLKM_CM,
	.flags = CLKF_WAIT,
};

static struct ti_clk mmchs2_fck = {
	.name = "mmchs2_fck",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &mmchs2_fck_data,
};

static struct ti_clk_gate mcspi2_fck_data = {
	.parent = "core_48m_fck",
	.bit_shift = 19,
	.reg = 0xa00,
	.module = TI_CLKM_CM,
	.flags = CLKF_WAIT,
};

static struct ti_clk mcspi2_fck = {
	.name = "mcspi2_fck",
	.clkdm_name = "core_l4_clkdm",
	.type = TI_CLK_GATE,
	.data = &mcspi2_fck_data,
};

static struct ti_clk_gate vpfe_fck_data = {
	.parent = "pclk_ck",
	.bit_shift = 10,
	.reg = 0x59c,
	.module = TI_CLKM_SCRM,
};

static struct ti_clk vpfe_fck = {
	.name = "vpfe_fck",
	.type = TI_CLK_GATE,
	.data = &vpfe_fck_data,
};

static struct ti_clk_gate gpt4_gate_fck_data = {
	.parent = "sys_ck",
	.bit_shift = 5,
	.reg = 0x1000,
	.module = TI_CLKM_CM,
};

static struct ti_clk_gate mcbsp1_gate_fck_data = {
	.parent = "mcbsp_clks",
	.bit_shift = 9,
	.reg = 0xa00,
	.module = TI_CLKM_CM,
};

static struct ti_clk_gate gpt5_gate_fck_data = {
	.parent = "sys_ck",
	.bit_shift = 6,
	.reg = 0x1000,
	.module = TI_CLKM_CM,
};

static const char *gpt5_mux_fck_parents[] = {
	"omap_32k_fck",
	"sys_ck",
};

static struct ti_clk_mux gpt5_mux_fck_data = {
	.bit_shift = 3,
	.num_parents = ARRAY_SIZE(gpt5_mux_fck_parents),
	.reg = 0x1040,
	.module = TI_CLKM_CM,
	.parents = gpt5_mux_fck_parents,
};

static struct ti_clk_composite gpt5_fck_data = {
	.mux = &gpt5_mux_fck_data,
	.gate = &gpt5_gate_fck_data,
};

static struct ti_clk gpt5_fck = {
	.name = "gpt5_fck",
	.type = TI_CLK_COMPOSITE,
2935 .data = &gpt5_fck_data,
2936};
2937
2938static struct ti_clk_gate ts_fck_data = {
2939 .parent = "omap_32k_fck",
2940 .bit_shift = 1,
2941 .reg = 0xa08,
2942 .module = TI_CLKM_CM,
2943};
2944
2945static struct ti_clk ts_fck = {
2946 .name = "ts_fck",
2947 .clkdm_name = "core_l4_clkdm",
2948 .type = TI_CLK_GATE,
2949 .data = &ts_fck_data,
2950};
2951
2952static struct ti_clk_fixed_factor wdt1_fck_data = {
2953 .parent = "secure_32k_fck",
2954 .div = 1,
2955 .mult = 1,
2956};
2957
2958static struct ti_clk wdt1_fck = {
2959 .name = "wdt1_fck",
2960 .type = TI_CLK_FIXED_FACTOR,
2961 .data = &wdt1_fck_data,
2962};
2963
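/*
 * OMAP36xx-specific HSDIV gate variants: each wrapper below carries a
 * .patch pointer to the generic OMAP34xx clock it supersedes, and
 * ti_clk_patch_legacy_clks() copies the 36xx definition over that
 * target before the shared tables are registered (see
 * omap36xx_clk_patches[] near the end of this file).
 */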
2964static struct ti_clk_gate dpll4_m6x2_ck_omap36xx_data = {
2965 .parent = "dpll4_m6x2_mul_ck",
2966 .bit_shift = 0x1f,
2967 .reg = 0xd00,
2968 .module = TI_CLKM_CM,
2969 .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
2970};
2971
2972static struct ti_clk dpll4_m6x2_ck_omap36xx = {
2973 .name = "dpll4_m6x2_ck",
2974 .type = TI_CLK_GATE,
2975 .data = &dpll4_m6x2_ck_omap36xx_data,
2976 .patch = &dpll4_m6x2_ck,
2977};
2978
2979static const char *gpt4_mux_fck_parents[] = {
2980 "omap_32k_fck",
2981 "sys_ck",
2982};
2983
2984static struct ti_clk_mux gpt4_mux_fck_data = {
2985 .bit_shift = 2,
2986 .num_parents = ARRAY_SIZE(gpt4_mux_fck_parents),
2987 .reg = 0x1040,
2988 .module = TI_CLKM_CM,
2989 .parents = gpt4_mux_fck_parents,
2990};
2991
2992static struct ti_clk_gate usbhost_ick_data = {
2993 .parent = "l4_ick",
2994 .bit_shift = 0,
2995 .reg = 0x1410,
2996 .module = TI_CLKM_CM,
2997 .flags = CLKF_DSS | CLKF_OMAP3 | CLKF_INTERFACE,
2998};
2999
3000static struct ti_clk usbhost_ick = {
3001 .name = "usbhost_ick",
3002 .clkdm_name = "usbhost_clkdm",
3003 .type = TI_CLK_GATE,
3004 .data = &usbhost_ick_data,
3005};
3006
3007static struct ti_clk_gate mcbsp2_ick_data = {
3008 .parent = "per_l4_ick",
3009 .bit_shift = 0,
3010 .reg = 0x1010,
3011 .module = TI_CLKM_CM,
3012 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3013};
3014
3015static struct ti_clk mcbsp2_ick = {
3016 .name = "mcbsp2_ick",
3017 .clkdm_name = "per_clkdm",
3018 .type = TI_CLK_GATE,
3019 .data = &mcbsp2_ick_data,
3020};
3021
3022static struct ti_clk_gate omapctrl_ick_data = {
3023 .parent = "core_l4_ick",
3024 .bit_shift = 6,
3025 .reg = 0xa10,
3026 .module = TI_CLKM_CM,
3027 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3028};
3029
3030static struct ti_clk omapctrl_ick = {
3031 .name = "omapctrl_ick",
3032 .clkdm_name = "core_l4_clkdm",
3033 .type = TI_CLK_GATE,
3034 .data = &omapctrl_ick_data,
3035};
3036
3037static struct ti_clk_fixed_factor omap_96m_d4_fck_data = {
3038 .parent = "omap_96m_fck",
3039 .div = 4,
3040 .mult = 1,
3041};
3042
3043static struct ti_clk omap_96m_d4_fck = {
3044 .name = "omap_96m_d4_fck",
3045 .type = TI_CLK_FIXED_FACTOR,
3046 .data = &omap_96m_d4_fck_data,
3047};
3048
3049static struct ti_clk_gate gpt6_ick_data = {
3050 .parent = "per_l4_ick",
3051 .bit_shift = 7,
3052 .reg = 0x1010,
3053 .module = TI_CLKM_CM,
3054 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3055};
3056
3057static struct ti_clk gpt6_ick = {
3058 .name = "gpt6_ick",
3059 .clkdm_name = "per_clkdm",
3060 .type = TI_CLK_GATE,
3061 .data = &gpt6_ick_data,
3062};
3063
3064static struct ti_clk_gate dpll3_m3x2_ck_omap36xx_data = {
3065 .parent = "dpll3_m3x2_mul_ck",
3066 .bit_shift = 0xc,
3067 .reg = 0xd00,
3068 .module = TI_CLKM_CM,
3069 .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
3070};
3071
3072static struct ti_clk dpll3_m3x2_ck_omap36xx = {
3073 .name = "dpll3_m3x2_ck",
3074 .type = TI_CLK_GATE,
3075 .data = &dpll3_m3x2_ck_omap36xx_data,
3076 .patch = &dpll3_m3x2_ck,
3077};
3078
3079static struct ti_clk_gate i2c3_ick_data = {
3080 .parent = "core_l4_ick",
3081 .bit_shift = 17,
3082 .reg = 0xa10,
3083 .module = TI_CLKM_CM,
3084 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3085};
3086
3087static struct ti_clk i2c3_ick = {
3088 .name = "i2c3_ick",
3089 .clkdm_name = "core_l4_clkdm",
3090 .type = TI_CLK_GATE,
3091 .data = &i2c3_ick_data,
3092};
3093
3094static struct ti_clk_gate gpio6_ick_data = {
3095 .parent = "per_l4_ick",
3096 .bit_shift = 17,
3097 .reg = 0x1010,
3098 .module = TI_CLKM_CM,
3099 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3100};
3101
3102static struct ti_clk gpio6_ick = {
3103 .name = "gpio6_ick",
3104 .clkdm_name = "per_clkdm",
3105 .type = TI_CLK_GATE,
3106 .data = &gpio6_ick_data,
3107};
3108
3109static struct ti_clk_gate mspro_ick_data = {
3110 .parent = "core_l4_ick",
3111 .bit_shift = 23,
3112 .reg = 0xa10,
3113 .module = TI_CLKM_CM,
3114 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3115};
3116
3117static struct ti_clk mspro_ick = {
3118 .name = "mspro_ick",
3119 .clkdm_name = "core_l4_clkdm",
3120 .type = TI_CLK_GATE,
3121 .data = &mspro_ick_data,
3122};
3123
3124static struct ti_clk_composite mcbsp1_fck_data = {
3125 .mux = &mcbsp1_mux_fck_data,
3126 .gate = &mcbsp1_gate_fck_data,
3127};
3128
3129static struct ti_clk mcbsp1_fck = {
3130 .name = "mcbsp1_fck",
3131 .type = TI_CLK_COMPOSITE,
3132 .data = &mcbsp1_fck_data,
3133};
3134
3135static struct ti_clk_gate gpt3_gate_fck_data = {
3136 .parent = "sys_ck",
3137 .bit_shift = 4,
3138 .reg = 0x1000,
3139 .module = TI_CLKM_CM,
3140};
3141
3142static struct ti_clk_fixed rmii_ck_data = {
3143 .frequency = 50000000,
3144};
3145
3146static struct ti_clk rmii_ck = {
3147 .name = "rmii_ck",
3148 .type = TI_CLK_FIXED,
3149 .data = &rmii_ck_data,
3150};
3151
3152static struct ti_clk_gate gpt6_gate_fck_data = {
3153 .parent = "sys_ck",
3154 .bit_shift = 7,
3155 .reg = 0x1000,
3156 .module = TI_CLKM_CM,
3157};
3158
3159static struct ti_clk_composite gpt6_fck_data = {
3160 .mux = &gpt6_mux_fck_data,
3161 .gate = &gpt6_gate_fck_data,
3162};
3163
3164static struct ti_clk gpt6_fck = {
3165 .name = "gpt6_fck",
3166 .type = TI_CLK_COMPOSITE,
3167 .data = &gpt6_fck_data,
3168};
3169
3170static struct ti_clk_fixed_factor dpll5_m2_d4_ck_data = {
3171 .parent = "dpll5_m2_ck",
3172 .div = 4,
3173 .mult = 1,
3174};
3175
3176static struct ti_clk dpll5_m2_d4_ck = {
3177 .name = "dpll5_m2_d4_ck",
3178 .type = TI_CLK_FIXED_FACTOR,
3179 .data = &dpll5_m2_d4_ck_data,
3180};
3181
3182static struct ti_clk_fixed_factor sys_d2_ck_data = {
3183 .parent = "sys_ck",
3184 .div = 2,
3185 .mult = 1,
3186};
3187
3188static struct ti_clk sys_d2_ck = {
3189 .name = "sys_d2_ck",
3190 .type = TI_CLK_FIXED_FACTOR,
3191 .data = &sys_d2_ck_data,
3192};
3193
3194static struct ti_clk_fixed_factor omap_96m_d2_fck_data = {
3195 .parent = "omap_96m_fck",
3196 .div = 2,
3197 .mult = 1,
3198};
3199
3200static struct ti_clk omap_96m_d2_fck = {
3201 .name = "omap_96m_d2_fck",
3202 .type = TI_CLK_FIXED_FACTOR,
3203 .data = &omap_96m_d2_fck_data,
3204};
3205
3206static struct ti_clk_fixed_factor dpll5_m2_d8_ck_data = {
3207 .parent = "dpll5_m2_ck",
3208 .div = 8,
3209 .mult = 1,
3210};
3211
3212static struct ti_clk dpll5_m2_d8_ck = {
3213 .name = "dpll5_m2_d8_ck",
3214 .type = TI_CLK_FIXED_FACTOR,
3215 .data = &dpll5_m2_d8_ck_data,
3216};
3217
3218static struct ti_clk_fixed_factor dpll5_m2_d16_ck_data = {
3219 .parent = "dpll5_m2_ck",
3220 .div = 16,
3221 .mult = 1,
3222};
3223
3224static struct ti_clk dpll5_m2_d16_ck = {
3225 .name = "dpll5_m2_d16_ck",
3226 .type = TI_CLK_FIXED_FACTOR,
3227 .data = &dpll5_m2_d16_ck_data,
3228};
3229
3230static const char *usim_mux_fck_parents[] = {
3231 "sys_ck",
3232 "sys_d2_ck",
3233 "omap_96m_d2_fck",
3234 "omap_96m_d4_fck",
3235 "omap_96m_d8_fck",
3236 "omap_96m_d10_fck",
3237 "dpll5_m2_d4_ck",
3238 "dpll5_m2_d8_ck",
3239 "dpll5_m2_d16_ck",
3240 "dpll5_m2_d20_ck",
3241};
3242
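/*
 * CLKF_INDEX_STARTS_AT_ONE: a register field value of 1 selects the
 * first parent above, so value 0 is never a valid USIM source select.
 */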
3243static struct ti_clk_mux usim_mux_fck_data = {
3244 .bit_shift = 3,
3245 .num_parents = ARRAY_SIZE(usim_mux_fck_parents),
3246 .reg = 0xc40,
3247 .module = TI_CLKM_CM,
3248 .parents = usim_mux_fck_parents,
3249 .flags = CLKF_INDEX_STARTS_AT_ONE,
3250};
3251
3252static struct ti_clk_composite usim_fck_data = {
3253 .mux = &usim_mux_fck_data,
3254 .gate = &usim_gate_fck_data,
3255};
3256
3257static struct ti_clk usim_fck = {
3258 .name = "usim_fck",
3259 .type = TI_CLK_COMPOSITE,
3260 .data = &usim_fck_data,
3261};
3262
3263static int ssi_ssr_div_fck_3430es2_divs[] = {
3264 0,
3265 1,
3266 2,
3267 3,
3268 4,
3269 0,
3270 6,
3271 0,
3272 8,
3273};
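/* Zero entries mark reserved selector values: the table is indexed by
 * the register field, so only /1, /2, /3, /4, /6 and /8 are valid SSI
 * SSR divisors on 3430 ES2+.
 */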
3274
3275static struct ti_clk_divider ssi_ssr_div_fck_3430es2_data = {
3276 .num_dividers = ARRAY_SIZE(ssi_ssr_div_fck_3430es2_divs),
3277 .parent = "corex2_fck",
3278 .bit_shift = 8,
3279 .dividers = ssi_ssr_div_fck_3430es2_divs,
3280 .reg = 0xa40,
3281 .module = TI_CLKM_CM,
3282};
3283
3284static struct ti_clk_composite ssi_ssr_fck_3430es2_data = {
3285 .gate = &ssi_ssr_gate_fck_3430es2_data,
3286 .divider = &ssi_ssr_div_fck_3430es2_data,
3287};
3288
3289static struct ti_clk ssi_ssr_fck_3430es2 = {
3290 .name = "ssi_ssr_fck",
3291 .type = TI_CLK_COMPOSITE,
3292 .data = &ssi_ssr_fck_3430es2_data,
3293};
3294
3295static struct ti_clk_gate dss1_alwon_fck_3430es1_data = {
3296 .parent = "dpll4_m4x2_ck",
3297 .bit_shift = 0,
3298 .reg = 0xe00,
3299 .module = TI_CLKM_CM,
3300 .flags = CLKF_SET_RATE_PARENT,
3301};
3302
3303static struct ti_clk dss1_alwon_fck_3430es1 = {
3304 .name = "dss1_alwon_fck",
3305 .clkdm_name = "dss_clkdm",
3306 .type = TI_CLK_GATE,
3307 .data = &dss1_alwon_fck_3430es1_data,
3308};
3309
3310static struct ti_clk_gate gpt3_ick_data = {
3311 .parent = "per_l4_ick",
3312 .bit_shift = 4,
3313 .reg = 0x1010,
3314 .module = TI_CLKM_CM,
3315 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3316};
3317
3318static struct ti_clk gpt3_ick = {
3319 .name = "gpt3_ick",
3320 .clkdm_name = "per_clkdm",
3321 .type = TI_CLK_GATE,
3322 .data = &gpt3_ick_data,
3323};
3324
3325static struct ti_clk_fixed_factor omap_12m_fck_data = {
3326 .parent = "omap_48m_fck",
3327 .div = 4,
3328 .mult = 1,
3329};
3330
3331static struct ti_clk omap_12m_fck = {
3332 .name = "omap_12m_fck",
3333 .type = TI_CLK_FIXED_FACTOR,
3334 .data = &omap_12m_fck_data,
3335};
3336
3337static struct ti_clk_fixed_factor core_12m_fck_data = {
3338 .parent = "omap_12m_fck",
3339 .div = 1,
3340 .mult = 1,
3341};
3342
3343static struct ti_clk core_12m_fck = {
3344 .name = "core_12m_fck",
3345 .type = TI_CLK_FIXED_FACTOR,
3346 .data = &core_12m_fck_data,
3347};
3348
3349static struct ti_clk_gate hdq_fck_data = {
3350 .parent = "core_12m_fck",
3351 .bit_shift = 22,
3352 .reg = 0xa00,
3353 .module = TI_CLKM_CM,
3354 .flags = CLKF_WAIT,
3355};
3356
3357static struct ti_clk hdq_fck = {
3358 .name = "hdq_fck",
3359 .clkdm_name = "core_l4_clkdm",
3360 .type = TI_CLK_GATE,
3361 .data = &hdq_fck_data,
3362};
3363
3364static struct ti_clk_gate usbtll_fck_data = {
3365 .parent = "dpll5_m2_ck",
3366 .bit_shift = 2,
3367 .reg = 0xa08,
3368 .module = TI_CLKM_CM,
3369 .flags = CLKF_WAIT,
3370};
3371
3372static struct ti_clk usbtll_fck = {
3373 .name = "usbtll_fck",
3374 .clkdm_name = "core_l4_clkdm",
3375 .type = TI_CLK_GATE,
3376 .data = &usbtll_fck_data,
3377};
3378
3379static struct ti_clk_gate hsotgusb_fck_am35xx_data = {
3380 .parent = "sys_ck",
3381 .bit_shift = 8,
3382 .reg = 0x59c,
3383 .module = TI_CLKM_SCRM,
3384};
3385
3386static struct ti_clk hsotgusb_fck_am35xx = {
3387 .name = "hsotgusb_fck_am35xx",
3388 .clkdm_name = "core_l3_clkdm",
3389 .type = TI_CLK_GATE,
3390 .data = &hsotgusb_fck_am35xx_data,
3391};
3392
3393static struct ti_clk_gate hsotgusb_ick_3430es2_data = {
3394 .parent = "core_l3_ick",
3395 .bit_shift = 4,
3396 .reg = 0xa10,
3397 .module = TI_CLKM_CM,
3398 .flags = CLKF_HSOTGUSB | CLKF_OMAP3 | CLKF_INTERFACE,
3399};
3400
3401static struct ti_clk hsotgusb_ick_3430es2 = {
3402 .name = "hsotgusb_ick_3430es2",
3403 .clkdm_name = "core_l3_clkdm",
3404 .type = TI_CLK_GATE,
3405 .data = &hsotgusb_ick_3430es2_data,
3406};
3407
3408static struct ti_clk_gate gfx_l3_ck_data = {
3409 .parent = "l3_ick",
3410 .bit_shift = 0,
3411 .reg = 0xb10,
3412 .module = TI_CLKM_CM,
3413 .flags = CLKF_WAIT,
3414};
3415
3416static struct ti_clk gfx_l3_ck = {
3417 .name = "gfx_l3_ck",
3418 .clkdm_name = "gfx_3430es1_clkdm",
3419 .type = TI_CLK_GATE,
3420 .data = &gfx_l3_ck_data,
3421};
3422
3423static struct ti_clk_fixed_factor gfx_l3_ick_data = {
3424 .parent = "gfx_l3_ck",
3425 .div = 1,
3426 .mult = 1,
3427};
3428
3429static struct ti_clk gfx_l3_ick = {
3430 .name = "gfx_l3_ick",
3431 .type = TI_CLK_FIXED_FACTOR,
3432 .data = &gfx_l3_ick_data,
3433};
3434
3435static struct ti_clk_gate mcbsp1_ick_data = {
3436 .parent = "core_l4_ick",
3437 .bit_shift = 9,
3438 .reg = 0xa10,
3439 .module = TI_CLKM_CM,
3440 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3441};
3442
3443static struct ti_clk mcbsp1_ick = {
3444 .name = "mcbsp1_ick",
3445 .clkdm_name = "core_l4_clkdm",
3446 .type = TI_CLK_GATE,
3447 .data = &mcbsp1_ick_data,
3448};
3449
3450static struct ti_clk_fixed_factor gpt12_fck_data = {
3451 .parent = "secure_32k_fck",
3452 .div = 1,
3453 .mult = 1,
3454};
3455
3456static struct ti_clk gpt12_fck = {
3457 .name = "gpt12_fck",
3458 .type = TI_CLK_FIXED_FACTOR,
3459 .data = &gpt12_fck_data,
3460};
3461
3462static struct ti_clk_gate gfx_cg2_ck_data = {
3463 .parent = "gfx_l3_fck",
3464 .bit_shift = 2,
3465 .reg = 0xb00,
3466 .module = TI_CLKM_CM,
3467 .flags = CLKF_WAIT,
3468};
3469
3470static struct ti_clk gfx_cg2_ck = {
3471 .name = "gfx_cg2_ck",
3472 .clkdm_name = "gfx_3430es1_clkdm",
3473 .type = TI_CLK_GATE,
3474 .data = &gfx_cg2_ck_data,
3475};
3476
3477static struct ti_clk_gate i2c2_ick_data = {
3478 .parent = "core_l4_ick",
3479 .bit_shift = 16,
3480 .reg = 0xa10,
3481 .module = TI_CLKM_CM,
3482 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3483};
3484
3485static struct ti_clk i2c2_ick = {
3486 .name = "i2c2_ick",
3487 .clkdm_name = "core_l4_clkdm",
3488 .type = TI_CLK_GATE,
3489 .data = &i2c2_ick_data,
3490};
3491
3492static struct ti_clk_gate gpio4_dbck_data = {
3493 .parent = "per_32k_alwon_fck",
3494 .bit_shift = 15,
3495 .reg = 0x1000,
3496 .module = TI_CLKM_CM,
3497};
3498
3499static struct ti_clk gpio4_dbck = {
3500 .name = "gpio4_dbck",
3501 .clkdm_name = "per_clkdm",
3502 .type = TI_CLK_GATE,
3503 .data = &gpio4_dbck_data,
3504};
3505
3506static struct ti_clk_gate i2c3_fck_data = {
3507 .parent = "core_96m_fck",
3508 .bit_shift = 17,
3509 .reg = 0xa00,
3510 .module = TI_CLKM_CM,
3511 .flags = CLKF_WAIT,
3512};
3513
3514static struct ti_clk i2c3_fck = {
3515 .name = "i2c3_fck",
3516 .clkdm_name = "core_l4_clkdm",
3517 .type = TI_CLK_GATE,
3518 .data = &i2c3_fck_data,
3519};
3520
3521static struct ti_clk_composite gpt3_fck_data = {
3522 .mux = &gpt3_mux_fck_data,
3523 .gate = &gpt3_gate_fck_data,
3524};
3525
3526static struct ti_clk gpt3_fck = {
3527 .name = "gpt3_fck",
3528 .type = TI_CLK_COMPOSITE,
3529 .data = &gpt3_fck_data,
3530};
3531
3532static struct ti_clk_gate i2c1_ick_data = {
3533 .parent = "core_l4_ick",
3534 .bit_shift = 15,
3535 .reg = 0xa10,
3536 .module = TI_CLKM_CM,
3537 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3538};
3539
3540static struct ti_clk i2c1_ick = {
3541 .name = "i2c1_ick",
3542 .clkdm_name = "core_l4_clkdm",
3543 .type = TI_CLK_GATE,
3544 .data = &i2c1_ick_data,
3545};
3546
3547static struct ti_clk_gate omap_32ksync_ick_data = {
3548 .parent = "wkup_l4_ick",
3549 .bit_shift = 2,
3550 .reg = 0xc10,
3551 .module = TI_CLKM_CM,
3552 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3553};
3554
3555static struct ti_clk omap_32ksync_ick = {
3556 .name = "omap_32ksync_ick",
3557 .clkdm_name = "wkup_clkdm",
3558 .type = TI_CLK_GATE,
3559 .data = &omap_32ksync_ick_data,
3560};
3561
3562static struct ti_clk_gate aes2_ick_data = {
3563 .parent = "core_l4_ick",
3564 .bit_shift = 28,
3565 .reg = 0xa10,
3566 .module = TI_CLKM_CM,
3567 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3568};
3569
3570static struct ti_clk aes2_ick = {
3571 .name = "aes2_ick",
3572 .clkdm_name = "core_l4_clkdm",
3573 .type = TI_CLK_GATE,
3574 .data = &aes2_ick_data,
3575};
3576
3577static const char *gpt8_mux_fck_parents[] = {
3578 "omap_32k_fck",
3579 "sys_ck",
3580};
3581
3582static struct ti_clk_mux gpt8_mux_fck_data = {
3583 .bit_shift = 6,
3584 .num_parents = ARRAY_SIZE(gpt8_mux_fck_parents),
3585 .reg = 0x1040,
3586 .module = TI_CLKM_CM,
3587 .parents = gpt8_mux_fck_parents,
3588};
3589
3590static struct ti_clk_composite gpt8_fck_data = {
3591 .mux = &gpt8_mux_fck_data,
3592 .gate = &gpt8_gate_fck_data,
3593};
3594
3595static struct ti_clk gpt8_fck = {
3596 .name = "gpt8_fck",
3597 .type = TI_CLK_COMPOSITE,
3598 .data = &gpt8_fck_data,
3599};
3600
3601static struct ti_clk_gate mcbsp4_gate_fck_data = {
3602 .parent = "mcbsp_clks",
3603 .bit_shift = 2,
3604 .reg = 0x1000,
3605 .module = TI_CLKM_CM,
3606};
3607
3608static struct ti_clk_composite mcbsp4_fck_data = {
3609 .mux = &mcbsp4_mux_fck_data,
3610 .gate = &mcbsp4_gate_fck_data,
3611};
3612
3613static struct ti_clk mcbsp4_fck = {
3614 .name = "mcbsp4_fck",
3615 .type = TI_CLK_COMPOSITE,
3616 .data = &mcbsp4_fck_data,
3617};
3618
3619static struct ti_clk_gate gpio2_dbck_data = {
3620 .parent = "per_32k_alwon_fck",
3621 .bit_shift = 13,
3622 .reg = 0x1000,
3623 .module = TI_CLKM_CM,
3624};
3625
3626static struct ti_clk gpio2_dbck = {
3627 .name = "gpio2_dbck",
3628 .clkdm_name = "per_clkdm",
3629 .type = TI_CLK_GATE,
3630 .data = &gpio2_dbck_data,
3631};
3632
3633static struct ti_clk_gate usbtll_ick_data = {
3634 .parent = "core_l4_ick",
3635 .bit_shift = 2,
3636 .reg = 0xa18,
3637 .module = TI_CLKM_CM,
3638 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3639};
3640
3641static struct ti_clk usbtll_ick = {
3642 .name = "usbtll_ick",
3643 .clkdm_name = "core_l4_clkdm",
3644 .type = TI_CLK_GATE,
3645 .data = &usbtll_ick_data,
3646};
3647
3648static struct ti_clk_gate mcspi4_ick_data = {
3649 .parent = "core_l4_ick",
3650 .bit_shift = 21,
3651 .reg = 0xa10,
3652 .module = TI_CLKM_CM,
3653 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3654};
3655
3656static struct ti_clk mcspi4_ick = {
3657 .name = "mcspi4_ick",
3658 .clkdm_name = "core_l4_clkdm",
3659 .type = TI_CLK_GATE,
3660 .data = &mcspi4_ick_data,
3661};
3662
3663static struct ti_clk_gate dss_96m_fck_data = {
3664 .parent = "omap_96m_fck",
3665 .bit_shift = 2,
3666 .reg = 0xe00,
3667 .module = TI_CLKM_CM,
3668};
3669
3670static struct ti_clk dss_96m_fck = {
3671 .name = "dss_96m_fck",
3672 .clkdm_name = "dss_clkdm",
3673 .type = TI_CLK_GATE,
3674 .data = &dss_96m_fck_data,
3675};
3676
3677static struct ti_clk_divider rm_ick_data = {
3678 .parent = "l4_ick",
3679 .bit_shift = 1,
3680 .max_div = 3,
3681 .reg = 0xc40,
3682 .module = TI_CLKM_CM,
3683 .flags = CLKF_INDEX_STARTS_AT_ONE,
3684};
3685
3686static struct ti_clk rm_ick = {
3687 .name = "rm_ick",
3688 .type = TI_CLK_DIVIDER,
3689 .data = &rm_ick_data,
3690};
3691
3692static struct ti_clk_gate hdq_ick_data = {
3693 .parent = "core_l4_ick",
3694 .bit_shift = 22,
3695 .reg = 0xa10,
3696 .module = TI_CLKM_CM,
3697 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3698};
3699
3700static struct ti_clk hdq_ick = {
3701 .name = "hdq_ick",
3702 .clkdm_name = "core_l4_clkdm",
3703 .type = TI_CLK_GATE,
3704 .data = &hdq_ick_data,
3705};
3706
3707static struct ti_clk_fixed_factor dpll3_x2_ck_data = {
3708 .parent = "dpll3_ck",
3709 .div = 1,
3710 .mult = 2,
3711};
3712
3713static struct ti_clk dpll3_x2_ck = {
3714 .name = "dpll3_x2_ck",
3715 .type = TI_CLK_FIXED_FACTOR,
3716 .data = &dpll3_x2_ck_data,
3717};
3718
3719static struct ti_clk_gate mad2d_ick_data = {
3720 .parent = "l3_ick",
3721 .bit_shift = 3,
3722 .reg = 0xa18,
3723 .module = TI_CLKM_CM,
3724 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3725};
3726
3727static struct ti_clk mad2d_ick = {
3728 .name = "mad2d_ick",
3729 .clkdm_name = "d2d_clkdm",
3730 .type = TI_CLK_GATE,
3731 .data = &mad2d_ick_data,
3732};
3733
3734static struct ti_clk_gate fshostusb_fck_data = {
3735 .parent = "core_48m_fck",
3736 .bit_shift = 5,
3737 .reg = 0xa00,
3738 .module = TI_CLKM_CM,
3739 .flags = CLKF_WAIT,
3740};
3741
3742static struct ti_clk fshostusb_fck = {
3743 .name = "fshostusb_fck",
3744 .clkdm_name = "core_l4_clkdm",
3745 .type = TI_CLK_GATE,
3746 .data = &fshostusb_fck_data,
3747};
3748
3749static struct ti_clk_gate sr1_fck_data = {
3750 .parent = "sys_ck",
3751 .bit_shift = 6,
3752 .reg = 0xc00,
3753 .module = TI_CLKM_CM,
3754 .flags = CLKF_WAIT,
3755};
3756
3757static struct ti_clk sr1_fck = {
3758 .name = "sr1_fck",
3759 .clkdm_name = "wkup_clkdm",
3760 .type = TI_CLK_GATE,
3761 .data = &sr1_fck_data,
3762};
3763
3764static struct ti_clk_gate des2_ick_data = {
3765 .parent = "core_l4_ick",
3766 .bit_shift = 26,
3767 .reg = 0xa10,
3768 .module = TI_CLKM_CM,
3769 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3770};
3771
3772static struct ti_clk des2_ick = {
3773 .name = "des2_ick",
3774 .clkdm_name = "core_l4_clkdm",
3775 .type = TI_CLK_GATE,
3776 .data = &des2_ick_data,
3777};
3778
3779static struct ti_clk_gate sdrc_ick_data = {
3780 .parent = "core_l3_ick",
3781 .bit_shift = 1,
3782 .reg = 0xa10,
3783 .module = TI_CLKM_CM,
3784 .flags = CLKF_WAIT,
3785};
3786
3787static struct ti_clk sdrc_ick = {
3788 .name = "sdrc_ick",
3789 .clkdm_name = "core_l3_clkdm",
3790 .type = TI_CLK_GATE,
3791 .data = &sdrc_ick_data,
3792};
3793
3794static struct ti_clk_composite gpt4_fck_data = {
3795 .mux = &gpt4_mux_fck_data,
3796 .gate = &gpt4_gate_fck_data,
3797};
3798
3799static struct ti_clk gpt4_fck = {
3800 .name = "gpt4_fck",
3801 .type = TI_CLK_COMPOSITE,
3802 .data = &gpt4_fck_data,
3803};
3804
3805static struct ti_clk_gate dpll4_m3x2_ck_omap36xx_data = {
3806 .parent = "dpll4_m3x2_mul_ck",
3807 .bit_shift = 0x1c,
3808 .reg = 0xd00,
3809 .module = TI_CLKM_CM,
3810 .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
3811};
3812
3813static struct ti_clk dpll4_m3x2_ck_omap36xx = {
3814 .name = "dpll4_m3x2_ck",
3815 .type = TI_CLK_GATE,
3816 .data = &dpll4_m3x2_ck_omap36xx_data,
3817 .patch = &dpll4_m3x2_ck,
3818};
3819
3820static struct ti_clk_gate cpefuse_fck_data = {
3821 .parent = "sys_ck",
3822 .bit_shift = 0,
3823 .reg = 0xa08,
3824 .module = TI_CLKM_CM,
3825};
3826
3827static struct ti_clk cpefuse_fck = {
3828 .name = "cpefuse_fck",
3829 .clkdm_name = "core_l4_clkdm",
3830 .type = TI_CLK_GATE,
3831 .data = &cpefuse_fck_data,
3832};
3833
3834static struct ti_clk_gate mcspi3_ick_data = {
3835 .parent = "core_l4_ick",
3836 .bit_shift = 20,
3837 .reg = 0xa10,
3838 .module = TI_CLKM_CM,
3839 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3840};
3841
3842static struct ti_clk mcspi3_ick = {
3843 .name = "mcspi3_ick",
3844 .clkdm_name = "core_l4_clkdm",
3845 .type = TI_CLK_GATE,
3846 .data = &mcspi3_ick_data,
3847};
3848
3849static struct ti_clk_fixed_factor ssi_sst_fck_3430es2_data = {
3850 .parent = "ssi_ssr_fck",
3851 .div = 2,
3852 .mult = 1,
3853};
3854
3855static struct ti_clk ssi_sst_fck_3430es2 = {
3856 .name = "ssi_sst_fck",
3857 .type = TI_CLK_FIXED_FACTOR,
3858 .data = &ssi_sst_fck_3430es2_data,
3859};
3860
3861static struct ti_clk_gate gpio1_dbck_data = {
3862 .parent = "wkup_32k_fck",
3863 .bit_shift = 3,
3864 .reg = 0xc00,
3865 .module = TI_CLKM_CM,
3866};
3867
3868static struct ti_clk gpio1_dbck = {
3869 .name = "gpio1_dbck",
3870 .clkdm_name = "wkup_clkdm",
3871 .type = TI_CLK_GATE,
3872 .data = &gpio1_dbck_data,
3873};
3874
3875static struct ti_clk_gate gpt4_ick_data = {
3876 .parent = "per_l4_ick",
3877 .bit_shift = 5,
3878 .reg = 0x1010,
3879 .module = TI_CLKM_CM,
3880 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3881};
3882
3883static struct ti_clk gpt4_ick = {
3884 .name = "gpt4_ick",
3885 .clkdm_name = "per_clkdm",
3886 .type = TI_CLK_GATE,
3887 .data = &gpt4_ick_data,
3888};
3889
3890static struct ti_clk_gate gpt2_ick_data = {
3891 .parent = "per_l4_ick",
3892 .bit_shift = 3,
3893 .reg = 0x1010,
3894 .module = TI_CLKM_CM,
3895 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3896};
3897
3898static struct ti_clk gpt2_ick = {
3899 .name = "gpt2_ick",
3900 .clkdm_name = "per_clkdm",
3901 .type = TI_CLK_GATE,
3902 .data = &gpt2_ick_data,
3903};
3904
3905static struct ti_clk_gate mmchs1_fck_data = {
3906 .parent = "core_96m_fck",
3907 .bit_shift = 24,
3908 .reg = 0xa00,
3909 .module = TI_CLKM_CM,
3910 .flags = CLKF_WAIT,
3911};
3912
3913static struct ti_clk mmchs1_fck = {
3914 .name = "mmchs1_fck",
3915 .clkdm_name = "core_l4_clkdm",
3916 .type = TI_CLK_GATE,
3917 .data = &mmchs1_fck_data,
3918};
3919
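/*
 * Zero-rate placeholder registered as "apb_pclk" in omap3xxx_clks[]
 * below, so AMBA bus code finds the clock it expects even though OMAP3
 * has no real gate behind it.
 */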
3920static struct ti_clk_fixed dummy_apb_pclk_data = {
3921 .frequency = 0x0,
3922};
3923
3924static struct ti_clk dummy_apb_pclk = {
3925 .name = "dummy_apb_pclk",
3926 .type = TI_CLK_FIXED,
3927 .data = &dummy_apb_pclk_data,
3928};
3929
3930static struct ti_clk_gate gpio6_dbck_data = {
3931 .parent = "per_32k_alwon_fck",
3932 .bit_shift = 17,
3933 .reg = 0x1000,
3934 .module = TI_CLKM_CM,
3935};
3936
3937static struct ti_clk gpio6_dbck = {
3938 .name = "gpio6_dbck",
3939 .clkdm_name = "per_clkdm",
3940 .type = TI_CLK_GATE,
3941 .data = &gpio6_dbck_data,
3942};
3943
3944static struct ti_clk_gate uart2_ick_data = {
3945 .parent = "core_l4_ick",
3946 .bit_shift = 14,
3947 .reg = 0xa10,
3948 .module = TI_CLKM_CM,
3949 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3950};
3951
3952static struct ti_clk uart2_ick = {
3953 .name = "uart2_ick",
3954 .clkdm_name = "core_l4_clkdm",
3955 .type = TI_CLK_GATE,
3956 .data = &uart2_ick_data,
3957};
3958
3959static struct ti_clk_fixed_factor dpll4_x2_ck_data = {
3960 .parent = "dpll4_ck",
3961 .div = 1,
3962 .mult = 2,
3963};
3964
3965static struct ti_clk dpll4_x2_ck = {
3966 .name = "dpll4_x2_ck",
3967 .type = TI_CLK_FIXED_FACTOR,
3968 .data = &dpll4_x2_ck_data,
3969};
3970
3971static struct ti_clk_gate gpt7_ick_data = {
3972 .parent = "per_l4_ick",
3973 .bit_shift = 8,
3974 .reg = 0x1010,
3975 .module = TI_CLKM_CM,
3976 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
3977};
3978
3979static struct ti_clk gpt7_ick = {
3980 .name = "gpt7_ick",
3981 .clkdm_name = "per_clkdm",
3982 .type = TI_CLK_GATE,
3983 .data = &gpt7_ick_data,
3984};
3985
3986static struct ti_clk_gate dss_tv_fck_data = {
3987 .parent = "omap_54m_fck",
3988 .bit_shift = 2,
3989 .reg = 0xe00,
3990 .module = TI_CLKM_CM,
3991};
3992
3993static struct ti_clk dss_tv_fck = {
3994 .name = "dss_tv_fck",
3995 .clkdm_name = "dss_clkdm",
3996 .type = TI_CLK_GATE,
3997 .data = &dss_tv_fck_data,
3998};
3999
4000static struct ti_clk_gate mcbsp5_ick_data = {
4001 .parent = "core_l4_ick",
4002 .bit_shift = 10,
4003 .reg = 0xa10,
4004 .module = TI_CLKM_CM,
4005 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
4006};
4007
4008static struct ti_clk mcbsp5_ick = {
4009 .name = "mcbsp5_ick",
4010 .clkdm_name = "core_l4_clkdm",
4011 .type = TI_CLK_GATE,
4012 .data = &mcbsp5_ick_data,
4013};
4014
4015static struct ti_clk_gate mcspi1_ick_data = {
4016 .parent = "core_l4_ick",
4017 .bit_shift = 18,
4018 .reg = 0xa10,
4019 .module = TI_CLKM_CM,
4020 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
4021};
4022
4023static struct ti_clk mcspi1_ick = {
4024 .name = "mcspi1_ick",
4025 .clkdm_name = "core_l4_clkdm",
4026 .type = TI_CLK_GATE,
4027 .data = &mcspi1_ick_data,
4028};
4029
4030static struct ti_clk_gate d2d_26m_fck_data = {
4031 .parent = "sys_ck",
4032 .bit_shift = 3,
4033 .reg = 0xa00,
4034 .module = TI_CLKM_CM,
4035 .flags = CLKF_WAIT,
4036};
4037
4038static struct ti_clk d2d_26m_fck = {
4039 .name = "d2d_26m_fck",
4040 .clkdm_name = "d2d_clkdm",
4041 .type = TI_CLK_GATE,
4042 .data = &d2d_26m_fck_data,
4043};
4044
4045static struct ti_clk_gate wdt3_ick_data = {
4046 .parent = "per_l4_ick",
4047 .bit_shift = 12,
4048 .reg = 0x1010,
4049 .module = TI_CLKM_CM,
4050 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
4051};
4052
4053static struct ti_clk wdt3_ick = {
4054 .name = "wdt3_ick",
4055 .clkdm_name = "per_clkdm",
4056 .type = TI_CLK_GATE,
4057 .data = &wdt3_ick_data,
4058};
4059
4060static struct ti_clk_divider pclkx2_fck_data = {
4061 .parent = "emu_src_ck",
4062 .bit_shift = 6,
4063 .max_div = 3,
4064 .reg = 0x1140,
4065 .module = TI_CLKM_CM,
4066 .flags = CLKF_INDEX_STARTS_AT_ONE,
4067};
4068
4069static struct ti_clk pclkx2_fck = {
4070 .name = "pclkx2_fck",
4071 .type = TI_CLK_DIVIDER,
4072 .data = &pclkx2_fck_data,
4073};
4074
4075static struct ti_clk_gate sha12_ick_data = {
4076 .parent = "core_l4_ick",
4077 .bit_shift = 27,
4078 .reg = 0xa10,
4079 .module = TI_CLKM_CM,
4080 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
4081};
4082
4083static struct ti_clk sha12_ick = {
4084 .name = "sha12_ick",
4085 .clkdm_name = "core_l4_clkdm",
4086 .type = TI_CLK_GATE,
4087 .data = &sha12_ick_data,
4088};
4089
4090static struct ti_clk_gate emac_fck_data = {
4091 .parent = "rmii_ck",
4092 .bit_shift = 9,
4093 .reg = 0x59c,
4094 .module = TI_CLKM_SCRM,
4095};
4096
4097static struct ti_clk emac_fck = {
4098 .name = "emac_fck",
4099 .type = TI_CLK_GATE,
4100 .data = &emac_fck_data,
4101};
4102
4103static struct ti_clk_composite gpt10_fck_data = {
4104 .mux = &gpt10_mux_fck_data,
4105 .gate = &gpt10_gate_fck_data,
4106};
4107
4108static struct ti_clk gpt10_fck = {
4109 .name = "gpt10_fck",
4110 .type = TI_CLK_COMPOSITE,
4111 .data = &gpt10_fck_data,
4112};
4113
4114static struct ti_clk_gate wdt2_fck_data = {
4115 .parent = "wkup_32k_fck",
4116 .bit_shift = 5,
4117 .reg = 0xc00,
4118 .module = TI_CLKM_CM,
4119 .flags = CLKF_WAIT,
4120};
4121
4122static struct ti_clk wdt2_fck = {
4123 .name = "wdt2_fck",
4124 .clkdm_name = "wkup_clkdm",
4125 .type = TI_CLK_GATE,
4126 .data = &wdt2_fck_data,
4127};
4128
4129static struct ti_clk_gate cam_ick_data = {
4130 .parent = "l4_ick",
4131 .bit_shift = 0,
4132 .reg = 0xf10,
4133 .module = TI_CLKM_CM,
4134 .flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
4135};
4136
4137static struct ti_clk cam_ick = {
4138 .name = "cam_ick",
4139 .clkdm_name = "cam_clkdm",
4140 .type = TI_CLK_GATE,
4141 .data = &cam_ick_data,
4142};
4143
4144static struct ti_clk_gate ssi_ick_3430es2_data = {
4145 .parent = "ssi_l4_ick",
4146 .bit_shift = 0,
4147 .reg = 0xa10,
4148 .module = TI_CLKM_CM,
4149 .flags = CLKF_SSI | CLKF_OMAP3 | CLKF_INTERFACE,
4150};
4151
4152static struct ti_clk ssi_ick_3430es2 = {
4153 .name = "ssi_ick",
4154 .clkdm_name = "core_l4_clkdm",
4155 .type = TI_CLK_GATE,
4156 .data = &ssi_ick_3430es2_data,
4157};
4158
4159static struct ti_clk_gate gpio4_ick_data = {
4160 .parent = "per_l4_ick",
4161 .bit_shift = 15,
4162 .reg = 0x1010,
4163 .module = TI_CLKM_CM,
4164 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
4165};
4166
4167static struct ti_clk gpio4_ick = {
4168 .name = "gpio4_ick",
4169 .clkdm_name = "per_clkdm",
4170 .type = TI_CLK_GATE,
4171 .data = &gpio4_ick_data,
4172};
4173
4174static struct ti_clk_gate wdt1_ick_data = {
4175 .parent = "wkup_l4_ick",
4176 .bit_shift = 4,
4177 .reg = 0xc10,
4178 .module = TI_CLKM_CM,
4179 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
4180};
4181
4182static struct ti_clk wdt1_ick = {
4183 .name = "wdt1_ick",
4184 .clkdm_name = "wkup_clkdm",
4185 .type = TI_CLK_GATE,
4186 .data = &wdt1_ick_data,
4187};
4188
4189static struct ti_clk_gate rng_ick_data = {
4190 .parent = "security_l4_ick2",
4191 .bit_shift = 2,
4192 .reg = 0xa14,
4193 .module = TI_CLKM_CM,
4194 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
4195};
4196
4197static struct ti_clk rng_ick = {
4198 .name = "rng_ick",
4199 .type = TI_CLK_GATE,
4200 .data = &rng_ick_data,
4201};
4202
4203static struct ti_clk_gate icr_ick_data = {
4204 .parent = "core_l4_ick",
4205 .bit_shift = 29,
4206 .reg = 0xa10,
4207 .module = TI_CLKM_CM,
4208 .flags = CLKF_OMAP3 | CLKF_INTERFACE,
4209};
4210
4211static struct ti_clk icr_ick = {
4212 .name = "icr_ick",
4213 .clkdm_name = "core_l4_clkdm",
4214 .type = TI_CLK_GATE,
4215 .data = &icr_ick_data,
4216};
4217
4218static struct ti_clk_gate sgx_ick_data = {
4219 .parent = "l3_ick",
4220 .bit_shift = 0,
4221 .reg = 0xb10,
4222 .module = TI_CLKM_CM,
4223 .flags = CLKF_WAIT,
4224};
4225
4226static struct ti_clk sgx_ick = {
4227 .name = "sgx_ick",
4228 .clkdm_name = "sgx_clkdm",
4229 .type = TI_CLK_GATE,
4230 .data = &sgx_ick_data,
4231};
4232
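/*
 * CLKF_INDEX_POWER_OF_TWO: the divider field holds log2 of the
 * divisor, so max_div = 64 corresponds to field values 0..6.
 */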
4233static struct ti_clk_divider sys_clkout2_data = {
4234 .parent = "clkout2_src_ck",
4235 .bit_shift = 3,
4236 .max_div = 64,
4237 .reg = 0xd70,
4238 .module = TI_CLKM_CM,
4239 .flags = CLKF_INDEX_POWER_OF_TWO,
4240};
4241
4242static struct ti_clk sys_clkout2 = {
4243 .name = "sys_clkout2",
4244 .type = TI_CLK_DIVIDER,
4245 .data = &sys_clkout2_data,
4246};
4247
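/*
 * The tables below group clkdev aliases by the SoC families that share
 * them; each CLK(dev, con, clk) entry binds a (device, connection-id)
 * lookup to one of the ti_clk definitions above.  The per-SoC init
 * functions at the end of the file register every table that applies.
 */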
4248static struct ti_clk_alias omap34xx_omap36xx_clks[] = {
4249 CLK(NULL, "security_l4_ick2", &security_l4_ick2),
4250 CLK(NULL, "aes1_ick", &aes1_ick),
4251 CLK("omap_rng", "ick", &rng_ick),
4252 CLK("omap3-rom-rng", "ick", &rng_ick),
4253 CLK(NULL, "sha11_ick", &sha11_ick),
4254 CLK(NULL, "des1_ick", &des1_ick),
4255 CLK(NULL, "cam_mclk", &cam_mclk),
4256 CLK(NULL, "cam_ick", &cam_ick),
4257 CLK(NULL, "csi2_96m_fck", &csi2_96m_fck),
4258 CLK(NULL, "security_l3_ick", &security_l3_ick),
4259 CLK(NULL, "pka_ick", &pka_ick),
4260 CLK(NULL, "icr_ick", &icr_ick),
4261 CLK(NULL, "des2_ick", &des2_ick),
4262 CLK(NULL, "mspro_ick", &mspro_ick),
4263 CLK(NULL, "mailboxes_ick", &mailboxes_ick),
4264 CLK(NULL, "ssi_l4_ick", &ssi_l4_ick),
4265 CLK(NULL, "sr1_fck", &sr1_fck),
4266 CLK(NULL, "sr2_fck", &sr2_fck),
4267 CLK(NULL, "sr_l4_ick", &sr_l4_ick),
4268 CLK(NULL, "dpll2_fck", &dpll2_fck),
4269 CLK(NULL, "dpll2_ck", &dpll2_ck),
4270 CLK(NULL, "dpll2_m2_ck", &dpll2_m2_ck),
4271 CLK(NULL, "iva2_ck", &iva2_ck),
4272 CLK(NULL, "modem_fck", &modem_fck),
4273 CLK(NULL, "sad2d_ick", &sad2d_ick),
4274 CLK(NULL, "mad2d_ick", &mad2d_ick),
4275 CLK(NULL, "mspro_fck", &mspro_fck),
4276 { NULL },
4277};
4278
4279static struct ti_clk_alias omap36xx_omap3430es2plus_clks[] = {
4280 CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es2),
4281 CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es2),
4282 CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es2),
4283 CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es2),
4284 CLK(NULL, "ssi_ick", &ssi_ick_3430es2),
4285 CLK(NULL, "sys_d2_ck", &sys_d2_ck),
4286 CLK(NULL, "omap_96m_d2_fck", &omap_96m_d2_fck),
4287 CLK(NULL, "omap_96m_d4_fck", &omap_96m_d4_fck),
4288 CLK(NULL, "omap_96m_d8_fck", &omap_96m_d8_fck),
4289 CLK(NULL, "omap_96m_d10_fck", &omap_96m_d10_fck),
4290 CLK(NULL, "dpll5_m2_d4_ck", &dpll5_m2_d4_ck),
4291 CLK(NULL, "dpll5_m2_d8_ck", &dpll5_m2_d8_ck),
4292 CLK(NULL, "dpll5_m2_d16_ck", &dpll5_m2_d16_ck),
4293 CLK(NULL, "dpll5_m2_d20_ck", &dpll5_m2_d20_ck),
4294 CLK(NULL, "usim_fck", &usim_fck),
4295 CLK(NULL, "usim_ick", &usim_ick),
4296 { NULL },
4297};
4298
4299static struct ti_clk_alias omap3xxx_clks[] = {
4300 CLK(NULL, "apb_pclk", &dummy_apb_pclk),
4301 CLK(NULL, "omap_32k_fck", &omap_32k_fck),
4302 CLK(NULL, "virt_12m_ck", &virt_12m_ck),
4303 CLK(NULL, "virt_13m_ck", &virt_13m_ck),
4304 CLK(NULL, "virt_19200000_ck", &virt_19200000_ck),
4305 CLK(NULL, "virt_26000000_ck", &virt_26000000_ck),
4306 CLK(NULL, "virt_38_4m_ck", &virt_38_4m_ck),
4307 CLK(NULL, "virt_16_8m_ck", &virt_16_8m_ck),
4308 CLK(NULL, "osc_sys_ck", &osc_sys_ck),
4309 CLK("twl", "fck", &osc_sys_ck),
4310 CLK(NULL, "sys_ck", &sys_ck),
4311 CLK(NULL, "timer_sys_ck", &sys_ck),
4312 CLK(NULL, "dpll4_ck", &dpll4_ck),
4313 CLK(NULL, "dpll4_m2_ck", &dpll4_m2_ck),
4314 CLK(NULL, "dpll4_m2x2_mul_ck", &dpll4_m2x2_mul_ck),
4315 CLK(NULL, "dpll4_m2x2_ck", &dpll4_m2x2_ck),
4316 CLK(NULL, "omap_96m_alwon_fck", &omap_96m_alwon_fck),
4317 CLK(NULL, "dpll3_ck", &dpll3_ck),
4318 CLK(NULL, "dpll3_m3_ck", &dpll3_m3_ck),
4319 CLK(NULL, "dpll3_m3x2_mul_ck", &dpll3_m3x2_mul_ck),
4320 CLK(NULL, "dpll3_m3x2_ck", &dpll3_m3x2_ck),
4321 CLK("etb", "emu_core_alwon_ck", &emu_core_alwon_ck),
4322 CLK(NULL, "sys_altclk", &sys_altclk),
4323 CLK(NULL, "mcbsp_clks", &mcbsp_clks),
4324 CLK(NULL, "sys_clkout1", &sys_clkout1),
4325 CLK(NULL, "dpll3_m2_ck", &dpll3_m2_ck),
4326 CLK(NULL, "core_ck", &core_ck),
4327 CLK(NULL, "dpll1_fck", &dpll1_fck),
4328 CLK(NULL, "dpll1_ck", &dpll1_ck),
4329 CLK(NULL, "cpufreq_ck", &dpll1_ck),
4330 CLK(NULL, "dpll1_x2_ck", &dpll1_x2_ck),
4331 CLK(NULL, "dpll1_x2m2_ck", &dpll1_x2m2_ck),
4332 CLK(NULL, "dpll3_x2_ck", &dpll3_x2_ck),
4333 CLK(NULL, "dpll3_m2x2_ck", &dpll3_m2x2_ck),
4334 CLK(NULL, "dpll4_x2_ck", &dpll4_x2_ck),
4335 CLK(NULL, "cm_96m_fck", &cm_96m_fck),
4336 CLK(NULL, "omap_96m_fck", &omap_96m_fck),
4337 CLK(NULL, "dpll4_m3_ck", &dpll4_m3_ck),
4338 CLK(NULL, "dpll4_m3x2_mul_ck", &dpll4_m3x2_mul_ck),
4339 CLK(NULL, "dpll4_m3x2_ck", &dpll4_m3x2_ck),
4340 CLK(NULL, "omap_54m_fck", &omap_54m_fck),
4341 CLK(NULL, "cm_96m_d2_fck", &cm_96m_d2_fck),
4342 CLK(NULL, "omap_48m_fck", &omap_48m_fck),
4343 CLK(NULL, "omap_12m_fck", &omap_12m_fck),
4344 CLK(NULL, "dpll4_m4_ck", &dpll4_m4_ck),
4345 CLK(NULL, "dpll4_m4x2_mul_ck", &dpll4_m4x2_mul_ck),
4346 CLK(NULL, "dpll4_m4x2_ck", &dpll4_m4x2_ck),
4347 CLK(NULL, "dpll4_m5_ck", &dpll4_m5_ck),
4348 CLK(NULL, "dpll4_m5x2_mul_ck", &dpll4_m5x2_mul_ck),
4349 CLK(NULL, "dpll4_m5x2_ck", &dpll4_m5x2_ck),
4350 CLK(NULL, "dpll4_m6_ck", &dpll4_m6_ck),
4351 CLK(NULL, "dpll4_m6x2_mul_ck", &dpll4_m6x2_mul_ck),
4352 CLK(NULL, "dpll4_m6x2_ck", &dpll4_m6x2_ck),
4353 CLK("etb", "emu_per_alwon_ck", &emu_per_alwon_ck),
4354 CLK(NULL, "clkout2_src_ck", &clkout2_src_ck),
4355 CLK(NULL, "sys_clkout2", &sys_clkout2),
4356 CLK(NULL, "corex2_fck", &corex2_fck),
4357 CLK(NULL, "mpu_ck", &mpu_ck),
4358 CLK(NULL, "arm_fck", &arm_fck),
4359 CLK("etb", "emu_mpu_alwon_ck", &emu_mpu_alwon_ck),
4360 CLK(NULL, "l3_ick", &l3_ick),
4361 CLK(NULL, "l4_ick", &l4_ick),
4362 CLK(NULL, "rm_ick", &rm_ick),
4363 CLK(NULL, "timer_32k_ck", &omap_32k_fck),
4364 CLK(NULL, "gpt10_fck", &gpt10_fck),
4365 CLK(NULL, "gpt11_fck", &gpt11_fck),
4366 CLK(NULL, "core_96m_fck", &core_96m_fck),
4367 CLK(NULL, "mmchs2_fck", &mmchs2_fck),
4368 CLK(NULL, "mmchs1_fck", &mmchs1_fck),
4369 CLK(NULL, "i2c3_fck", &i2c3_fck),
4370 CLK(NULL, "i2c2_fck", &i2c2_fck),
4371 CLK(NULL, "i2c1_fck", &i2c1_fck),
4372 CLK(NULL, "mcbsp5_fck", &mcbsp5_fck),
4373 CLK(NULL, "mcbsp1_fck", &mcbsp1_fck),
4374 CLK(NULL, "core_48m_fck", &core_48m_fck),
4375 CLK(NULL, "mcspi4_fck", &mcspi4_fck),
4376 CLK(NULL, "mcspi3_fck", &mcspi3_fck),
4377 CLK(NULL, "mcspi2_fck", &mcspi2_fck),
4378 CLK(NULL, "mcspi1_fck", &mcspi1_fck),
4379 CLK(NULL, "uart2_fck", &uart2_fck),
4380 CLK(NULL, "uart1_fck", &uart1_fck),
4381 CLK(NULL, "core_12m_fck", &core_12m_fck),
4382 CLK("omap_hdq.0", "fck", &hdq_fck),
4383 CLK(NULL, "hdq_fck", &hdq_fck),
4384 CLK(NULL, "core_l3_ick", &core_l3_ick),
4385 CLK(NULL, "sdrc_ick", &sdrc_ick),
4386 CLK(NULL, "gpmc_fck", &gpmc_fck),
4387 CLK(NULL, "core_l4_ick", &core_l4_ick),
4388 CLK("omap_hsmmc.1", "ick", &mmchs2_ick),
4389 CLK("omap_hsmmc.0", "ick", &mmchs1_ick),
4390 CLK(NULL, "mmchs2_ick", &mmchs2_ick),
4391 CLK(NULL, "mmchs1_ick", &mmchs1_ick),
4392 CLK("omap_hdq.0", "ick", &hdq_ick),
4393 CLK(NULL, "hdq_ick", &hdq_ick),
4394 CLK("omap2_mcspi.4", "ick", &mcspi4_ick),
4395 CLK("omap2_mcspi.3", "ick", &mcspi3_ick),
4396 CLK("omap2_mcspi.2", "ick", &mcspi2_ick),
4397 CLK("omap2_mcspi.1", "ick", &mcspi1_ick),
4398 CLK(NULL, "mcspi4_ick", &mcspi4_ick),
4399 CLK(NULL, "mcspi3_ick", &mcspi3_ick),
4400 CLK(NULL, "mcspi2_ick", &mcspi2_ick),
4401 CLK(NULL, "mcspi1_ick", &mcspi1_ick),
4402 CLK("omap_i2c.3", "ick", &i2c3_ick),
4403 CLK("omap_i2c.2", "ick", &i2c2_ick),
4404 CLK("omap_i2c.1", "ick", &i2c1_ick),
4405 CLK(NULL, "i2c3_ick", &i2c3_ick),
4406 CLK(NULL, "i2c2_ick", &i2c2_ick),
4407 CLK(NULL, "i2c1_ick", &i2c1_ick),
4408 CLK(NULL, "uart2_ick", &uart2_ick),
4409 CLK(NULL, "uart1_ick", &uart1_ick),
4410 CLK(NULL, "gpt11_ick", &gpt11_ick),
4411 CLK(NULL, "gpt10_ick", &gpt10_ick),
4412 CLK("omap-mcbsp.5", "ick", &mcbsp5_ick),
4413 CLK("omap-mcbsp.1", "ick", &mcbsp1_ick),
4414 CLK(NULL, "mcbsp5_ick", &mcbsp5_ick),
4415 CLK(NULL, "mcbsp1_ick", &mcbsp1_ick),
4416 CLK(NULL, "omapctrl_ick", &omapctrl_ick),
4417 CLK(NULL, "dss_tv_fck", &dss_tv_fck),
4418 CLK(NULL, "dss_96m_fck", &dss_96m_fck),
4419 CLK(NULL, "dss2_alwon_fck", &dss2_alwon_fck),
4420 CLK(NULL, "init_60m_fclk", &dummy_ck),
4421 CLK(NULL, "gpt1_fck", &gpt1_fck),
4422 CLK(NULL, "aes2_ick", &aes2_ick),
4423 CLK(NULL, "wkup_32k_fck", &wkup_32k_fck),
4424 CLK(NULL, "gpio1_dbck", &gpio1_dbck),
4425 CLK(NULL, "sha12_ick", &sha12_ick),
4426 CLK(NULL, "wdt2_fck", &wdt2_fck),
4427 CLK(NULL, "wkup_l4_ick", &wkup_l4_ick),
4428 CLK("omap_wdt", "ick", &wdt2_ick),
4429 CLK(NULL, "wdt2_ick", &wdt2_ick),
4430 CLK(NULL, "wdt1_ick", &wdt1_ick),
4431 CLK(NULL, "gpio1_ick", &gpio1_ick),
4432 CLK(NULL, "omap_32ksync_ick", &omap_32ksync_ick),
4433 CLK(NULL, "gpt12_ick", &gpt12_ick),
4434 CLK(NULL, "gpt1_ick", &gpt1_ick),
4435 CLK(NULL, "per_96m_fck", &per_96m_fck),
4436 CLK(NULL, "per_48m_fck", &per_48m_fck),
4437 CLK(NULL, "uart3_fck", &uart3_fck),
4438 CLK(NULL, "gpt2_fck", &gpt2_fck),
4439 CLK(NULL, "gpt3_fck", &gpt3_fck),
4440 CLK(NULL, "gpt4_fck", &gpt4_fck),
4441 CLK(NULL, "gpt5_fck", &gpt5_fck),
4442 CLK(NULL, "gpt6_fck", &gpt6_fck),
4443 CLK(NULL, "gpt7_fck", &gpt7_fck),
4444 CLK(NULL, "gpt8_fck", &gpt8_fck),
4445 CLK(NULL, "gpt9_fck", &gpt9_fck),
4446 CLK(NULL, "per_32k_alwon_fck", &per_32k_alwon_fck),
4447 CLK(NULL, "gpio6_dbck", &gpio6_dbck),
4448 CLK(NULL, "gpio5_dbck", &gpio5_dbck),
4449 CLK(NULL, "gpio4_dbck", &gpio4_dbck),
4450 CLK(NULL, "gpio3_dbck", &gpio3_dbck),
4451 CLK(NULL, "gpio2_dbck", &gpio2_dbck),
4452 CLK(NULL, "wdt3_fck", &wdt3_fck),
4453 CLK(NULL, "per_l4_ick", &per_l4_ick),
4454 CLK(NULL, "gpio6_ick", &gpio6_ick),
4455 CLK(NULL, "gpio5_ick", &gpio5_ick),
4456 CLK(NULL, "gpio4_ick", &gpio4_ick),
4457 CLK(NULL, "gpio3_ick", &gpio3_ick),
4458 CLK(NULL, "gpio2_ick", &gpio2_ick),
4459 CLK(NULL, "wdt3_ick", &wdt3_ick),
4460 CLK(NULL, "uart3_ick", &uart3_ick),
4461 CLK(NULL, "uart4_ick", &uart4_ick),
4462 CLK(NULL, "gpt9_ick", &gpt9_ick),
4463 CLK(NULL, "gpt8_ick", &gpt8_ick),
4464 CLK(NULL, "gpt7_ick", &gpt7_ick),
4465 CLK(NULL, "gpt6_ick", &gpt6_ick),
4466 CLK(NULL, "gpt5_ick", &gpt5_ick),
4467 CLK(NULL, "gpt4_ick", &gpt4_ick),
4468 CLK(NULL, "gpt3_ick", &gpt3_ick),
4469 CLK(NULL, "gpt2_ick", &gpt2_ick),
4470 CLK("omap-mcbsp.2", "ick", &mcbsp2_ick),
4471 CLK("omap-mcbsp.3", "ick", &mcbsp3_ick),
4472 CLK("omap-mcbsp.4", "ick", &mcbsp4_ick),
4473	CLK(NULL, "mcbsp4_ick", &mcbsp4_ick),
4474	CLK(NULL, "mcbsp3_ick", &mcbsp3_ick),
4475	CLK(NULL, "mcbsp2_ick", &mcbsp2_ick),
4476 CLK(NULL, "mcbsp2_fck", &mcbsp2_fck),
4477 CLK(NULL, "mcbsp3_fck", &mcbsp3_fck),
4478 CLK(NULL, "mcbsp4_fck", &mcbsp4_fck),
4479 CLK(NULL, "emu_src_mux_ck", &emu_src_mux_ck),
4480 CLK("etb", "emu_src_ck", &emu_src_ck),
4482 CLK(NULL, "emu_src_ck", &emu_src_ck),
4483 CLK(NULL, "pclk_fck", &pclk_fck),
4484 CLK(NULL, "pclkx2_fck", &pclkx2_fck),
4485 CLK(NULL, "atclk_fck", &atclk_fck),
4486 CLK(NULL, "traceclk_src_fck", &traceclk_src_fck),
4487 CLK(NULL, "traceclk_fck", &traceclk_fck),
4488 CLK(NULL, "secure_32k_fck", &secure_32k_fck),
4489 CLK(NULL, "gpt12_fck", &gpt12_fck),
4490 CLK(NULL, "wdt1_fck", &wdt1_fck),
4491 { NULL },
4492};
4493
4494static struct ti_clk_alias omap36xx_am35xx_omap3430es2plus_clks[] = {
4495 CLK(NULL, "dpll5_ck", &dpll5_ck),
4496 CLK(NULL, "dpll5_m2_ck", &dpll5_m2_ck),
4497 CLK(NULL, "core_d3_ck", &core_d3_ck),
4498 CLK(NULL, "core_d4_ck", &core_d4_ck),
4499 CLK(NULL, "core_d6_ck", &core_d6_ck),
4500 CLK(NULL, "omap_192m_alwon_fck", &omap_192m_alwon_fck),
4501 CLK(NULL, "core_d2_ck", &core_d2_ck),
4502 CLK(NULL, "corex2_d3_fck", &corex2_d3_fck),
4503 CLK(NULL, "corex2_d5_fck", &corex2_d5_fck),
4504 CLK(NULL, "sgx_fck", &sgx_fck),
4505 CLK(NULL, "sgx_ick", &sgx_ick),
4506 CLK(NULL, "cpefuse_fck", &cpefuse_fck),
4507 CLK(NULL, "ts_fck", &ts_fck),
4508 CLK(NULL, "usbtll_fck", &usbtll_fck),
4509 CLK(NULL, "usbtll_ick", &usbtll_ick),
4510 CLK("omap_hsmmc.2", "ick", &mmchs3_ick),
4511 CLK(NULL, "mmchs3_ick", &mmchs3_ick),
4512 CLK(NULL, "mmchs3_fck", &mmchs3_fck),
4513 CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es2),
4514 CLK("omapdss_dss", "ick", &dss_ick_3430es2),
4515 CLK(NULL, "dss_ick", &dss_ick_3430es2),
4516 CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck),
4517 CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck),
4518 CLK(NULL, "usbhost_ick", &usbhost_ick),
4519 { NULL },
4520};
4521
4522static struct ti_clk_alias omap3430es1_clks[] = {
4523 CLK(NULL, "gfx_l3_ck", &gfx_l3_ck),
4524 CLK(NULL, "gfx_l3_fck", &gfx_l3_fck),
4525 CLK(NULL, "gfx_l3_ick", &gfx_l3_ick),
4526 CLK(NULL, "gfx_cg1_ck", &gfx_cg1_ck),
4527 CLK(NULL, "gfx_cg2_ck", &gfx_cg2_ck),
4528 CLK(NULL, "d2d_26m_fck", &d2d_26m_fck),
4529 CLK(NULL, "fshostusb_fck", &fshostusb_fck),
4530 CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es1),
4531 CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es1),
4532 CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es1),
4533 CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es1),
4534 CLK(NULL, "fac_ick", &fac_ick),
4535 CLK(NULL, "ssi_ick", &ssi_ick_3430es1),
4536 CLK(NULL, "usb_l4_ick", &usb_l4_ick),
4537 CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es1),
4538 CLK("omapdss_dss", "ick", &dss_ick_3430es1),
4539 CLK(NULL, "dss_ick", &dss_ick_3430es1),
4540 { NULL },
4541};
4542
4543static struct ti_clk_alias omap36xx_clks[] = {
4544 CLK(NULL, "uart4_fck", &uart4_fck),
4545 { NULL },
4546};
4547
4548static struct ti_clk_alias am35xx_clks[] = {
4549 CLK(NULL, "ipss_ick", &ipss_ick),
4550 CLK(NULL, "rmii_ck", &rmii_ck),
4551 CLK(NULL, "pclk_ck", &pclk_ck),
4552 CLK(NULL, "emac_ick", &emac_ick),
4553 CLK(NULL, "emac_fck", &emac_fck),
4554 CLK("davinci_emac.0", NULL, &emac_ick),
4555 CLK("davinci_mdio.0", NULL, &emac_fck),
4556 CLK("vpfe-capture", "master", &vpfe_ick),
4557 CLK("vpfe-capture", "slave", &vpfe_fck),
4558 CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_am35xx),
4559 CLK(NULL, "hsotgusb_fck", &hsotgusb_fck_am35xx),
4560 CLK(NULL, "hecc_ck", &hecc_ck),
4561 CLK(NULL, "uart4_ick", &uart4_ick_am35xx),
4562 CLK(NULL, "uart4_fck", &uart4_fck_am35xx),
4563 { NULL },
4564};
4565
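/*
 * Applied by omap36xx_clk_legacy_init() before any table is
 * registered, so the 36xx gate variants replace their generic
 * counterparts in place.
 */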
4566static struct ti_clk *omap36xx_clk_patches[] = {
4567 &dpll4_m3x2_ck_omap36xx,
4568 &dpll3_m3x2_ck_omap36xx,
4569 &dpll4_m6x2_ck_omap36xx,
4570 &dpll4_m2x2_ck_omap36xx,
4571 &dpll4_m5x2_ck_omap36xx,
4572 &dpll4_ck_omap36xx,
4573 NULL,
4574};
4575
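/*
 * Clocks enabled once at init and left running: the SDRC and
 * system-control interface clocks and the GPMC functional clock have
 * no driver to keep them alive.
 */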
4576static const char *enable_init_clks[] = {
4577 "sdrc_ick",
4578 "gpmc_fck",
4579 "omapctrl_ick",
4580};
4581
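/* Common tail of every legacy init path: disable clock autoidle
 * everywhere, switch on the always-needed clocks and report the basic
 * crystal/core/MPU rates.
 */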
4582static void __init omap3_clk_legacy_common_init(void)
4583{
4584 omap2_clk_disable_autoidle_all();
4585
4586 omap2_clk_enable_init_clocks(enable_init_clks,
4587 ARRAY_SIZE(enable_init_clks));
4588
4589 pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
4590 (clk_get_rate(osc_sys_ck.clk) / 1000000),
4591 (clk_get_rate(osc_sys_ck.clk) / 100000) % 10,
4592 (clk_get_rate(core_ck.clk) / 1000000),
4593 (clk_get_rate(arm_fck.clk) / 1000000));
4594}
4595
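/*
 * Per-SoC entry points: each registers the union of the alias tables
 * that apply to that family, then runs the common init.  Error codes
 * are OR-ed together, so a non-zero result means at least one table
 * failed to register.
 */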
4596int __init omap3430es1_clk_legacy_init(void)
4597{
4598 int r;
4599
4600 r = ti_clk_register_legacy_clks(omap3430es1_clks);
4601 r |= ti_clk_register_legacy_clks(omap34xx_omap36xx_clks);
4602 r |= ti_clk_register_legacy_clks(omap3xxx_clks);
4603
4604 omap3_clk_legacy_common_init();
4605
4606 return r;
4607}
4608
4609int __init omap3430_clk_legacy_init(void)
4610{
4611 int r;
4612
4613 r = ti_clk_register_legacy_clks(omap34xx_omap36xx_clks);
4614 r |= ti_clk_register_legacy_clks(omap36xx_omap3430es2plus_clks);
4615 r |= ti_clk_register_legacy_clks(omap36xx_am35xx_omap3430es2plus_clks);
4616 r |= ti_clk_register_legacy_clks(omap3xxx_clks);
4617
4618 omap3_clk_legacy_common_init();
4619 omap3_clk_lock_dpll5();
4620
4621 return r;
4622}
4623
4624int __init omap36xx_clk_legacy_init(void)
4625{
4626 int r;
4627
4628 ti_clk_patch_legacy_clks(omap36xx_clk_patches);
4629 r = ti_clk_register_legacy_clks(omap36xx_clks);
4630 r |= ti_clk_register_legacy_clks(omap36xx_omap3430es2plus_clks);
4631 r |= ti_clk_register_legacy_clks(omap34xx_omap36xx_clks);
4632 r |= ti_clk_register_legacy_clks(omap36xx_am35xx_omap3430es2plus_clks);
4633 r |= ti_clk_register_legacy_clks(omap3xxx_clks);
4634
4635 omap3_clk_legacy_common_init();
4636 omap3_clk_lock_dpll5();
4637
4638 return r;
4639}
4640
4641int __init am35xx_clk_legacy_init(void)
4642{
4643 int r;
4644
4645 r = ti_clk_register_legacy_clks(am35xx_clks);
4646 r |= ti_clk_register_legacy_clks(omap36xx_am35xx_omap3430es2plus_clks);
4647 r |= ti_clk_register_legacy_clks(omap3xxx_clks);
4648
4649 omap3_clk_legacy_common_init();
4650 omap3_clk_lock_dpll5();
4651
4652 return r;
4653}
diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
index 0d1750a8aea4..383a06e49b09 100644
--- a/drivers/clk/ti/clk-3xxx.c
+++ b/drivers/clk/ti/clk-3xxx.c
@@ -327,7 +327,6 @@ enum {
 	OMAP3_SOC_OMAP3430_ES1,
 	OMAP3_SOC_OMAP3430_ES2_PLUS,
 	OMAP3_SOC_OMAP3630,
-	OMAP3_SOC_TI81XX,
 };
 
 static int __init omap3xxx_dt_clk_init(int soc_type)
@@ -370,7 +369,7 @@ static int __init omap3xxx_dt_clk_init(int soc_type)
 		(clk_get_rate(clk_get_sys(NULL, "core_ck")) / 1000000),
 		(clk_get_rate(clk_get_sys(NULL, "arm_fck")) / 1000000));
 
-	if (soc_type != OMAP3_SOC_TI81XX && soc_type != OMAP3_SOC_OMAP3430_ES1)
+	if (soc_type != OMAP3_SOC_OMAP3430_ES1)
 		omap3_clk_lock_dpll5();
 
 	return 0;
@@ -390,8 +389,3 @@ int __init am35xx_dt_clk_init(void)
 {
 	return omap3xxx_dt_clk_init(OMAP3_SOC_AM35XX);
 }
-
-int __init ti81xx_dt_clk_init(void)
-{
-	return omap3xxx_dt_clk_init(OMAP3_SOC_TI81XX);
-}
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
index 02517a8206bd..4f4c87751db5 100644
--- a/drivers/clk/ti/clk-44xx.c
+++ b/drivers/clk/ti/clk-44xx.c
@@ -12,7 +12,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/clk-private.h>
+#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/clk/ti.h>
 
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index 5e183993e3ec..14160b223548 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -12,7 +12,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/clk-private.h>
+#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/io.h>
 #include <linux/clk/ti.h>
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index 62ac8f6e480c..ee32f4deebf4 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -12,7 +12,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/clk-private.h>
+#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/clk/ti.h>
 
diff --git a/drivers/clk/ti/clk-816x.c b/drivers/clk/ti/clk-816x.c
new file mode 100644
index 000000000000..9451e651a1ff
--- /dev/null
+++ b/drivers/clk/ti/clk-816x.c
@@ -0,0 +1,53 @@
1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License as
4 * published by the Free Software Foundation version 2.
5 *
6 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
7 * kind, whether express or implied; without even the implied warranty
8 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 */
11
12#include <linux/kernel.h>
13#include <linux/list.h>
14#include <linux/clk-provider.h>
15#include <linux/clk/ti.h>
16
17static struct ti_dt_clk dm816x_clks[] = {
18 DT_CLK(NULL, "sys_clkin", "sys_clkin_ck"),
19 DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
20 DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"),
21 DT_CLK(NULL, "mpu_ck", "mpu_ck"),
22 DT_CLK(NULL, "timer1_fck", "timer1_fck"),
23 DT_CLK(NULL, "timer2_fck", "timer2_fck"),
24 DT_CLK(NULL, "timer3_fck", "timer3_fck"),
25 DT_CLK(NULL, "timer4_fck", "timer4_fck"),
26 DT_CLK(NULL, "timer5_fck", "timer5_fck"),
27 DT_CLK(NULL, "timer6_fck", "timer6_fck"),
28 DT_CLK(NULL, "timer7_fck", "timer7_fck"),
29 DT_CLK(NULL, "sysclk4_ck", "sysclk4_ck"),
30 DT_CLK(NULL, "sysclk5_ck", "sysclk5_ck"),
31 DT_CLK(NULL, "sysclk6_ck", "sysclk6_ck"),
32 DT_CLK(NULL, "sysclk10_ck", "sysclk10_ck"),
33 DT_CLK(NULL, "sysclk18_ck", "sysclk18_ck"),
34 DT_CLK(NULL, "sysclk24_ck", "sysclk24_ck"),
35 DT_CLK("4a100000.ethernet", "sysclk24_ck", "sysclk24_ck"),
36 { .node_name = NULL },
37};
38
39static const char *enable_init_clks[] = {
40 "ddr_pll_clk1",
41 "ddr_pll_clk2",
42 "ddr_pll_clk3",
43};
44
45int __init ti81xx_dt_clk_init(void)
46{
47 ti_dt_clocks_register(dm816x_clks);
48 omap2_clk_disable_autoidle_all();
49 omap2_clk_enable_init_clocks(enable_init_clks,
50 ARRAY_SIZE(enable_init_clks));
51
52 return 0;
53}
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index 337abe5909e1..e22b95646e09 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -22,6 +22,8 @@
 #include <linux/of_address.h>
 #include <linux/list.h>
 
+#include "clock.h"
+
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
 
@@ -183,3 +185,128 @@ void ti_dt_clk_init_retry_clks(void)
 		retries--;
 	}
 }
188
189#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
190void __init ti_clk_patch_legacy_clks(struct ti_clk **patch)
191{
192 while (*patch) {
193 memcpy((*patch)->patch, *patch, sizeof(**patch));
194 patch++;
195 }
196}
197
198struct clk __init *ti_clk_register_clk(struct ti_clk *setup)
199{
200 struct clk *clk;
201 struct ti_clk_fixed *fixed;
202 struct ti_clk_fixed_factor *fixed_factor;
203 struct clk_hw *clk_hw;
204
205 if (setup->clk)
206 return setup->clk;
207
208 switch (setup->type) {
209 case TI_CLK_FIXED:
210 fixed = setup->data;
211
212 clk = clk_register_fixed_rate(NULL, setup->name, NULL,
213 CLK_IS_ROOT, fixed->frequency);
214 break;
215 case TI_CLK_MUX:
216 clk = ti_clk_register_mux(setup);
217 break;
218 case TI_CLK_DIVIDER:
219 clk = ti_clk_register_divider(setup);
220 break;
221 case TI_CLK_COMPOSITE:
222 clk = ti_clk_register_composite(setup);
223 break;
224 case TI_CLK_FIXED_FACTOR:
225 fixed_factor = setup->data;
226
227 clk = clk_register_fixed_factor(NULL, setup->name,
228 fixed_factor->parent,
229 0, fixed_factor->mult,
230 fixed_factor->div);
231 break;
232 case TI_CLK_GATE:
233 clk = ti_clk_register_gate(setup);
234 break;
235 case TI_CLK_DPLL:
236 clk = ti_clk_register_dpll(setup);
237 break;
238 default:
239 pr_err("bad type for %s!\n", setup->name);
240 clk = ERR_PTR(-EINVAL);
241 }
242
243 if (!IS_ERR(clk)) {
244 setup->clk = clk;
245 if (setup->clkdm_name) {
246 if (__clk_get_flags(clk) & CLK_IS_BASIC) {
247 pr_warn("can't setup clkdm for basic clk %s\n",
248 setup->name);
249 } else {
250 clk_hw = __clk_get_hw(clk);
251 to_clk_hw_omap(clk_hw)->clkdm_name =
252 setup->clkdm_name;
253 omap2_init_clk_clkdm(clk_hw);
254 }
255 }
256 }
257
258 return clk;
259}
260
261int __init ti_clk_register_legacy_clks(struct ti_clk_alias *clks)
262{
263 struct clk *clk;
264 bool retry;
265 struct ti_clk_alias *retry_clk;
266 struct ti_clk_alias *tmp;
267
268 while (clks->clk) {
269 clk = ti_clk_register_clk(clks->clk);
270 if (IS_ERR(clk)) {
271 if (PTR_ERR(clk) == -EAGAIN) {
272 list_add(&clks->link, &retry_list);
273 } else {
274 pr_err("register for %s failed: %ld\n",
275 clks->clk->name, PTR_ERR(clk));
276 return PTR_ERR(clk);
277 }
278 } else {
279 clks->lk.clk = clk;
280 clkdev_add(&clks->lk);
281 }
282 clks++;
283 }
284
285 retry = true;
286
287 while (!list_empty(&retry_list) && retry) {
288 retry = false;
289 list_for_each_entry_safe(retry_clk, tmp, &retry_list, link) {
290 pr_debug("retry-init: %s\n", retry_clk->clk->name);
291 clk = ti_clk_register_clk(retry_clk->clk);
292 if (IS_ERR(clk)) {
293 if (PTR_ERR(clk) == -EAGAIN) {
294 continue;
295 } else {
296 pr_err("register for %s failed: %ld\n",
297 retry_clk->clk->name,
298 PTR_ERR(clk));
299 return PTR_ERR(clk);
300 }
301 } else {
302 retry = true;
303 retry_clk->lk.clk = clk;
304 clkdev_add(&retry_clk->lk);
305 list_del(&retry_clk->link);
306 }
307 }
308 }
309
310 return 0;
311}
312#endif
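
ti_clk_register_legacy_clks() parks any clock whose parents are not yet available (-EAGAIN) on retry_list, then re-walks that list until a full pass registers nothing new. A runnable toy model of the fixed-point retry loop (the dependency array is invented; the real code also bails out on errors other than -EAGAIN):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool registered[4] = { false };
	int dep[4] = { -1, 0, 1, 2 };	/* clk i needs dep[i]; -1 = none */
	bool progress = true;
	int i;

	while (progress) {
		progress = false;	/* a pass with no progress ends the loop */
		for (i = 0; i < 4; i++) {
			if (registered[i])
				continue;
			if (dep[i] >= 0 && !registered[dep[i]])
				continue;	/* would be -EAGAIN, retry later */
			registered[i] = true;	/* clkdev_add() happens here */
			progress = true;
			printf("registered clk %d\n", i);
		}
	}
	return 0;
}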
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
new file mode 100644
index 000000000000..404158d2d7f8
--- /dev/null
+++ b/drivers/clk/ti/clock.h
@@ -0,0 +1,172 @@
1/*
2 * TI Clock driver internal definitions
3 *
4 * Copyright (C) 2014 Texas Instruments, Inc
5 * Tero Kristo (t-kristo@ti.com)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation version 2.
10 *
11 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
12 * kind, whether express or implied; without even the implied warranty
13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16#ifndef __DRIVERS_CLK_TI_CLOCK__
17#define __DRIVERS_CLK_TI_CLOCK__
18
19enum {
20 TI_CLK_FIXED,
21 TI_CLK_MUX,
22 TI_CLK_DIVIDER,
23 TI_CLK_COMPOSITE,
24 TI_CLK_FIXED_FACTOR,
25 TI_CLK_GATE,
26 TI_CLK_DPLL,
27};
28
29/* Global flags */
30#define CLKF_INDEX_POWER_OF_TWO (1 << 0)
31#define CLKF_INDEX_STARTS_AT_ONE (1 << 1)
32#define CLKF_SET_RATE_PARENT (1 << 2)
33#define CLKF_OMAP3 (1 << 3)
34#define CLKF_AM35XX (1 << 4)
35
36/* Gate flags */
37#define CLKF_SET_BIT_TO_DISABLE (1 << 5)
38#define CLKF_INTERFACE (1 << 6)
39#define CLKF_SSI (1 << 7)
40#define CLKF_DSS (1 << 8)
41#define CLKF_HSOTGUSB (1 << 9)
42#define CLKF_WAIT (1 << 10)
43#define CLKF_NO_WAIT (1 << 11)
44#define CLKF_HSDIV (1 << 12)
45#define CLKF_CLKDM (1 << 13)
46
47/* DPLL flags */
48#define CLKF_LOW_POWER_STOP (1 << 5)
49#define CLKF_LOCK (1 << 6)
50#define CLKF_LOW_POWER_BYPASS (1 << 7)
51#define CLKF_PER (1 << 8)
52#define CLKF_CORE (1 << 9)
53#define CLKF_J_TYPE (1 << 10)
54
55#define CLK(dev, con, ck) \
56 { \
57 .lk = { \
58 .dev_id = dev, \
59 .con_id = con, \
60 }, \
61 .clk = ck, \
62 }
63
64struct ti_clk {
65 const char *name;
66 const char *clkdm_name;
67 int type;
68 void *data;
69 struct ti_clk *patch;
70 struct clk *clk;
71};
72
73struct ti_clk_alias {
74 struct ti_clk *clk;
75 struct clk_lookup lk;
76 struct list_head link;
77};
78
79struct ti_clk_fixed {
80 u32 frequency;
81 u16 flags;
82};
83
84struct ti_clk_mux {
85 u8 bit_shift;
86 int num_parents;
87 u16 reg;
88 u8 module;
89 const char **parents;
90 u16 flags;
91};
92
93struct ti_clk_divider {
94 const char *parent;
95 u8 bit_shift;
96 u16 max_div;
97 u16 reg;
98 u8 module;
99 int *dividers;
100 int num_dividers;
101 u16 flags;
102};
103
104struct ti_clk_fixed_factor {
105 const char *parent;
106 u16 div;
107 u16 mult;
108 u16 flags;
109};
110
111struct ti_clk_gate {
112 const char *parent;
113 u8 bit_shift;
114 u16 reg;
115 u8 module;
116 u16 flags;
117};
118
119struct ti_clk_composite {
120 struct ti_clk_divider *divider;
121 struct ti_clk_mux *mux;
122 struct ti_clk_gate *gate;
123 u16 flags;
124};
125
126struct ti_clk_clkdm_gate {
127 const char *parent;
128 u16 flags;
129};
130
131struct ti_clk_dpll {
132 int num_parents;
133 u16 control_reg;
134 u16 idlest_reg;
135 u16 autoidle_reg;
136 u16 mult_div1_reg;
137 u8 module;
138 const char **parents;
139 u16 flags;
140 u8 modes;
141 u32 mult_mask;
142 u32 div1_mask;
143 u32 enable_mask;
144 u32 autoidle_mask;
145 u32 freqsel_mask;
146 u32 idlest_mask;
147 u32 dco_mask;
148 u32 sddiv_mask;
149 u16 max_multiplier;
150 u16 max_divider;
151 u8 min_divider;
152 u8 auto_recal_bit;
153 u8 recal_en_bit;
154 u8 recal_st_bit;
155};
156
157struct clk *ti_clk_register_gate(struct ti_clk *setup);
158struct clk *ti_clk_register_interface(struct ti_clk *setup);
159struct clk *ti_clk_register_mux(struct ti_clk *setup);
160struct clk *ti_clk_register_divider(struct ti_clk *setup);
161struct clk *ti_clk_register_composite(struct ti_clk *setup);
162struct clk *ti_clk_register_dpll(struct ti_clk *setup);
163
164struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup);
165struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup);
166struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup);
167
168void ti_clk_patch_legacy_clks(struct ti_clk **patch);
169struct clk *ti_clk_register_clk(struct ti_clk *setup);
170int ti_clk_register_legacy_clks(struct ti_clk_alias *clks);
171
172#endif
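
Note that the gate and DPLL flag values reuse bits 5 and up; the .type field of struct ti_clk selects which interpretation applies. A hedged sketch of how a legacy board file might describe one gate clock with these structures (the parent name, register offset, and module index are invented for illustration):

#include <linux/clkdev.h>
#include "clock.h"

static struct ti_clk_gate gpt1_gate_data = {
	.parent		= "sys_ck",	/* hypothetical parent name */
	.bit_shift	= 0,
	.reg		= 0xc00,	/* hypothetical register offset */
	.module		= 1,		/* hypothetical memmap index */
	.flags		= CLKF_WAIT,
};

static struct ti_clk gpt1_fck = {
	.name	= "gpt1_fck",
	.type	= TI_CLK_GATE,
	.data	= &gpt1_gate_data,
};

static struct ti_clk_alias board_clks[] = {
	CLK(NULL, "gpt1_fck", &gpt1_fck),
	{ .clk = NULL },	/* terminator, see the while (clks->clk) loop */
};

Passing board_clks to ti_clk_register_legacy_clks() would then register the gate and add its clkdev alias.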
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
index 19d8980ba458..3654f61912eb 100644
--- a/drivers/clk/ti/composite.c
+++ b/drivers/clk/ti/composite.c
@@ -23,6 +23,8 @@
23#include <linux/clk/ti.h> 23#include <linux/clk/ti.h>
24#include <linux/list.h> 24#include <linux/list.h>
25 25
26#include "clock.h"
27
26#undef pr_fmt 28#undef pr_fmt
27#define pr_fmt(fmt) "%s: " fmt, __func__ 29#define pr_fmt(fmt) "%s: " fmt, __func__
28 30
@@ -116,8 +118,46 @@ static inline struct clk_hw *_get_hw(struct clk_hw_omap_comp *clk, int idx)
116 118
117#define to_clk_hw_comp(_hw) container_of(_hw, struct clk_hw_omap_comp, hw) 119#define to_clk_hw_comp(_hw) container_of(_hw, struct clk_hw_omap_comp, hw)
118 120
119static void __init ti_clk_register_composite(struct clk_hw *hw, 121#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
120 struct device_node *node) 122struct clk *ti_clk_register_composite(struct ti_clk *setup)
123{
124 struct ti_clk_composite *comp;
125 struct clk_hw *gate;
126 struct clk_hw *mux;
127 struct clk_hw *div;
128 int num_parents = 1;
129 const char **parent_names = NULL;
130 struct clk *clk;
131
132 comp = setup->data;
133
134 div = ti_clk_build_component_div(comp->divider);
135 gate = ti_clk_build_component_gate(comp->gate);
136 mux = ti_clk_build_component_mux(comp->mux);
137
138 if (div)
139 parent_names = &comp->divider->parent;
140
141 if (gate)
142 parent_names = &comp->gate->parent;
143
144 if (mux) {
145 num_parents = comp->mux->num_parents;
146 parent_names = comp->mux->parents;
147 }
148
149 clk = clk_register_composite(NULL, setup->name,
150 parent_names, num_parents, mux,
151 &ti_clk_mux_ops, div,
152 &ti_composite_divider_ops, gate,
153 &ti_composite_gate_ops, 0);
154
155 return clk;
156}
157#endif
158
159static void __init _register_composite(struct clk_hw *hw,
160 struct device_node *node)
121{ 161{
122 struct clk *clk; 162 struct clk *clk;
123 struct clk_hw_omap_comp *cclk = to_clk_hw_comp(hw); 163 struct clk_hw_omap_comp *cclk = to_clk_hw_comp(hw);
@@ -136,7 +176,7 @@ static void __init ti_clk_register_composite(struct clk_hw *hw,
136 pr_debug("component %s not ready for %s, retry\n", 176 pr_debug("component %s not ready for %s, retry\n",
137 cclk->comp_nodes[i]->name, node->name); 177 cclk->comp_nodes[i]->name, node->name);
138 if (!ti_clk_retry_init(node, hw, 178 if (!ti_clk_retry_init(node, hw,
139 ti_clk_register_composite)) 179 _register_composite))
140 return; 180 return;
141 181
142 goto cleanup; 182 goto cleanup;
@@ -216,7 +256,7 @@ static void __init of_ti_composite_clk_setup(struct device_node *node)
216 for (i = 0; i < num_clks; i++) 256 for (i = 0; i < num_clks; i++)
217 cclk->comp_nodes[i] = _get_component_node(node, i); 257 cclk->comp_nodes[i] = _get_component_node(node, i);
218 258
219 ti_clk_register_composite(&cclk->hw, node); 259 _register_composite(&cclk->hw, node);
220} 260}
221CLK_OF_DECLARE(ti_composite_clock, "ti,composite-clock", 261CLK_OF_DECLARE(ti_composite_clock, "ti,composite-clock",
222 of_ti_composite_clk_setup); 262 of_ti_composite_clk_setup);
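
In ti_clk_register_composite() the parent_names assignments deliberately stack: the divider's parent is taken first, a gate overrides it, and a mux overrides both while also setting num_parents. A runnable sketch of that precedence (the presence flags and names are illustrative):

#include <stdio.h>

int main(void)
{
	const char *div_parent = "div_parent";
	const char *gate_parent = "gate_parent";
	const char *mux_parents[] = { "p0", "p1" };
	const char **parent_names = NULL;
	int num_parents = 1;
	int have_div = 1, have_gate = 1, have_mux = 1;

	if (have_div)
		parent_names = &div_parent;
	if (have_gate)
		parent_names = &gate_parent;	/* overrides divider */
	if (have_mux) {				/* overrides both */
		num_parents = 2;
		parent_names = mux_parents;
	}
	printf("%d parent(s), first: %s\n", num_parents, parent_names[0]);
	return 0;
}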
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index bff2b5b8ff59..6211893c0980 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -21,6 +21,7 @@
21#include <linux/of.h> 21#include <linux/of.h>
22#include <linux/of_address.h> 22#include <linux/of_address.h>
23#include <linux/clk/ti.h> 23#include <linux/clk/ti.h>
24#include "clock.h"
24 25
25#undef pr_fmt 26#undef pr_fmt
26#define pr_fmt(fmt) "%s: " fmt, __func__ 27#define pr_fmt(fmt) "%s: " fmt, __func__
@@ -301,6 +302,134 @@ static struct clk *_register_divider(struct device *dev, const char *name,
301} 302}
302 303
303static struct clk_div_table * 304static struct clk_div_table *
305_get_div_table_from_setup(struct ti_clk_divider *setup, u8 *width)
306{
307 int valid_div = 0;
308 struct clk_div_table *table;
309 int i;
310 int div;
311 u32 val;
312 u8 flags;
313
314 if (!setup->num_dividers) {
315 /* Clk divider table not provided, determine min/max divs */
316 flags = setup->flags;
317
318 if (flags & CLKF_INDEX_STARTS_AT_ONE)
319 val = 1;
320 else
321 val = 0;
322
323 div = 1;
324
325 while (div < setup->max_div) {
326 if (flags & CLKF_INDEX_POWER_OF_TWO)
327 div <<= 1;
328 else
329 div++;
330 val++;
331 }
332
333 *width = fls(val);
334
335 return NULL;
336 }
337
338 for (i = 0; i < setup->num_dividers; i++)
339 if (setup->dividers[i])
340 valid_div++;
341
342 table = kzalloc(sizeof(*table) * (valid_div + 1), GFP_KERNEL);
343 if (!table)
344 return ERR_PTR(-ENOMEM);
345
346 valid_div = 0;
347 *width = 0;
348
349 for (i = 0; i < setup->num_dividers; i++)
350 if (setup->dividers[i]) {
351 table[valid_div].div = setup->dividers[i];
352 table[valid_div].val = i;
353 valid_div++;
354 *width = i;
355 }
356
357 *width = fls(*width);
358
359 return table;
360}
361
362struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
363{
364 struct clk_divider *div;
365 struct clk_omap_reg *reg;
366
367 if (!setup)
368 return NULL;
369
370 div = kzalloc(sizeof(*div), GFP_KERNEL);
371 if (!div)
372 return ERR_PTR(-ENOMEM);
373
374 reg = (struct clk_omap_reg *)&div->reg;
375 reg->index = setup->module;
376 reg->offset = setup->reg;
377
378 if (setup->flags & CLKF_INDEX_STARTS_AT_ONE)
379 div->flags |= CLK_DIVIDER_ONE_BASED;
380
381 if (setup->flags & CLKF_INDEX_POWER_OF_TWO)
382 div->flags |= CLK_DIVIDER_POWER_OF_TWO;
383
384 div->table = _get_div_table_from_setup(setup, &div->width);
385
386 div->shift = setup->bit_shift;
387
388 return &div->hw;
389}
390
391struct clk *ti_clk_register_divider(struct ti_clk *setup)
392{
393 struct ti_clk_divider *div;
394 struct clk_omap_reg *reg_setup;
395 u32 reg;
396 u8 width;
397 u32 flags = 0;
398 u8 div_flags = 0;
399 struct clk_div_table *table;
400 struct clk *clk;
401
402 div = setup->data;
403
404 reg_setup = (struct clk_omap_reg *)&reg;
405
406 reg_setup->index = div->module;
407 reg_setup->offset = div->reg;
408
409 if (div->flags & CLKF_INDEX_STARTS_AT_ONE)
410 div_flags |= CLK_DIVIDER_ONE_BASED;
411
412 if (div->flags & CLKF_INDEX_POWER_OF_TWO)
413 div_flags |= CLK_DIVIDER_POWER_OF_TWO;
414
415 if (div->flags & CLKF_SET_RATE_PARENT)
416 flags |= CLK_SET_RATE_PARENT;
417
418 table = _get_div_table_from_setup(div, &width);
419 if (IS_ERR(table))
420 return (struct clk *)table;
421
422 clk = _register_divider(NULL, setup->name, div->parent,
423 flags, (void __iomem *)reg, div->bit_shift,
424 width, div_flags, table, NULL);
425
426 if (IS_ERR(clk))
427 kfree(table);
428
429 return clk;
430}
431
432static struct clk_div_table *
304__init ti_clk_get_div_table(struct device_node *node) 433__init ti_clk_get_div_table(struct device_node *node)
305{ 434{
306 struct clk_div_table *table; 435 struct clk_div_table *table;
@@ -455,7 +584,8 @@ static void __init of_ti_divider_clk_setup(struct device_node *node)
455 goto cleanup; 584 goto cleanup;
456 585
457 clk = _register_divider(NULL, node->name, parent_name, flags, reg, 586 clk = _register_divider(NULL, node->name, parent_name, flags, reg,
458 shift, width, clk_divider_flags, table, NULL); 587 shift, width, clk_divider_flags, table,
588 NULL);
459 589
460 if (!IS_ERR(clk)) { 590 if (!IS_ERR(clk)) {
461 of_clk_add_provider(node, of_clk_src_simple_get, clk); 591 of_clk_add_provider(node, of_clk_src_simple_get, clk);
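
_get_div_table_from_setup() sizes the divider bit-field with fls() over the highest selector value. A runnable worked example: a power-of-two divider with max_div = 8 uses selector values 0..3, so the field is 2 bits wide (fls() is reimplemented here for userspace):

#include <stdio.h>

static int fls_(unsigned int v)
{
	int n = 0;

	while (v) {
		n++;
		v >>= 1;
	}
	return n;
}

int main(void)
{
	unsigned int max_div = 8, val = 0, div = 1;

	while (div < max_div) {
		div <<= 1;	/* CLKF_INDEX_POWER_OF_TWO stepping */
		val++;
	}
	printf("width = %d bits\n", fls_(val));	/* prints 2 */
	return 0;
}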
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 85ac0dd501de..81dc4698dc41 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -21,6 +21,7 @@
21#include <linux/of.h> 21#include <linux/of.h>
22#include <linux/of_address.h> 22#include <linux/of_address.h>
23#include <linux/clk/ti.h> 23#include <linux/clk/ti.h>
24#include "clock.h"
24 25
25#undef pr_fmt 26#undef pr_fmt
26#define pr_fmt(fmt) "%s: " fmt, __func__ 27#define pr_fmt(fmt) "%s: " fmt, __func__
@@ -130,7 +131,7 @@ static const struct clk_ops dpll_x2_ck_ops = {
130}; 131};
131 132
132/** 133/**
133 * ti_clk_register_dpll - low level registration of a DPLL clock 134 * _register_dpll - low level registration of a DPLL clock
134 * @hw: hardware clock definition for the clock 135 * @hw: hardware clock definition for the clock
135 * @node: device node for the clock 136 * @node: device node for the clock
136 * 137 *
@@ -138,8 +139,8 @@ static const struct clk_ops dpll_x2_ck_ops = {
138 * clk-bypass is missing), the clock is added to retry list and 139 * clk-bypass is missing), the clock is added to retry list and
139 * the initialization is retried on later stage. 140 * the initialization is retried on later stage.
140 */ 141 */
141static void __init ti_clk_register_dpll(struct clk_hw *hw, 142static void __init _register_dpll(struct clk_hw *hw,
142 struct device_node *node) 143 struct device_node *node)
143{ 144{
144 struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw); 145 struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
145 struct dpll_data *dd = clk_hw->dpll_data; 146 struct dpll_data *dd = clk_hw->dpll_data;
@@ -151,7 +152,7 @@ static void __init ti_clk_register_dpll(struct clk_hw *hw,
151 if (IS_ERR(dd->clk_ref) || IS_ERR(dd->clk_bypass)) { 152 if (IS_ERR(dd->clk_ref) || IS_ERR(dd->clk_bypass)) {
152 pr_debug("clk-ref or clk-bypass missing for %s, retry later\n", 153 pr_debug("clk-ref or clk-bypass missing for %s, retry later\n",
153 node->name); 154 node->name);
154 if (!ti_clk_retry_init(node, hw, ti_clk_register_dpll)) 155 if (!ti_clk_retry_init(node, hw, _register_dpll))
155 return; 156 return;
156 157
157 goto cleanup; 158 goto cleanup;
@@ -175,20 +176,118 @@ cleanup:
175 kfree(clk_hw); 176 kfree(clk_hw);
176} 177}
177 178
179#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
180void __iomem *_get_reg(u8 module, u16 offset)
181{
182 u32 reg;
183 struct clk_omap_reg *reg_setup;
184
185 reg_setup = (struct clk_omap_reg *)&reg;
186
187 reg_setup->index = module;
188 reg_setup->offset = offset;
189
190 return (void __iomem *)reg;
191}
192
193struct clk *ti_clk_register_dpll(struct ti_clk *setup)
194{
195 struct clk_hw_omap *clk_hw;
196 struct clk_init_data init = { NULL };
197 struct dpll_data *dd;
198 struct clk *clk;
199 struct ti_clk_dpll *dpll;
200 const struct clk_ops *ops = &omap3_dpll_ck_ops;
201 struct clk *clk_ref;
202 struct clk *clk_bypass;
203
204 dpll = setup->data;
205
206 if (dpll->num_parents < 2)
207 return ERR_PTR(-EINVAL);
208
209 clk_ref = clk_get_sys(NULL, dpll->parents[0]);
210 clk_bypass = clk_get_sys(NULL, dpll->parents[1]);
211
212 if (IS_ERR_OR_NULL(clk_ref) || IS_ERR_OR_NULL(clk_bypass))
213 return ERR_PTR(-EAGAIN);
214
215 dd = kzalloc(sizeof(*dd), GFP_KERNEL);
216 clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
217 if (!dd || !clk_hw) {
218 clk = ERR_PTR(-ENOMEM);
219 goto cleanup;
220 }
221
222 clk_hw->dpll_data = dd;
223 clk_hw->ops = &clkhwops_omap3_dpll;
224 clk_hw->hw.init = &init;
225 clk_hw->flags = MEMMAP_ADDRESSING;
226
227 init.name = setup->name;
228 init.ops = ops;
229
230 init.num_parents = dpll->num_parents;
231 init.parent_names = dpll->parents;
232
233 dd->control_reg = _get_reg(dpll->module, dpll->control_reg);
234 dd->idlest_reg = _get_reg(dpll->module, dpll->idlest_reg);
235 dd->mult_div1_reg = _get_reg(dpll->module, dpll->mult_div1_reg);
236 dd->autoidle_reg = _get_reg(dpll->module, dpll->autoidle_reg);
237
238 dd->modes = dpll->modes;
239 dd->div1_mask = dpll->div1_mask;
240 dd->idlest_mask = dpll->idlest_mask;
241 dd->mult_mask = dpll->mult_mask;
242 dd->autoidle_mask = dpll->autoidle_mask;
243 dd->enable_mask = dpll->enable_mask;
244 dd->sddiv_mask = dpll->sddiv_mask;
245 dd->dco_mask = dpll->dco_mask;
246 dd->max_divider = dpll->max_divider;
247 dd->min_divider = dpll->min_divider;
248 dd->max_multiplier = dpll->max_multiplier;
249 dd->auto_recal_bit = dpll->auto_recal_bit;
250 dd->recal_en_bit = dpll->recal_en_bit;
251 dd->recal_st_bit = dpll->recal_st_bit;
252
253 dd->clk_ref = clk_ref;
254 dd->clk_bypass = clk_bypass;
255
256 if (dpll->flags & CLKF_CORE)
257 ops = &omap3_dpll_core_ck_ops;
258
259 if (dpll->flags & CLKF_PER)
260 ops = &omap3_dpll_per_ck_ops;
261
262 if (dpll->flags & CLKF_J_TYPE)
263 dd->flags |= DPLL_J_TYPE;
264
265 clk = clk_register(NULL, &clk_hw->hw);
266
267 if (!IS_ERR(clk))
268 return clk;
269
270cleanup:
271 kfree(dd);
272 kfree(clk_hw);
273 return clk;
274}
275#endif
276
178#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \ 277#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
179 defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \ 278 defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
180 defined(CONFIG_SOC_AM43XX) 279 defined(CONFIG_SOC_AM43XX)
181/** 280/**
182 * ti_clk_register_dpll_x2 - Registers a DPLLx2 clock 281 * _register_dpll_x2 - Registers a DPLLx2 clock
183 * @node: device node for this clock 282 * @node: device node for this clock
184 * @ops: clk_ops for this clock 283 * @ops: clk_ops for this clock
185 * @hw_ops: clk_hw_ops for this clock 284 * @hw_ops: clk_hw_ops for this clock
186 * 285 *
187 * Initializes a DPLL x 2 clock from device tree data. 286 * Initializes a DPLL x 2 clock from device tree data.
188 */ 287 */
189static void ti_clk_register_dpll_x2(struct device_node *node, 288static void _register_dpll_x2(struct device_node *node,
190 const struct clk_ops *ops, 289 const struct clk_ops *ops,
191 const struct clk_hw_omap_ops *hw_ops) 290 const struct clk_hw_omap_ops *hw_ops)
192{ 291{
193 struct clk *clk; 292 struct clk *clk;
194 struct clk_init_data init = { NULL }; 293 struct clk_init_data init = { NULL };
@@ -318,7 +417,7 @@ static void __init of_ti_dpll_setup(struct device_node *node,
318 if (dpll_mode) 417 if (dpll_mode)
319 dd->modes = dpll_mode; 418 dd->modes = dpll_mode;
320 419
321 ti_clk_register_dpll(&clk_hw->hw, node); 420 _register_dpll(&clk_hw->hw, node);
322 return; 421 return;
323 422
324cleanup: 423cleanup:
@@ -332,7 +431,7 @@ cleanup:
332 defined(CONFIG_SOC_DRA7XX) 431 defined(CONFIG_SOC_DRA7XX)
333static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node) 432static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node)
334{ 433{
335 ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx); 434 _register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
336} 435}
337CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock", 436CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
338 of_ti_omap4_dpll_x2_setup); 437 of_ti_omap4_dpll_x2_setup);
@@ -341,7 +440,7 @@ CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
341#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX) 440#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
342static void __init of_ti_am3_dpll_x2_setup(struct device_node *node) 441static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
343{ 442{
344 ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, NULL); 443 _register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
345} 444}
346CLK_OF_DECLARE(ti_am3_dpll_x2_clock, "ti,am3-dpll-x2-clock", 445CLK_OF_DECLARE(ti_am3_dpll_x2_clock, "ti,am3-dpll-x2-clock",
347 of_ti_am3_dpll_x2_setup); 446 of_ti_am3_dpll_x2_setup);
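
_get_reg() never returns a dereferenceable pointer: it packs the module index and register offset into a u32 and casts the result, and MEMMAP_ADDRESSING later tells the OMAP clock ops to decode the cookie rather than read through it. A runnable userspace model of the packing (this field layout is an assumption for illustration, not the kernel's exact struct clk_omap_reg):

#include <stdio.h>
#include <stdint.h>

union reg_cookie {
	uint32_t word;
	struct {
		uint16_t offset;	/* register offset within the module */
		uint8_t index;		/* which memmapped module */
		uint8_t unused;
	} f;
};

int main(void)
{
	union reg_cookie r = { 0 };

	r.f.index = 2;		/* hypothetical module index */
	r.f.offset = 0xd40;	/* hypothetical DPLL control register */
	printf("packed cookie: 0x%08x\n", (unsigned int)r.word);
	return 0;
}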
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
new file mode 100644
index 000000000000..6ef89639a9f6
--- /dev/null
+++ b/drivers/clk/ti/fapll.c
@@ -0,0 +1,410 @@
1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License as
4 * published by the Free Software Foundation version 2.
5 *
6 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
7 * kind, whether express or implied; without even the implied warranty
8 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 */
11
12#include <linux/clk-provider.h>
13#include <linux/delay.h>
14#include <linux/slab.h>
15#include <linux/err.h>
16#include <linux/of.h>
17#include <linux/of_address.h>
18#include <linux/clk/ti.h>
19#include <asm/div64.h>
20
21/* FAPLL Control Register PLL_CTRL */
22#define FAPLL_MAIN_LOCK BIT(7)
23#define FAPLL_MAIN_PLLEN BIT(3)
24#define FAPLL_MAIN_BP BIT(2)
25#define FAPLL_MAIN_LOC_CTL BIT(0)
26
27/* FAPLL powerdown register PWD */
28#define FAPLL_PWD_OFFSET 4
29
30#define MAX_FAPLL_OUTPUTS 7
31#define FAPLL_MAX_RETRIES 1000
32
33#define to_fapll(_hw) container_of(_hw, struct fapll_data, hw)
34#define to_synth(_hw) container_of(_hw, struct fapll_synth, hw)
35
36/* The bypass bit is inverted on the ddr_pll. */
37#define fapll_is_ddr_pll(va) (((u32)(va) & 0xffff) == 0x0440)
38
39/*
40 * The audio_pll_clk1 input is hardwired to the 27MHz bypass clock,
41 * and the audio_pll_clk1 synthesizer is hardwired to a 32.768 kHz output.
42 */
43#define is_ddr_pll_clk1(va) (((u32)(va) & 0xffff) == 0x044c)
44#define is_audio_pll_clk1(va) (((u32)(va) & 0xffff) == 0x04a8)
45
46/* Synthesizer divider register */
47#define SYNTH_LDMDIV1 BIT(8)
48
49/* Synthesizer frequency register */
50#define SYNTH_LDFREQ BIT(31)
51
52struct fapll_data {
53 struct clk_hw hw;
54 void __iomem *base;
55 const char *name;
56 struct clk *clk_ref;
57 struct clk *clk_bypass;
58 struct clk_onecell_data outputs;
59 bool bypass_bit_inverted;
60};
61
62struct fapll_synth {
63 struct clk_hw hw;
64 struct fapll_data *fd;
65 int index;
66 void __iomem *freq;
67 void __iomem *div;
68 const char *name;
69 struct clk *clk_pll;
70};
71
72static bool ti_fapll_clock_is_bypass(struct fapll_data *fd)
73{
74 u32 v = readl_relaxed(fd->base);
75
76 if (fd->bypass_bit_inverted)
77 return !(v & FAPLL_MAIN_BP);
78 else
79 return !!(v & FAPLL_MAIN_BP);
80}
81
82static int ti_fapll_enable(struct clk_hw *hw)
83{
84 struct fapll_data *fd = to_fapll(hw);
85 u32 v = readl_relaxed(fd->base);
86
87 v |= FAPLL_MAIN_PLLEN;
88 writel_relaxed(v, fd->base);
89
90 return 0;
91}
92
93static void ti_fapll_disable(struct clk_hw *hw)
94{
95 struct fapll_data *fd = to_fapll(hw);
96 u32 v = readl_relaxed(fd->base);
97
98 v &= ~FAPLL_MAIN_PLLEN;
99 writel_relaxed(v, fd->base);
100}
101
102static int ti_fapll_is_enabled(struct clk_hw *hw)
103{
104 struct fapll_data *fd = to_fapll(hw);
105 u32 v = readl_relaxed(fd->base);
106
107 return v & FAPLL_MAIN_PLLEN;
108}
109
110static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw,
111 unsigned long parent_rate)
112{
113 struct fapll_data *fd = to_fapll(hw);
114 u32 fapll_n, fapll_p, v;
115 long long rate;
116
117 if (ti_fapll_clock_is_bypass(fd))
118 return parent_rate;
119
120 rate = parent_rate;
121
122 /* PLL pre-divider is P and multiplier is N */
123 v = readl_relaxed(fd->base);
124 fapll_p = (v >> 8) & 0xff;
125 if (fapll_p)
126 do_div(rate, fapll_p);
127 fapll_n = v >> 16;
128 if (fapll_n)
129 rate *= fapll_n;
130
131 return rate;
132}
133
134static u8 ti_fapll_get_parent(struct clk_hw *hw)
135{
136 struct fapll_data *fd = to_fapll(hw);
137
138 if (ti_fapll_clock_is_bypass(fd))
139 return 1;
140
141 return 0;
142}
143
144static struct clk_ops ti_fapll_ops = {
145 .enable = ti_fapll_enable,
146 .disable = ti_fapll_disable,
147 .is_enabled = ti_fapll_is_enabled,
148 .recalc_rate = ti_fapll_recalc_rate,
149 .get_parent = ti_fapll_get_parent,
150};
151
152static int ti_fapll_synth_enable(struct clk_hw *hw)
153{
154 struct fapll_synth *synth = to_synth(hw);
155 u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
156
157 v &= ~(1 << synth->index);
158 writel_relaxed(v, synth->fd->base + FAPLL_PWD_OFFSET);
159
160 return 0;
161}
162
163static void ti_fapll_synth_disable(struct clk_hw *hw)
164{
165 struct fapll_synth *synth = to_synth(hw);
166 u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
167
168 v |= 1 << synth->index;
169 writel_relaxed(v, synth->fd->base + FAPLL_PWD_OFFSET);
170}
171
172static int ti_fapll_synth_is_enabled(struct clk_hw *hw)
173{
174 struct fapll_synth *synth = to_synth(hw);
175 u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
176
177 return !(v & (1 << synth->index));
178}
179
180/*
181 * See dm816x TRM chapter 1.10.3 Flying Adder PLL for more info
182 */
183static unsigned long ti_fapll_synth_recalc_rate(struct clk_hw *hw,
184 unsigned long parent_rate)
185{
186 struct fapll_synth *synth = to_synth(hw);
187 u32 synth_div_m;
188 long long rate;
189
190 /* The audio_pll_clk1 is hardwired to produce a 32.768 kHz clock */
191 if (!synth->div)
192 return 32768;
193
194 /*
195 * PLL in bypass sets the synths in bypass mode too. The PLL rate
196 * can also be set to 27MHz, so we can't use parent_rate to
197 * check for bypass mode.
198 */
199 if (ti_fapll_clock_is_bypass(synth->fd))
200 return parent_rate;
201
202 rate = parent_rate;
203
204 /*
205 * Synth frequency integer and fractional divider.
206 * Note that the phase output K is 8, so the result needs
207 * to be multiplied by 8.
208 */
209 if (synth->freq) {
210 u32 v, synth_int_div, synth_frac_div, synth_div_freq;
211
212 v = readl_relaxed(synth->freq);
213 synth_int_div = (v >> 24) & 0xf;
214 synth_frac_div = v & 0xffffff;
215 synth_div_freq = (synth_int_div * 10000000) + synth_frac_div;
216 rate *= 10000000;
217 do_div(rate, synth_div_freq);
218 rate *= 8;
219 }
220
221 /* Synth post-divider M */
222 synth_div_m = readl_relaxed(synth->div) & 0xff;
223 do_div(rate, synth_div_m);
224
225 return rate;
226}
227
228static struct clk_ops ti_fapll_synt_ops = {
229 .enable = ti_fapll_synth_enable,
230 .disable = ti_fapll_synth_disable,
231 .is_enabled = ti_fapll_synth_is_enabled,
232 .recalc_rate = ti_fapll_synth_recalc_rate,
233};
234
235static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd,
236 void __iomem *freq,
237 void __iomem *div,
238 int index,
239 const char *name,
240 const char *parent,
241 struct clk *pll_clk)
242{
243 struct clk_init_data *init;
244 struct fapll_synth *synth;
245
246 init = kzalloc(sizeof(*init), GFP_KERNEL);
247 if (!init)
248 return ERR_PTR(-ENOMEM);
249
250 init->ops = &ti_fapll_synt_ops;
251 init->name = name;
252 init->parent_names = &parent;
253 init->num_parents = 1;
254
255 synth = kzalloc(sizeof(*synth), GFP_KERNEL);
256 if (!synth)
257 goto free;
258
259 synth->fd = fd;
260 synth->index = index;
261 synth->freq = freq;
262 synth->div = div;
263 synth->name = name;
264 synth->hw.init = init;
265 synth->clk_pll = pll_clk;
266
267 return clk_register(NULL, &synth->hw);
268
269free:
270 kfree(synth);
271 kfree(init);
272
273 return ERR_PTR(-ENOMEM);
274}
275
276static void __init ti_fapll_setup(struct device_node *node)
277{
278 struct fapll_data *fd;
279 struct clk_init_data *init = NULL;
280 const char *parent_name[2];
281 struct clk *pll_clk;
282 int i;
283
284 fd = kzalloc(sizeof(*fd), GFP_KERNEL);
285 if (!fd)
286 return;
287
288 fd->outputs.clks = kzalloc(sizeof(struct clk *) *
289 (MAX_FAPLL_OUTPUTS + 1),
290 GFP_KERNEL);
291 if (!fd->outputs.clks)
292 goto free;
293
294 init = kzalloc(sizeof(*init), GFP_KERNEL);
295 if (!init)
296 goto free;
297
298 init->ops = &ti_fapll_ops;
299 init->name = node->name;
300
301 init->num_parents = of_clk_get_parent_count(node);
302 if (init->num_parents != 2) {
303 pr_err("%s must have two parents\n", node->name);
304 goto free;
305 }
306
307 parent_name[0] = of_clk_get_parent_name(node, 0);
308 parent_name[1] = of_clk_get_parent_name(node, 1);
309 init->parent_names = parent_name;
310
311 fd->clk_ref = of_clk_get(node, 0);
312 if (IS_ERR(fd->clk_ref)) {
313 pr_err("%s could not get clk_ref\n", node->name);
314 goto free;
315 }
316
317 fd->clk_bypass = of_clk_get(node, 1);
318 if (IS_ERR(fd->clk_bypass)) {
319 pr_err("%s could not get clk_bypass\n", node->name);
320 goto free;
321 }
322
323 fd->base = of_iomap(node, 0);
324 if (!fd->base) {
325 pr_err("%s could not get IO base\n", node->name);
326 goto free;
327 }
328
329 if (fapll_is_ddr_pll(fd->base))
330 fd->bypass_bit_inverted = true;
331
332 fd->name = node->name;
333 fd->hw.init = init;
334
335 /* Register the parent PLL */
336 pll_clk = clk_register(NULL, &fd->hw);
337 if (IS_ERR(pll_clk))
338 goto unmap;
339
340 fd->outputs.clks[0] = pll_clk;
341 fd->outputs.clk_num++;
342
343 /*
344 * Set up the child synthesizers starting at index 1 as the
345 * PLL output is at index 0. We need to check the clock-indices
346 * for numbering in case there are holes in the synth mapping,
347 * and then probe the synth register to see if it has a FREQ
348 * register available.
349 */
350 for (i = 0; i < MAX_FAPLL_OUTPUTS; i++) {
351 const char *output_name;
352 void __iomem *freq, *div;
353 struct clk *synth_clk;
354 int output_instance;
355 u32 v;
356
357 if (of_property_read_string_index(node, "clock-output-names",
358 i, &output_name))
359 continue;
360
361 if (of_property_read_u32_index(node, "clock-indices", i,
362 &output_instance))
363 output_instance = i;
364
365 freq = fd->base + (output_instance * 8);
366 div = freq + 4;
367
368 /* Check for hardwired audio_pll_clk1 */
369 if (is_audio_pll_clk1(freq)) {
370 freq = NULL;
371 div = NULL;
372 } else {
373 /* Does the synthesizer have a FREQ register? */
374 v = readl_relaxed(freq);
375 if (!v)
376 freq = NULL;
377 }
378 synth_clk = ti_fapll_synth_setup(fd, freq, div, output_instance,
379 output_name, node->name,
380 pll_clk);
381 if (IS_ERR(synth_clk))
382 continue;
383
384 fd->outputs.clks[output_instance] = synth_clk;
385 fd->outputs.clk_num++;
386
387 clk_register_clkdev(synth_clk, output_name, NULL);
388 }
389
390 /* Register the child synthesizers as the FAPLL outputs */
391 of_clk_add_provider(node, of_clk_src_onecell_get, &fd->outputs);
392 /* Add clock alias for the outputs */
393
394 kfree(init);
395
396 return;
397
398unmap:
399 iounmap(fd->base);
400free:
401 if (fd->clk_bypass)
402 clk_put(fd->clk_bypass);
403 if (fd->clk_ref)
404 clk_put(fd->clk_ref);
405 kfree(fd->outputs.clks);
406 kfree(fd);
407 kfree(init);
408}
409
410CLK_OF_DECLARE(ti_fapll_clock, "ti,dm816-fapll-clock", ti_fapll_setup);
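
The synthesizer rate math in ti_fapll_synth_recalc_rate() scales by 10^7 so the fractional divider stays in integer arithmetic, multiplies by the fixed phase-output factor of 8, then applies the post-divider M. A runnable worked example with invented register values (integer divider 4, fractional part 2500000, M = 2, 2 GHz parent):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rate = 2000000000ULL;		/* parent_rate */
	uint32_t synth_int_div = 4;
	uint32_t synth_frac_div = 2500000;
	uint32_t synth_div_m = 2;
	uint32_t synth_div_freq =
		synth_int_div * 10000000 + synth_frac_div;

	rate *= 10000000;			/* keep the fraction integral */
	rate /= synth_div_freq;
	rate *= 8;				/* phase output K is 8 */
	rate /= synth_div_m;			/* post-divider M */
	printf("synth rate = %llu Hz\n", (unsigned long long)rate);
	return 0;
}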
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
index b326d2797feb..d493307b73f4 100644
--- a/drivers/clk/ti/gate.c
+++ b/drivers/clk/ti/gate.c
@@ -22,6 +22,8 @@
22#include <linux/of_address.h> 22#include <linux/of_address.h>
23#include <linux/clk/ti.h> 23#include <linux/clk/ti.h>
24 24
25#include "clock.h"
26
25#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw) 27#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
26 28
27#undef pr_fmt 29#undef pr_fmt
@@ -90,63 +92,164 @@ static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
90 return ret; 92 return ret;
91} 93}
92 94
93static void __init _of_ti_gate_clk_setup(struct device_node *node, 95static struct clk *_register_gate(struct device *dev, const char *name,
94 const struct clk_ops *ops, 96 const char *parent_name, unsigned long flags,
95 const struct clk_hw_omap_ops *hw_ops) 97 void __iomem *reg, u8 bit_idx,
98 u8 clk_gate_flags, const struct clk_ops *ops,
99 const struct clk_hw_omap_ops *hw_ops)
96{ 100{
97 struct clk *clk;
98 struct clk_init_data init = { NULL }; 101 struct clk_init_data init = { NULL };
99 struct clk_hw_omap *clk_hw; 102 struct clk_hw_omap *clk_hw;
100 const char *clk_name = node->name; 103 struct clk *clk;
101 const char *parent_name;
102 u32 val;
103 104
104 clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL); 105 clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
105 if (!clk_hw) 106 if (!clk_hw)
106 return; 107 return ERR_PTR(-ENOMEM);
107 108
108 clk_hw->hw.init = &init; 109 clk_hw->hw.init = &init;
109 110
110 init.name = clk_name; 111 init.name = name;
111 init.ops = ops; 112 init.ops = ops;
112 113
113 if (ops != &omap_gate_clkdm_clk_ops) { 114 clk_hw->enable_reg = reg;
114 clk_hw->enable_reg = ti_clk_get_reg_addr(node, 0); 115 clk_hw->enable_bit = bit_idx;
115 if (!clk_hw->enable_reg) 116 clk_hw->ops = hw_ops;
116 goto cleanup;
117 117
118 if (!of_property_read_u32(node, "ti,bit-shift", &val)) 118 clk_hw->flags = MEMMAP_ADDRESSING | clk_gate_flags;
119 clk_hw->enable_bit = val; 119
120 init.parent_names = &parent_name;
121 init.num_parents = 1;
122
123 init.flags = flags;
124
125 clk = clk_register(NULL, &clk_hw->hw);
126
127 if (IS_ERR(clk))
128 kfree(clk_hw);
129
130 return clk;
131}
132
133#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
134struct clk *ti_clk_register_gate(struct ti_clk *setup)
135{
136 const struct clk_ops *ops = &omap_gate_clk_ops;
137 const struct clk_hw_omap_ops *hw_ops = NULL;
138 u32 reg;
139 struct clk_omap_reg *reg_setup;
140 u32 flags = 0;
141 u8 clk_gate_flags = 0;
142 struct ti_clk_gate *gate;
143
144 gate = setup->data;
145
146 if (gate->flags & CLKF_INTERFACE)
147 return ti_clk_register_interface(setup);
148
149 reg_setup = (struct clk_omap_reg *)&reg;
150
151 if (gate->flags & CLKF_SET_RATE_PARENT)
152 flags |= CLK_SET_RATE_PARENT;
153
154 if (gate->flags & CLKF_SET_BIT_TO_DISABLE)
155 clk_gate_flags |= INVERT_ENABLE;
156
157 if (gate->flags & CLKF_HSDIV) {
158 ops = &omap_gate_clk_hsdiv_restore_ops;
159 hw_ops = &clkhwops_wait;
120 } 160 }
121 161
122 clk_hw->ops = hw_ops; 162 if (gate->flags & CLKF_DSS)
163 hw_ops = &clkhwops_omap3430es2_dss_usbhost_wait;
164
165 if (gate->flags & CLKF_WAIT)
166 hw_ops = &clkhwops_wait;
167
168 if (gate->flags & CLKF_CLKDM)
169 ops = &omap_gate_clkdm_clk_ops;
170
171 if (gate->flags & CLKF_AM35XX)
172 hw_ops = &clkhwops_am35xx_ipss_module_wait;
123 173
124 clk_hw->flags = MEMMAP_ADDRESSING; 174 reg_setup->index = gate->module;
175 reg_setup->offset = gate->reg;
176
177 return _register_gate(NULL, setup->name, gate->parent, flags,
178 (void __iomem *)reg, gate->bit_shift,
179 clk_gate_flags, ops, hw_ops);
180}
181
182struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup)
183{
184 struct clk_hw_omap *gate;
185 struct clk_omap_reg *reg;
186 const struct clk_hw_omap_ops *ops = &clkhwops_wait;
187
188 if (!setup)
189 return NULL;
190
191 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
192 if (!gate)
193 return ERR_PTR(-ENOMEM);
194
195 reg = (struct clk_omap_reg *)&gate->enable_reg;
196 reg->index = setup->module;
197 reg->offset = setup->reg;
198
199 gate->enable_bit = setup->bit_shift;
200
201 if (setup->flags & CLKF_NO_WAIT)
202 ops = NULL;
203
204 if (setup->flags & CLKF_INTERFACE)
205 ops = &clkhwops_iclk_wait;
206
207 gate->ops = ops;
208 gate->flags = MEMMAP_ADDRESSING;
209
210 return &gate->hw;
211}
212#endif
213
214static void __init _of_ti_gate_clk_setup(struct device_node *node,
215 const struct clk_ops *ops,
216 const struct clk_hw_omap_ops *hw_ops)
217{
218 struct clk *clk;
219 const char *parent_name;
220 void __iomem *reg = NULL;
221 u8 enable_bit = 0;
222 u32 val;
223 u32 flags = 0;
224 u8 clk_gate_flags = 0;
225
226 if (ops != &omap_gate_clkdm_clk_ops) {
227 reg = ti_clk_get_reg_addr(node, 0);
228 if (!reg)
229 return;
230
231 if (!of_property_read_u32(node, "ti,bit-shift", &val))
232 enable_bit = val;
233 }
125 234
126 if (of_clk_get_parent_count(node) != 1) { 235 if (of_clk_get_parent_count(node) != 1) {
127 pr_err("%s must have 1 parent\n", clk_name); 236 pr_err("%s must have 1 parent\n", node->name);
128 goto cleanup; 237 return;
129 } 238 }
130 239
131 parent_name = of_clk_get_parent_name(node, 0); 240 parent_name = of_clk_get_parent_name(node, 0);
132 init.parent_names = &parent_name;
133 init.num_parents = 1;
134 241
135 if (of_property_read_bool(node, "ti,set-rate-parent")) 242 if (of_property_read_bool(node, "ti,set-rate-parent"))
136 init.flags |= CLK_SET_RATE_PARENT; 243 flags |= CLK_SET_RATE_PARENT;
137 244
138 if (of_property_read_bool(node, "ti,set-bit-to-disable")) 245 if (of_property_read_bool(node, "ti,set-bit-to-disable"))
139 clk_hw->flags |= INVERT_ENABLE; 246 clk_gate_flags |= INVERT_ENABLE;
140 247
141 clk = clk_register(NULL, &clk_hw->hw); 248 clk = _register_gate(NULL, node->name, parent_name, flags, reg,
249 enable_bit, clk_gate_flags, ops, hw_ops);
142 250
143 if (!IS_ERR(clk)) { 251 if (!IS_ERR(clk))
144 of_clk_add_provider(node, of_clk_src_simple_get, clk); 252 of_clk_add_provider(node, of_clk_src_simple_get, clk);
145 return;
146 }
147
148cleanup:
149 kfree(clk_hw);
150} 253}
151 254
152static void __init 255static void __init
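
The "ti,set-bit-to-disable" property mapped to INVERT_ENABLE above flips the polarity of the gate's enable bit. A runnable model of both polarities (INVERT_ENABLE is a local stand-in here; the kernel's flag value differs):

#include <stdio.h>
#include <stdint.h>

#define INVERT_ENABLE	0x1	/* local stand-in for the kernel flag */

static uint32_t gate_set(uint32_t v, int bit, int flags, int enable)
{
	int set = (flags & INVERT_ENABLE) ? !enable : enable;

	return set ? v | (1u << bit) : v & ~(1u << bit);
}

int main(void)
{
	printf("enable, normal:   0x%x\n", gate_set(0x00, 3, 0, 1));
	printf("enable, inverted: 0x%x\n",
	       gate_set(0xff, 3, INVERT_ENABLE, 1));
	return 0;
}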
diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
index 9c3e8c4aaa40..265d91f071c5 100644
--- a/drivers/clk/ti/interface.c
+++ b/drivers/clk/ti/interface.c
@@ -20,6 +20,7 @@
20#include <linux/of.h> 20#include <linux/of.h>
21#include <linux/of_address.h> 21#include <linux/of_address.h>
22#include <linux/clk/ti.h> 22#include <linux/clk/ti.h>
23#include "clock.h"
23 24
24#undef pr_fmt 25#undef pr_fmt
25#define pr_fmt(fmt) "%s: " fmt, __func__ 26#define pr_fmt(fmt) "%s: " fmt, __func__
@@ -31,53 +32,102 @@ static const struct clk_ops ti_interface_clk_ops = {
31 .is_enabled = &omap2_dflt_clk_is_enabled, 32 .is_enabled = &omap2_dflt_clk_is_enabled,
32}; 33};
33 34
34static void __init _of_ti_interface_clk_setup(struct device_node *node, 35static struct clk *_register_interface(struct device *dev, const char *name,
35 const struct clk_hw_omap_ops *ops) 36 const char *parent_name,
37 void __iomem *reg, u8 bit_idx,
38 const struct clk_hw_omap_ops *ops)
36{ 39{
37 struct clk *clk;
38 struct clk_init_data init = { NULL }; 40 struct clk_init_data init = { NULL };
39 struct clk_hw_omap *clk_hw; 41 struct clk_hw_omap *clk_hw;
40 const char *parent_name; 42 struct clk *clk;
41 u32 val;
42 43
43 clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL); 44 clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
44 if (!clk_hw) 45 if (!clk_hw)
45 return; 46 return ERR_PTR(-ENOMEM);
46 47
47 clk_hw->hw.init = &init; 48 clk_hw->hw.init = &init;
48 clk_hw->ops = ops; 49 clk_hw->ops = ops;
49 clk_hw->flags = MEMMAP_ADDRESSING; 50 clk_hw->flags = MEMMAP_ADDRESSING;
51 clk_hw->enable_reg = reg;
52 clk_hw->enable_bit = bit_idx;
50 53
51 clk_hw->enable_reg = ti_clk_get_reg_addr(node, 0); 54 init.name = name;
52 if (!clk_hw->enable_reg)
53 goto cleanup;
54
55 if (!of_property_read_u32(node, "ti,bit-shift", &val))
56 clk_hw->enable_bit = val;
57
58 init.name = node->name;
59 init.ops = &ti_interface_clk_ops; 55 init.ops = &ti_interface_clk_ops;
60 init.flags = 0; 56 init.flags = 0;
61 57
62 parent_name = of_clk_get_parent_name(node, 0);
63 if (!parent_name) {
64 pr_err("%s must have a parent\n", node->name);
65 goto cleanup;
66 }
67
68 init.num_parents = 1; 58 init.num_parents = 1;
69 init.parent_names = &parent_name; 59 init.parent_names = &parent_name;
70 60
71 clk = clk_register(NULL, &clk_hw->hw); 61 clk = clk_register(NULL, &clk_hw->hw);
72 62
73 if (!IS_ERR(clk)) { 63 if (IS_ERR(clk))
74 of_clk_add_provider(node, of_clk_src_simple_get, clk); 64 kfree(clk_hw);
65 else
75 omap2_init_clk_hw_omap_clocks(clk); 66 omap2_init_clk_hw_omap_clocks(clk);
67
68 return clk;
69}
70
71#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
72struct clk *ti_clk_register_interface(struct ti_clk *setup)
73{
74 const struct clk_hw_omap_ops *ops = &clkhwops_iclk_wait;
75 u32 reg;
76 struct clk_omap_reg *reg_setup;
77 struct ti_clk_gate *gate;
78
79 gate = setup->data;
80 reg_setup = (struct clk_omap_reg *)&reg;
81 reg_setup->index = gate->module;
82 reg_setup->offset = gate->reg;
83
84 if (gate->flags & CLKF_NO_WAIT)
85 ops = &clkhwops_iclk;
86
87 if (gate->flags & CLKF_HSOTGUSB)
88 ops = &clkhwops_omap3430es2_iclk_hsotgusb_wait;
89
90 if (gate->flags & CLKF_DSS)
91 ops = &clkhwops_omap3430es2_iclk_dss_usbhost_wait;
92
93 if (gate->flags & CLKF_SSI)
94 ops = &clkhwops_omap3430es2_iclk_ssi_wait;
95
96 if (gate->flags & CLKF_AM35XX)
97 ops = &clkhwops_am35xx_ipss_wait;
98
99 return _register_interface(NULL, setup->name, gate->parent,
100 (void __iomem *)reg, gate->bit_shift, ops);
101}
102#endif
103
104static void __init _of_ti_interface_clk_setup(struct device_node *node,
105 const struct clk_hw_omap_ops *ops)
106{
107 struct clk *clk;
108 const char *parent_name;
109 void __iomem *reg;
110 u8 enable_bit = 0;
111 u32 val;
112
113 reg = ti_clk_get_reg_addr(node, 0);
114 if (!reg)
115 return;
116
117 if (!of_property_read_u32(node, "ti,bit-shift", &val))
118 enable_bit = val;
119
120 parent_name = of_clk_get_parent_name(node, 0);
121 if (!parent_name) {
122 pr_err("%s must have a parent\n", node->name);
76 return; 123 return;
77 } 124 }
78 125
79cleanup: 126 clk = _register_interface(NULL, node->name, parent_name, reg,
80 kfree(clk_hw); 127 enable_bit, ops);
128
129 if (!IS_ERR(clk))
130 of_clk_add_provider(node, of_clk_src_simple_get, clk);
81} 131}
82 132
83static void __init of_ti_interface_clk_setup(struct device_node *node) 133static void __init of_ti_interface_clk_setup(struct device_node *node)
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index e9d650e51287..728e253606bc 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -21,6 +21,7 @@
21#include <linux/of.h> 21#include <linux/of.h>
22#include <linux/of_address.h> 22#include <linux/of_address.h>
23#include <linux/clk/ti.h> 23#include <linux/clk/ti.h>
24#include "clock.h"
24 25
25#undef pr_fmt 26#undef pr_fmt
26#define pr_fmt(fmt) "%s: " fmt, __func__ 27#define pr_fmt(fmt) "%s: " fmt, __func__
@@ -144,6 +145,39 @@ static struct clk *_register_mux(struct device *dev, const char *name,
144 return clk; 145 return clk;
145} 146}
146 147
148struct clk *ti_clk_register_mux(struct ti_clk *setup)
149{
150 struct ti_clk_mux *mux;
151 u32 flags;
152 u8 mux_flags = 0;
153 struct clk_omap_reg *reg_setup;
154 u32 reg;
155 u32 mask;
156
157 reg_setup = (struct clk_omap_reg *)&reg;
158
159 mux = setup->data;
160 flags = CLK_SET_RATE_NO_REPARENT;
161
162 mask = mux->num_parents;
163 if (!(mux->flags & CLKF_INDEX_STARTS_AT_ONE))
164 mask--;
165
166 mask = (1 << fls(mask)) - 1;
167 reg_setup->index = mux->module;
168 reg_setup->offset = mux->reg;
169
170 if (mux->flags & CLKF_INDEX_STARTS_AT_ONE)
171 mux_flags |= CLK_MUX_INDEX_ONE;
172
173 if (mux->flags & CLKF_SET_RATE_PARENT)
174 flags |= CLK_SET_RATE_PARENT;
175
176 return _register_mux(NULL, setup->name, mux->parents, mux->num_parents,
177 flags, (void __iomem *)reg, mux->bit_shift, mask,
178 mux_flags, NULL, NULL);
179}
180
147/** 181/**
148 * of_mux_clk_setup - Setup function for simple mux rate clock 182 * of_mux_clk_setup - Setup function for simple mux rate clock
149 * @node: DT node for the clock 183 * @node: DT node for the clock
@@ -194,8 +228,9 @@ static void of_mux_clk_setup(struct device_node *node)
194 228
195 mask = (1 << fls(mask)) - 1; 229 mask = (1 << fls(mask)) - 1;
196 230
197 clk = _register_mux(NULL, node->name, parent_names, num_parents, flags, 231 clk = _register_mux(NULL, node->name, parent_names, num_parents,
198 reg, shift, mask, clk_mux_flags, NULL, NULL); 232 flags, reg, shift, mask, clk_mux_flags, NULL,
233 NULL);
199 234
200 if (!IS_ERR(clk)) 235 if (!IS_ERR(clk))
201 of_clk_add_provider(node, of_clk_src_simple_get, clk); 236 of_clk_add_provider(node, of_clk_src_simple_get, clk);
@@ -205,6 +240,37 @@ cleanup:
205} 240}
206CLK_OF_DECLARE(mux_clk, "ti,mux-clock", of_mux_clk_setup); 241CLK_OF_DECLARE(mux_clk, "ti,mux-clock", of_mux_clk_setup);
207 242
243struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup)
244{
245 struct clk_mux *mux;
246 struct clk_omap_reg *reg;
247 int num_parents;
248
249 if (!setup)
250 return NULL;
251
252 mux = kzalloc(sizeof(*mux), GFP_KERNEL);
253 if (!mux)
254 return ERR_PTR(-ENOMEM);
255
256 reg = (struct clk_omap_reg *)&mux->reg;
257
258 mux->shift = setup->bit_shift;
259
260 reg->index = setup->module;
261 reg->offset = setup->reg;
262
263 if (setup->flags & CLKF_INDEX_STARTS_AT_ONE)
264 mux->flags |= CLK_MUX_INDEX_ONE;
265
266 num_parents = setup->num_parents;
267
268 mux->mask = num_parents - 1;
269 mux->mask = (1 << fls(mux->mask)) - 1;
270
271 return &mux->hw;
272}
273
208static void __init of_ti_composite_mux_clk_setup(struct device_node *node) 274static void __init of_ti_composite_mux_clk_setup(struct device_node *node)
209{ 275{
210 struct clk_mux *mux; 276 struct clk_mux *mux;
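
Both ti_clk_register_mux() and ti_clk_build_component_mux() round the parent-select mask up to a full bit-field with fls(). A runnable worked example: five parents with a 0-based selector need values 0..4, so the mask rounds up to 0x7 (fls() reimplemented for userspace):

#include <stdio.h>

static int fls_(unsigned int v)
{
	int n = 0;

	while (v) {
		n++;
		v >>= 1;
	}
	return n;
}

int main(void)
{
	unsigned int num_parents = 5;
	unsigned int mask = num_parents - 1;	/* no CLKF_INDEX_STARTS_AT_ONE */

	mask = (1u << fls_(mask)) - 1;
	printf("mask = 0x%x\n", mask);		/* prints 0x7 */
	return 0;
}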
diff --git a/drivers/clk/ux500/clk-prcc.c b/drivers/clk/ux500/clk-prcc.c
index bd4769a84485..0e950769ed03 100644
--- a/drivers/clk/ux500/clk-prcc.c
+++ b/drivers/clk/ux500/clk-prcc.c
@@ -8,7 +8,6 @@
8 */ 8 */
9 9
10#include <linux/clk-provider.h> 10#include <linux/clk-provider.h>
11#include <linux/clk-private.h>
12#include <linux/slab.h> 11#include <linux/slab.h>
13#include <linux/io.h> 12#include <linux/io.h>
14#include <linux/err.h> 13#include <linux/err.h>
diff --git a/drivers/clk/ux500/clk-prcmu.c b/drivers/clk/ux500/clk-prcmu.c
index e2d63bc47436..bf63c96acb1a 100644
--- a/drivers/clk/ux500/clk-prcmu.c
+++ b/drivers/clk/ux500/clk-prcmu.c
@@ -8,7 +8,6 @@
8 */ 8 */
9 9
10#include <linux/clk-provider.h> 10#include <linux/clk-provider.h>
11#include <linux/clk-private.h>
12#include <linux/mfd/dbx500-prcmu.h> 11#include <linux/mfd/dbx500-prcmu.h>
13#include <linux/slab.h> 12#include <linux/slab.h>
14#include <linux/io.h> 13#include <linux/io.h>
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index 9037bebd69f7..f870aad57711 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -303,6 +303,7 @@ static void __init zynq_clk_setup(struct device_node *np)
303 clks[cpu_2x] = clk_register_gate(NULL, clk_output_name[cpu_2x], 303 clks[cpu_2x] = clk_register_gate(NULL, clk_output_name[cpu_2x],
304 "cpu_2x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL, 304 "cpu_2x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL,
305 26, 0, &armclk_lock); 305 26, 0, &armclk_lock);
306 clk_prepare_enable(clks[cpu_2x]);
306 307
307 clk = clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1, 308 clk = clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1,
308 4 + 2 * tmp); 309 4 + 2 * tmp);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 1c2506f68122..68161f7a07d6 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -63,6 +63,11 @@ config VT8500_TIMER
63config CADENCE_TTC_TIMER 63config CADENCE_TTC_TIMER
64 bool 64 bool
65 65
66config ASM9260_TIMER
67 bool
68 select CLKSRC_MMIO
69 select CLKSRC_OF
70
66config CLKSRC_NOMADIK_MTU 71config CLKSRC_NOMADIK_MTU
67 bool 72 bool
68 depends on (ARCH_NOMADIK || ARCH_U8500) 73 depends on (ARCH_NOMADIK || ARCH_U8500)
@@ -245,15 +250,4 @@ config CLKSRC_PXA
245 help 250 help
246 This enables OST0 support available on PXA and SA-11x0 251 This enables OST0 support available on PXA and SA-11x0
247 platforms. 252 platforms.
248
249config ASM9260_TIMER
250 bool "Alphascale ASM9260 timer driver"
251 depends on GENERIC_CLOCKEVENTS
252 select CLKSRC_MMIO
253 select CLKSRC_OF
254 default y if MACH_ASM9260
255 help
256 This enables build of a clocksource and clockevent driver for
257 the 32-bit System Timer hardware available on a Alphascale ASM9260.
258
259endmenu 253endmenu
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
index 32a3d25795d3..68ab42356d0e 100644
--- a/drivers/clocksource/mtk_timer.c
+++ b/drivers/clocksource/mtk_timer.c
@@ -224,6 +224,8 @@ static void __init mtk_timer_init(struct device_node *node)
224 } 224 }
225 rate = clk_get_rate(clk); 225 rate = clk_get_rate(clk);
226 226
227 mtk_timer_global_reset(evt);
228
227 if (request_irq(evt->dev.irq, mtk_timer_interrupt, 229 if (request_irq(evt->dev.irq, mtk_timer_interrupt,
228 IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) { 230 IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
229 pr_warn("failed to setup irq %d\n", evt->dev.irq); 231 pr_warn("failed to setup irq %d\n", evt->dev.irq);
@@ -232,8 +234,6 @@ static void __init mtk_timer_init(struct device_node *node)
232 234
233 evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); 235 evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
234 236
235 mtk_timer_global_reset(evt);
236
237 /* Configure clock source */ 237 /* Configure clock source */
238 mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN); 238 mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
239 clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC), 239 clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
@@ -241,10 +241,11 @@ static void __init mtk_timer_init(struct device_node *node)
241 241
242 /* Configure clock event */ 242 /* Configure clock event */
243 mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT); 243 mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
244 mtk_timer_enable_irq(evt, GPT_CLK_EVT);
245
246 clockevents_config_and_register(&evt->dev, rate, 0x3, 244 clockevents_config_and_register(&evt->dev, rate, 0x3,
247 0xffffffff); 245 0xffffffff);
246
247 mtk_timer_enable_irq(evt, GPT_CLK_EVT);
248
248 return; 249 return;
249 250
250err_clk_disable: 251err_clk_disable:
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
index 941f3f344e08..d9438af2bbd6 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/pxa_timer.c
@@ -163,7 +163,7 @@ static struct irqaction pxa_ost0_irq = {
163 .dev_id = &ckevt_pxa_osmr0, 163 .dev_id = &ckevt_pxa_osmr0,
164}; 164};
165 165
166static void pxa_timer_common_init(int irq, unsigned long clock_tick_rate) 166static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
167{ 167{
168 timer_writel(0, OIER); 168 timer_writel(0, OIER);
169 timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR); 169 timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
diff --git a/drivers/connector/Kconfig b/drivers/connector/Kconfig
index 6e6730f9dfd1..3de5f3a9a104 100644
--- a/drivers/connector/Kconfig
+++ b/drivers/connector/Kconfig
@@ -12,7 +12,7 @@ menuconfig CONNECTOR
12if CONNECTOR 12if CONNECTOR
13 13
14config PROC_EVENTS 14config PROC_EVENTS
15 boolean "Report process events to userspace" 15 bool "Report process events to userspace"
16 depends on CONNECTOR=y 16 depends on CONNECTOR=y
17 default y 17 default y
18 ---help--- 18 ---help---
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0f9a2c3c0e0d..1b06fc4640e2 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -26,13 +26,21 @@ config ARM_VEXPRESS_SPC_CPUFREQ
26 26
27 27
28config ARM_EXYNOS_CPUFREQ 28config ARM_EXYNOS_CPUFREQ
29 bool 29 tristate "SAMSUNG EXYNOS CPUfreq Driver"
30 depends on CPU_EXYNOS4210 || SOC_EXYNOS4212 || SOC_EXYNOS4412 || SOC_EXYNOS5250
31 depends on THERMAL
32 help
33 This adds the CPUFreq driver for Samsung EXYNOS platforms.
34 Supported SoC versions are:
35 Exynos4210, Exynos4212, Exynos4412, and Exynos5250.
36
37 If in doubt, say N.
30 38
31config ARM_EXYNOS4210_CPUFREQ 39config ARM_EXYNOS4210_CPUFREQ
32 bool "SAMSUNG EXYNOS4210" 40 bool "SAMSUNG EXYNOS4210"
33 depends on CPU_EXYNOS4210 41 depends on CPU_EXYNOS4210
42 depends on ARM_EXYNOS_CPUFREQ
34 default y 43 default y
35 select ARM_EXYNOS_CPUFREQ
36 help 44 help
37 This adds the CPUFreq driver for Samsung EXYNOS4210 45 This adds the CPUFreq driver for Samsung EXYNOS4210
38 SoC (S5PV310 or S5PC210). 46 SoC (S5PV310 or S5PC210).
@@ -42,8 +50,8 @@ config ARM_EXYNOS4210_CPUFREQ
42config ARM_EXYNOS4X12_CPUFREQ 50config ARM_EXYNOS4X12_CPUFREQ
43 bool "SAMSUNG EXYNOS4x12" 51 bool "SAMSUNG EXYNOS4x12"
44 depends on SOC_EXYNOS4212 || SOC_EXYNOS4412 52 depends on SOC_EXYNOS4212 || SOC_EXYNOS4412
53 depends on ARM_EXYNOS_CPUFREQ
45 default y 54 default y
46 select ARM_EXYNOS_CPUFREQ
47 help 55 help
48 This adds the CPUFreq driver for Samsung EXYNOS4X12 56 This adds the CPUFreq driver for Samsung EXYNOS4X12
49 SoC (EXYNOS4212 or EXYNOS4412). 57 SoC (EXYNOS4212 or EXYNOS4412).
@@ -53,28 +61,14 @@ config ARM_EXYNOS4X12_CPUFREQ
53config ARM_EXYNOS5250_CPUFREQ 61config ARM_EXYNOS5250_CPUFREQ
54 bool "SAMSUNG EXYNOS5250" 62 bool "SAMSUNG EXYNOS5250"
55 depends on SOC_EXYNOS5250 63 depends on SOC_EXYNOS5250
64 depends on ARM_EXYNOS_CPUFREQ
56 default y 65 default y
57 select ARM_EXYNOS_CPUFREQ
58 help 66 help
59 This adds the CPUFreq driver for Samsung EXYNOS5250 67 This adds the CPUFreq driver for Samsung EXYNOS5250
60 SoC. 68 SoC.
61 69
62 If in doubt, say N. 70 If in doubt, say N.
63 71
64config ARM_EXYNOS5440_CPUFREQ
65 bool "SAMSUNG EXYNOS5440"
66 depends on SOC_EXYNOS5440
67 depends on HAVE_CLK && OF
68 select PM_OPP
69 default y
70 help
71 This adds the CPUFreq driver for Samsung EXYNOS5440
72 SoC. The nature of exynos5440 clock controller is
73 different than previous exynos controllers so not using
74 the common exynos framework.
75
76 If in doubt, say N.
77
78config ARM_EXYNOS_CPU_FREQ_BOOST_SW 72config ARM_EXYNOS_CPU_FREQ_BOOST_SW
79 bool "EXYNOS Frequency Overclocking - Software" 73 bool "EXYNOS Frequency Overclocking - Software"
80 depends on ARM_EXYNOS_CPUFREQ && THERMAL 74 depends on ARM_EXYNOS_CPUFREQ && THERMAL
@@ -90,6 +84,20 @@ config ARM_EXYNOS_CPU_FREQ_BOOST_SW
90 84
91 If in doubt, say N. 85 If in doubt, say N.
92 86
87config ARM_EXYNOS5440_CPUFREQ
88 tristate "SAMSUNG EXYNOS5440"
89 depends on SOC_EXYNOS5440
90 depends on HAVE_CLK && OF
91 select PM_OPP
92 default y
93 help
94 This adds the CPUFreq driver for Samsung EXYNOS5440
95 SoC. The nature of exynos5440 clock controller is
96 different than previous exynos controllers so not using
97 the common exynos framework.
98
99 If in doubt, say N.
100
93config ARM_HIGHBANK_CPUFREQ 101config ARM_HIGHBANK_CPUFREQ
94 tristate "Calxeda Highbank-based" 102 tristate "Calxeda Highbank-based"
95 depends on ARCH_HIGHBANK && CPUFREQ_DT && REGULATOR 103 depends on ARCH_HIGHBANK && CPUFREQ_DT && REGULATOR
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index 72564b701b4a..7ea24413cee6 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -26,7 +26,7 @@ config CPU_FREQ_MAPLE
 config PPC_CORENET_CPUFREQ
 	tristate "CPU frequency scaling driver for Freescale E500MC SoCs"
 	depends on PPC_E500MC && OF && COMMON_CLK
-	select CLK_PPC_CORENET
+	select CLK_QORIQ
 	help
 	  This adds the CPUFreq driver support for Freescale e500mc,
 	  e5500 and e6500 series SoCs which are capable of changing
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 8b4220ac888b..82a1821471fd 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -52,10 +52,11 @@ obj-$(CONFIG_ARM_DT_BL_CPUFREQ)		+= arm_big_little_dt.o
 
 obj-$(CONFIG_ARCH_DAVINCI)		+= davinci-cpufreq.o
 obj-$(CONFIG_UX500_SOC_DB8500)		+= dbx500-cpufreq.o
-obj-$(CONFIG_ARM_EXYNOS_CPUFREQ)	+= exynos-cpufreq.o
-obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ)	+= exynos4210-cpufreq.o
-obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ)	+= exynos4x12-cpufreq.o
-obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ)	+= exynos5250-cpufreq.o
+obj-$(CONFIG_ARM_EXYNOS_CPUFREQ)	+= arm-exynos-cpufreq.o
+arm-exynos-cpufreq-y			:= exynos-cpufreq.o
+arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4210_CPUFREQ)	+= exynos4210-cpufreq.o
+arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ)	+= exynos4x12-cpufreq.o
+arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS5250_CPUFREQ)	+= exynos5250-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ)	+= exynos5440-cpufreq.o
 obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ)	+= highbank-cpufreq.o
 obj-$(CONFIG_ARM_IMX6Q_CPUFREQ)		+= imx6q-cpufreq.o
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index f99a0b0b7c06..5e98c6b1f284 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -18,10 +18,13 @@
 #include <linux/cpufreq.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
+#include <linux/cpu_cooling.h>
+#include <linux/cpu.h>
 
 #include "exynos-cpufreq.h"
 
 static struct exynos_dvfs_info *exynos_info;
+static struct thermal_cooling_device *cdev;
 static struct regulator *arm_regulator;
 static unsigned int locking_frequency;
 
@@ -156,6 +159,7 @@ static struct cpufreq_driver exynos_driver = {
 
 static int exynos_cpufreq_probe(struct platform_device *pdev)
 {
+	struct device_node *cpus, *np;
 	int ret = -EINVAL;
 
 	exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
@@ -198,9 +202,36 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
 	/* Done here as we want to capture boot frequency */
 	locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000;
 
-	if (!cpufreq_register_driver(&exynos_driver))
+	ret = cpufreq_register_driver(&exynos_driver);
+	if (ret)
+		goto err_cpufreq_reg;
+
+	cpus = of_find_node_by_path("/cpus");
+	if (!cpus) {
+		pr_err("failed to find cpus node\n");
+		return 0;
+	}
+
+	np = of_get_next_child(cpus, NULL);
+	if (!np) {
+		pr_err("failed to find cpus child node\n");
+		of_node_put(cpus);
 		return 0;
+	}
+
+	if (of_find_property(np, "#cooling-cells", NULL)) {
+		cdev = of_cpufreq_cooling_register(np,
+						   cpu_present_mask);
+		if (IS_ERR(cdev))
+			pr_err("running cpufreq without cooling device: %ld\n",
+			       PTR_ERR(cdev));
+	}
+	of_node_put(np);
+	of_node_put(cpus);
+
+	return 0;
 
+err_cpufreq_reg:
 	dev_err(&pdev->dev, "failed to register cpufreq driver\n");
 	regulator_put(arm_regulator);
 err_vdd_arm:
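
Editor's note on the exynos-cpufreq.c hunk above: the probe path now optionally binds the cpufreq driver to the thermal framework. The pattern is to locate the first CPU node under /cpus and register a cooling device only when that node declares #cooling-cells; a failed registration is logged but does not fail the probe. A condensed sketch of that pattern follows; the helper name example_register_cooling is illustrative, not part of the patch.

/*
 * Condensed sketch of the DT-driven cooling registration added above.
 * Assumes <linux/of.h>, <linux/cpu.h>, <linux/err.h> and
 * <linux/cpu_cooling.h>.
 */
static struct thermal_cooling_device *example_register_cooling(void)
{
	struct device_node *cpus, *np;
	struct thermal_cooling_device *cdev = NULL;

	cpus = of_find_node_by_path("/cpus");	/* DT container node */
	if (!cpus)
		return NULL;

	np = of_get_next_child(cpus, NULL);	/* first CPU node */
	if (np && of_find_property(np, "#cooling-cells", NULL))
		cdev = of_cpufreq_cooling_register(np, cpu_present_mask);

	of_node_put(np);
	of_node_put(cpus);
	return IS_ERR_OR_NULL(cdev) ? NULL : cdev; /* cpufreq works either way */
}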
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index 2fd53eaaec20..d6d425773fa4 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -263,7 +263,7 @@ out:
 }
 
 #ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
-static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
+static void s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
 {
 	int count, v, i, found;
 	struct cpufreq_frequency_table *pos;
@@ -333,7 +333,7 @@ static struct notifier_block s3c2416_cpufreq_reboot_notifier = {
 	.notifier_call = s3c2416_cpufreq_reboot_notifier_evt,
 };
 
-static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
+static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
 {
 	struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
 	struct cpufreq_frequency_table *pos;
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index d00f1cee4509..733aa5153e74 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -144,11 +144,6 @@ static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg)
 	(cfg->info->set_fvco)(cfg);
 }
 
-static inline void s3c_cpufreq_resume_clocks(void)
-{
-	cpu_cur.info->resume_clocks();
-}
-
 static inline void s3c_cpufreq_updateclk(struct clk *clk,
 					 unsigned int freq)
 {
@@ -417,9 +412,6 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
 
 	last_target = ~0;	/* invalidate last_target setting */
 
-	/* first, find out what speed we resumed at. */
-	s3c_cpufreq_resume_clocks();
-
 	/* whilst we will be called later on, we try and re-set the
 	 * cpu frequencies as soon as possible so that we do not end
 	 * up resuming devices and then immediately having to re-set
@@ -454,7 +446,7 @@ static struct cpufreq_driver s3c24xx_driver = {
 };
 
 
-int __init s3c_cpufreq_register(struct s3c_cpufreq_info *info)
+int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
 {
 	if (!info || !info->name) {
 		printk(KERN_ERR "%s: failed to pass valid information\n",
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index aedec0957934..59372077ec7c 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -13,6 +13,7 @@
 #include <linux/notifier.h>
 #include <linux/clockchips.h>
 #include <linux/of.h>
+#include <linux/slab.h>
 
 #include <asm/machdep.h>
 #include <asm/firmware.h>
@@ -158,70 +159,83 @@ static int powernv_add_idle_states(void)
 	struct device_node *power_mgt;
 	int nr_idle_states = 1; /* Snooze */
 	int dt_idle_states;
-	const __be32 *idle_state_flags;
-	const __be32 *idle_state_latency;
-	u32 len_flags, flags, latency_ns;
-	int i;
+	u32 *latency_ns, *residency_ns, *flags;
+	int i, rc;
 
 	/* Currently we have snooze statically defined */
 
 	power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
 	if (!power_mgt) {
 		pr_warn("opal: PowerMgmt Node not found\n");
-		return nr_idle_states;
+		goto out;
 	}
 
-	idle_state_flags = of_get_property(power_mgt, "ibm,cpu-idle-state-flags", &len_flags);
-	if (!idle_state_flags) {
-		pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n");
-		return nr_idle_states;
+	/* Read values of any property to determine the num of idle states */
+	dt_idle_states = of_property_count_u32_elems(power_mgt, "ibm,cpu-idle-state-flags");
+	if (dt_idle_states < 0) {
+		pr_warn("cpuidle-powernv: no idle states found in the DT\n");
+		goto out;
 	}
 
-	idle_state_latency = of_get_property(power_mgt,
-			"ibm,cpu-idle-state-latencies-ns", NULL);
-	if (!idle_state_latency) {
-		pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-latencies-ns\n");
-		return nr_idle_states;
+	flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
+	if (of_property_read_u32_array(power_mgt,
+			"ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
+		pr_warn("cpuidle-powernv : missing ibm,cpu-idle-state-flags in DT\n");
+		goto out_free_flags;
 	}
 
-	dt_idle_states = len_flags / sizeof(u32);
+	latency_ns = kzalloc(sizeof(*latency_ns) * dt_idle_states, GFP_KERNEL);
+	rc = of_property_read_u32_array(power_mgt,
+			"ibm,cpu-idle-state-latencies-ns", latency_ns, dt_idle_states);
+	if (rc) {
+		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
+		goto out_free_latency;
+	}
 
-	for (i = 0; i < dt_idle_states; i++) {
+	residency_ns = kzalloc(sizeof(*residency_ns) * dt_idle_states, GFP_KERNEL);
+	rc = of_property_read_u32_array(power_mgt,
+			"ibm,cpu-idle-state-residency-ns", residency_ns, dt_idle_states);
 
-		flags = be32_to_cpu(idle_state_flags[i]);
+	for (i = 0; i < dt_idle_states; i++) {
 
-		/* Cpuidle accepts exit_latency in us and we estimate
-		 * target residency to be 10x exit_latency
+		/*
+		 * Cpuidle accepts exit_latency and target_residency in us.
+		 * Use default target_residency values if f/w does not expose it.
 		 */
-		latency_ns = be32_to_cpu(idle_state_latency[i]);
-		if (flags & OPAL_PM_NAP_ENABLED) {
+		if (flags[i] & OPAL_PM_NAP_ENABLED) {
 			/* Add NAP state */
 			strcpy(powernv_states[nr_idle_states].name, "Nap");
 			strcpy(powernv_states[nr_idle_states].desc, "Nap");
 			powernv_states[nr_idle_states].flags = 0;
-			powernv_states[nr_idle_states].exit_latency =
-					((unsigned int)latency_ns) / 1000;
-			powernv_states[nr_idle_states].target_residency =
-					((unsigned int)latency_ns / 100);
+			powernv_states[nr_idle_states].target_residency = 100;
 			powernv_states[nr_idle_states].enter = &nap_loop;
-			nr_idle_states++;
-		}
-
-		if (flags & OPAL_PM_SLEEP_ENABLED ||
-			flags & OPAL_PM_SLEEP_ENABLED_ER1) {
+		} else if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
+			   flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
 			/* Add FASTSLEEP state */
 			strcpy(powernv_states[nr_idle_states].name, "FastSleep");
 			strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
 			powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP;
-			powernv_states[nr_idle_states].exit_latency =
-					((unsigned int)latency_ns) / 1000;
-			powernv_states[nr_idle_states].target_residency =
-					((unsigned int)latency_ns / 100);
+			powernv_states[nr_idle_states].target_residency = 300000;
 			powernv_states[nr_idle_states].enter = &fastsleep_loop;
-			nr_idle_states++;
 		}
+
+		powernv_states[nr_idle_states].exit_latency =
+				((unsigned int)latency_ns[i]) / 1000;
+
+		if (!rc) {
+			powernv_states[nr_idle_states].target_residency =
+					((unsigned int)residency_ns[i]) / 1000;
+		}
+
+		nr_idle_states++;
 	}
 
+	kfree(residency_ns);
+out_free_latency:
+	kfree(latency_ns);
+out_free_flags:
+	kfree(flags);
+out:
 	return nr_idle_states;
 }
 
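
Editor's note: the rewritten powernv_add_idle_states() above is an instance of a common device-tree idiom, which is to size parallel arrays from one property and then bulk-read each of them. A minimal sketch, with an illustrative example_node parameter and condensed error handling:

/*
 * Sketch of the count-then-bulk-read DT pattern used above.
 * Assumes <linux/of.h> and <linux/slab.h>.
 */
static int example_read_idle_flags(struct device_node *example_node)
{
	u32 *flags;
	int n, rc;

	n = of_property_count_u32_elems(example_node, "ibm,cpu-idle-state-flags");
	if (n < 0)
		return n;			/* property absent or malformed */

	flags = kcalloc(n, sizeof(*flags), GFP_KERNEL);
	if (!flags)
		return -ENOMEM;

	rc = of_property_read_u32_array(example_node,
					"ibm,cpu-idle-state-flags", flags, n);
	/* latencies and residencies are parallel arrays read the same way */
	kfree(flags);
	return rc;
}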
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index d594ae962ed2..fded0a5cfcd7 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -606,12 +606,12 @@ static void cryp_dma_done(struct cryp_ctx *ctx)
 	dev_dbg(ctx->device->dev, "[%s]: ", __func__);
 
 	chan = ctx->device->dma.chan_mem2cryp;
-	dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+	dmaengine_terminate_all(chan);
 	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
 		     ctx->device->dma.sg_src_len, DMA_TO_DEVICE);
 
 	chan = ctx->device->dma.chan_cryp2mem;
-	dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+	dmaengine_terminate_all(chan);
 	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
 		     ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
 }
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 70a20871e998..187a8fd7eee7 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -202,7 +202,7 @@ static void hash_dma_done(struct hash_ctx *ctx)
 	struct dma_chan *chan;
 
 	chan = ctx->device->dma.chan_mem2hash;
-	dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+	dmaengine_terminate_all(chan);
 	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
 		     ctx->device->dma.sg_len, DMA_TO_DEVICE);
 }
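
Editor's note: both ux500 hunks swap the open-coded dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0) for the dmaengine_terminate_all() helper declared in <linux/dmaengine.h>, so call sites no longer encode the command multiplexer themselves. Roughly, and hedged as an approximation of the transitional helper rather than a verbatim copy, it behaves like:

/* approximate shape of the transitional helper, not a verbatim copy */
static inline int sketch_terminate_all(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);
	/* legacy drivers are still reached through device_control() */
	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
}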
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index faf30a4e642b..a874b6ec6650 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -416,6 +416,15 @@ config NBPFAXI_DMA
 	help
 	  Support for "Type-AXI" NBPF DMA IPs from Renesas
 
+config IMG_MDC_DMA
+	tristate "IMG MDC support"
+	depends on MIPS || COMPILE_TEST
+	depends on MFD_SYSCON
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the IMG multi-threaded DMA controller (MDC).
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 2022b5451377..f915f61ec574 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -19,7 +19,7 @@ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
 obj-$(CONFIG_MX3_IPU) += ipu/
 obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
-obj-$(CONFIG_SH_DMAE_BASE) += sh/
+obj-$(CONFIG_RENESAS_DMA) += sh/
 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
 obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
@@ -50,3 +50,4 @@ obj-y += xilinx/
 obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
 obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
 obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
+obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 1364d00881dd..4a5fd245014e 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1386,32 +1386,6 @@ static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
 	return pl08x_cctl(cctl);
 }
 
-static int dma_set_runtime_config(struct dma_chan *chan,
-				  struct dma_slave_config *config)
-{
-	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
-	struct pl08x_driver_data *pl08x = plchan->host;
-
-	if (!plchan->slave)
-		return -EINVAL;
-
-	/* Reject definitely invalid configurations */
-	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
-	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
-		return -EINVAL;
-
-	if (config->device_fc && pl08x->vd->pl080s) {
-		dev_err(&pl08x->adev->dev,
-			"%s: PL080S does not support peripheral flow control\n",
-			__func__);
-		return -EINVAL;
-	}
-
-	plchan->cfg = *config;
-
-	return 0;
-}
-
 /*
  * Slave transactions callback to the slave device to allow
  * synchronization of slave DMA signals with the DMAC enable
@@ -1693,20 +1667,71 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
 	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
 }
 
-static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			 unsigned long arg)
+static int pl08x_config(struct dma_chan *chan,
+			struct dma_slave_config *config)
+{
+	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+	struct pl08x_driver_data *pl08x = plchan->host;
+
+	if (!plchan->slave)
+		return -EINVAL;
+
+	/* Reject definitely invalid configurations */
+	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+		return -EINVAL;
+
+	if (config->device_fc && pl08x->vd->pl080s) {
+		dev_err(&pl08x->adev->dev,
+			"%s: PL080S does not support peripheral flow control\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	plchan->cfg = *config;
+
+	return 0;
+}
+
+static int pl08x_terminate_all(struct dma_chan *chan)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	unsigned long flags;
-	int ret = 0;
 
-	/* Controls applicable to inactive channels */
-	if (cmd == DMA_SLAVE_CONFIG) {
-		return dma_set_runtime_config(chan,
-					      (struct dma_slave_config *)arg);
+	spin_lock_irqsave(&plchan->vc.lock, flags);
+	if (!plchan->phychan && !plchan->at) {
+		spin_unlock_irqrestore(&plchan->vc.lock, flags);
+		return 0;
 	}
 
+	plchan->state = PL08X_CHAN_IDLE;
+
+	if (plchan->phychan) {
+		/*
+		 * Mark physical channel as free and free any slave
+		 * signal
+		 */
+		pl08x_phy_free(plchan);
+	}
+	/* Dequeue jobs and free LLIs */
+	if (plchan->at) {
+		pl08x_desc_free(&plchan->at->vd);
+		plchan->at = NULL;
+	}
+	/* Dequeue jobs not yet fired as well */
+	pl08x_free_txd_list(pl08x, plchan);
+
+	spin_unlock_irqrestore(&plchan->vc.lock, flags);
+
+	return 0;
+}
+
+static int pl08x_pause(struct dma_chan *chan)
+{
+	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+	unsigned long flags;
+
 	/*
 	 * Anything succeeds on channels with no physical allocation and
 	 * no queued transfers.
@@ -1717,42 +1742,35 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		return 0;
 	}
 
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		plchan->state = PL08X_CHAN_IDLE;
+	pl08x_pause_phy_chan(plchan->phychan);
+	plchan->state = PL08X_CHAN_PAUSED;
 
-		if (plchan->phychan) {
-			/*
-			 * Mark physical channel as free and free any slave
-			 * signal
-			 */
-			pl08x_phy_free(plchan);
-		}
-		/* Dequeue jobs and free LLIs */
-		if (plchan->at) {
-			pl08x_desc_free(&plchan->at->vd);
-			plchan->at = NULL;
-		}
-		/* Dequeue jobs not yet fired as well */
-		pl08x_free_txd_list(pl08x, plchan);
-		break;
-	case DMA_PAUSE:
-		pl08x_pause_phy_chan(plchan->phychan);
-		plchan->state = PL08X_CHAN_PAUSED;
-		break;
-	case DMA_RESUME:
-		pl08x_resume_phy_chan(plchan->phychan);
-		plchan->state = PL08X_CHAN_RUNNING;
-		break;
-	default:
-		/* Unknown command */
-		ret = -ENXIO;
-		break;
+	spin_unlock_irqrestore(&plchan->vc.lock, flags);
+
+	return 0;
+}
+
+static int pl08x_resume(struct dma_chan *chan)
+{
+	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+	unsigned long flags;
+
+	/*
+	 * Anything succeeds on channels with no physical allocation and
+	 * no queued transfers.
+	 */
+	spin_lock_irqsave(&plchan->vc.lock, flags);
+	if (!plchan->phychan && !plchan->at) {
+		spin_unlock_irqrestore(&plchan->vc.lock, flags);
+		return 0;
 	}
 
+	pl08x_resume_phy_chan(plchan->phychan);
+	plchan->state = PL08X_CHAN_RUNNING;
+
 	spin_unlock_irqrestore(&plchan->vc.lock, flags);
 
-	return ret;
+	return 0;
 }
 
 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
@@ -2048,7 +2066,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
 	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
 	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
-	pl08x->memcpy.device_control = pl08x_control;
+	pl08x->memcpy.device_config = pl08x_config;
+	pl08x->memcpy.device_pause = pl08x_pause;
+	pl08x->memcpy.device_resume = pl08x_resume;
+	pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
 
 	/* Initialize slave engine */
 	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
@@ -2061,7 +2082,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	pl08x->slave.device_issue_pending = pl08x_issue_pending;
 	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
 	pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
-	pl08x->slave.device_control = pl08x_control;
+	pl08x->slave.device_config = pl08x_config;
+	pl08x->slave.device_pause = pl08x_pause;
+	pl08x->slave.device_resume = pl08x_resume;
+	pl08x->slave.device_terminate_all = pl08x_terminate_all;
 
 	/* Get the platform data */
 	pl08x->pd = dev_get_platdata(&adev->dev);
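
Editor's note: the amba-pl08x.c conversion above is the template for the at_hdmac, at_xdmac, bcm2835 and coh901318 hunks that follow. The single multiplexed device_control(chan, cmd, arg) callback is split into typed per-operation callbacks on struct dma_device. A minimal sketch of the resulting driver wiring, using placeholder foo_* names rather than kernel symbols:

/* placeholder driver showing the per-operation callback wiring */
static int foo_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	/* validate and store cfg, as pl08x_config() does above */
	return 0;
}

static int foo_terminate_all(struct dma_chan *chan)
{
	/* stop the hardware and flush queued descriptors */
	return 0;
}

static void foo_wire_callbacks(struct dma_device *dma)
{
	dma->device_config = foo_config;		/* was DMA_SLAVE_CONFIG */
	dma->device_terminate_all = foo_terminate_all;	/* was DMA_TERMINATE_ALL */
	/* optionally: dma->device_pause / dma->device_resume for DMA_PAUSE/RESUME */
}

One design consequence visible in every hunk is that the unknown-command (-ENXIO) branches disappear; the core can tell which operations a driver supports by which callback pointers are non-NULL.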
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index ca9dd2613283..1e1a4c567542 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -42,6 +42,11 @@
 #define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
 #define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
 				|ATC_DIF(AT_DMA_MEM_IF))
+#define ATC_DMA_BUSWIDTHS\
+	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 
 /*
  * Initial number of descriptors to allocate for each channel. This could
@@ -972,11 +977,13 @@ err_out:
 	return NULL;
 }
 
-static int set_runtime_config(struct dma_chan *chan,
-			      struct dma_slave_config *sconfig)
+static int atc_config(struct dma_chan *chan,
+		      struct dma_slave_config *sconfig)
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 
+	dev_vdbg(chan2dev(chan), "%s\n", __func__);
+
 	/* Check if it is chan is configured for slave transfers */
 	if (!chan->private)
 		return -EINVAL;
@@ -989,9 +996,28 @@ static int set_runtime_config(struct dma_chan *chan,
 	return 0;
 }
 
+static int atc_pause(struct dma_chan *chan)
+{
+	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
+	struct at_dma		*atdma = to_at_dma(chan->device);
+	int			chan_id = atchan->chan_common.chan_id;
+	unsigned long		flags;
 
-static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		       unsigned long arg)
+	LIST_HEAD(list);
+
+	dev_vdbg(chan2dev(chan), "%s\n", __func__);
+
+	spin_lock_irqsave(&atchan->lock, flags);
+
+	dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
+	set_bit(ATC_IS_PAUSED, &atchan->status);
+
+	spin_unlock_irqrestore(&atchan->lock, flags);
+
+	return 0;
+}
+
+static int atc_resume(struct dma_chan *chan)
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 	struct at_dma		*atdma = to_at_dma(chan->device);
@@ -1000,60 +1026,61 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
 	LIST_HEAD(list);
 
-	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
+	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
-	if (cmd == DMA_PAUSE) {
-		spin_lock_irqsave(&atchan->lock, flags);
+	if (!atc_chan_is_paused(atchan))
+		return 0;
 
-		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
-		set_bit(ATC_IS_PAUSED, &atchan->status);
+	spin_lock_irqsave(&atchan->lock, flags);
 
-		spin_unlock_irqrestore(&atchan->lock, flags);
-	} else if (cmd == DMA_RESUME) {
-		if (!atc_chan_is_paused(atchan))
-			return 0;
+	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
+	clear_bit(ATC_IS_PAUSED, &atchan->status);
 
-		spin_lock_irqsave(&atchan->lock, flags);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
-		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
-		clear_bit(ATC_IS_PAUSED, &atchan->status);
+	return 0;
+}
 
-		spin_unlock_irqrestore(&atchan->lock, flags);
-	} else if (cmd == DMA_TERMINATE_ALL) {
-		struct at_desc	*desc, *_desc;
-		/*
-		 * This is only called when something went wrong elsewhere, so
-		 * we don't really care about the data. Just disable the
-		 * channel. We still have to poll the channel enable bit due
-		 * to AHB/HSB limitations.
-		 */
-		spin_lock_irqsave(&atchan->lock, flags);
+static int atc_terminate_all(struct dma_chan *chan)
+{
+	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
+	struct at_dma		*atdma = to_at_dma(chan->device);
+	int			chan_id = atchan->chan_common.chan_id;
+	struct at_desc		*desc, *_desc;
+	unsigned long		flags;
 
-		/* disabling channel: must also remove suspend state */
-		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
+	LIST_HEAD(list);
 
-		/* confirm that this channel is disabled */
-		while (dma_readl(atdma, CHSR) & atchan->mask)
-			cpu_relax();
+	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
-		/* active_list entries will end up before queued entries */
-		list_splice_init(&atchan->queue, &list);
-		list_splice_init(&atchan->active_list, &list);
+	/*
+	 * This is only called when something went wrong elsewhere, so
+	 * we don't really care about the data. Just disable the
+	 * channel. We still have to poll the channel enable bit due
+	 * to AHB/HSB limitations.
+	 */
+	spin_lock_irqsave(&atchan->lock, flags);
 
-		/* Flush all pending and queued descriptors */
-		list_for_each_entry_safe(desc, _desc, &list, desc_node)
-			atc_chain_complete(atchan, desc);
+	/* disabling channel: must also remove suspend state */
+	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
 
-		clear_bit(ATC_IS_PAUSED, &atchan->status);
-		/* if channel dedicated to cyclic operations, free it */
-		clear_bit(ATC_IS_CYCLIC, &atchan->status);
+	/* confirm that this channel is disabled */
+	while (dma_readl(atdma, CHSR) & atchan->mask)
+		cpu_relax();
 
-		spin_unlock_irqrestore(&atchan->lock, flags);
-	} else if (cmd == DMA_SLAVE_CONFIG) {
-		return set_runtime_config(chan, (struct dma_slave_config *)arg);
-	} else {
-		return -ENXIO;
-	}
+	/* active_list entries will end up before queued entries */
+	list_splice_init(&atchan->queue, &list);
+	list_splice_init(&atchan->active_list, &list);
+
+	/* Flush all pending and queued descriptors */
+	list_for_each_entry_safe(desc, _desc, &list, desc_node)
+		atc_chain_complete(atchan, desc);
+
+	clear_bit(ATC_IS_PAUSED, &atchan->status);
+	/* if channel dedicated to cyclic operations, free it */
+	clear_bit(ATC_IS_CYCLIC, &atchan->status);
+
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return 0;
 }
@@ -1505,7 +1532,14 @@ static int __init at_dma_probe(struct platform_device *pdev)
 		/* controller can do slave DMA: can trigger cyclic transfers */
 		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
 		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
-		atdma->dma_common.device_control = atc_control;
+		atdma->dma_common.device_config = atc_config;
+		atdma->dma_common.device_pause = atc_pause;
+		atdma->dma_common.device_resume = atc_resume;
+		atdma->dma_common.device_terminate_all = atc_terminate_all;
+		atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
+		atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
+		atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	}
 
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
@@ -1622,7 +1656,7 @@ static void atc_suspend_cyclic(struct at_dma_chan *atchan)
 	if (!atc_chan_is_paused(atchan)) {
 		dev_warn(chan2dev(chan),
 		"cyclic channel not paused, should be done by channel user\n");
-		atc_control(chan, DMA_PAUSE, 0);
+		atc_pause(chan);
 	}
 
 	/* now preserve additional data for cyclic operations */
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 2787aba60c6b..d6bba6c636c2 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -232,7 +232,8 @@ enum atc_status {
 * @save_dscr: for cyclic operations, preserve next descriptor address in
 *  the cyclic list on suspend/resume cycle
 * @remain_desc: to save remain desc length
- * @dma_sconfig: configuration for slave transfers, passed via DMA_SLAVE_CONFIG
+ * @dma_sconfig: configuration for slave transfers, passed via
+ * .device_config
 * @lock: serializes enqueue/dequeue operations to descriptors lists
 * @active_list: list of descriptors dmaengine is being running on
 * @queue: list of descriptors ready to be submitted to engine
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b60d77a22df6..09e2825a547a 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -25,6 +25,7 @@
 #include <linux/dmapool.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/of_dma.h>
@@ -174,6 +175,13 @@
 
 #define AT_XDMAC_MAX_CHAN	0x20
 
+#define AT_XDMAC_DMA_BUSWIDTHS\
+	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
+	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
 enum atc_status {
 	AT_XDMAC_CHAN_IS_CYCLIC = 0,
 	AT_XDMAC_CHAN_IS_PAUSED,
@@ -184,15 +192,15 @@ struct at_xdmac_chan {
 	struct dma_chan		chan;
 	void __iomem		*ch_regs;
 	u32			mask;		/* Channel Mask */
-	u32			cfg[3];		/* Channel Configuration Register */
-	#define	AT_XDMAC_CUR_CFG	0	/* Current channel conf */
-	#define	AT_XDMAC_DEV_TO_MEM_CFG	1	/* Predifined dev to mem channel conf */
-	#define	AT_XDMAC_MEM_TO_DEV_CFG	2	/* Predifined mem to dev channel conf */
+	u32			cfg[2];		/* Channel Configuration Register */
+	#define	AT_XDMAC_DEV_TO_MEM_CFG	0	/* Predifined dev to mem channel conf */
+	#define	AT_XDMAC_MEM_TO_DEV_CFG	1	/* Predifined mem to dev channel conf */
 	u8			perid;		/* Peripheral ID */
 	u8			perif;		/* Peripheral Interface */
 	u8			memif;		/* Memory Interface */
 	u32			per_src_addr;
 	u32			per_dst_addr;
+	u32			save_cc;
 	u32			save_cim;
 	u32			save_cnda;
 	u32			save_cndc;
@@ -344,20 +352,13 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
 	at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);
 
 	/*
-	 * When doing memory to memory transfer we need to use the next
+	 * When doing non cyclic transfer we need to use the next
 	 * descriptor view 2 since some fields of the configuration register
 	 * depend on transfer size and src/dest addresses.
 	 */
-	if (is_slave_direction(first->direction)) {
+	if (at_xdmac_chan_is_cyclic(atchan)) {
 		reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
-		if (first->direction == DMA_MEM_TO_DEV)
-			atchan->cfg[AT_XDMAC_CUR_CFG] =
-				atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
-		else
-			atchan->cfg[AT_XDMAC_CUR_CFG] =
-				atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
-		at_xdmac_chan_write(atchan, AT_XDMAC_CC,
-				    atchan->cfg[AT_XDMAC_CUR_CFG]);
+		at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
 	} else {
 		/*
 		 * No need to write AT_XDMAC_CC reg, it will be done when the
@@ -561,7 +562,6 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct at_xdmac_desc	*first = NULL, *prev = NULL;
 	struct scatterlist	*sg;
 	int			i;
-	u32			cfg;
 	unsigned int		xfer_size = 0;
 
 	if (!sgl)
@@ -583,7 +583,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	/* Prepare descriptors. */
 	for_each_sg(sgl, sg, sg_len, i) {
 		struct at_xdmac_desc	*desc = NULL;
-		u32			len, mem;
+		u32			len, mem, dwidth, fixed_dwidth;
 
 		len = sg_dma_len(sg);
 		mem = sg_dma_address(sg);
@@ -608,17 +608,21 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		if (direction == DMA_DEV_TO_MEM) {
 			desc->lld.mbr_sa = atchan->per_src_addr;
 			desc->lld.mbr_da = mem;
-			cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
+			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
 		} else {
 			desc->lld.mbr_sa = mem;
 			desc->lld.mbr_da = atchan->per_dst_addr;
-			cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
 		}
-		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1		/* next descriptor view */
-			| AT_XDMAC_MBR_UBC_NDEN				/* next descriptor dst parameter update */
-			| AT_XDMAC_MBR_UBC_NSEN				/* next descriptor src parameter update */
-			| (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE)	/* descriptor fetch */
-			| len / (1 << at_xdmac_get_dwidth(cfg));	/* microblock length */
+		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
+		fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
+			       ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
+			       : AT_XDMAC_CC_DWIDTH_BYTE;
+		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2		/* next descriptor view */
+			| AT_XDMAC_MBR_UBC_NDEN				/* next descriptor dst parameter update */
+			| AT_XDMAC_MBR_UBC_NSEN				/* next descriptor src parameter update */
+			| (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE)	/* descriptor fetch */
+			| (len >> fixed_dwidth);			/* microblock length */
 		dev_dbg(chan2dev(chan),
 			 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
 			 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
@@ -882,7 +886,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	enum dma_status		ret;
 	int			residue;
 	u32			cur_nda, mask, value;
-	u8			dwidth = at_xdmac_get_dwidth(atchan->cfg[AT_XDMAC_CUR_CFG]);
+	u8			dwidth = 0;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret == DMA_COMPLETE)
@@ -912,7 +916,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	 */
 	mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
 	value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
-	if ((atchan->cfg[AT_XDMAC_CUR_CFG] & mask) == value) {
+	if ((desc->lld.mbr_cfg & mask) == value) {
 		at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
 		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
 			cpu_relax();
@@ -926,6 +930,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	 */
 	descs_list = &desc->descs_list;
 	list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
+		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
 		residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
 		if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
 			break;
@@ -1107,58 +1112,80 @@ static void at_xdmac_issue_pending(struct dma_chan *chan)
 	return;
 }
 
-static int at_xdmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			    unsigned long arg)
+static int at_xdmac_device_config(struct dma_chan *chan,
+				  struct dma_slave_config *config)
+{
+	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+	int ret;
+
+	dev_dbg(chan2dev(chan), "%s\n", __func__);
+
+	spin_lock_bh(&atchan->lock);
+	ret = at_xdmac_set_slave_config(chan, config);
+	spin_unlock_bh(&atchan->lock);
+
+	return ret;
+}
+
+static int at_xdmac_device_pause(struct dma_chan *chan)
 {
-	struct at_xdmac_desc	*desc, *_desc;
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
-	int			ret = 0;
 
-	dev_dbg(chan2dev(chan), "%s: cmd=%d\n", __func__, cmd);
+	dev_dbg(chan2dev(chan), "%s\n", __func__);
+
+	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
+		return 0;
 
 	spin_lock_bh(&atchan->lock);
+	at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
+	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
+	       & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
+		cpu_relax();
+	spin_unlock_bh(&atchan->lock);
 
-	switch (cmd) {
-	case DMA_PAUSE:
-		at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
-		set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
-		break;
+	return 0;
+}
 
-	case DMA_RESUME:
-		if (!at_xdmac_chan_is_paused(atchan))
-			break;
+static int at_xdmac_device_resume(struct dma_chan *chan)
+{
+	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
 
-		at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
-		clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
-		break;
+	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	case DMA_TERMINATE_ALL:
-		at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
-		while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
-			cpu_relax();
+	spin_lock_bh(&atchan->lock);
+	if (!at_xdmac_chan_is_paused(atchan))
+		return 0;
+
+	at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
+	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+	spin_unlock_bh(&atchan->lock);
 
-		/* Cancel all pending transfers. */
-		list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
-			at_xdmac_remove_xfer(atchan, desc);
+	return 0;
+}
+
+static int at_xdmac_device_terminate_all(struct dma_chan *chan)
+{
+	struct at_xdmac_desc	*desc, *_desc;
+	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
 
-		clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
-		break;
+	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	case DMA_SLAVE_CONFIG:
-		ret = at_xdmac_set_slave_config(chan,
-				(struct dma_slave_config *)arg);
-		break;
+	spin_lock_bh(&atchan->lock);
+	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
+	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
+		cpu_relax();
 
-	default:
-		dev_err(chan2dev(chan),
-			"unmanaged or unknown dma control cmd: %d\n", cmd);
-		ret = -ENXIO;
-	}
+	/* Cancel all pending transfers. */
+	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
+		at_xdmac_remove_xfer(atchan, desc);
 
+	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
 	spin_unlock_bh(&atchan->lock);
 
-	return ret;
+	return 0;
 }
 
 static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
@@ -1217,27 +1244,6 @@ static void at_xdmac_free_chan_resources(struct dma_chan *chan)
 	return;
 }
 
-#define AT_XDMAC_DMA_BUSWIDTHS\
-	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
-	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
-	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
-	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
-	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
-
-static int at_xdmac_device_slave_caps(struct dma_chan *dchan,
-				      struct dma_slave_caps *caps)
-{
-
-	caps->src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
-	caps->dstn_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
-	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	caps->cmd_pause = true;
-	caps->cmd_terminate = true;
-	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-
-	return 0;
-}
-
 #ifdef CONFIG_PM
 static int atmel_xdmac_prepare(struct device *dev)
 {
@@ -1268,9 +1274,10 @@ static int atmel_xdmac_suspend(struct device *dev)
 	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
 		struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 
+		atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
 		if (at_xdmac_chan_is_cyclic(atchan)) {
 			if (!at_xdmac_chan_is_paused(atchan))
-				at_xdmac_control(chan, DMA_PAUSE, 0);
+				at_xdmac_device_pause(chan);
 			atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
 			atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
 			atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
@@ -1290,7 +1297,6 @@ static int atmel_xdmac_resume(struct device *dev)
 	struct at_xdmac_chan	*atchan;
 	struct dma_chan		*chan, *_chan;
 	int			i;
-	u32			cfg;
 
 	clk_prepare_enable(atxdmac->clk);
 
@@ -1305,8 +1311,7 @@ static int atmel_xdmac_resume(struct device *dev)
 	at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs);
 	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
 		atchan = to_at_xdmac_chan(chan);
-		cfg = atchan->cfg[AT_XDMAC_CUR_CFG];
-		at_xdmac_chan_write(atchan, AT_XDMAC_CC, cfg);
+		at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
 		if (at_xdmac_chan_is_cyclic(atchan)) {
 			at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
 			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
@@ -1407,8 +1412,14 @@ static int at_xdmac_probe(struct platform_device *pdev)
 	atxdmac->dma.device_prep_dma_cyclic	= at_xdmac_prep_dma_cyclic;
 	atxdmac->dma.device_prep_dma_memcpy	= at_xdmac_prep_dma_memcpy;
 	atxdmac->dma.device_prep_slave_sg	= at_xdmac_prep_slave_sg;
-	atxdmac->dma.device_control		= at_xdmac_control;
-	atxdmac->dma.device_slave_caps		= at_xdmac_device_slave_caps;
+	atxdmac->dma.device_config		= at_xdmac_device_config;
+	atxdmac->dma.device_pause		= at_xdmac_device_pause;
+	atxdmac->dma.device_resume		= at_xdmac_device_resume;
+	atxdmac->dma.device_terminate_all	= at_xdmac_device_terminate_all;
+	atxdmac->dma.src_addr_widths		= AT_XDMAC_DMA_BUSWIDTHS;
+	atxdmac->dma.dst_addr_widths		= AT_XDMAC_DMA_BUSWIDTHS;
+	atxdmac->dma.directions			= BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	atxdmac->dma.residue_granularity	= DMA_RESIDUE_GRANULARITY_BURST;
 
 	/* Disable all chans and interrupts. */
 	at_xdmac_off(atxdmac);
@@ -1507,7 +1518,6 @@ static struct platform_driver at_xdmac_driver = {
 	.remove		= at_xdmac_remove,
 	.driver = {
 		.name		= "at_xdmac",
-		.owner		= THIS_MODULE,
 		.of_match_table	= of_match_ptr(atmel_xdmac_dt_ids),
 		.pm		= &atmel_xdmac_dev_pm_ops,
 	}
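
Editor's note: at_hdmac and at_xdmac also show the second half of the API change. The per-driver device_slave_caps() callback is deleted and the same information is published as fields on struct dma_device at probe time, so the core can answer capability queries generically. A short sketch with placeholder foo_ naming and example bus widths:

/* placeholder probe-time capability advertisement */
static void foo_advertise_caps(struct dma_device *dma)
{
	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	/* pause/terminate support is now inferred from the callback pointers */
}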
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 918b7b3f766f..0723096fb50a 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -436,9 +436,11 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
 	return vchan_tx_prep(&c->vc, &d->vd, flags);
 }
 
-static int bcm2835_dma_slave_config(struct bcm2835_chan *c,
+static int bcm2835_dma_slave_config(struct dma_chan *chan,
 		struct dma_slave_config *cfg)
 {
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
 	if ((cfg->direction == DMA_DEV_TO_MEM &&
 	     cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
 	    (cfg->direction == DMA_MEM_TO_DEV &&
@@ -452,8 +454,9 @@ static int bcm2835_dma_slave_config(struct bcm2835_chan *c,
 	return 0;
 }
 
-static int bcm2835_dma_terminate_all(struct bcm2835_chan *c)
+static int bcm2835_dma_terminate_all(struct dma_chan *chan)
 {
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
 	struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
 	unsigned long flags;
 	int timeout = 10000;
@@ -495,24 +498,6 @@ static int bcm2835_dma_terminate_all(struct bcm2835_chan *c)
 	return 0;
 }
 
-static int bcm2835_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-	unsigned long arg)
-{
-	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
-
-	switch (cmd) {
-	case DMA_SLAVE_CONFIG:
-		return bcm2835_dma_slave_config(c,
-				(struct dma_slave_config *)arg);
-
-	case DMA_TERMINATE_ALL:
-		return bcm2835_dma_terminate_all(c);
-
-	default:
-		return -ENXIO;
-	}
-}
-
 static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
 {
 	struct bcm2835_chan *c;
@@ -565,18 +550,6 @@ static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
 	return chan;
 }
 
-static int bcm2835_dma_device_slave_caps(struct dma_chan *dchan,
-	struct dma_slave_caps *caps)
-{
-	caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
-	caps->dstn_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
-	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	caps->cmd_pause = false;
-	caps->cmd_terminate = true;
-
-	return 0;
-}
-
 static int bcm2835_dma_probe(struct platform_device *pdev)
 {
 	struct bcm2835_dmadev *od;
@@ -615,9 +588,12 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
 	od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
 	od->ddev.device_tx_status = bcm2835_dma_tx_status;
 	od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
-	od->ddev.device_slave_caps = bcm2835_dma_device_slave_caps;
 	od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
-	od->ddev.device_control = bcm2835_dma_control;
+	od->ddev.device_config = bcm2835_dma_slave_config;
+	od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
+	od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 	od->ddev.dev = &pdev->dev;
 	INIT_LIST_HEAD(&od->ddev.channels);
 	spin_lock_init(&od->lock);
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index e88588d8ecd3..fd22dd36985f 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1690,7 +1690,7 @@ static u32 coh901318_get_bytes_left(struct dma_chan *chan)
1690 * Pauses a transfer without losing data. Enables power save. 1690 * Pauses a transfer without losing data. Enables power save.
1691 * Use this function in conjunction with coh901318_resume. 1691 * Use this function in conjunction with coh901318_resume.
1692 */ 1692 */
1693static void coh901318_pause(struct dma_chan *chan) 1693static int coh901318_pause(struct dma_chan *chan)
1694{ 1694{
1695 u32 val; 1695 u32 val;
1696 unsigned long flags; 1696 unsigned long flags;
@@ -1730,12 +1730,13 @@ static void coh901318_pause(struct dma_chan *chan)
1730 enable_powersave(cohc); 1730 enable_powersave(cohc);
1731 1731
1732 spin_unlock_irqrestore(&cohc->lock, flags); 1732 spin_unlock_irqrestore(&cohc->lock, flags);
1733 return 0;
1733} 1734}
1734 1735
1735/* Resumes a transfer that has been stopped via 300_dma_stop(..). 1736/* Resumes a transfer that has been stopped via 300_dma_stop(..).
1736 Power save is handled. 1737 Power save is handled.
1737*/ 1738*/
1738static void coh901318_resume(struct dma_chan *chan) 1739static int coh901318_resume(struct dma_chan *chan)
1739{ 1740{
1740 u32 val; 1741 u32 val;
1741 unsigned long flags; 1742 unsigned long flags;
@@ -1760,6 +1761,7 @@ static void coh901318_resume(struct dma_chan *chan)
1760 } 1761 }
1761 1762
1762 spin_unlock_irqrestore(&cohc->lock, flags); 1763 spin_unlock_irqrestore(&cohc->lock, flags);
1764 return 0;
1763} 1765}
1764 1766
1765bool coh901318_filter_id(struct dma_chan *chan, void *chan_id) 1767bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
@@ -2114,6 +2116,57 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
2114 return IRQ_HANDLED; 2116 return IRQ_HANDLED;
2115} 2117}
2116 2118
2119static int coh901318_terminate_all(struct dma_chan *chan)
2120{
2121 unsigned long flags;
2122 struct coh901318_chan *cohc = to_coh901318_chan(chan);
2123 struct coh901318_desc *cohd;
2124 void __iomem *virtbase = cohc->base->virtbase;
2125
2126 /* The remainder of this function terminates the transfer */
2127 coh901318_pause(chan);
2128 spin_lock_irqsave(&cohc->lock, flags);
2129
2130 /* Clear any pending BE or TC interrupt */
2131 if (cohc->id < 32) {
2132 writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
2133 writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
2134 } else {
2135 writel(1 << (cohc->id - 32), virtbase +
2136 COH901318_BE_INT_CLEAR2);
2137 writel(1 << (cohc->id - 32), virtbase +
2138 COH901318_TC_INT_CLEAR2);
2139 }
2140
2141 enable_powersave(cohc);
2142
2143 while ((cohd = coh901318_first_active_get(cohc))) {
2144 /* release the lli allocation */
2145 coh901318_lli_free(&cohc->base->pool, &cohd->lli);
2146
2147 /* return desc to free-list */
2148 coh901318_desc_remove(cohd);
2149 coh901318_desc_free(cohc, cohd);
2150 }
2151
2152 while ((cohd = coh901318_first_queued(cohc))) {
2153 /* release the lli allocation */
2154 coh901318_lli_free(&cohc->base->pool, &cohd->lli);
2155
2156 /* return desc to free-list */
2157 coh901318_desc_remove(cohd);
2158 coh901318_desc_free(cohc, cohd);
2159 }
2160
2161
2162 cohc->nbr_active_done = 0;
2163 cohc->busy = 0;
2164
2165 spin_unlock_irqrestore(&cohc->lock, flags);
2166
2167 return 0;
2168}
2169
2117static int coh901318_alloc_chan_resources(struct dma_chan *chan) 2170static int coh901318_alloc_chan_resources(struct dma_chan *chan)
2118{ 2171{
2119 struct coh901318_chan *cohc = to_coh901318_chan(chan); 2172 struct coh901318_chan *cohc = to_coh901318_chan(chan);
@@ -2156,7 +2209,7 @@ coh901318_free_chan_resources(struct dma_chan *chan)
2156 2209
2157 spin_unlock_irqrestore(&cohc->lock, flags); 2210 spin_unlock_irqrestore(&cohc->lock, flags);
2158 2211
2159 dmaengine_terminate_all(chan); 2212 coh901318_terminate_all(chan);
2160} 2213}
2161 2214
2162 2215
@@ -2461,8 +2514,8 @@ static const struct burst_table burst_sizes[] = {
2461 }, 2514 },
2462}; 2515};
2463 2516
2464static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan, 2517static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
2465 struct dma_slave_config *config) 2518 struct dma_slave_config *config)
2466{ 2519{
2467 struct coh901318_chan *cohc = to_coh901318_chan(chan); 2520 struct coh901318_chan *cohc = to_coh901318_chan(chan);
2468 dma_addr_t addr; 2521 dma_addr_t addr;
@@ -2482,7 +2535,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
2482 maxburst = config->dst_maxburst; 2535 maxburst = config->dst_maxburst;
2483 } else { 2536 } else {
2484 dev_err(COHC_2_DEV(cohc), "illegal channel mode\n"); 2537 dev_err(COHC_2_DEV(cohc), "illegal channel mode\n");
2485 return; 2538 return -EINVAL;
2486 } 2539 }
2487 2540
2488 dev_dbg(COHC_2_DEV(cohc), "configure channel for %d byte transfers\n", 2541 dev_dbg(COHC_2_DEV(cohc), "configure channel for %d byte transfers\n",
@@ -2528,7 +2581,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
2528 default: 2581 default:
2529 dev_err(COHC_2_DEV(cohc), 2582 dev_err(COHC_2_DEV(cohc),
2530 "bad runtimeconfig: alien address width\n"); 2583 "bad runtimeconfig: alien address width\n");
2531 return; 2584 return -EINVAL;
2532 } 2585 }
2533 2586
2534 ctrl |= burst_sizes[i].reg; 2587 ctrl |= burst_sizes[i].reg;
@@ -2538,84 +2591,12 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
2538 2591
2539 cohc->addr = addr; 2592 cohc->addr = addr;
2540 cohc->ctrl = ctrl; 2593 cohc->ctrl = ctrl;
2541}
2542
2543static int
2544coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2545 unsigned long arg)
2546{
2547 unsigned long flags;
2548 struct coh901318_chan *cohc = to_coh901318_chan(chan);
2549 struct coh901318_desc *cohd;
2550 void __iomem *virtbase = cohc->base->virtbase;
2551
2552 if (cmd == DMA_SLAVE_CONFIG) {
2553 struct dma_slave_config *config =
2554 (struct dma_slave_config *) arg;
2555
2556 coh901318_dma_set_runtimeconfig(chan, config);
2557 return 0;
2558 }
2559
2560 if (cmd == DMA_PAUSE) {
2561 coh901318_pause(chan);
2562 return 0;
2563 }
2564
2565 if (cmd == DMA_RESUME) {
2566 coh901318_resume(chan);
2567 return 0;
2568 }
2569
2570 if (cmd != DMA_TERMINATE_ALL)
2571 return -ENXIO;
2572
2573 /* The remainder of this function terminates the transfer */
2574 coh901318_pause(chan);
2575 spin_lock_irqsave(&cohc->lock, flags);
2576
2577 /* Clear any pending BE or TC interrupt */
2578 if (cohc->id < 32) {
2579 writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
2580 writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
2581 } else {
2582 writel(1 << (cohc->id - 32), virtbase +
2583 COH901318_BE_INT_CLEAR2);
2584 writel(1 << (cohc->id - 32), virtbase +
2585 COH901318_TC_INT_CLEAR2);
2586 }
2587
2588 enable_powersave(cohc);
2589
2590 while ((cohd = coh901318_first_active_get(cohc))) {
2591 /* release the lli allocation*/
2592 coh901318_lli_free(&cohc->base->pool, &cohd->lli);
2593
2594 /* return desc to free-list */
2595 coh901318_desc_remove(cohd);
2596 coh901318_desc_free(cohc, cohd);
2597 }
2598
2599 while ((cohd = coh901318_first_queued(cohc))) {
2600 /* release the lli allocation*/
2601 coh901318_lli_free(&cohc->base->pool, &cohd->lli);
2602
2603 /* return desc to free-list */
2604 coh901318_desc_remove(cohd);
2605 coh901318_desc_free(cohc, cohd);
2606 }
2607
2608
2609 cohc->nbr_active_done = 0;
2610 cohc->busy = 0;
2611
2612 spin_unlock_irqrestore(&cohc->lock, flags);
2613 2594
2614 return 0; 2595 return 0;
2615} 2596}
2616 2597
2617void coh901318_base_init(struct dma_device *dma, const int *pick_chans, 2598static void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
2618 struct coh901318_base *base) 2599 struct coh901318_base *base)
2619{ 2600{
2620 int chans_i; 2601 int chans_i;
2621 int i = 0; 2602 int i = 0;
@@ -2717,7 +2698,10 @@ static int __init coh901318_probe(struct platform_device *pdev)
2717 base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg; 2698 base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
2718 base->dma_slave.device_tx_status = coh901318_tx_status; 2699 base->dma_slave.device_tx_status = coh901318_tx_status;
2719 base->dma_slave.device_issue_pending = coh901318_issue_pending; 2700 base->dma_slave.device_issue_pending = coh901318_issue_pending;
2720 base->dma_slave.device_control = coh901318_control; 2701 base->dma_slave.device_config = coh901318_dma_set_runtimeconfig;
2702 base->dma_slave.device_pause = coh901318_pause;
2703 base->dma_slave.device_resume = coh901318_resume;
2704 base->dma_slave.device_terminate_all = coh901318_terminate_all;
2721 base->dma_slave.dev = &pdev->dev; 2705 base->dma_slave.dev = &pdev->dev;
2722 2706
2723 err = dma_async_device_register(&base->dma_slave); 2707 err = dma_async_device_register(&base->dma_slave);
@@ -2737,7 +2721,10 @@ static int __init coh901318_probe(struct platform_device *pdev)
2737 base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy; 2721 base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
2738 base->dma_memcpy.device_tx_status = coh901318_tx_status; 2722 base->dma_memcpy.device_tx_status = coh901318_tx_status;
2739 base->dma_memcpy.device_issue_pending = coh901318_issue_pending; 2723 base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
2740 base->dma_memcpy.device_control = coh901318_control; 2724 base->dma_memcpy.device_config = coh901318_dma_set_runtimeconfig;
2725 base->dma_memcpy.device_pause = coh901318_pause;
2726 base->dma_memcpy.device_resume = coh901318_resume;
2727 base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
2741 base->dma_memcpy.dev = &pdev->dev; 2728 base->dma_memcpy.dev = &pdev->dev;
2742 /* 2729 /*
2743 * This controller can only access address at even 32bit boundaries, 2730 * This controller can only access address at even 32bit boundaries,
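
coh901318_pause() and coh901318_resume() change from void to int because they are now wired straight into the per-operation hooks and the core forwards their return value to clients. A sketch of what the client-facing helper presumably looks like after this series (the actual definition lives in include/linux/dmaengine.h, not in this hunk):

    static inline int dmaengine_pause(struct dma_chan *chan)
    {
            if (chan->device->device_pause)
                    return chan->device->device_pause(chan);
            return -ENOSYS; /* driver left the hook NULL */
    }
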
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index b743adf56465..512cb8e2805e 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -525,12 +525,6 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
525 return &c->txd; 525 return &c->txd;
526} 526}
527 527
528static int cpp41_cfg_chan(struct cppi41_channel *c,
529 struct dma_slave_config *cfg)
530{
531 return 0;
532}
533
534static void cppi41_compute_td_desc(struct cppi41_desc *d) 528static void cppi41_compute_td_desc(struct cppi41_desc *d)
535{ 529{
536 d->pd0 = DESC_TYPE_TEARD << DESC_TYPE; 530 d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
@@ -647,28 +641,6 @@ static int cppi41_stop_chan(struct dma_chan *chan)
647 return 0; 641 return 0;
648} 642}
649 643
650static int cppi41_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
651 unsigned long arg)
652{
653 struct cppi41_channel *c = to_cpp41_chan(chan);
654 int ret;
655
656 switch (cmd) {
657 case DMA_SLAVE_CONFIG:
658 ret = cpp41_cfg_chan(c, (struct dma_slave_config *) arg);
659 break;
660
661 case DMA_TERMINATE_ALL:
662 ret = cppi41_stop_chan(chan);
663 break;
664
665 default:
666 ret = -ENXIO;
667 break;
668 }
669 return ret;
670}
671
672static void cleanup_chans(struct cppi41_dd *cdd) 644static void cleanup_chans(struct cppi41_dd *cdd)
673{ 645{
674 while (!list_empty(&cdd->ddev.channels)) { 646 while (!list_empty(&cdd->ddev.channels)) {
@@ -953,7 +925,7 @@ static int cppi41_dma_probe(struct platform_device *pdev)
953 cdd->ddev.device_tx_status = cppi41_dma_tx_status; 925 cdd->ddev.device_tx_status = cppi41_dma_tx_status;
954 cdd->ddev.device_issue_pending = cppi41_dma_issue_pending; 926 cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
955 cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg; 927 cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
956 cdd->ddev.device_control = cppi41_dma_control; 928 cdd->ddev.device_terminate_all = cppi41_stop_chan;
957 cdd->ddev.dev = dev; 929 cdd->ddev.dev = dev;
958 INIT_LIST_HEAD(&cdd->ddev.channels); 930 INIT_LIST_HEAD(&cdd->ddev.channels);
959 cpp41_dma_info.dma_cap = cdd->ddev.cap_mask; 931 cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
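
cppi41 only needed the empty cpp41_cfg_chan() stub so that its device_control() switch had a DMA_SLAVE_CONFIG arm. With dedicated callbacks, an unsupported operation is simply left NULL; a sketch of the assumed core-side fallback (name and shape per include/linux/dmaengine.h, not shown in this diff):

    static inline int dmaengine_slave_config(struct dma_chan *chan,
                                             struct dma_slave_config *config)
    {
            if (chan->device->device_config)
                    return chan->device->device_config(chan, config);
            return -ENOSYS;
    }
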
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index bdeafeefa5f6..4527a3ebeac4 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -210,7 +210,7 @@ static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst)
210} 210}
211 211
212static int jz4740_dma_slave_config(struct dma_chan *c, 212static int jz4740_dma_slave_config(struct dma_chan *c,
213 const struct dma_slave_config *config) 213 struct dma_slave_config *config)
214{ 214{
215 struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); 215 struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
216 struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); 216 struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
@@ -290,21 +290,6 @@ static int jz4740_dma_terminate_all(struct dma_chan *c)
290 return 0; 290 return 0;
291} 291}
292 292
293static int jz4740_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
294 unsigned long arg)
295{
296 struct dma_slave_config *config = (struct dma_slave_config *)arg;
297
298 switch (cmd) {
299 case DMA_SLAVE_CONFIG:
300 return jz4740_dma_slave_config(chan, config);
301 case DMA_TERMINATE_ALL:
302 return jz4740_dma_terminate_all(chan);
303 default:
304 return -ENOSYS;
305 }
306}
307
308static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan) 293static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan)
309{ 294{
310 struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); 295 struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
@@ -561,7 +546,8 @@ static int jz4740_dma_probe(struct platform_device *pdev)
561 dd->device_issue_pending = jz4740_dma_issue_pending; 546 dd->device_issue_pending = jz4740_dma_issue_pending;
562 dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg; 547 dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg;
563 dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic; 548 dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
564 dd->device_control = jz4740_dma_control; 549 dd->device_config = jz4740_dma_slave_config;
550 dd->device_terminate_all = jz4740_dma_terminate_all;
565 dd->dev = &pdev->dev; 551 dd->dev = &pdev->dev;
566 INIT_LIST_HEAD(&dd->channels); 552 INIT_LIST_HEAD(&dd->channels);
567 553
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index e057935e3023..f15712f2fec6 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -222,31 +222,35 @@ static void balance_ref_count(struct dma_chan *chan)
222 */ 222 */
223static int dma_chan_get(struct dma_chan *chan) 223static int dma_chan_get(struct dma_chan *chan)
224{ 224{
225 int err = -ENODEV;
226 struct module *owner = dma_chan_to_owner(chan); 225 struct module *owner = dma_chan_to_owner(chan);
226 int ret;
227 227
228 /* The channel is already in use, update client count */
228 if (chan->client_count) { 229 if (chan->client_count) {
229 __module_get(owner); 230 __module_get(owner);
230 err = 0; 231 goto out;
231 } else if (try_module_get(owner)) 232 }
232 err = 0;
233 233
234 if (err == 0) 234 if (!try_module_get(owner))
235 chan->client_count++; 235 return -ENODEV;
236 236
237 /* allocate upon first client reference */ 237 /* allocate upon first client reference */
238 if (chan->client_count == 1 && err == 0) { 238 if (chan->device->device_alloc_chan_resources) {
239 int desc_cnt = chan->device->device_alloc_chan_resources(chan); 239 ret = chan->device->device_alloc_chan_resources(chan);
240 240 if (ret < 0)
241 if (desc_cnt < 0) { 241 goto err_out;
242 err = desc_cnt;
243 chan->client_count = 0;
244 module_put(owner);
245 } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
246 balance_ref_count(chan);
247 } 242 }
248 243
249 return err; 244 if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
245 balance_ref_count(chan);
246
247out:
248 chan->client_count++;
249 return 0;
250
251err_out:
252 module_put(owner);
253 return ret;
250} 254}
251 255
252/** 256/**
@@ -257,11 +261,15 @@ static int dma_chan_get(struct dma_chan *chan)
257 */ 261 */
258static void dma_chan_put(struct dma_chan *chan) 262static void dma_chan_put(struct dma_chan *chan)
259{ 263{
264 /* This channel is not in use, bail out */
260 if (!chan->client_count) 265 if (!chan->client_count)
261 return; /* this channel failed alloc_chan_resources */ 266 return;
267
262 chan->client_count--; 268 chan->client_count--;
263 module_put(dma_chan_to_owner(chan)); 269 module_put(dma_chan_to_owner(chan));
264 if (chan->client_count == 0) 270
271 /* This channel is not in use anymore, free it */
272 if (!chan->client_count && chan->device->device_free_chan_resources)
265 chan->device->device_free_chan_resources(chan); 273 chan->device->device_free_chan_resources(chan);
266} 274}
267 275
@@ -471,6 +479,39 @@ static void dma_channel_rebalance(void)
471 } 479 }
472} 480}
473 481
482int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
483{
484 struct dma_device *device;
485
486 if (!chan || !caps)
487 return -EINVAL;
488
489 device = chan->device;
490
491 /* check if the channel supports slave transactions */
492 if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
493 return -ENXIO;
494
495 /*
 496	 * Check whether the device uses the generic slave
 497	 * capability reporting; if not, it doesn't support
 498	 * any kind of slave capability reporting.
499 */
500 if (!device->directions)
501 return -ENXIO;
502
503 caps->src_addr_widths = device->src_addr_widths;
504 caps->dst_addr_widths = device->dst_addr_widths;
505 caps->directions = device->directions;
506 caps->residue_granularity = device->residue_granularity;
507
508 caps->cmd_pause = !!device->device_pause;
509 caps->cmd_terminate = !!device->device_terminate_all;
510
511 return 0;
512}
513EXPORT_SYMBOL_GPL(dma_get_slave_caps);
514
474static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, 515static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
475 struct dma_device *dev, 516 struct dma_device *dev,
476 dma_filter_fn fn, void *fn_param) 517 dma_filter_fn fn, void *fn_param)
@@ -811,17 +852,16 @@ int dma_async_device_register(struct dma_device *device)
811 !device->device_prep_dma_sg); 852 !device->device_prep_dma_sg);
812 BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) && 853 BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
813 !device->device_prep_dma_cyclic); 854 !device->device_prep_dma_cyclic);
814 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
815 !device->device_control);
816 BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && 855 BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
817 !device->device_prep_interleaved_dma); 856 !device->device_prep_interleaved_dma);
818 857
819 BUG_ON(!device->device_alloc_chan_resources);
820 BUG_ON(!device->device_free_chan_resources);
821 BUG_ON(!device->device_tx_status); 858 BUG_ON(!device->device_tx_status);
822 BUG_ON(!device->device_issue_pending); 859 BUG_ON(!device->device_issue_pending);
823 BUG_ON(!device->dev); 860 BUG_ON(!device->dev);
824 861
862 WARN(dma_has_cap(DMA_SLAVE, device->cap_mask) && !device->directions,
863 "this driver doesn't support generic slave capabilities reporting\n");
864
825 /* note: this only matters in the 865 /* note: this only matters in the
826 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case 866 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
827 */ 867 */
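
With dma_get_slave_caps() filled from the dma_device fields, a client can probe a channel's abilities before choosing a transfer strategy. A usage sketch, assuming chan came from dma_request_slave_channel() and dev is the client's struct device:

    static void foo_try_pause(struct device *dev, struct dma_chan *chan)
    {
            struct dma_slave_caps caps;

            if (!dma_get_slave_caps(chan, &caps) && caps.cmd_pause)
                    dmaengine_pause(chan); /* driver provides device_pause */
            else
                    dev_info(dev, "pause not supported on this channel\n");
    }
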
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a8d7809e2f4c..220ee49633e4 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -349,14 +349,14 @@ static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
349 unsigned long data) 349 unsigned long data)
350{ 350{
351 pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n", 351 pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
352 current->comm, n, err, src_off, dst_off, len, data); 352 current->comm, n, err, src_off, dst_off, len, data);
353} 353}
354 354
355#define verbose_result(err, n, src_off, dst_off, len, data) ({ \ 355#define verbose_result(err, n, src_off, dst_off, len, data) ({ \
356 if (verbose) \ 356 if (verbose) \
357 result(err, n, src_off, dst_off, len, data); \ 357 result(err, n, src_off, dst_off, len, data); \
358 else \ 358 else \
359 dbg_result(err, n, src_off, dst_off, len, data); \ 359 dbg_result(err, n, src_off, dst_off, len, data);\
360}) 360})
361 361
362static unsigned long long dmatest_persec(s64 runtime, unsigned int val) 362static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
@@ -405,7 +405,6 @@ static int dmatest_func(void *data)
405 struct dmatest_params *params; 405 struct dmatest_params *params;
406 struct dma_chan *chan; 406 struct dma_chan *chan;
407 struct dma_device *dev; 407 struct dma_device *dev;
408 unsigned int src_off, dst_off, len;
409 unsigned int error_count; 408 unsigned int error_count;
410 unsigned int failed_tests = 0; 409 unsigned int failed_tests = 0;
411 unsigned int total_tests = 0; 410 unsigned int total_tests = 0;
@@ -484,6 +483,7 @@ static int dmatest_func(void *data)
484 struct dmaengine_unmap_data *um; 483 struct dmaengine_unmap_data *um;
485 dma_addr_t srcs[src_cnt]; 484 dma_addr_t srcs[src_cnt];
486 dma_addr_t *dsts; 485 dma_addr_t *dsts;
486 unsigned int src_off, dst_off, len;
487 u8 align = 0; 487 u8 align = 0;
488 488
489 total_tests++; 489 total_tests++;
@@ -502,15 +502,21 @@ static int dmatest_func(void *data)
502 break; 502 break;
503 } 503 }
504 504
505 if (params->noverify) { 505 if (params->noverify)
506 len = params->buf_size; 506 len = params->buf_size;
507 else
508 len = dmatest_random() % params->buf_size + 1;
509
510 len = (len >> align) << align;
511 if (!len)
512 len = 1 << align;
513
514 total_len += len;
515
516 if (params->noverify) {
507 src_off = 0; 517 src_off = 0;
508 dst_off = 0; 518 dst_off = 0;
509 } else { 519 } else {
510 len = dmatest_random() % params->buf_size + 1;
511 len = (len >> align) << align;
512 if (!len)
513 len = 1 << align;
514 src_off = dmatest_random() % (params->buf_size - len + 1); 520 src_off = dmatest_random() % (params->buf_size - len + 1);
515 dst_off = dmatest_random() % (params->buf_size - len + 1); 521 dst_off = dmatest_random() % (params->buf_size - len + 1);
516 522
@@ -523,11 +529,6 @@ static int dmatest_func(void *data)
523 params->buf_size); 529 params->buf_size);
524 } 530 }
525 531
526 len = (len >> align) << align;
527 if (!len)
528 len = 1 << align;
529 total_len += len;
530
531 um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt, 532 um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt,
532 GFP_KERNEL); 533 GFP_KERNEL);
533 if (!um) { 534 if (!um) {
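
The dmatest rework computes and aligns the transfer length once, up front, so the noverify and the randomized paths share the same alignment code and the offsets are then drawn against the final length. Condensed, the reordered statements from the hunk read:

    len = params->noverify ? params->buf_size
                           : dmatest_random() % params->buf_size + 1;
    len = (len >> align) << align; /* round down to the required alignment */
    if (!len)
            len = 1 << align;      /* but never zero */
    total_len += len;
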
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 5c062548957c..455b7a4f1e87 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -61,6 +61,13 @@
61 */ 61 */
62#define NR_DESCS_PER_CHANNEL 64 62#define NR_DESCS_PER_CHANNEL 64
63 63
64/* The set of bus widths supported by the DMA controller */
65#define DW_DMA_BUSWIDTHS \
66 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
67 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
68 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
69 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
70
64/*----------------------------------------------------------------------*/ 71/*----------------------------------------------------------------------*/
65 72
66static struct device *chan2dev(struct dma_chan *chan) 73static struct device *chan2dev(struct dma_chan *chan)
@@ -955,8 +962,7 @@ static inline void convert_burst(u32 *maxburst)
955 *maxburst = 0; 962 *maxburst = 0;
956} 963}
957 964
958static int 965static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
959set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
960{ 966{
961 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 967 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
962 968
@@ -973,16 +979,25 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
973 return 0; 979 return 0;
974} 980}
975 981
976static inline void dwc_chan_pause(struct dw_dma_chan *dwc) 982static int dwc_pause(struct dma_chan *chan)
977{ 983{
978 u32 cfglo = channel_readl(dwc, CFG_LO); 984 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
979 unsigned int count = 20; /* timeout iterations */ 985 unsigned long flags;
986 unsigned int count = 20; /* timeout iterations */
987 u32 cfglo;
988
989 spin_lock_irqsave(&dwc->lock, flags);
980 990
991 cfglo = channel_readl(dwc, CFG_LO);
981 channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); 992 channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
982 while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) 993 while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
983 udelay(2); 994 udelay(2);
984 995
985 dwc->paused = true; 996 dwc->paused = true;
997
998 spin_unlock_irqrestore(&dwc->lock, flags);
999
1000 return 0;
986} 1001}
987 1002
988static inline void dwc_chan_resume(struct dw_dma_chan *dwc) 1003static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
@@ -994,53 +1009,48 @@ static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
994 dwc->paused = false; 1009 dwc->paused = false;
995} 1010}
996 1011
997static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1012static int dwc_resume(struct dma_chan *chan)
998 unsigned long arg)
999{ 1013{
1000 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1014 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1001 struct dw_dma *dw = to_dw_dma(chan->device);
1002 struct dw_desc *desc, *_desc;
1003 unsigned long flags; 1015 unsigned long flags;
1004 LIST_HEAD(list);
1005 1016
1006 if (cmd == DMA_PAUSE) { 1017 if (!dwc->paused)
1007 spin_lock_irqsave(&dwc->lock, flags); 1018 return 0;
1008 1019
1009 dwc_chan_pause(dwc); 1020 spin_lock_irqsave(&dwc->lock, flags);
1010 1021
1011 spin_unlock_irqrestore(&dwc->lock, flags); 1022 dwc_chan_resume(dwc);
1012 } else if (cmd == DMA_RESUME) {
1013 if (!dwc->paused)
1014 return 0;
1015 1023
1016 spin_lock_irqsave(&dwc->lock, flags); 1024 spin_unlock_irqrestore(&dwc->lock, flags);
1017 1025
1018 dwc_chan_resume(dwc); 1026 return 0;
1027}
1019 1028
1020 spin_unlock_irqrestore(&dwc->lock, flags); 1029static int dwc_terminate_all(struct dma_chan *chan)
1021 } else if (cmd == DMA_TERMINATE_ALL) { 1030{
1022 spin_lock_irqsave(&dwc->lock, flags); 1031 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1032 struct dw_dma *dw = to_dw_dma(chan->device);
1033 struct dw_desc *desc, *_desc;
1034 unsigned long flags;
1035 LIST_HEAD(list);
1023 1036
1024 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); 1037 spin_lock_irqsave(&dwc->lock, flags);
1025 1038
1026 dwc_chan_disable(dw, dwc); 1039 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
1040
1041 dwc_chan_disable(dw, dwc);
1027 1042
1028 dwc_chan_resume(dwc); 1043 dwc_chan_resume(dwc);
1029 1044
1030 /* active_list entries will end up before queued entries */ 1045 /* active_list entries will end up before queued entries */
1031 list_splice_init(&dwc->queue, &list); 1046 list_splice_init(&dwc->queue, &list);
1032 list_splice_init(&dwc->active_list, &list); 1047 list_splice_init(&dwc->active_list, &list);
1033 1048
1034 spin_unlock_irqrestore(&dwc->lock, flags); 1049 spin_unlock_irqrestore(&dwc->lock, flags);
1035 1050
1036 /* Flush all pending and queued descriptors */ 1051 /* Flush all pending and queued descriptors */
1037 list_for_each_entry_safe(desc, _desc, &list, desc_node) 1052 list_for_each_entry_safe(desc, _desc, &list, desc_node)
1038 dwc_descriptor_complete(dwc, desc, false); 1053 dwc_descriptor_complete(dwc, desc, false);
1039 } else if (cmd == DMA_SLAVE_CONFIG) {
1040 return set_runtime_config(chan, (struct dma_slave_config *)arg);
1041 } else {
1042 return -ENXIO;
1043 }
1044 1054
1045 return 0; 1055 return 0;
1046} 1056}
@@ -1551,7 +1561,8 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1551 } 1561 }
1552 } else { 1562 } else {
1553 dw->nr_masters = pdata->nr_masters; 1563 dw->nr_masters = pdata->nr_masters;
1554 memcpy(dw->data_width, pdata->data_width, 4); 1564 for (i = 0; i < dw->nr_masters; i++)
1565 dw->data_width[i] = pdata->data_width[i];
1555 } 1566 }
1556 1567
1557 /* Calculate all channel mask before DMA setup */ 1568 /* Calculate all channel mask before DMA setup */
@@ -1656,13 +1667,23 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1656 dw->dma.device_free_chan_resources = dwc_free_chan_resources; 1667 dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1657 1668
1658 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; 1669 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1659
1660 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; 1670 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
1661 dw->dma.device_control = dwc_control; 1671
1672 dw->dma.device_config = dwc_config;
1673 dw->dma.device_pause = dwc_pause;
1674 dw->dma.device_resume = dwc_resume;
1675 dw->dma.device_terminate_all = dwc_terminate_all;
1662 1676
1663 dw->dma.device_tx_status = dwc_tx_status; 1677 dw->dma.device_tx_status = dwc_tx_status;
1664 dw->dma.device_issue_pending = dwc_issue_pending; 1678 dw->dma.device_issue_pending = dwc_issue_pending;
1665 1679
1680 /* DMA capabilities */
1681 dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
1682 dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
1683 dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
1684 BIT(DMA_MEM_TO_MEM);
1685 dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1686
1666 err = dma_async_device_register(&dw->dma); 1687 err = dma_async_device_register(&dw->dma);
1667 if (err) 1688 if (err)
1668 goto err_dma_register; 1689 goto err_dma_register;
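
Note that dwc_pause() now owns its locking and keeps the bounded FIFO-drain wait from the old dwc_chan_pause(): 20 polls with a 2 microsecond delay, roughly 40 microseconds worst case with the channel lock held and interrupts off. The timing skeleton, with fifo_empty() as a placeholder for the CFG_LO readback:

    unsigned int count = 20;         /* timeout iterations */
    while (!fifo_empty() && count--) /* fifo_empty(): placeholder check */
            udelay(2);               /* <= 20 * 2us = 40us of busy-wait */
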
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 32ea1aca7a0e..6565a361e7e5 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -100,7 +100,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
100{ 100{
101 struct device_node *np = pdev->dev.of_node; 101 struct device_node *np = pdev->dev.of_node;
102 struct dw_dma_platform_data *pdata; 102 struct dw_dma_platform_data *pdata;
103 u32 tmp, arr[4]; 103 u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
104 104
105 if (!np) { 105 if (!np) {
106 dev_err(&pdev->dev, "Missing DT data\n"); 106 dev_err(&pdev->dev, "Missing DT data\n");
@@ -127,7 +127,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
127 pdata->block_size = tmp; 127 pdata->block_size = tmp;
128 128
129 if (!of_property_read_u32(np, "dma-masters", &tmp)) { 129 if (!of_property_read_u32(np, "dma-masters", &tmp)) {
130 if (tmp > 4) 130 if (tmp > DW_DMA_MAX_NR_MASTERS)
131 return NULL; 131 return NULL;
132 132
133 pdata->nr_masters = tmp; 133 pdata->nr_masters = tmp;
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 848e232f7cc7..241ff2b1402b 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -252,7 +252,7 @@ struct dw_dma_chan {
252 u8 src_master; 252 u8 src_master;
253 u8 dst_master; 253 u8 dst_master;
254 254
255 /* configuration passed via DMA_SLAVE_CONFIG */ 255 /* configuration passed via .device_config */
256 struct dma_slave_config dma_sconfig; 256 struct dma_slave_config dma_sconfig;
257}; 257};
258 258
@@ -285,7 +285,7 @@ struct dw_dma {
285 285
286 /* hardware configuration */ 286 /* hardware configuration */
287 unsigned char nr_masters; 287 unsigned char nr_masters;
288 unsigned char data_width[4]; 288 unsigned char data_width[DW_DMA_MAX_NR_MASTERS];
289}; 289};
290 290
291static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw) 291static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
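
The dw changes tie three places that previously hard-coded the literal 4 (the DT scratch array, the bounds check in dw_dma_parse_dt(), and data_width[] here) to DW_DMA_MAX_NR_MASTERS. A sketch of the invariant being protected, assuming the constant is defined as 4 in include/linux/platform_data/dma-dw.h:

    #define DW_DMA_MAX_NR_MASTERS 4 /* assumed value, per dma-dw.h */

    struct foo_hw {                 /* illustrative only */
            unsigned char nr_masters;
            unsigned char data_width[DW_DMA_MAX_NR_MASTERS];
    };

    static bool foo_nr_masters_valid(u32 tmp)
    {
            return tmp >= 1 && tmp <= DW_DMA_MAX_NR_MASTERS;
    }
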
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index b969206439b7..276157f22612 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -15,6 +15,7 @@
15 15
16#include <linux/dmaengine.h> 16#include <linux/dmaengine.h>
17#include <linux/dma-mapping.h> 17#include <linux/dma-mapping.h>
18#include <linux/edma.h>
18#include <linux/err.h> 19#include <linux/err.h>
19#include <linux/init.h> 20#include <linux/init.h>
20#include <linux/interrupt.h> 21#include <linux/interrupt.h>
@@ -244,8 +245,9 @@ static void edma_execute(struct edma_chan *echan)
244 } 245 }
245} 246}
246 247
247static int edma_terminate_all(struct edma_chan *echan) 248static int edma_terminate_all(struct dma_chan *chan)
248{ 249{
250 struct edma_chan *echan = to_edma_chan(chan);
249 unsigned long flags; 251 unsigned long flags;
250 LIST_HEAD(head); 252 LIST_HEAD(head);
251 253
@@ -273,9 +275,11 @@ static int edma_terminate_all(struct edma_chan *echan)
273 return 0; 275 return 0;
274} 276}
275 277
276static int edma_slave_config(struct edma_chan *echan, 278static int edma_slave_config(struct dma_chan *chan,
277 struct dma_slave_config *cfg) 279 struct dma_slave_config *cfg)
278{ 280{
281 struct edma_chan *echan = to_edma_chan(chan);
282
279 if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || 283 if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
280 cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) 284 cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
281 return -EINVAL; 285 return -EINVAL;
@@ -285,8 +289,10 @@ static int edma_slave_config(struct edma_chan *echan,
285 return 0; 289 return 0;
286} 290}
287 291
288static int edma_dma_pause(struct edma_chan *echan) 292static int edma_dma_pause(struct dma_chan *chan)
289{ 293{
294 struct edma_chan *echan = to_edma_chan(chan);
295
290 /* Pause/Resume only allowed with cyclic mode */ 296 /* Pause/Resume only allowed with cyclic mode */
291 if (!echan->edesc || !echan->edesc->cyclic) 297 if (!echan->edesc || !echan->edesc->cyclic)
292 return -EINVAL; 298 return -EINVAL;
@@ -295,8 +301,10 @@ static int edma_dma_pause(struct edma_chan *echan)
295 return 0; 301 return 0;
296} 302}
297 303
298static int edma_dma_resume(struct edma_chan *echan) 304static int edma_dma_resume(struct dma_chan *chan)
299{ 305{
306 struct edma_chan *echan = to_edma_chan(chan);
307
300 /* Pause/Resume only allowed with cyclic mode */ 308 /* Pause/Resume only allowed with cyclic mode */
301 if (!echan->edesc->cyclic) 309 if (!echan->edesc->cyclic)
302 return -EINVAL; 310 return -EINVAL;
@@ -305,36 +313,6 @@ static int edma_dma_resume(struct edma_chan *echan)
305 return 0; 313 return 0;
306} 314}
307 315
308static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
309 unsigned long arg)
310{
311 int ret = 0;
312 struct dma_slave_config *config;
313 struct edma_chan *echan = to_edma_chan(chan);
314
315 switch (cmd) {
316 case DMA_TERMINATE_ALL:
317 edma_terminate_all(echan);
318 break;
319 case DMA_SLAVE_CONFIG:
320 config = (struct dma_slave_config *)arg;
321 ret = edma_slave_config(echan, config);
322 break;
323 case DMA_PAUSE:
324 ret = edma_dma_pause(echan);
325 break;
326
327 case DMA_RESUME:
328 ret = edma_dma_resume(echan);
329 break;
330
331 default:
332 ret = -ENOSYS;
333 }
334
335 return ret;
336}
337
338/* 316/*
339 * A PaRAM set configuration abstraction used by other modes 317 * A PaRAM set configuration abstraction used by other modes
340 * @chan: Channel who's PaRAM set we're configuring 318 * @chan: Channel who's PaRAM set we're configuring
@@ -557,7 +535,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
557 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); 535 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
558} 536}
559 537
560struct dma_async_tx_descriptor *edma_prep_dma_memcpy( 538static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
561 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 539 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
562 size_t len, unsigned long tx_flags) 540 size_t len, unsigned long tx_flags)
563{ 541{
@@ -994,19 +972,6 @@ static void __init edma_chan_init(struct edma_cc *ecc,
994 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ 972 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
995 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) 973 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
996 974
997static int edma_dma_device_slave_caps(struct dma_chan *dchan,
998 struct dma_slave_caps *caps)
999{
1000 caps->src_addr_widths = EDMA_DMA_BUSWIDTHS;
1001 caps->dstn_addr_widths = EDMA_DMA_BUSWIDTHS;
1002 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1003 caps->cmd_pause = true;
1004 caps->cmd_terminate = true;
1005 caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1006
1007 return 0;
1008}
1009
1010static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, 975static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
1011 struct device *dev) 976 struct device *dev)
1012{ 977{
@@ -1017,8 +982,16 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
1017 dma->device_free_chan_resources = edma_free_chan_resources; 982 dma->device_free_chan_resources = edma_free_chan_resources;
1018 dma->device_issue_pending = edma_issue_pending; 983 dma->device_issue_pending = edma_issue_pending;
1019 dma->device_tx_status = edma_tx_status; 984 dma->device_tx_status = edma_tx_status;
1020 dma->device_control = edma_control; 985 dma->device_config = edma_slave_config;
1021 dma->device_slave_caps = edma_dma_device_slave_caps; 986 dma->device_pause = edma_dma_pause;
987 dma->device_resume = edma_dma_resume;
988 dma->device_terminate_all = edma_terminate_all;
989
990 dma->src_addr_widths = EDMA_DMA_BUSWIDTHS;
991 dma->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
992 dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
993 dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
994
1022 dma->dev = dev; 995 dma->dev = dev;
1023 996
1024 /* 997 /*
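
Even though eDMA now advertises device_pause, edma_dma_pause() still rejects non-cyclic transfers at runtime, so a reported cmd_pause capability is necessary but not sufficient; callers should check the return value. Each hook also now takes the generic struct dma_chan * and recovers the private channel via the usual container_of() wrapper; a sketch of the assumed helper (edma.c defines the real to_edma_chan()):

    static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
    {
            return container_of(c, struct edma_chan, vchan.chan);
    }
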
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 7650470196c4..24e5290faa32 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -144,7 +144,7 @@ struct ep93xx_dma_desc {
144 * @queue: pending descriptors which are handled next 144 * @queue: pending descriptors which are handled next
145 * @free_list: list of free descriptors which can be used 145 * @free_list: list of free descriptors which can be used
146 * @runtime_addr: physical address currently used as dest/src (M2M only). This 146 * @runtime_addr: physical address currently used as dest/src (M2M only). This
147 * is set via %DMA_SLAVE_CONFIG before slave operation is 147 * is set via .device_config before slave operation is
148 * prepared 148 * prepared
149 * @runtime_ctrl: M2M runtime values for the control register. 149 * @runtime_ctrl: M2M runtime values for the control register.
150 * 150 *
@@ -1164,13 +1164,14 @@ fail:
1164 1164
1165/** 1165/**
1166 * ep93xx_dma_terminate_all - terminate all transactions 1166 * ep93xx_dma_terminate_all - terminate all transactions
1167 * @edmac: channel 1167 * @chan: channel
1168 * 1168 *
1169 * Stops all DMA transactions. All descriptors are put back to the 1169 * Stops all DMA transactions. All descriptors are put back to the
1170 * @edmac->free_list and callbacks are _not_ called. 1170 * @edmac->free_list and callbacks are _not_ called.
1171 */ 1171 */
1172static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac) 1172static int ep93xx_dma_terminate_all(struct dma_chan *chan)
1173{ 1173{
1174 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1174 struct ep93xx_dma_desc *desc, *_d; 1175 struct ep93xx_dma_desc *desc, *_d;
1175 unsigned long flags; 1176 unsigned long flags;
1176 LIST_HEAD(list); 1177 LIST_HEAD(list);
@@ -1194,9 +1195,10 @@ static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
1194 return 0; 1195 return 0;
1195} 1196}
1196 1197
1197static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac, 1198static int ep93xx_dma_slave_config(struct dma_chan *chan,
1198 struct dma_slave_config *config) 1199 struct dma_slave_config *config)
1199{ 1200{
1201 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1200 enum dma_slave_buswidth width; 1202 enum dma_slave_buswidth width;
1201 unsigned long flags; 1203 unsigned long flags;
1202 u32 addr, ctrl; 1204 u32 addr, ctrl;
@@ -1242,36 +1244,6 @@ static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
1242} 1244}
1243 1245
1244/** 1246/**
1245 * ep93xx_dma_control - manipulate all pending operations on a channel
1246 * @chan: channel
1247 * @cmd: control command to perform
1248 * @arg: optional argument
1249 *
1250 * Controls the channel. Function returns %0 in case of success or negative
1251 * error in case of failure.
1252 */
1253static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1254 unsigned long arg)
1255{
1256 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1257 struct dma_slave_config *config;
1258
1259 switch (cmd) {
1260 case DMA_TERMINATE_ALL:
1261 return ep93xx_dma_terminate_all(edmac);
1262
1263 case DMA_SLAVE_CONFIG:
1264 config = (struct dma_slave_config *)arg;
1265 return ep93xx_dma_slave_config(edmac, config);
1266
1267 default:
1268 break;
1269 }
1270
1271 return -ENOSYS;
1272}
1273
1274/**
1275 * ep93xx_dma_tx_status - check if a transaction is completed 1247 * ep93xx_dma_tx_status - check if a transaction is completed
1276 * @chan: channel 1248 * @chan: channel
1277 * @cookie: transaction specific cookie 1249 * @cookie: transaction specific cookie
@@ -1352,7 +1324,8 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
1352 dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources; 1324 dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
1353 dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg; 1325 dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
1354 dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic; 1326 dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
1355 dma_dev->device_control = ep93xx_dma_control; 1327 dma_dev->device_config = ep93xx_dma_slave_config;
1328 dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
1356 dma_dev->device_issue_pending = ep93xx_dma_issue_pending; 1329 dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
1357 dma_dev->device_tx_status = ep93xx_dma_tx_status; 1330 dma_dev->device_tx_status = ep93xx_dma_tx_status;
1358 1331
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index e9ebb89e1711..09e2842d15ec 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -289,62 +289,69 @@ static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
289 kfree(fsl_desc); 289 kfree(fsl_desc);
290} 290}
291 291
292static int fsl_edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 292static int fsl_edma_terminate_all(struct dma_chan *chan)
293 unsigned long arg)
294{ 293{
295 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 294 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
296 struct dma_slave_config *cfg = (void *)arg;
297 unsigned long flags; 295 unsigned long flags;
298 LIST_HEAD(head); 296 LIST_HEAD(head);
299 297
300 switch (cmd) { 298 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
301 case DMA_TERMINATE_ALL: 299 fsl_edma_disable_request(fsl_chan);
302 spin_lock_irqsave(&fsl_chan->vchan.lock, flags); 300 fsl_chan->edesc = NULL;
301 vchan_get_all_descriptors(&fsl_chan->vchan, &head);
302 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
303 vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
304 return 0;
305}
306
307static int fsl_edma_pause(struct dma_chan *chan)
308{
309 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
310 unsigned long flags;
311
312 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
313 if (fsl_chan->edesc) {
303 fsl_edma_disable_request(fsl_chan); 314 fsl_edma_disable_request(fsl_chan);
304 fsl_chan->edesc = NULL; 315 fsl_chan->status = DMA_PAUSED;
305 vchan_get_all_descriptors(&fsl_chan->vchan, &head); 316 }
306 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); 317 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
307 vchan_dma_desc_free_list(&fsl_chan->vchan, &head); 318 return 0;
308 return 0; 319}
309
310 case DMA_SLAVE_CONFIG:
311 fsl_chan->fsc.dir = cfg->direction;
312 if (cfg->direction == DMA_DEV_TO_MEM) {
313 fsl_chan->fsc.dev_addr = cfg->src_addr;
314 fsl_chan->fsc.addr_width = cfg->src_addr_width;
315 fsl_chan->fsc.burst = cfg->src_maxburst;
316 fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
317 } else if (cfg->direction == DMA_MEM_TO_DEV) {
318 fsl_chan->fsc.dev_addr = cfg->dst_addr;
319 fsl_chan->fsc.addr_width = cfg->dst_addr_width;
320 fsl_chan->fsc.burst = cfg->dst_maxburst;
321 fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
322 } else {
323 return -EINVAL;
324 }
325 return 0;
326 320
327 case DMA_PAUSE: 321static int fsl_edma_resume(struct dma_chan *chan)
328 spin_lock_irqsave(&fsl_chan->vchan.lock, flags); 322{
329 if (fsl_chan->edesc) { 323 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
330 fsl_edma_disable_request(fsl_chan); 324 unsigned long flags;
331 fsl_chan->status = DMA_PAUSED;
332 }
333 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
334 return 0;
335
336 case DMA_RESUME:
337 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
338 if (fsl_chan->edesc) {
339 fsl_edma_enable_request(fsl_chan);
340 fsl_chan->status = DMA_IN_PROGRESS;
341 }
342 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
343 return 0;
344 325
345 default: 326 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
346 return -ENXIO; 327 if (fsl_chan->edesc) {
328 fsl_edma_enable_request(fsl_chan);
329 fsl_chan->status = DMA_IN_PROGRESS;
330 }
331 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
332 return 0;
333}
334
335static int fsl_edma_slave_config(struct dma_chan *chan,
336 struct dma_slave_config *cfg)
337{
338 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
339
340 fsl_chan->fsc.dir = cfg->direction;
341 if (cfg->direction == DMA_DEV_TO_MEM) {
342 fsl_chan->fsc.dev_addr = cfg->src_addr;
343 fsl_chan->fsc.addr_width = cfg->src_addr_width;
344 fsl_chan->fsc.burst = cfg->src_maxburst;
345 fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
346 } else if (cfg->direction == DMA_MEM_TO_DEV) {
347 fsl_chan->fsc.dev_addr = cfg->dst_addr;
348 fsl_chan->fsc.addr_width = cfg->dst_addr_width;
349 fsl_chan->fsc.burst = cfg->dst_maxburst;
350 fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
351 } else {
352 return -EINVAL;
347 } 353 }
354 return 0;
348} 355}
349 356
350static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan, 357static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
@@ -780,18 +787,6 @@ static void fsl_edma_free_chan_resources(struct dma_chan *chan)
780 fsl_chan->tcd_pool = NULL; 787 fsl_chan->tcd_pool = NULL;
781} 788}
782 789
783static int fsl_dma_device_slave_caps(struct dma_chan *dchan,
784 struct dma_slave_caps *caps)
785{
786 caps->src_addr_widths = FSL_EDMA_BUSWIDTHS;
787 caps->dstn_addr_widths = FSL_EDMA_BUSWIDTHS;
788 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
789 caps->cmd_pause = true;
790 caps->cmd_terminate = true;
791
792 return 0;
793}
794
795static int 790static int
796fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma) 791fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
797{ 792{
@@ -917,9 +912,15 @@ static int fsl_edma_probe(struct platform_device *pdev)
917 fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status; 912 fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
918 fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg; 913 fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
919 fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic; 914 fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
920 fsl_edma->dma_dev.device_control = fsl_edma_control; 915 fsl_edma->dma_dev.device_config = fsl_edma_slave_config;
916 fsl_edma->dma_dev.device_pause = fsl_edma_pause;
917 fsl_edma->dma_dev.device_resume = fsl_edma_resume;
918 fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
921 fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending; 919 fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
922 fsl_edma->dma_dev.device_slave_caps = fsl_dma_device_slave_caps; 920
921 fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
922 fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
923 fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
923 924
924 platform_set_drvdata(pdev, fsl_edma); 925 platform_set_drvdata(pdev, fsl_edma);
925 926
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 38821cdf862b..300f821f1890 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -941,84 +941,56 @@ fail:
941 return NULL; 941 return NULL;
942} 942}
943 943
944/** 944static int fsl_dma_device_terminate_all(struct dma_chan *dchan)
945 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
946 * @chan: DMA channel
947 * @sgl: scatterlist to transfer to/from
948 * @sg_len: number of entries in @scatterlist
949 * @direction: DMA direction
950 * @flags: DMAEngine flags
951 * @context: transaction context (ignored)
952 *
953 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
954 * DMA_SLAVE API, this gets the device-specific information from the
955 * chan->private variable.
956 */
957static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
958 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
959 enum dma_transfer_direction direction, unsigned long flags,
960 void *context)
961{ 945{
962 /*
963 * This operation is not supported on the Freescale DMA controller
964 *
965 * However, we need to provide the function pointer to allow the
966 * device_control() method to work.
967 */
968 return NULL;
969}
970
971static int fsl_dma_device_control(struct dma_chan *dchan,
972 enum dma_ctrl_cmd cmd, unsigned long arg)
973{
974 struct dma_slave_config *config;
975 struct fsldma_chan *chan; 946 struct fsldma_chan *chan;
976 int size;
977 947
978 if (!dchan) 948 if (!dchan)
979 return -EINVAL; 949 return -EINVAL;
980 950
981 chan = to_fsl_chan(dchan); 951 chan = to_fsl_chan(dchan);
982 952
983 switch (cmd) { 953 spin_lock_bh(&chan->desc_lock);
984 case DMA_TERMINATE_ALL:
985 spin_lock_bh(&chan->desc_lock);
986
987 /* Halt the DMA engine */
988 dma_halt(chan);
989 954
990 /* Remove and free all of the descriptors in the LD queue */ 955 /* Halt the DMA engine */
991 fsldma_free_desc_list(chan, &chan->ld_pending); 956 dma_halt(chan);
992 fsldma_free_desc_list(chan, &chan->ld_running);
993 fsldma_free_desc_list(chan, &chan->ld_completed);
994 chan->idle = true;
995 957
996 spin_unlock_bh(&chan->desc_lock); 958 /* Remove and free all of the descriptors in the LD queue */
997 return 0; 959 fsldma_free_desc_list(chan, &chan->ld_pending);
960 fsldma_free_desc_list(chan, &chan->ld_running);
961 fsldma_free_desc_list(chan, &chan->ld_completed);
962 chan->idle = true;
998 963
999 case DMA_SLAVE_CONFIG: 964 spin_unlock_bh(&chan->desc_lock);
1000 config = (struct dma_slave_config *)arg; 965 return 0;
966}
1001 967
1002 /* make sure the channel supports setting burst size */ 968static int fsl_dma_device_config(struct dma_chan *dchan,
1003 if (!chan->set_request_count) 969 struct dma_slave_config *config)
1004 return -ENXIO; 970{
971 struct fsldma_chan *chan;
972 int size;
1005 973
1006 /* we set the controller burst size depending on direction */ 974 if (!dchan)
1007 if (config->direction == DMA_MEM_TO_DEV) 975 return -EINVAL;
1008 size = config->dst_addr_width * config->dst_maxburst;
1009 else
1010 size = config->src_addr_width * config->src_maxburst;
1011 976
1012 chan->set_request_count(chan, size); 977 chan = to_fsl_chan(dchan);
1013 return 0;
1014 978
1015 default: 979 /* make sure the channel supports setting burst size */
980 if (!chan->set_request_count)
1016 return -ENXIO; 981 return -ENXIO;
1017 }
1018 982
983 /* we set the controller burst size depending on direction */
984 if (config->direction == DMA_MEM_TO_DEV)
985 size = config->dst_addr_width * config->dst_maxburst;
986 else
987 size = config->src_addr_width * config->src_maxburst;
988
989 chan->set_request_count(chan, size);
1019 return 0; 990 return 0;
1020} 991}
1021 992
993
1022/** 994/**
1023 * fsl_dma_memcpy_issue_pending - Issue the DMA start command 995 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
1024 * @chan : Freescale DMA channel 996 * @chan : Freescale DMA channel
@@ -1395,10 +1367,15 @@ static int fsldma_of_probe(struct platform_device *op)
1395 fdev->common.device_prep_dma_sg = fsl_dma_prep_sg; 1367 fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
1396 fdev->common.device_tx_status = fsl_tx_status; 1368 fdev->common.device_tx_status = fsl_tx_status;
1397 fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; 1369 fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
1398 fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; 1370 fdev->common.device_config = fsl_dma_device_config;
1399 fdev->common.device_control = fsl_dma_device_control; 1371 fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
1400 fdev->common.dev = &op->dev; 1372 fdev->common.dev = &op->dev;
1401 1373
1374 fdev->common.src_addr_widths = FSL_DMA_BUSWIDTHS;
1375 fdev->common.dst_addr_widths = FSL_DMA_BUSWIDTHS;
1376 fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1377 fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1378
1402 dma_set_mask(&(op->dev), DMA_BIT_MASK(36)); 1379 dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
1403 1380
1404 platform_set_drvdata(op, fdev); 1381 platform_set_drvdata(op, fdev);
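
fsl_dma_device_config() translates the generic (addr_width, maxburst) pair into the controller's request count in bytes; since enum dma_slave_buswidth values equal the width in bytes, the product is the burst size directly. A worked example with the hunk's variable names:

    /* dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES (4), dst_maxburst = 8 */
    int size = config->dst_addr_width * config->dst_maxburst; /* 32 bytes */
    chan->set_request_count(chan, size);
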
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 239c20c84382..31bffccdcc75 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -83,6 +83,10 @@
83#define FSL_DMA_DGSR_EOSI 0x02 83#define FSL_DMA_DGSR_EOSI 0x02
84#define FSL_DMA_DGSR_EOLSI 0x01 84#define FSL_DMA_DGSR_EOLSI 0x01
85 85
86#define FSL_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
87 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
88 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
89 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
86typedef u64 __bitwise v64; 90typedef u64 __bitwise v64;
87typedef u32 __bitwise v32; 91typedef u32 __bitwise v32;
88 92
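
FSL_DMA_BUSWIDTHS packs the supported widths into a bitmask, one bit per enum dma_slave_buswidth value (which is itself the width in bytes). A generic test against such a mask, as a sketch rather than code from this patch:

    static bool foo_width_supported(u32 mask, enum dma_slave_buswidth width)
    {
            return mask & BIT(width); /* bit position == width in bytes */
    }
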
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
new file mode 100644
index 000000000000..ed045a9ad634
--- /dev/null
+++ b/drivers/dma/img-mdc-dma.c
@@ -0,0 +1,1011 @@
1/*
2 * IMG Multi-threaded DMA Controller (MDC)
3 *
4 * Copyright (C) 2009,2012,2013 Imagination Technologies Ltd.
5 * Copyright (C) 2014 Google, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 */
11
12#include <linux/clk.h>
13#include <linux/dma-mapping.h>
14#include <linux/dmaengine.h>
15#include <linux/dmapool.h>
16#include <linux/interrupt.h>
17#include <linux/io.h>
18#include <linux/irq.h>
19#include <linux/kernel.h>
20#include <linux/mfd/syscon.h>
21#include <linux/module.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define MDC_MAX_DMA_CHANNELS			32

#define MDC_GENERAL_CONFIG			0x000
#define MDC_GENERAL_CONFIG_LIST_IEN		BIT(31)
#define MDC_GENERAL_CONFIG_IEN			BIT(29)
#define MDC_GENERAL_CONFIG_LEVEL_INT		BIT(28)
#define MDC_GENERAL_CONFIG_INC_W		BIT(12)
#define MDC_GENERAL_CONFIG_INC_R		BIT(8)
#define MDC_GENERAL_CONFIG_PHYSICAL_W		BIT(7)
#define MDC_GENERAL_CONFIG_WIDTH_W_SHIFT	4
#define MDC_GENERAL_CONFIG_WIDTH_W_MASK		0x7
#define MDC_GENERAL_CONFIG_PHYSICAL_R		BIT(3)
#define MDC_GENERAL_CONFIG_WIDTH_R_SHIFT	0
#define MDC_GENERAL_CONFIG_WIDTH_R_MASK		0x7

#define MDC_READ_PORT_CONFIG			0x004
#define MDC_READ_PORT_CONFIG_STHREAD_SHIFT	28
#define MDC_READ_PORT_CONFIG_STHREAD_MASK	0xf
#define MDC_READ_PORT_CONFIG_RTHREAD_SHIFT	24
#define MDC_READ_PORT_CONFIG_RTHREAD_MASK	0xf
#define MDC_READ_PORT_CONFIG_WTHREAD_SHIFT	16
#define MDC_READ_PORT_CONFIG_WTHREAD_MASK	0xf
#define MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT	4
#define MDC_READ_PORT_CONFIG_BURST_SIZE_MASK	0xff
#define MDC_READ_PORT_CONFIG_DREQ_ENABLE	BIT(1)

#define MDC_READ_ADDRESS			0x008

#define MDC_WRITE_ADDRESS			0x00c

#define MDC_TRANSFER_SIZE			0x010
#define MDC_TRANSFER_SIZE_MASK			0xffffff

#define MDC_LIST_NODE_ADDRESS			0x014

#define MDC_CMDS_PROCESSED			0x018
#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT	16
#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK	0x3f
#define MDC_CMDS_PROCESSED_INT_ACTIVE		BIT(8)
#define MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT	0
#define MDC_CMDS_PROCESSED_CMDS_DONE_MASK	0x3f

#define MDC_CONTROL_AND_STATUS			0x01c
#define MDC_CONTROL_AND_STATUS_CANCEL		BIT(20)
#define MDC_CONTROL_AND_STATUS_LIST_EN		BIT(4)
#define MDC_CONTROL_AND_STATUS_EN		BIT(0)

#define MDC_ACTIVE_TRANSFER_SIZE		0x030

#define MDC_GLOBAL_CONFIG_A				0x900
#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT	16
#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK	0xff
#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT		8
#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK		0xff
#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT		0
#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK		0xff

struct mdc_hw_list_desc {
	u32 gen_conf;
	u32 readport_conf;
	u32 read_addr;
	u32 write_addr;
	u32 xfer_size;
	u32 node_addr;
	u32 cmds_done;
	u32 ctrl_status;
	/*
	 * Not part of the list descriptor, but instead used by the CPU to
	 * traverse the list.
	 */
	struct mdc_hw_list_desc *next_desc;
};

struct mdc_tx_desc {
	struct mdc_chan *chan;
	struct virt_dma_desc vd;
	dma_addr_t list_phys;
	struct mdc_hw_list_desc *list;
	bool cyclic;
	bool cmd_loaded;
	unsigned int list_len;
	unsigned int list_period_len;
	size_t list_xfer_size;
	unsigned int list_cmds_done;
};

struct mdc_chan {
	struct mdc_dma *mdma;
	struct virt_dma_chan vc;
	struct dma_slave_config config;
	struct mdc_tx_desc *desc;
	int irq;
	unsigned int periph;
	unsigned int thread;
	unsigned int chan_nr;
};

struct mdc_dma_soc_data {
	void (*enable_chan)(struct mdc_chan *mchan);
	void (*disable_chan)(struct mdc_chan *mchan);
};

struct mdc_dma {
	struct dma_device dma_dev;
	void __iomem *regs;
	struct clk *clk;
	struct dma_pool *desc_pool;
	struct regmap *periph_regs;
	spinlock_t lock;
	unsigned int nr_threads;
	unsigned int nr_channels;
	unsigned int bus_width;
	unsigned int max_burst_mult;
	unsigned int max_xfer_size;
	const struct mdc_dma_soc_data *soc;
	struct mdc_chan channels[MDC_MAX_DMA_CHANNELS];
};

static inline u32 mdc_readl(struct mdc_dma *mdma, u32 reg)
{
	return readl(mdma->regs + reg);
}

static inline void mdc_writel(struct mdc_dma *mdma, u32 val, u32 reg)
{
	writel(val, mdma->regs + reg);
}

static inline u32 mdc_chan_readl(struct mdc_chan *mchan, u32 reg)
{
	return mdc_readl(mchan->mdma, mchan->chan_nr * 0x040 + reg);
}

static inline void mdc_chan_writel(struct mdc_chan *mchan, u32 val, u32 reg)
{
	mdc_writel(mchan->mdma, val, mchan->chan_nr * 0x040 + reg);
}

static inline struct mdc_chan *to_mdc_chan(struct dma_chan *c)
{
	return container_of(to_virt_chan(c), struct mdc_chan, vc);
}

static inline struct mdc_tx_desc *to_mdc_desc(struct dma_async_tx_descriptor *t)
{
	struct virt_dma_desc *vdesc = container_of(t, struct virt_dma_desc, tx);

	return container_of(vdesc, struct mdc_tx_desc, vd);
}

static inline struct device *mdma2dev(struct mdc_dma *mdma)
{
	return mdma->dma_dev.dev;
}

static inline unsigned int to_mdc_width(unsigned int bytes)
{
	return ffs(bytes) - 1;
}

static inline void mdc_set_read_width(struct mdc_hw_list_desc *ldesc,
				      unsigned int bytes)
{
	ldesc->gen_conf |= to_mdc_width(bytes) <<
		MDC_GENERAL_CONFIG_WIDTH_R_SHIFT;
}

static inline void mdc_set_write_width(struct mdc_hw_list_desc *ldesc,
				       unsigned int bytes)
{
	ldesc->gen_conf |= to_mdc_width(bytes) <<
		MDC_GENERAL_CONFIG_WIDTH_W_SHIFT;
}
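
/*
 * Example: the WIDTH_R/WIDTH_W fields hold log2 of the access size in bytes,
 * so to_mdc_width(1) == 0, to_mdc_width(2) == 1, to_mdc_width(4) == 2 and
 * to_mdc_width(8) == 3, which fits the 3-bit (0x7) masks defined above.
 */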

static void mdc_list_desc_config(struct mdc_chan *mchan,
				 struct mdc_hw_list_desc *ldesc,
				 enum dma_transfer_direction dir,
				 dma_addr_t src, dma_addr_t dst, size_t len)
{
	struct mdc_dma *mdma = mchan->mdma;
	unsigned int max_burst, burst_size;

	ldesc->gen_conf = MDC_GENERAL_CONFIG_IEN | MDC_GENERAL_CONFIG_LIST_IEN |
		MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
		MDC_GENERAL_CONFIG_PHYSICAL_R;
	ldesc->readport_conf =
		(mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
	ldesc->read_addr = src;
	ldesc->write_addr = dst;
	ldesc->xfer_size = len - 1;
	ldesc->node_addr = 0;
	ldesc->cmds_done = 0;
	ldesc->ctrl_status = MDC_CONTROL_AND_STATUS_LIST_EN |
		MDC_CONTROL_AND_STATUS_EN;
	ldesc->next_desc = NULL;

	if (IS_ALIGNED(dst, mdma->bus_width) &&
	    IS_ALIGNED(src, mdma->bus_width))
		max_burst = mdma->bus_width * mdma->max_burst_mult;
	else
		max_burst = mdma->bus_width * (mdma->max_burst_mult - 1);

	if (dir == DMA_MEM_TO_DEV) {
		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R;
		ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
		mdc_set_read_width(ldesc, mdma->bus_width);
		mdc_set_write_width(ldesc, mchan->config.dst_addr_width);
		burst_size = min(max_burst, mchan->config.dst_maxburst *
				 mchan->config.dst_addr_width);
	} else if (dir == DMA_DEV_TO_MEM) {
		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_W;
		ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
		mdc_set_read_width(ldesc, mchan->config.src_addr_width);
		mdc_set_write_width(ldesc, mdma->bus_width);
		burst_size = min(max_burst, mchan->config.src_maxburst *
				 mchan->config.src_addr_width);
	} else {
		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R |
			MDC_GENERAL_CONFIG_INC_W;
		mdc_set_read_width(ldesc, mdma->bus_width);
		mdc_set_write_width(ldesc, mdma->bus_width);
		burst_size = max_burst;
	}
	ldesc->readport_conf |= (burst_size - 1) <<
		MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT;
}
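
/*
 * Worked example: with a 4-byte system bus and img,max-burst-multiplier = 16,
 * bus-aligned endpoints may burst up to 4 * 16 = 64 bytes, while unaligned
 * endpoints are limited to 4 * 15 = 60 bytes. Note that the BURST_SIZE field
 * is programmed as (burst_size - 1).
 */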

static void mdc_list_desc_free(struct mdc_tx_desc *mdesc)
{
	struct mdc_dma *mdma = mdesc->chan->mdma;
	struct mdc_hw_list_desc *curr, *next;
	dma_addr_t curr_phys, next_phys;

	curr = mdesc->list;
	curr_phys = mdesc->list_phys;
	while (curr) {
		next = curr->next_desc;
		next_phys = curr->node_addr;
		dma_pool_free(mdma->desc_pool, curr, curr_phys);
		curr = next;
		curr_phys = next_phys;
	}
}

static void mdc_desc_free(struct virt_dma_desc *vd)
{
	struct mdc_tx_desc *mdesc = to_mdc_desc(&vd->tx);

	mdc_list_desc_free(mdesc);
	kfree(mdesc);
}

static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys, prev_phys;

	if (!len)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;
	mdesc->list_xfer_size = len;

	while (len > 0) {
		size_t xfer_size;

		curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, &curr_phys);
		if (!curr)
			goto free_desc;

		if (prev) {
			prev->node_addr = curr_phys;
			prev->next_desc = curr;
		} else {
			mdesc->list_phys = curr_phys;
			mdesc->list = curr;
		}

		xfer_size = min_t(size_t, mdma->max_xfer_size, len);

		mdc_list_desc_config(mchan, curr, DMA_MEM_TO_MEM, src, dest,
				     xfer_size);

		prev = curr;
		prev_phys = curr_phys;

		mdesc->list_len++;
		src += xfer_size;
		dest += xfer_size;
		len -= xfer_size;
	}

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}

static int mdc_check_slave_width(struct mdc_chan *mchan,
				 enum dma_transfer_direction dir)
{
	enum dma_slave_buswidth width;

	if (dir == DMA_MEM_TO_DEV)
		width = mchan->config.dst_addr_width;
	else
		width = mchan->config.src_addr_width;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		break;
	default:
		return -EINVAL;
	}

	if (width > mchan->mdma->bus_width)
		return -EINVAL;

	return 0;
}

static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir,
	unsigned long flags)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys, prev_phys;

	if (!buf_len && !period_len)
		return NULL;

	if (!is_slave_direction(dir))
		return NULL;

	if (mdc_check_slave_width(mchan, dir) < 0)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;
	mdesc->cyclic = true;
	mdesc->list_xfer_size = buf_len;
	mdesc->list_period_len = DIV_ROUND_UP(period_len,
					      mdma->max_xfer_size);

	while (buf_len > 0) {
		size_t remainder = min(period_len, buf_len);

		while (remainder > 0) {
			size_t xfer_size;

			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
					      &curr_phys);
			if (!curr)
				goto free_desc;

			if (!prev) {
				mdesc->list_phys = curr_phys;
				mdesc->list = curr;
			} else {
				prev->node_addr = curr_phys;
				prev->next_desc = curr;
			}

			xfer_size = min_t(size_t, mdma->max_xfer_size,
					  remainder);

			if (dir == DMA_MEM_TO_DEV) {
				mdc_list_desc_config(mchan, curr, dir,
						     buf_addr,
						     mchan->config.dst_addr,
						     xfer_size);
			} else {
				mdc_list_desc_config(mchan, curr, dir,
						     mchan->config.src_addr,
						     buf_addr,
						     xfer_size);
			}

			prev = curr;
			prev_phys = curr_phys;

			mdesc->list_len++;
			buf_addr += xfer_size;
			buf_len -= xfer_size;
			remainder -= xfer_size;
		}
	}
	prev->node_addr = mdesc->list_phys;

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}

static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction dir,
	unsigned long flags, void *context)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct scatterlist *sg;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys, prev_phys;
	unsigned int i;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(dir))
		return NULL;

	if (mdc_check_slave_width(mchan, dir) < 0)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;

	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t buf = sg_dma_address(sg);
		size_t buf_len = sg_dma_len(sg);

		while (buf_len > 0) {
			size_t xfer_size;

			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
					      &curr_phys);
			if (!curr)
				goto free_desc;

			if (!prev) {
				mdesc->list_phys = curr_phys;
				mdesc->list = curr;
			} else {
				prev->node_addr = curr_phys;
				prev->next_desc = curr;
			}

			xfer_size = min_t(size_t, mdma->max_xfer_size,
					  buf_len);

			if (dir == DMA_MEM_TO_DEV) {
				mdc_list_desc_config(mchan, curr, dir, buf,
						     mchan->config.dst_addr,
						     xfer_size);
			} else {
				mdc_list_desc_config(mchan, curr, dir,
						     mchan->config.src_addr,
						     buf, xfer_size);
			}

			prev = curr;
			prev_phys = curr_phys;

			mdesc->list_len++;
			mdesc->list_xfer_size += xfer_size;
			buf += xfer_size;
			buf_len -= xfer_size;
		}
	}

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}

static void mdc_issue_desc(struct mdc_chan *mchan)
{
	struct mdc_dma *mdma = mchan->mdma;
	struct virt_dma_desc *vd;
	struct mdc_tx_desc *mdesc;
	u32 val;

	vd = vchan_next_desc(&mchan->vc);
	if (!vd)
		return;

	list_del(&vd->node);

	mdesc = to_mdc_desc(&vd->tx);
	mchan->desc = mdesc;

	dev_dbg(mdma2dev(mdma), "Issuing descriptor on channel %d\n",
		mchan->chan_nr);

	mdma->soc->enable_chan(mchan);

	val = mdc_chan_readl(mchan, MDC_GENERAL_CONFIG);
	val |= MDC_GENERAL_CONFIG_LIST_IEN | MDC_GENERAL_CONFIG_IEN |
		MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
		MDC_GENERAL_CONFIG_PHYSICAL_R;
	mdc_chan_writel(mchan, val, MDC_GENERAL_CONFIG);
	val = (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
	mdc_chan_writel(mchan, val, MDC_READ_PORT_CONFIG);
	mdc_chan_writel(mchan, mdesc->list_phys, MDC_LIST_NODE_ADDRESS);
	val = mdc_chan_readl(mchan, MDC_CONTROL_AND_STATUS);
	val |= MDC_CONTROL_AND_STATUS_LIST_EN;
	mdc_chan_writel(mchan, val, MDC_CONTROL_AND_STATUS);
}

static void mdc_issue_pending(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	if (vchan_issue_pending(&mchan->vc) && !mchan->desc)
		mdc_issue_desc(mchan);
	spin_unlock_irqrestore(&mchan->vc.lock, flags);
}

static enum dma_status mdc_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_tx_desc *mdesc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	size_t bytes = 0;
	int ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!txstate)
		return ret;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	vd = vchan_find_desc(&mchan->vc, cookie);
	if (vd) {
		mdesc = to_mdc_desc(&vd->tx);
		bytes = mdesc->list_xfer_size;
	} else if (mchan->desc && mchan->desc->vd.tx.cookie == cookie) {
		struct mdc_hw_list_desc *ldesc;
		u32 val1, val2, done, processed, residue;
		int i, cmds;

		mdesc = mchan->desc;

		/*
		 * Determine the number of commands that haven't been
		 * processed (handled by the IRQ handler) yet.
		 */
		do {
			val1 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
				~MDC_CMDS_PROCESSED_INT_ACTIVE;
			residue = mdc_chan_readl(mchan,
						 MDC_ACTIVE_TRANSFER_SIZE);
			val2 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
				~MDC_CMDS_PROCESSED_INT_ACTIVE;
		} while (val1 != val2);
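		/*
		 * The double read above guarantees that no command completed
		 * while MDC_ACTIVE_TRANSFER_SIZE was being sampled, so the
		 * residue and the command counts form a consistent snapshot.
		 */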

		done = (val1 >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
		processed = (val1 >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
		cmds = (done - processed) %
			(MDC_CMDS_PROCESSED_CMDS_DONE_MASK + 1);

		/*
		 * If the command loaded event hasn't been processed yet, then
		 * the difference above includes an extra command.
		 */
		if (!mdesc->cmd_loaded)
			cmds--;
		else
			cmds += mdesc->list_cmds_done;

		bytes = mdesc->list_xfer_size;
		ldesc = mdesc->list;
		for (i = 0; i < cmds; i++) {
			bytes -= ldesc->xfer_size + 1;
			ldesc = ldesc->next_desc;
		}
		if (ldesc) {
			if (residue != MDC_TRANSFER_SIZE_MASK)
				bytes -= ldesc->xfer_size - residue;
			else
				bytes -= ldesc->xfer_size + 1;
		}
	}
	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	dma_set_residue(txstate, bytes);

	return ret;
}

static int mdc_terminate_all(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_tx_desc *mdesc;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&mchan->vc.lock, flags);

	mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
			MDC_CONTROL_AND_STATUS);

	mdesc = mchan->desc;
	mchan->desc = NULL;
	vchan_get_all_descriptors(&mchan->vc, &head);

	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	if (mdesc)
		mdc_desc_free(&mdesc->vd);
	vchan_dma_desc_free_list(&mchan->vc, &head);

	return 0;
}

static int mdc_slave_config(struct dma_chan *chan,
			    struct dma_slave_config *config)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	mchan->config = *config;
	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	return 0;
}

static int mdc_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void mdc_free_chan_resources(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;

	mdc_terminate_all(chan);

	mdma->soc->disable_chan(mchan);
}

static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
{
	struct mdc_chan *mchan = (struct mdc_chan *)dev_id;
	struct mdc_tx_desc *mdesc;
	u32 val, processed, done1, done2;
	unsigned int i;

	spin_lock(&mchan->vc.lock);

	val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
	processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
		MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
	/*
	 * CMDS_DONE may have incremented between reading CMDS_PROCESSED
	 * and clearing INT_ACTIVE. Re-read CMDS_PROCESSED to ensure we
	 * didn't miss a command completion.
	 */
	do {
		val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
		done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
		val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK <<
			  MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) |
			 MDC_CMDS_PROCESSED_INT_ACTIVE);
		val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT;
		mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED);
		val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
		done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
	} while (done1 != done2);

	dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr);

	mdesc = mchan->desc;
	if (!mdesc) {
		dev_warn(mdma2dev(mchan->mdma),
			 "IRQ with no active descriptor on channel %d\n",
			 mchan->chan_nr);
		goto out;
	}

	for (i = processed; i != done1;
	     i = (i + 1) % (MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1)) {
		/*
		 * The first interrupt in a transfer indicates that the
		 * command list has been loaded, not that a command has
		 * been completed.
		 */
		if (!mdesc->cmd_loaded) {
			mdesc->cmd_loaded = true;
			continue;
		}

		mdesc->list_cmds_done++;
		if (mdesc->cyclic) {
			mdesc->list_cmds_done %= mdesc->list_len;
			if (mdesc->list_cmds_done % mdesc->list_period_len == 0)
				vchan_cyclic_callback(&mdesc->vd);
		} else if (mdesc->list_cmds_done == mdesc->list_len) {
			mchan->desc = NULL;
			vchan_cookie_complete(&mdesc->vd);
			mdc_issue_desc(mchan);
			break;
		}
	}
out:
	spin_unlock(&mchan->vc.lock);

	return IRQ_HANDLED;
}

static struct dma_chan *mdc_of_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *ofdma)
{
	struct mdc_dma *mdma = ofdma->of_dma_data;
	struct dma_chan *chan;

	if (dma_spec->args_count != 3)
		return NULL;

	list_for_each_entry(chan, &mdma->dma_dev.channels, device_node) {
		struct mdc_chan *mchan = to_mdc_chan(chan);

		if (!(dma_spec->args[1] & BIT(mchan->chan_nr)))
			continue;
		if (dma_get_slave_channel(chan)) {
			mchan->periph = dma_spec->args[0];
			mchan->thread = dma_spec->args[2];
			return chan;
		}
	}

	return NULL;
}

#define PISTACHIO_CR_PERIPH_DMA_ROUTE(ch)	(0x120 + 0x4 * ((ch) / 4))
#define PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(ch)	(8 * ((ch) % 4))
#define PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK	0x3f

static void pistachio_mdc_enable_chan(struct mdc_chan *mchan)
{
	struct mdc_dma *mdma = mchan->mdma;

	regmap_update_bits(mdma->periph_regs,
			   PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
			   mchan->periph <<
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr));
}

static void pistachio_mdc_disable_chan(struct mdc_chan *mchan)
{
	struct mdc_dma *mdma = mchan->mdma;

	regmap_update_bits(mdma->periph_regs,
			   PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
			   0);
}

static const struct mdc_dma_soc_data pistachio_mdc_data = {
	.enable_chan = pistachio_mdc_enable_chan,
	.disable_chan = pistachio_mdc_disable_chan,
};

static const struct of_device_id mdc_dma_of_match[] = {
	{ .compatible = "img,pistachio-mdc-dma", .data = &pistachio_mdc_data, },
	{ },
};
MODULE_DEVICE_TABLE(of, mdc_dma_of_match);

static int mdc_dma_probe(struct platform_device *pdev)
{
	struct mdc_dma *mdma;
	struct resource *res;
	const struct of_device_id *match;
	unsigned int i;
	u32 val;
	int ret;

	mdma = devm_kzalloc(&pdev->dev, sizeof(*mdma), GFP_KERNEL);
	if (!mdma)
		return -ENOMEM;
	platform_set_drvdata(pdev, mdma);

	match = of_match_device(mdc_dma_of_match, &pdev->dev);
	mdma->soc = match->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mdma->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdma->regs))
		return PTR_ERR(mdma->regs);

	mdma->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							    "img,cr-periph");
	if (IS_ERR(mdma->periph_regs))
		return PTR_ERR(mdma->periph_regs);

	mdma->clk = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(mdma->clk))
		return PTR_ERR(mdma->clk);

	ret = clk_prepare_enable(mdma->clk);
	if (ret)
		return ret;

	dma_cap_zero(mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMCPY, mdma->dma_dev.cap_mask);

	val = mdc_readl(mdma, MDC_GLOBAL_CONFIG_A);
	mdma->nr_channels = (val >> MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT) &
		MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK;
	mdma->nr_threads =
		1 << ((val >> MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT) &
		      MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK);
	mdma->bus_width =
		(1 << ((val >> MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT) &
		       MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK)) / 8;
	/*
	 * Although transfer sizes of up to MDC_TRANSFER_SIZE_MASK + 1 bytes
	 * are supported, this makes it possible for the value reported in
	 * MDC_ACTIVE_TRANSFER_SIZE to be ambiguous - an active transfer size
	 * of MDC_TRANSFER_SIZE_MASK may indicate either that 0 bytes or
	 * MDC_TRANSFER_SIZE_MASK + 1 bytes are remaining. To eliminate this
	 * ambiguity, restrict transfer sizes to one bus-width less than the
	 * actual maximum.
	 */
	mdma->max_xfer_size = MDC_TRANSFER_SIZE_MASK + 1 - mdma->bus_width;

	of_property_read_u32(pdev->dev.of_node, "dma-channels",
			     &mdma->nr_channels);
	ret = of_property_read_u32(pdev->dev.of_node,
				   "img,max-burst-multiplier",
				   &mdma->max_burst_mult);
	if (ret)
		goto disable_clk;

	mdma->dma_dev.dev = &pdev->dev;
	mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg;
	mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
	mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy;
	mdma->dma_dev.device_alloc_chan_resources = mdc_alloc_chan_resources;
	mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources;
	mdma->dma_dev.device_tx_status = mdc_tx_status;
	mdma->dma_dev.device_issue_pending = mdc_issue_pending;
	mdma->dma_dev.device_terminate_all = mdc_terminate_all;
	mdma->dma_dev.device_config = mdc_slave_config;

	mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	mdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	for (i = 1; i <= mdma->bus_width; i <<= 1) {
		mdma->dma_dev.src_addr_widths |= BIT(i);
		mdma->dma_dev.dst_addr_widths |= BIT(i);
	}

	INIT_LIST_HEAD(&mdma->dma_dev.channels);
	for (i = 0; i < mdma->nr_channels; i++) {
		struct mdc_chan *mchan = &mdma->channels[i];

		mchan->mdma = mdma;
		mchan->chan_nr = i;
		mchan->irq = platform_get_irq(pdev, i);
		if (mchan->irq < 0) {
			ret = mchan->irq;
			goto disable_clk;
		}
		ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq,
				       IRQ_TYPE_LEVEL_HIGH,
				       dev_name(&pdev->dev), mchan);
		if (ret < 0)
			goto disable_clk;

		mchan->vc.desc_free = mdc_desc_free;
		vchan_init(&mchan->vc, &mdma->dma_dev);
	}

	mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
					   sizeof(struct mdc_hw_list_desc),
					   4, 0);
	if (!mdma->desc_pool) {
		ret = -ENOMEM;
		goto disable_clk;
	}

	ret = dma_async_device_register(&mdma->dma_dev);
	if (ret)
		goto disable_clk;

	ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma);
	if (ret)
		goto unregister;

	dev_info(&pdev->dev, "MDC with %u channels and %u threads\n",
		 mdma->nr_channels, mdma->nr_threads);

	return 0;

unregister:
	dma_async_device_unregister(&mdma->dma_dev);
disable_clk:
	clk_disable_unprepare(mdma->clk);
	return ret;
}

static int mdc_dma_remove(struct platform_device *pdev)
{
	struct mdc_dma *mdma = platform_get_drvdata(pdev);
	struct mdc_chan *mchan, *next;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&mdma->dma_dev);

	list_for_each_entry_safe(mchan, next, &mdma->dma_dev.channels,
				 vc.chan.device_node) {
		list_del(&mchan->vc.chan.device_node);

		synchronize_irq(mchan->irq);
		devm_free_irq(&pdev->dev, mchan->irq, mchan);

		tasklet_kill(&mchan->vc.task);
	}

	clk_disable_unprepare(mdma->clk);

	return 0;
}

static struct platform_driver mdc_dma_driver = {
	.driver = {
		.name = "img-mdc-dma",
		.of_match_table = of_match_ptr(mdc_dma_of_match),
	},
	.probe = mdc_dma_probe,
	.remove = mdc_dma_remove,
};
module_platform_driver(mdc_dma_driver);

MODULE_DESCRIPTION("IMG Multi-threaded DMA Controller (MDC) driver");
MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
MODULE_LICENSE("GPL v2");
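
The callbacks above plug into the generic dmaengine slave API. As a rough illustration (not part of the patch; the "rx" channel name, addresses and completion hook are hypothetical), a peripheral driver would drive an MDC channel like this:

#include <linux/dmaengine.h>

/* Hypothetical client: start one device-to-memory transfer on an MDC channel. */
static int example_start_rx(struct device *dev, dma_addr_t fifo, dma_addr_t buf,
			    size_t len, dma_async_tx_callback done, void *arg)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo,		/* illustrative FIFO address */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 16,
	};
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "rx");	/* name is illustrative */
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);		/* ends up in mdc_slave_config() */
	txd = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					  DMA_PREP_INTERRUPT);
	if (!txd) {
		dma_release_channel(chan);
		return -ENOMEM;
	}
	txd->callback = done;
	txd->callback_param = arg;
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);			/* triggers mdc_issue_pending() */
	return 0;
}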
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 10bbc0a675b0..eed405976ea9 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -230,11 +230,6 @@ static inline int is_imx1_dma(struct imxdma_engine *imxdma)
 	return imxdma->devtype == IMX1_DMA;
 }
 
-static inline int is_imx21_dma(struct imxdma_engine *imxdma)
-{
-	return imxdma->devtype == IMX21_DMA;
-}
-
 static inline int is_imx27_dma(struct imxdma_engine *imxdma)
 {
 	return imxdma->devtype == IMX27_DMA;
@@ -669,69 +664,67 @@ out:
 
 }
 
-static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		unsigned long arg)
+static int imxdma_terminate_all(struct dma_chan *chan)
 {
 	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
-	struct dma_slave_config *dmaengine_cfg = (void *)arg;
 	struct imxdma_engine *imxdma = imxdmac->imxdma;
 	unsigned long flags;
-	unsigned int mode = 0;
-
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		imxdma_disable_hw(imxdmac);
 
-		spin_lock_irqsave(&imxdma->lock, flags);
-		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
-		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
-		spin_unlock_irqrestore(&imxdma->lock, flags);
-		return 0;
-	case DMA_SLAVE_CONFIG:
-		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
-			imxdmac->per_address = dmaengine_cfg->src_addr;
-			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
-			imxdmac->word_size = dmaengine_cfg->src_addr_width;
-		} else {
-			imxdmac->per_address = dmaengine_cfg->dst_addr;
-			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
-			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
-		}
-
-		switch (imxdmac->word_size) {
-		case DMA_SLAVE_BUSWIDTH_1_BYTE:
-			mode = IMX_DMA_MEMSIZE_8;
-			break;
-		case DMA_SLAVE_BUSWIDTH_2_BYTES:
-			mode = IMX_DMA_MEMSIZE_16;
-			break;
-		default:
-		case DMA_SLAVE_BUSWIDTH_4_BYTES:
-			mode = IMX_DMA_MEMSIZE_32;
-			break;
-		}
+	imxdma_disable_hw(imxdmac);
 
-		imxdmac->hw_chaining = 0;
+	spin_lock_irqsave(&imxdma->lock, flags);
+	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
+	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
+	spin_unlock_irqrestore(&imxdma->lock, flags);
+	return 0;
+}
 
-		imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
-			((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
-			CCR_REN;
-		imxdmac->ccr_to_device =
-			(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
-			((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
-		imx_dmav1_writel(imxdma, imxdmac->dma_request,
-				 DMA_RSSR(imxdmac->channel));
+static int imxdma_config(struct dma_chan *chan,
+			 struct dma_slave_config *dmaengine_cfg)
+{
+	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+	struct imxdma_engine *imxdma = imxdmac->imxdma;
+	unsigned int mode = 0;
 
-		/* Set burst length */
-		imx_dmav1_writel(imxdma, imxdmac->watermark_level *
-				imxdmac->word_size, DMA_BLR(imxdmac->channel));
+	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+		imxdmac->per_address = dmaengine_cfg->src_addr;
+		imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
+		imxdmac->word_size = dmaengine_cfg->src_addr_width;
+	} else {
+		imxdmac->per_address = dmaengine_cfg->dst_addr;
+		imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
+		imxdmac->word_size = dmaengine_cfg->dst_addr_width;
+	}
 
-		return 0;
-	default:
-		return -ENOSYS;
+	switch (imxdmac->word_size) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		mode = IMX_DMA_MEMSIZE_8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		mode = IMX_DMA_MEMSIZE_16;
+		break;
+	default:
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		mode = IMX_DMA_MEMSIZE_32;
+		break;
 	}
 
-	return -EINVAL;
+	imxdmac->hw_chaining = 0;
+
+	imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
+		((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
+		CCR_REN;
+	imxdmac->ccr_to_device =
+		(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
+		((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
+	imx_dmav1_writel(imxdma, imxdmac->dma_request,
+			 DMA_RSSR(imxdmac->channel));
+
+	/* Set burst length */
+	imx_dmav1_writel(imxdma, imxdmac->watermark_level *
+			 imxdmac->word_size, DMA_BLR(imxdmac->channel));
+
+	return 0;
 }
 
 static enum dma_status imxdma_tx_status(struct dma_chan *chan,
@@ -1184,7 +1177,8 @@ static int __init imxdma_probe(struct platform_device *pdev)
 	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
 	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
 	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
-	imxdma->dma_device.device_control = imxdma_control;
+	imxdma->dma_device.device_config = imxdma_config;
+	imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
 	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;
 
 	platform_set_drvdata(pdev, imxdma);
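
The imx-dma conversion above follows the pattern applied throughout this series: the multiplexed device_control(chan, cmd, arg) callback is split into per-operation hooks so the core can pass a typed struct dma_slave_config instead of an opaque unsigned long. A minimal sketch of the new shape (the foo_* names are placeholders, not from the patch):

#include <linux/dmaengine.h>

/* Dedicated, typed callbacks replacing the old cmd switch. */
static int foo_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	/* stash cfg; consumed by the next device_prep_slave_* call */
	return 0;
}

static int foo_terminate_all(struct dma_chan *chan)
{
	/* stop the hardware and reclaim all queued descriptors */
	return 0;
}

static void foo_register_ops(struct dma_device *dd)
{
	dd->device_config = foo_config;
	dd->device_terminate_all = foo_terminate_all;
}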
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d0df198f62e9..18c0a131e4e4 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -830,20 +830,29 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	return ret;
 }
 
-static void sdma_disable_channel(struct sdma_channel *sdmac)
+static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct sdma_channel, chan);
+}
+
+static int sdma_disable_channel(struct dma_chan *chan)
 {
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int channel = sdmac->channel;
 
 	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
 	sdmac->status = DMA_ERROR;
+
+	return 0;
 }
 
-static int sdma_config_channel(struct sdma_channel *sdmac)
+static int sdma_config_channel(struct dma_chan *chan)
 {
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	int ret;
 
-	sdma_disable_channel(sdmac);
+	sdma_disable_channel(chan);
 
 	sdmac->event_mask[0] = 0;
 	sdmac->event_mask[1] = 0;
@@ -935,11 +944,6 @@ out:
 	return ret;
 }
 
-static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
-{
-	return container_of(chan, struct sdma_channel, chan);
-}
-
 static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	unsigned long flags;
@@ -1004,7 +1008,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 
-	sdma_disable_channel(sdmac);
+	sdma_disable_channel(chan);
 
 	if (sdmac->event_id0)
 		sdma_event_disable(sdmac, sdmac->event_id0);
@@ -1203,35 +1207,24 @@ err_out:
 	return NULL;
 }
 
-static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		unsigned long arg)
+static int sdma_config(struct dma_chan *chan,
+		       struct dma_slave_config *dmaengine_cfg)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
-	struct dma_slave_config *dmaengine_cfg = (void *)arg;
-
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		sdma_disable_channel(sdmac);
-		return 0;
-	case DMA_SLAVE_CONFIG:
-		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
-			sdmac->per_address = dmaengine_cfg->src_addr;
-			sdmac->watermark_level = dmaengine_cfg->src_maxburst *
-				dmaengine_cfg->src_addr_width;
-			sdmac->word_size = dmaengine_cfg->src_addr_width;
-		} else {
-			sdmac->per_address = dmaengine_cfg->dst_addr;
-			sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
-				dmaengine_cfg->dst_addr_width;
-			sdmac->word_size = dmaengine_cfg->dst_addr_width;
-		}
-		sdmac->direction = dmaengine_cfg->direction;
-		return sdma_config_channel(sdmac);
-	default:
-		return -ENOSYS;
-	}
 
-	return -EINVAL;
+	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+		sdmac->per_address = dmaengine_cfg->src_addr;
+		sdmac->watermark_level = dmaengine_cfg->src_maxburst *
+			dmaengine_cfg->src_addr_width;
+		sdmac->word_size = dmaengine_cfg->src_addr_width;
+	} else {
+		sdmac->per_address = dmaengine_cfg->dst_addr;
+		sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
+			dmaengine_cfg->dst_addr_width;
+		sdmac->word_size = dmaengine_cfg->dst_addr_width;
+	}
+	sdmac->direction = dmaengine_cfg->direction;
+	return sdma_config_channel(chan);
 }
 
 static enum dma_status sdma_tx_status(struct dma_chan *chan,
@@ -1303,15 +1296,15 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
 	if (header->ram_code_start + header->ram_code_size > fw->size)
 		goto err_firmware;
 	switch (header->version_major) {
-		case 1:
-			sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
-			break;
-		case 2:
-			sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
-			break;
-		default:
-			dev_err(sdma->dev, "unknown firmware version\n");
-			goto err_firmware;
+	case 1:
+		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+		break;
+	case 2:
+		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
+		break;
+	default:
+		dev_err(sdma->dev, "unknown firmware version\n");
+		goto err_firmware;
 	}
 
 	addr = (void *)header + header->script_addrs_start;
@@ -1479,7 +1472,7 @@ static int sdma_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
+	sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
 	if (!sdma)
 		return -ENOMEM;
 
@@ -1488,48 +1481,34 @@ static int sdma_probe(struct platform_device *pdev)
 	sdma->dev = &pdev->dev;
 	sdma->drvdata = drvdata;
 
-	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq = platform_get_irq(pdev, 0);
-	if (!iores || irq < 0) {
-		ret = -EINVAL;
-		goto err_irq;
-	}
+	if (irq < 0)
+		return irq;
 
-	if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) {
-		ret = -EBUSY;
-		goto err_request_region;
-	}
+	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
+	if (IS_ERR(sdma->regs))
+		return PTR_ERR(sdma->regs);
 
 	sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
-	if (IS_ERR(sdma->clk_ipg)) {
-		ret = PTR_ERR(sdma->clk_ipg);
-		goto err_clk;
-	}
+	if (IS_ERR(sdma->clk_ipg))
+		return PTR_ERR(sdma->clk_ipg);
 
 	sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
-	if (IS_ERR(sdma->clk_ahb)) {
-		ret = PTR_ERR(sdma->clk_ahb);
-		goto err_clk;
-	}
+	if (IS_ERR(sdma->clk_ahb))
+		return PTR_ERR(sdma->clk_ahb);
 
 	clk_prepare(sdma->clk_ipg);
 	clk_prepare(sdma->clk_ahb);
 
-	sdma->regs = ioremap(iores->start, resource_size(iores));
-	if (!sdma->regs) {
-		ret = -ENOMEM;
-		goto err_ioremap;
-	}
-
-	ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma);
+	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
+			       sdma);
 	if (ret)
-		goto err_request_irq;
+		return ret;
 
 	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
-	if (!sdma->script_addrs) {
-		ret = -ENOMEM;
-		goto err_alloc;
-	}
+	if (!sdma->script_addrs)
+		return -ENOMEM;
 
 	/* initially no scripts available */
 	saddr_arr = (s32 *)sdma->script_addrs;
@@ -1600,7 +1579,12 @@ static int sdma_probe(struct platform_device *pdev)
 	sdma->dma_device.device_tx_status = sdma_tx_status;
 	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
 	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
-	sdma->dma_device.device_control = sdma_control;
+	sdma->dma_device.device_config = sdma_config;
+	sdma->dma_device.device_terminate_all = sdma_disable_channel;
+	sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	sdma->dma_device.device_issue_pending = sdma_issue_pending;
 	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
 	dma_set_max_seg_size(sdma->dma_device.dev, 65535);
@@ -1629,38 +1613,22 @@ err_register:
 	dma_async_device_unregister(&sdma->dma_device);
 err_init:
 	kfree(sdma->script_addrs);
-err_alloc:
-	free_irq(irq, sdma);
-err_request_irq:
-	iounmap(sdma->regs);
-err_ioremap:
-err_clk:
-	release_mem_region(iores->start, resource_size(iores));
-err_request_region:
-err_irq:
-	kfree(sdma);
 	return ret;
 }
 
 static int sdma_remove(struct platform_device *pdev)
 {
 	struct sdma_engine *sdma = platform_get_drvdata(pdev);
-	struct resource *iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	int irq = platform_get_irq(pdev, 0);
 	int i;
 
 	dma_async_device_unregister(&sdma->dma_device);
 	kfree(sdma->script_addrs);
-	free_irq(irq, sdma);
-	iounmap(sdma->regs);
-	release_mem_region(iores->start, resource_size(iores));
 	/* Kill the tasklet */
 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
 		struct sdma_channel *sdmac = &sdma->channel[i];
 
 		tasklet_kill(&sdmac->tasklet);
 	}
-	kfree(sdma);
 
 	platform_set_drvdata(pdev, NULL);
 	dev_info(&pdev->dev, "Removed...\n");
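
The probe/remove hunks above also switch imx-sdma to managed (devm_*) resources, which is why the err_* unwind ladder disappears. A condensed sketch of the pattern, with foo_* placeholders:

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct resource *iores;
	void __iomem *regs;
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;		/* nothing allocated yet, just return */

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, iores);	/* request + map */
	if (IS_ERR(regs))
		return PTR_ERR(regs);	/* region auto-released on failure */

	/* freed automatically on probe failure and on device removal */
	return devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
				dev_name(&pdev->dev), NULL);
}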
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 1aab8130efa1..5aaead9b56f7 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -492,10 +492,10 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
 	return ret;
 }
 
-static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
+static int intel_mid_dma_config(struct dma_chan *chan,
+				struct dma_slave_config *slave)
 {
 	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
-	struct dma_slave_config *slave = (struct dma_slave_config *)arg;
 	struct intel_mid_dma_slave *mid_slave;
 
 	BUG_ON(!midc);
@@ -509,28 +509,14 @@ static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
 	midc->mid_slave = mid_slave;
 	return 0;
 }
-/**
- * intel_mid_dma_device_control - DMA device control
- * @chan: chan for DMA control
- * @cmd: control cmd
- * @arg: cmd arg value
- *
- * Perform DMA control command
- */
-static int intel_mid_dma_device_control(struct dma_chan *chan,
-	    enum dma_ctrl_cmd cmd, unsigned long arg)
+
+static int intel_mid_dma_terminate_all(struct dma_chan *chan)
 {
 	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
 	struct middma_device	*mid = to_middma_device(chan->device);
 	struct intel_mid_dma_desc	*desc, *_desc;
 	union intel_mid_dma_cfg_lo cfg_lo;
 
-	if (cmd == DMA_SLAVE_CONFIG)
-		return dma_slave_control(chan, arg);
-
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
-
 	spin_lock_bh(&midc->lock);
 	if (midc->busy == false) {
 		spin_unlock_bh(&midc->lock);
@@ -1148,7 +1134,8 @@ static int mid_setup_dma(struct pci_dev *pdev)
 	dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
 	dma->common.device_issue_pending = intel_mid_dma_issue_pending;
 	dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
-	dma->common.device_control = intel_mid_dma_device_control;
+	dma->common.device_config = intel_mid_dma_config;
+	dma->common.device_terminate_all = intel_mid_dma_terminate_all;
 
 	/*enable dma cntrl*/
 	iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 32eae38291e5..77a6dcf25b98 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -214,6 +214,11 @@ static bool is_bwd_ioat(struct pci_dev *pdev)
 	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
 	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
 	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+	/* even though not Atom, BDX-DE has same DMA silicon */
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
 		return true;
 	default:
 		return false;
@@ -489,6 +494,7 @@ static void ioat3_eh(struct ioat2_dma_chan *ioat)
 	struct ioat_chan_common *chan = &ioat->base;
 	struct pci_dev *pdev = to_pdev(chan);
 	struct ioat_dma_descriptor *hw;
+	struct dma_async_tx_descriptor *tx;
 	u64 phys_complete;
 	struct ioat_ring_ent *desc;
 	u32 err_handled = 0;
@@ -534,6 +540,16 @@ static void ioat3_eh(struct ioat2_dma_chan *ioat)
 		dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
 			__func__, chanerr, err_handled);
 		BUG();
+	} else { /* cleanup the faulty descriptor */
+		tx = &desc->txd;
+		if (tx->cookie) {
+			dma_cookie_complete(tx);
+			dma_descriptor_unmap(tx);
+			if (tx->callback) {
+				tx->callback(tx->callback_param);
+				tx->callback = NULL;
+			}
+		}
 	}
 
 	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -1300,7 +1316,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 
 	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
-	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+	if (tmo == 0 ||
+	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
 		dev_err(dev, "Self-test xor timed out\n");
 		err = -ENODEV;
 		goto dma_unmap;
@@ -1366,7 +1383,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 
 	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
-	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+	if (tmo == 0 ||
+	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test validate timed out\n");
 		err = -ENODEV;
 		goto dma_unmap;
@@ -1418,7 +1436,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 
 	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
-	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+	if (tmo == 0 ||
+	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
 		dev_err(dev, "Self-test 2nd validate timed out\n");
 		err = -ENODEV;
 		goto dma_unmap;
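
The three self-test hunks above hinge on the return convention of wait_for_completion_timeout(): it returns 0 when the timeout elapsed and the remaining jiffies otherwise, so checking only the DMA status could miss a hang. A small sketch of the corrected pattern:

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/jiffies.h>

/* Sketch: fail if the completion never fired OR the cookie isn't done. */
static bool example_selftest_ok(struct completion *cmp, struct dma_chan *chan,
				struct dma_device *dma, dma_cookie_t cookie)
{
	unsigned long tmo;

	tmo = wait_for_completion_timeout(cmp, msecs_to_jiffies(3000));
	if (tmo == 0)		/* timed out: zero jiffies remaining */
		return false;
	return dma->device_tx_status(chan, cookie, NULL) == DMA_COMPLETE;
}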
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 62f83e983d8d..02177ecf09f8 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -57,6 +57,11 @@
 #define PCI_DEVICE_ID_INTEL_IOAT_BWD2	0x0C52
 #define PCI_DEVICE_ID_INTEL_IOAT_BWD3	0x0C53
 
+#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE0	0x6f50
+#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE1	0x6f51
+#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE2	0x6f52
+#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE3	0x6f53
+
 #define IOAT_VER_1_2            0x12    /* Version 1.2 */
 #define IOAT_VER_2_0            0x20    /* Version 2.0 */
 #define IOAT_VER_3_0            0x30    /* Version 3.0 */
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 1d051cd045db..5501eb072d69 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -111,6 +111,11 @@ static struct pci_device_id ioat_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },
 
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },
+
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index c2b017ad139d..b54f62de9232 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1398,76 +1398,81 @@ static void idmac_issue_pending(struct dma_chan *chan)
 	 */
 }
 
-static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			   unsigned long arg)
+static int idmac_pause(struct dma_chan *chan)
 {
 	struct idmac_channel *ichan = to_idmac_chan(chan);
 	struct idmac *idmac = to_idmac(chan->device);
 	struct ipu *ipu = to_ipu(idmac);
 	struct list_head *list, *tmp;
 	unsigned long flags;
-	int i;
 
-	switch (cmd) {
-	case DMA_PAUSE:
-		spin_lock_irqsave(&ipu->lock, flags);
-		ipu_ic_disable_task(ipu, chan->chan_id);
+	mutex_lock(&ichan->chan_mutex);
 
-		/* Return all descriptors into "prepared" state */
-		list_for_each_safe(list, tmp, &ichan->queue)
-			list_del_init(list);
+	spin_lock_irqsave(&ipu->lock, flags);
+	ipu_ic_disable_task(ipu, chan->chan_id);
 
-		ichan->sg[0] = NULL;
-		ichan->sg[1] = NULL;
+	/* Return all descriptors into "prepared" state */
+	list_for_each_safe(list, tmp, &ichan->queue)
+		list_del_init(list);
 
-		spin_unlock_irqrestore(&ipu->lock, flags);
+	ichan->sg[0] = NULL;
+	ichan->sg[1] = NULL;
 
-		ichan->status = IPU_CHANNEL_INITIALIZED;
-		break;
-	case DMA_TERMINATE_ALL:
-		ipu_disable_channel(idmac, ichan,
-				    ichan->status >= IPU_CHANNEL_ENABLED);
+	spin_unlock_irqrestore(&ipu->lock, flags);
 
-		tasklet_disable(&ipu->tasklet);
+	ichan->status = IPU_CHANNEL_INITIALIZED;
 
-		/* ichan->queue is modified in ISR, have to spinlock */
-		spin_lock_irqsave(&ichan->lock, flags);
-		list_splice_init(&ichan->queue, &ichan->free_list);
+	mutex_unlock(&ichan->chan_mutex);
 
-		if (ichan->desc)
-			for (i = 0; i < ichan->n_tx_desc; i++) {
-				struct idmac_tx_desc *desc = ichan->desc + i;
-				if (list_empty(&desc->list))
-					/* Descriptor was prepared, but not submitted */
-					list_add(&desc->list, &ichan->free_list);
+	return 0;
+}
 
-				async_tx_clear_ack(&desc->txd);
-			}
+static int __idmac_terminate_all(struct dma_chan *chan)
+{
+	struct idmac_channel *ichan = to_idmac_chan(chan);
+	struct idmac *idmac = to_idmac(chan->device);
+	struct ipu *ipu = to_ipu(idmac);
+	unsigned long flags;
+	int i;
 
-		ichan->sg[0] = NULL;
-		ichan->sg[1] = NULL;
-		spin_unlock_irqrestore(&ichan->lock, flags);
+	ipu_disable_channel(idmac, ichan,
+			    ichan->status >= IPU_CHANNEL_ENABLED);
 
-		tasklet_enable(&ipu->tasklet);
+	tasklet_disable(&ipu->tasklet);
 
-		ichan->status = IPU_CHANNEL_INITIALIZED;
-		break;
-	default:
-		return -ENOSYS;
-	}
+	/* ichan->queue is modified in ISR, have to spinlock */
+	spin_lock_irqsave(&ichan->lock, flags);
+	list_splice_init(&ichan->queue, &ichan->free_list);
+
+	if (ichan->desc)
+		for (i = 0; i < ichan->n_tx_desc; i++) {
+			struct idmac_tx_desc *desc = ichan->desc + i;
+			if (list_empty(&desc->list))
+				/* Descriptor was prepared, but not submitted */
+				list_add(&desc->list, &ichan->free_list);
+
+			async_tx_clear_ack(&desc->txd);
+		}
+
+	ichan->sg[0] = NULL;
+	ichan->sg[1] = NULL;
+	spin_unlock_irqrestore(&ichan->lock, flags);
+
+	tasklet_enable(&ipu->tasklet);
+
+	ichan->status = IPU_CHANNEL_INITIALIZED;
 
 	return 0;
 }
 
-static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			 unsigned long arg)
+static int idmac_terminate_all(struct dma_chan *chan)
 {
 	struct idmac_channel *ichan = to_idmac_chan(chan);
 	int ret;
 
 	mutex_lock(&ichan->chan_mutex);
 
-	ret = __idmac_control(chan, cmd, arg);
+	ret = __idmac_terminate_all(chan);
 
 	mutex_unlock(&ichan->chan_mutex);
 
@@ -1568,7 +1573,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
 
 	mutex_lock(&ichan->chan_mutex);
 
-	__idmac_control(chan, DMA_TERMINATE_ALL, 0);
+	__idmac_terminate_all(chan);
 
 	if (ichan->status > IPU_CHANNEL_FREE) {
 #ifdef DEBUG
@@ -1622,7 +1627,8 @@ static int __init ipu_idmac_init(struct ipu *ipu)
 
 	/* Compulsory for DMA_SLAVE fields */
 	dma->device_prep_slave_sg = idmac_prep_slave_sg;
-	dma->device_control = idmac_control;
+	dma->device_pause = idmac_pause;
+	dma->device_terminate_all = idmac_terminate_all;
 
 	INIT_LIST_HEAD(&dma->channels);
 	for (i = 0; i < IPU_CHANNELS_NUM; i++) {
@@ -1655,7 +1661,7 @@ static void ipu_idmac_exit(struct ipu *ipu)
 	for (i = 0; i < IPU_CHANNELS_NUM; i++) {
 		struct idmac_channel *ichan = ipu->channel + i;
 
-		idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL, 0);
+		idmac_terminate_all(&ichan->dma_chan);
 	}
 
 	dma_async_device_unregister(&idmac->dma);
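
ipu-idmac is the one driver in this batch with a DMA_PAUSE path, so its device_control splits three ways: pause, terminate, and the lock-free __idmac_terminate_all() helper for the free-channel path. After the split, the core helpers resolve directly to the new ops; a sketch of roughly what the 3.19-era core does (the example_* wrappers are illustrative, not the real helper names):

#include <linux/dmaengine.h>

static inline int example_pause(struct dma_chan *chan)
{
	if (chan->device->device_pause)
		return chan->device->device_pause(chan);	/* idmac_pause() */
	return -ENOSYS;
}

static inline int example_terminate(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);
	return -ENOSYS;
}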
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index a1de14ab2c51..6f7f43529ccb 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -441,7 +441,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
 	num = 0;
 
 	if (!c->ccfg) {
-		/* default is memtomem, without calling device_control */
+		/* default is memtomem, without calling device_config */
 		c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
 		c->ccfg |= (0xf << 20) | (0xf << 24);	/* burst = 16 */
 		c->ccfg |= (0x3 << 12) | (0x3 << 16);	/* width = 64 bit */
@@ -523,112 +523,126 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
 	return vchan_tx_prep(&c->vc, &ds->vd, flags);
 }
 
-static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			  unsigned long arg)
+static int k3_dma_config(struct dma_chan *chan,
+			 struct dma_slave_config *cfg)
+{
+	struct k3_dma_chan *c = to_k3_chan(chan);
+	u32 maxburst = 0, val = 0;
+	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+
+	if (cfg == NULL)
+		return -EINVAL;
+	c->dir = cfg->direction;
+	if (c->dir == DMA_DEV_TO_MEM) {
+		c->ccfg = CX_CFG_DSTINCR;
+		c->dev_addr = cfg->src_addr;
+		maxburst = cfg->src_maxburst;
+		width = cfg->src_addr_width;
+	} else if (c->dir == DMA_MEM_TO_DEV) {
+		c->ccfg = CX_CFG_SRCINCR;
+		c->dev_addr = cfg->dst_addr;
+		maxburst = cfg->dst_maxburst;
+		width = cfg->dst_addr_width;
+	}
+	switch (width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+	case DMA_SLAVE_BUSWIDTH_8_BYTES:
+		val = __ffs(width);
+		break;
+	default:
+		val = 3;
+		break;
+	}
+	c->ccfg |= (val << 12) | (val << 16);
+
+	if ((maxburst == 0) || (maxburst > 16))
+		val = 16;
+	else
+		val = maxburst - 1;
+	c->ccfg |= (val << 20) | (val << 24);
+	c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
+
+	/* specific request line */
+	c->ccfg |= c->vc.chan.chan_id << 4;
+
+	return 0;
+}
+
+static int k3_dma_terminate_all(struct dma_chan *chan)
 {
 	struct k3_dma_chan *c = to_k3_chan(chan);
 	struct k3_dma_dev *d = to_k3_dma(chan->device);
-	struct dma_slave_config *cfg = (void *)arg;
 	struct k3_dma_phy *p = c->phy;
 	unsigned long flags;
-	u32 maxburst = 0, val = 0;
-	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
 	LIST_HEAD(head);
 
-	switch (cmd) {
-	case DMA_SLAVE_CONFIG:
-		if (cfg == NULL)
-			return -EINVAL;
-		c->dir = cfg->direction;
-		if (c->dir == DMA_DEV_TO_MEM) {
-			c->ccfg = CX_CFG_DSTINCR;
-			c->dev_addr = cfg->src_addr;
-			maxburst = cfg->src_maxburst;
-			width = cfg->src_addr_width;
-		} else if (c->dir == DMA_MEM_TO_DEV) {
-			c->ccfg = CX_CFG_SRCINCR;
-			c->dev_addr = cfg->dst_addr;
-			maxburst = cfg->dst_maxburst;
-			width = cfg->dst_addr_width;
-		}
-		switch (width) {
-		case DMA_SLAVE_BUSWIDTH_1_BYTE:
-		case DMA_SLAVE_BUSWIDTH_2_BYTES:
-		case DMA_SLAVE_BUSWIDTH_4_BYTES:
-		case DMA_SLAVE_BUSWIDTH_8_BYTES:
-			val = __ffs(width);
-			break;
-		default:
-			val = 3;
-			break;
-		}
-		c->ccfg |= (val << 12) | (val << 16);
+	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
 
-		if ((maxburst == 0) || (maxburst > 16))
-			val = 16;
-		else
-			val = maxburst - 1;
-		c->ccfg |= (val << 20) | (val << 24);
-		c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
+	/* Prevent this channel being scheduled */
+	spin_lock(&d->lock);
+	list_del_init(&c->node);
+	spin_unlock(&d->lock);
 
-		/* specific request line */
-		c->ccfg |= c->vc.chan.chan_id << 4;
-		break;
+	/* Clear the tx descriptor lists */
+	spin_lock_irqsave(&c->vc.lock, flags);
+	vchan_get_all_descriptors(&c->vc, &head);
+	if (p) {
+		/* vchan is assigned to a pchan - stop the channel */
+		k3_dma_terminate_chan(p, d);
+		c->phy = NULL;
+		p->vchan = NULL;
+		p->ds_run = p->ds_done = NULL;
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
599 vchan_dma_desc_free_list(&c->vc, &head);
577 600
578 case DMA_TERMINATE_ALL: 601 return 0;
579 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); 602}
580 603
581 /* Prevent this channel being scheduled */ 604static int k3_dma_transfer_pause(struct dma_chan *chan)
582 spin_lock(&d->lock); 605{
583 list_del_init(&c->node); 606 struct k3_dma_chan *c = to_k3_chan(chan);
584 spin_unlock(&d->lock); 607 struct k3_dma_dev *d = to_k3_dma(chan->device);
608 struct k3_dma_phy *p = c->phy;
585 609
586 /* Clear the tx descriptor lists */ 610 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
587 spin_lock_irqsave(&c->vc.lock, flags); 611 if (c->status == DMA_IN_PROGRESS) {
588 vchan_get_all_descriptors(&c->vc, &head); 612 c->status = DMA_PAUSED;
589 if (p) { 613 if (p) {
590 /* vchan is assigned to a pchan - stop the channel */ 614 k3_dma_pause_dma(p, false);
591 k3_dma_terminate_chan(p, d); 615 } else {
592 c->phy = NULL; 616 spin_lock(&d->lock);
593 p->vchan = NULL; 617 list_del_init(&c->node);
594 p->ds_run = p->ds_done = NULL; 618 spin_unlock(&d->lock);
595 } 619 }
596 spin_unlock_irqrestore(&c->vc.lock, flags); 620 }
597 vchan_dma_desc_free_list(&c->vc, &head);
598 break;
599 621
600 case DMA_PAUSE: 622 return 0;
601 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); 623}
602 if (c->status == DMA_IN_PROGRESS) {
603 c->status = DMA_PAUSED;
604 if (p) {
605 k3_dma_pause_dma(p, false);
606 } else {
607 spin_lock(&d->lock);
608 list_del_init(&c->node);
609 spin_unlock(&d->lock);
610 }
611 }
612 break;
613 624
614 case DMA_RESUME: 625static int k3_dma_transfer_resume(struct dma_chan *chan)
615 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); 626{
616 spin_lock_irqsave(&c->vc.lock, flags); 627 struct k3_dma_chan *c = to_k3_chan(chan);
617 if (c->status == DMA_PAUSED) { 628 struct k3_dma_dev *d = to_k3_dma(chan->device);
618 c->status = DMA_IN_PROGRESS; 629 struct k3_dma_phy *p = c->phy;
619 if (p) { 630 unsigned long flags;
620 k3_dma_pause_dma(p, true); 631
621 } else if (!list_empty(&c->vc.desc_issued)) { 632 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
622 spin_lock(&d->lock); 633 spin_lock_irqsave(&c->vc.lock, flags);
623 list_add_tail(&c->node, &d->chan_pending); 634 if (c->status == DMA_PAUSED) {
624 spin_unlock(&d->lock); 635 c->status = DMA_IN_PROGRESS;
625 } 636 if (p) {
637 k3_dma_pause_dma(p, true);
638 } else if (!list_empty(&c->vc.desc_issued)) {
639 spin_lock(&d->lock);
640 list_add_tail(&c->node, &d->chan_pending);
641 spin_unlock(&d->lock);
626 } 642 }
627 spin_unlock_irqrestore(&c->vc.lock, flags);
628 break;
629 default:
630 return -ENXIO;
631 } 643 }
644 spin_unlock_irqrestore(&c->vc.lock, flags);
645
632 return 0; 646 return 0;
633} 647}
634 648
@@ -720,7 +734,10 @@ static int k3_dma_probe(struct platform_device *op)
720 d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy; 734 d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
721 d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg; 735 d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
722 d->slave.device_issue_pending = k3_dma_issue_pending; 736 d->slave.device_issue_pending = k3_dma_issue_pending;
723 d->slave.device_control = k3_dma_control; 737 d->slave.device_config = k3_dma_config;
738 d->slave.device_pause = k3_dma_transfer_pause;
739 d->slave.device_resume = k3_dma_transfer_resume;
740 d->slave.device_terminate_all = k3_dma_terminate_all;
724 d->slave.copy_align = DMA_ALIGN; 741 d->slave.copy_align = DMA_ALIGN;
725 742
726 /* init virtual channel */ 743 /* init virtual channel */
@@ -787,7 +804,7 @@ static int k3_dma_remove(struct platform_device *op)
787} 804}
788 805
789#ifdef CONFIG_PM_SLEEP 806#ifdef CONFIG_PM_SLEEP
790static int k3_dma_suspend(struct device *dev) 807static int k3_dma_suspend_dev(struct device *dev)
791{ 808{
792 struct k3_dma_dev *d = dev_get_drvdata(dev); 809 struct k3_dma_dev *d = dev_get_drvdata(dev);
793 u32 stat = 0; 810 u32 stat = 0;
@@ -803,7 +820,7 @@ static int k3_dma_suspend(struct device *dev)
803 return 0; 820 return 0;
804} 821}
805 822
806static int k3_dma_resume(struct device *dev) 823static int k3_dma_resume_dev(struct device *dev)
807{ 824{
808 struct k3_dma_dev *d = dev_get_drvdata(dev); 825 struct k3_dma_dev *d = dev_get_drvdata(dev);
809 int ret = 0; 826 int ret = 0;
@@ -818,7 +835,7 @@ static int k3_dma_resume(struct device *dev)
818} 835}
819#endif 836#endif
820 837
821static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume); 838static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);
822 839
823static struct platform_driver k3_pdma_driver = { 840static struct platform_driver k3_pdma_driver = {
824 .driver = { 841 .driver = {
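For reference, the width/burst encoding that k3_dma_config() now performs unconditionally can be checked in isolation. A small user-space sketch, with the shift positions taken from the hunk above (user-space ffs() is 1-based, so ffs(w) - 1 stands in for the kernel's __ffs()):

/* Worked example of the k3dma ccfg encoding (user-space sketch). */
#include <stdio.h>
#include <strings.h>

int main(void)
{
	unsigned int width = 4;     /* DMA_SLAVE_BUSWIDTH_4_BYTES */
	unsigned int maxburst = 8;
	unsigned int val, ccfg = 0;

	val = ffs(width) - 1;                   /* log2(4) = 2 */
	ccfg |= (val << 12) | (val << 16);      /* src/dst width fields */

	val = (maxburst == 0 || maxburst > 16) ? 16 : maxburst - 1;
	ccfg |= (val << 20) | (val << 24);      /* src/dst burst fields */

	printf("ccfg = 0x%08x\n", ccfg);        /* prints ccfg = 0x07722000 */
	return 0;
}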
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 8b8952f35e6c..8926f271904e 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -683,68 +683,70 @@ fail:
683 return NULL; 683 return NULL;
684} 684}
685 685
686static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, 686static int mmp_pdma_config(struct dma_chan *dchan,
687 unsigned long arg) 687 struct dma_slave_config *cfg)
688{ 688{
689 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); 689 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
690 struct dma_slave_config *cfg = (void *)arg;
691 unsigned long flags;
692 u32 maxburst = 0, addr = 0; 690 u32 maxburst = 0, addr = 0;
693 enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; 691 enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
694 692
695 if (!dchan) 693 if (!dchan)
696 return -EINVAL; 694 return -EINVAL;
697 695
698 switch (cmd) { 696 if (cfg->direction == DMA_DEV_TO_MEM) {
699 case DMA_TERMINATE_ALL: 697 chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
700 disable_chan(chan->phy); 698 maxburst = cfg->src_maxburst;
701 mmp_pdma_free_phy(chan); 699 width = cfg->src_addr_width;
702 spin_lock_irqsave(&chan->desc_lock, flags); 700 addr = cfg->src_addr;
703 mmp_pdma_free_desc_list(chan, &chan->chain_pending); 701 } else if (cfg->direction == DMA_MEM_TO_DEV) {
704 mmp_pdma_free_desc_list(chan, &chan->chain_running); 702 chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
705 spin_unlock_irqrestore(&chan->desc_lock, flags); 703 maxburst = cfg->dst_maxburst;
706 chan->idle = true; 704 width = cfg->dst_addr_width;
707 break; 705 addr = cfg->dst_addr;
708 case DMA_SLAVE_CONFIG:
709 if (cfg->direction == DMA_DEV_TO_MEM) {
710 chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
711 maxburst = cfg->src_maxburst;
712 width = cfg->src_addr_width;
713 addr = cfg->src_addr;
714 } else if (cfg->direction == DMA_MEM_TO_DEV) {
715 chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
716 maxburst = cfg->dst_maxburst;
717 width = cfg->dst_addr_width;
718 addr = cfg->dst_addr;
719 }
720
721 if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
722 chan->dcmd |= DCMD_WIDTH1;
723 else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
724 chan->dcmd |= DCMD_WIDTH2;
725 else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
726 chan->dcmd |= DCMD_WIDTH4;
727
728 if (maxburst == 8)
729 chan->dcmd |= DCMD_BURST8;
730 else if (maxburst == 16)
731 chan->dcmd |= DCMD_BURST16;
732 else if (maxburst == 32)
733 chan->dcmd |= DCMD_BURST32;
734
735 chan->dir = cfg->direction;
736 chan->dev_addr = addr;
737 /* FIXME: drivers should be ported over to use the filter
738 * function. Once that's done, the following two lines can
739 * be removed.
740 */
741 if (cfg->slave_id)
742 chan->drcmr = cfg->slave_id;
743 break;
744 default:
745 return -ENOSYS;
746 } 706 }
747 707
708 if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
709 chan->dcmd |= DCMD_WIDTH1;
710 else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
711 chan->dcmd |= DCMD_WIDTH2;
712 else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
713 chan->dcmd |= DCMD_WIDTH4;
714
715 if (maxburst == 8)
716 chan->dcmd |= DCMD_BURST8;
717 else if (maxburst == 16)
718 chan->dcmd |= DCMD_BURST16;
719 else if (maxburst == 32)
720 chan->dcmd |= DCMD_BURST32;
721
722 chan->dir = cfg->direction;
723 chan->dev_addr = addr;
724 /* FIXME: drivers should be ported over to use the filter
725 * function. Once that's done, the following two lines can
726 * be removed.
727 */
728 if (cfg->slave_id)
729 chan->drcmr = cfg->slave_id;
730
731 return 0;
732}
733
734static int mmp_pdma_terminate_all(struct dma_chan *dchan)
735{
736 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
737 unsigned long flags;
738
739 if (!dchan)
740 return -EINVAL;
741
742 disable_chan(chan->phy);
743 mmp_pdma_free_phy(chan);
744 spin_lock_irqsave(&chan->desc_lock, flags);
745 mmp_pdma_free_desc_list(chan, &chan->chain_pending);
746 mmp_pdma_free_desc_list(chan, &chan->chain_running);
747 spin_unlock_irqrestore(&chan->desc_lock, flags);
748 chan->idle = true;
749
748 return 0; 750 return 0;
749} 751}
750 752
@@ -1061,7 +1063,8 @@ static int mmp_pdma_probe(struct platform_device *op)
1061 pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg; 1063 pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
1062 pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic; 1064 pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
1063 pdev->device.device_issue_pending = mmp_pdma_issue_pending; 1065 pdev->device.device_issue_pending = mmp_pdma_issue_pending;
1064 pdev->device.device_control = mmp_pdma_control; 1066 pdev->device.device_config = mmp_pdma_config;
1067 pdev->device.device_terminate_all = mmp_pdma_terminate_all;
1065 pdev->device.copy_align = PDMA_ALIGNMENT; 1068 pdev->device.copy_align = PDMA_ALIGNMENT;
1066 1069
1067 if (pdev->dev->coherent_dma_mask) 1070 if (pdev->dev->coherent_dma_mask)
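Nothing changes for DMA clients: the generic wrappers from <linux/dmaengine.h> keep the same signatures and are simply rerouted to the new callbacks. A sketch of the consumer side (the channel and FIFO address are assumed to come from the caller):

#include <linux/dmaengine.h>

static int client_setup_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction      = DMA_MEM_TO_DEV,
		.dst_addr       = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst   = 16,
	};

	/* Now routed to ->device_config() instead of
	 * ->device_control(chan, DMA_SLAVE_CONFIG, (unsigned long)&cfg). */
	return dmaengine_slave_config(chan, &cfg);
}

static void client_stop(struct dma_chan *chan)
{
	/* Now routed to ->device_terminate_all(). */
	dmaengine_terminate_all(chan);
}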
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index bfb46957c3dc..70c2fa9963cd 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -19,7 +19,6 @@
19#include <linux/dmaengine.h> 19#include <linux/dmaengine.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/device.h> 21#include <linux/device.h>
22#include <mach/regs-icu.h>
23#include <linux/platform_data/dma-mmp_tdma.h> 22#include <linux/platform_data/dma-mmp_tdma.h>
24#include <linux/of_device.h> 23#include <linux/of_device.h>
25#include <linux/of_dma.h> 24#include <linux/of_dma.h>
@@ -164,33 +163,46 @@ static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
164 tdmac->status = DMA_IN_PROGRESS; 163 tdmac->status = DMA_IN_PROGRESS;
165} 164}
166 165
167static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac) 166static int mmp_tdma_disable_chan(struct dma_chan *chan)
168{ 167{
168 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
169
169 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, 170 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
170 tdmac->reg_base + TDCR); 171 tdmac->reg_base + TDCR);
171 172
172 tdmac->status = DMA_COMPLETE; 173 tdmac->status = DMA_COMPLETE;
174
175 return 0;
173} 176}
174 177
175static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac) 178static int mmp_tdma_resume_chan(struct dma_chan *chan)
176{ 179{
180 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
181
177 writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN, 182 writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
178 tdmac->reg_base + TDCR); 183 tdmac->reg_base + TDCR);
179 tdmac->status = DMA_IN_PROGRESS; 184 tdmac->status = DMA_IN_PROGRESS;
185
186 return 0;
180} 187}
181 188
182static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac) 189static int mmp_tdma_pause_chan(struct dma_chan *chan)
183{ 190{
191 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
192
184 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, 193 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
185 tdmac->reg_base + TDCR); 194 tdmac->reg_base + TDCR);
186 tdmac->status = DMA_PAUSED; 195 tdmac->status = DMA_PAUSED;
196
197 return 0;
187} 198}
188 199
189static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac) 200static int mmp_tdma_config_chan(struct dma_chan *chan)
190{ 201{
202 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
191 unsigned int tdcr = 0; 203 unsigned int tdcr = 0;
192 204
193 mmp_tdma_disable_chan(tdmac); 205 mmp_tdma_disable_chan(chan);
194 206
195 if (tdmac->dir == DMA_MEM_TO_DEV) 207 if (tdmac->dir == DMA_MEM_TO_DEV)
196 tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC; 208 tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC;
@@ -452,42 +464,34 @@ err_out:
452 return NULL; 464 return NULL;
453} 465}
454 466
455static int mmp_tdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 467static int mmp_tdma_terminate_all(struct dma_chan *chan)
456 unsigned long arg)
457{ 468{
458 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 469 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
459 struct dma_slave_config *dmaengine_cfg = (void *)arg; 470
460 int ret = 0; 471 mmp_tdma_disable_chan(chan);
461 472 /* disable interrupt */
462 switch (cmd) { 473 mmp_tdma_enable_irq(tdmac, false);
463 case DMA_TERMINATE_ALL: 474
464 mmp_tdma_disable_chan(tdmac); 475 return 0;
465 /* disable interrupt */ 476}
466 mmp_tdma_enable_irq(tdmac, false); 477
467 break; 478static int mmp_tdma_config(struct dma_chan *chan,
468 case DMA_PAUSE: 479 struct dma_slave_config *dmaengine_cfg)
469 mmp_tdma_pause_chan(tdmac); 480{
470 break; 481 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
471 case DMA_RESUME: 482
472 mmp_tdma_resume_chan(tdmac); 483 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
473 break; 484 tdmac->dev_addr = dmaengine_cfg->src_addr;
474 case DMA_SLAVE_CONFIG: 485 tdmac->burst_sz = dmaengine_cfg->src_maxburst;
475 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { 486 tdmac->buswidth = dmaengine_cfg->src_addr_width;
476 tdmac->dev_addr = dmaengine_cfg->src_addr; 487 } else {
477 tdmac->burst_sz = dmaengine_cfg->src_maxburst; 488 tdmac->dev_addr = dmaengine_cfg->dst_addr;
478 tdmac->buswidth = dmaengine_cfg->src_addr_width; 489 tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
479 } else { 490 tdmac->buswidth = dmaengine_cfg->dst_addr_width;
480 tdmac->dev_addr = dmaengine_cfg->dst_addr;
481 tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
482 tdmac->buswidth = dmaengine_cfg->dst_addr_width;
483 }
484 tdmac->dir = dmaengine_cfg->direction;
485 return mmp_tdma_config_chan(tdmac);
486 default:
487 ret = -ENOSYS;
488 } 491 }
492 tdmac->dir = dmaengine_cfg->direction;
489 493
490 return ret; 494 return mmp_tdma_config_chan(chan);
491} 495}
492 496
493static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan, 497static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
@@ -668,7 +672,10 @@ static int mmp_tdma_probe(struct platform_device *pdev)
668 tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic; 672 tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic;
669 tdev->device.device_tx_status = mmp_tdma_tx_status; 673 tdev->device.device_tx_status = mmp_tdma_tx_status;
670 tdev->device.device_issue_pending = mmp_tdma_issue_pending; 674 tdev->device.device_issue_pending = mmp_tdma_issue_pending;
671 tdev->device.device_control = mmp_tdma_control; 675 tdev->device.device_config = mmp_tdma_config;
676 tdev->device.device_pause = mmp_tdma_pause_chan;
677 tdev->device.device_resume = mmp_tdma_resume_chan;
678 tdev->device.device_terminate_all = mmp_tdma_terminate_all;
672 tdev->device.copy_align = TDMA_ALIGNMENT; 679 tdev->device.copy_align = TDMA_ALIGNMENT;
673 680
674 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 681 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
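The mmp_tdma hunks also show the recurring signature change: helpers that used to take the driver's channel type now take the generic struct dma_chan so they can be assigned to the new callbacks directly, recovering the private state with container_of(). A self-contained user-space sketch of that pattern:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dma_chan { int chan_id; };          /* stand-in for the real struct */

struct my_chan {
	struct dma_chan chan;              /* embedded generic channel */
	int paused;
};

static struct my_chan *to_my_chan(struct dma_chan *c)
{
	return container_of(c, struct my_chan, chan);
}

static int my_pause_chan(struct dma_chan *c)   /* callback-compatible now */
{
	to_my_chan(c)->paused = 1;
	return 0;                          /* callbacks report a result */
}

int main(void)
{
	struct my_chan mc = { .chan = { .chan_id = 0 }, .paused = 0 };

	my_pause_chan(&mc.chan);
	printf("paused = %d\n", mc.paused); /* prints paused = 1 */
	return 0;
}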
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index 53032bac06e0..15cab7d79525 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -263,28 +263,6 @@ static int moxart_slave_config(struct dma_chan *chan,
263 return 0; 263 return 0;
264} 264}
265 265
266static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
267 unsigned long arg)
268{
269 int ret = 0;
270
271 switch (cmd) {
272 case DMA_PAUSE:
273 case DMA_RESUME:
274 return -EINVAL;
275 case DMA_TERMINATE_ALL:
276 moxart_terminate_all(chan);
277 break;
278 case DMA_SLAVE_CONFIG:
279 ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
280 break;
281 default:
282 ret = -ENOSYS;
283 }
284
285 return ret;
286}
287
288static struct dma_async_tx_descriptor *moxart_prep_slave_sg( 266static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
289 struct dma_chan *chan, struct scatterlist *sgl, 267 struct dma_chan *chan, struct scatterlist *sgl,
290 unsigned int sg_len, enum dma_transfer_direction dir, 268 unsigned int sg_len, enum dma_transfer_direction dir,
@@ -531,7 +509,8 @@ static void moxart_dma_init(struct dma_device *dma, struct device *dev)
531 dma->device_free_chan_resources = moxart_free_chan_resources; 509 dma->device_free_chan_resources = moxart_free_chan_resources;
532 dma->device_issue_pending = moxart_issue_pending; 510 dma->device_issue_pending = moxart_issue_pending;
533 dma->device_tx_status = moxart_tx_status; 511 dma->device_tx_status = moxart_tx_status;
534 dma->device_control = moxart_control; 512 dma->device_config = moxart_slave_config;
513 dma->device_terminate_all = moxart_terminate_all;
535 dma->dev = dev; 514 dma->dev = dev;
536 515
537 INIT_LIST_HEAD(&dma->channels); 516 INIT_LIST_HEAD(&dma->channels);
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 01bec4023de2..57d2457545f3 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -800,79 +800,69 @@ err_prep:
800 return NULL; 800 return NULL;
801} 801}
802 802
803static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 803static int mpc_dma_device_config(struct dma_chan *chan,
804 unsigned long arg) 804 struct dma_slave_config *cfg)
805{ 805{
806 struct mpc_dma_chan *mchan; 806 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
807 struct mpc_dma *mdma;
808 struct dma_slave_config *cfg;
809 unsigned long flags; 807 unsigned long flags;
810 808
811 mchan = dma_chan_to_mpc_dma_chan(chan); 809 /*
812 switch (cmd) { 810 * Software constraints:
813 case DMA_TERMINATE_ALL: 811 * - only transfers between a peripheral device and
814 /* Disable channel requests */ 812 * memory are supported;
815 mdma = dma_chan_to_mpc_dma(chan); 813 * - only peripheral devices with 4-byte FIFO access register
816 814 * are supported;
817 spin_lock_irqsave(&mchan->lock, flags); 815 * - minimal transfer chunk is 4 bytes and consequently
818 816 * source and destination addresses must be 4-byte aligned
819 out_8(&mdma->regs->dmacerq, chan->chan_id); 817 * and transfer size must be aligned on (4 * maxburst)
820 list_splice_tail_init(&mchan->prepared, &mchan->free); 818 * boundary;
821 list_splice_tail_init(&mchan->queued, &mchan->free); 819 * - during the transfer RAM address is being incremented by
822 list_splice_tail_init(&mchan->active, &mchan->free); 820 * the size of minimal transfer chunk;
823 821 * - peripheral port's address is constant during the transfer.
824 spin_unlock_irqrestore(&mchan->lock, flags); 822 */
825 823
826 return 0; 824 if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
825 cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
826 !IS_ALIGNED(cfg->src_addr, 4) ||
827 !IS_ALIGNED(cfg->dst_addr, 4)) {
828 return -EINVAL;
829 }
827 830
828 case DMA_SLAVE_CONFIG: 831 spin_lock_irqsave(&mchan->lock, flags);
829 /*
830 * Software constraints:
831 * - only transfers between a peripheral device and
832 * memory are supported;
833 * - only peripheral devices with 4-byte FIFO access register
834 * are supported;
835 * - minimal transfer chunk is 4 bytes and consequently
836 * source and destination addresses must be 4-byte aligned
837 * and transfer size must be aligned on (4 * maxburst)
838 * boundary;
839 * - during the transfer RAM address is being incremented by
840 * the size of minimal transfer chunk;
841 * - peripheral port's address is constant during the transfer.
842 */
843 832
844 cfg = (void *)arg; 833 mchan->src_per_paddr = cfg->src_addr;
834 mchan->src_tcd_nunits = cfg->src_maxburst;
835 mchan->dst_per_paddr = cfg->dst_addr;
836 mchan->dst_tcd_nunits = cfg->dst_maxburst;
845 837
846 if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || 838 /* Apply defaults */
847 cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || 839 if (mchan->src_tcd_nunits == 0)
848 !IS_ALIGNED(cfg->src_addr, 4) || 840 mchan->src_tcd_nunits = 1;
849 !IS_ALIGNED(cfg->dst_addr, 4)) { 841 if (mchan->dst_tcd_nunits == 0)
850 return -EINVAL; 842 mchan->dst_tcd_nunits = 1;
851 }
852 843
853 spin_lock_irqsave(&mchan->lock, flags); 844 spin_unlock_irqrestore(&mchan->lock, flags);
854 845
855 mchan->src_per_paddr = cfg->src_addr; 846 return 0;
856 mchan->src_tcd_nunits = cfg->src_maxburst; 847}
857 mchan->dst_per_paddr = cfg->dst_addr;
858 mchan->dst_tcd_nunits = cfg->dst_maxburst;
859 848
860 /* Apply defaults */ 849static int mpc_dma_device_terminate_all(struct dma_chan *chan)
861 if (mchan->src_tcd_nunits == 0) 850{
862 mchan->src_tcd_nunits = 1; 851 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
863 if (mchan->dst_tcd_nunits == 0) 852 struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
864 mchan->dst_tcd_nunits = 1; 853 unsigned long flags;
865 854
866 spin_unlock_irqrestore(&mchan->lock, flags); 855 /* Disable channel requests */
856 spin_lock_irqsave(&mchan->lock, flags);
867 857
868 return 0; 858 out_8(&mdma->regs->dmacerq, chan->chan_id);
859 list_splice_tail_init(&mchan->prepared, &mchan->free);
860 list_splice_tail_init(&mchan->queued, &mchan->free);
861 list_splice_tail_init(&mchan->active, &mchan->free);
869 862
870 default: 863 spin_unlock_irqrestore(&mchan->lock, flags);
871 /* Unknown command */
872 break;
873 }
874 864
875 return -ENXIO; 865 return 0;
876} 866}
877 867
878static int mpc_dma_probe(struct platform_device *op) 868static int mpc_dma_probe(struct platform_device *op)
@@ -963,7 +953,8 @@ static int mpc_dma_probe(struct platform_device *op)
963 dma->device_tx_status = mpc_dma_tx_status; 953 dma->device_tx_status = mpc_dma_tx_status;
964 dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy; 954 dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
965 dma->device_prep_slave_sg = mpc_dma_prep_slave_sg; 955 dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
966 dma->device_control = mpc_dma_device_control; 956 dma->device_config = mpc_dma_device_config;
957 dma->device_terminate_all = mpc_dma_device_terminate_all;
967 958
968 INIT_LIST_HEAD(&dma->channels); 959 INIT_LIST_HEAD(&dma->channels);
969 dma_cap_set(DMA_MEMCPY, dma->cap_mask); 960 dma_cap_set(DMA_MEMCPY, dma->cap_mask);
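The constraints commented in mpc_dma_device_config() reduce to a simple predicate: 4-byte bus widths on both sides and 4-byte-aligned addresses. A user-space sketch of that check (widths written as byte counts, where 4 corresponds to DMA_SLAVE_BUSWIDTH_4_BYTES):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)

static bool mpc_cfg_ok(unsigned int src_width, unsigned int dst_width,
		       uint64_t src_addr, uint64_t dst_addr)
{
	return src_width == 4 && dst_width == 4 &&
	       IS_ALIGNED(src_addr, 4) && IS_ALIGNED(dst_addr, 4);
}

int main(void)
{
	printf("%d\n", mpc_cfg_ok(4, 4, 0x1000, 0x2004)); /* 1: accepted   */
	printf("%d\n", mpc_cfg_ok(4, 4, 0x1002, 0x2000)); /* 0: misaligned */
	return 0;
}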
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index d7ac558c2c1c..b03e8137b918 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -928,14 +928,6 @@ out:
928 return err; 928 return err;
929} 929}
930 930
931/* This driver does not implement any of the optional DMA operations. */
932static int
933mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
934 unsigned long arg)
935{
936 return -ENOSYS;
937}
938
939static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan) 931static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
940{ 932{
941 struct dma_chan *chan, *_chan; 933 struct dma_chan *chan, *_chan;
@@ -1008,7 +1000,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
1008 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; 1000 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1009 dma_dev->device_tx_status = mv_xor_status; 1001 dma_dev->device_tx_status = mv_xor_status;
1010 dma_dev->device_issue_pending = mv_xor_issue_pending; 1002 dma_dev->device_issue_pending = mv_xor_issue_pending;
1011 dma_dev->device_control = mv_xor_control;
1012 dma_dev->dev = &pdev->dev; 1003 dma_dev->dev = &pdev->dev;
1013 1004
1014 /* set prep routines based on capability */ 1005 /* set prep routines based on capability */
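mv_xor shows the degenerate case: its device_control stub existed only to return -ENOSYS, and with split callbacks an unimplemented operation is just a NULL pointer the core can check centrally. A simplified user-space sketch of that dispatch (structure and names invented for illustration):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct dev_ops {
	int (*device_pause)(void *chan);   /* NULL => capability absent */
};

static int core_pause(const struct dev_ops *ops, void *chan)
{
	if (!ops->device_pause)
		return -ENOSYS;            /* core rejects it centrally */
	return ops->device_pause(chan);
}

int main(void)
{
	struct dev_ops xor_ops = { .device_pause = NULL };

	printf("%d\n", core_pause(&xor_ops, NULL)); /* -ENOSYS (-38 on Linux) */
	return 0;
}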
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 5ea61201dbf0..829ec686dac3 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -202,8 +202,9 @@ static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
202 return container_of(chan, struct mxs_dma_chan, chan); 202 return container_of(chan, struct mxs_dma_chan, chan);
203} 203}
204 204
205static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) 205static void mxs_dma_reset_chan(struct dma_chan *chan)
206{ 206{
207 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
207 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 208 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
208 int chan_id = mxs_chan->chan.chan_id; 209 int chan_id = mxs_chan->chan.chan_id;
209 210
@@ -250,8 +251,9 @@ static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
250 mxs_chan->status = DMA_COMPLETE; 251 mxs_chan->status = DMA_COMPLETE;
251} 252}
252 253
253static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) 254static void mxs_dma_enable_chan(struct dma_chan *chan)
254{ 255{
256 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
255 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 257 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
256 int chan_id = mxs_chan->chan.chan_id; 258 int chan_id = mxs_chan->chan.chan_id;
257 259
@@ -272,13 +274,16 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
272 mxs_chan->reset = false; 274 mxs_chan->reset = false;
273} 275}
274 276
275static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) 277static void mxs_dma_disable_chan(struct dma_chan *chan)
276{ 278{
279 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
280
277 mxs_chan->status = DMA_COMPLETE; 281 mxs_chan->status = DMA_COMPLETE;
278} 282}
279 283
280static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) 284static int mxs_dma_pause_chan(struct dma_chan *chan)
281{ 285{
286 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
282 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 287 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
283 int chan_id = mxs_chan->chan.chan_id; 288 int chan_id = mxs_chan->chan.chan_id;
284 289
@@ -291,10 +296,12 @@ static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
291 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); 296 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
292 297
293 mxs_chan->status = DMA_PAUSED; 298 mxs_chan->status = DMA_PAUSED;
299 return 0;
294} 300}
295 301
296static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan) 302static int mxs_dma_resume_chan(struct dma_chan *chan)
297{ 303{
304 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
298 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 305 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
299 int chan_id = mxs_chan->chan.chan_id; 306 int chan_id = mxs_chan->chan.chan_id;
300 307
@@ -307,6 +314,7 @@ static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
307 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR); 314 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);
308 315
309 mxs_chan->status = DMA_IN_PROGRESS; 316 mxs_chan->status = DMA_IN_PROGRESS;
317 return 0;
310} 318}
311 319
312static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) 320static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -383,7 +391,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
383 "%s: error in channel %d\n", __func__, 391 "%s: error in channel %d\n", __func__,
384 chan); 392 chan);
385 mxs_chan->status = DMA_ERROR; 393 mxs_chan->status = DMA_ERROR;
386 mxs_dma_reset_chan(mxs_chan); 394 mxs_dma_reset_chan(&mxs_chan->chan);
387 } else if (mxs_chan->status != DMA_COMPLETE) { 395 } else if (mxs_chan->status != DMA_COMPLETE) {
388 if (mxs_chan->flags & MXS_DMA_SG_LOOP) { 396 if (mxs_chan->flags & MXS_DMA_SG_LOOP) {
389 mxs_chan->status = DMA_IN_PROGRESS; 397 mxs_chan->status = DMA_IN_PROGRESS;
@@ -432,7 +440,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
432 if (ret) 440 if (ret)
433 goto err_clk; 441 goto err_clk;
434 442
435 mxs_dma_reset_chan(mxs_chan); 443 mxs_dma_reset_chan(chan);
436 444
437 dma_async_tx_descriptor_init(&mxs_chan->desc, chan); 445 dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
438 mxs_chan->desc.tx_submit = mxs_dma_tx_submit; 446 mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
@@ -456,7 +464,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
456 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 464 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
457 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 465 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
458 466
459 mxs_dma_disable_chan(mxs_chan); 467 mxs_dma_disable_chan(chan);
460 468
461 free_irq(mxs_chan->chan_irq, mxs_dma); 469 free_irq(mxs_chan->chan_irq, mxs_dma);
462 470
@@ -651,28 +659,12 @@ err_out:
651 return NULL; 659 return NULL;
652} 660}
653 661
654static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 662static int mxs_dma_terminate_all(struct dma_chan *chan)
655 unsigned long arg)
656{ 663{
657 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 664 mxs_dma_reset_chan(chan);
658 int ret = 0; 665 mxs_dma_disable_chan(chan);
659
660 switch (cmd) {
661 case DMA_TERMINATE_ALL:
662 mxs_dma_reset_chan(mxs_chan);
663 mxs_dma_disable_chan(mxs_chan);
664 break;
665 case DMA_PAUSE:
666 mxs_dma_pause_chan(mxs_chan);
667 break;
668 case DMA_RESUME:
669 mxs_dma_resume_chan(mxs_chan);
670 break;
671 default:
672 ret = -ENOSYS;
673 }
674 666
675 return ret; 667 return 0;
676} 668}
677 669
678static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, 670static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
@@ -701,13 +693,6 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
701 return mxs_chan->status; 693 return mxs_chan->status;
702} 694}
703 695
704static void mxs_dma_issue_pending(struct dma_chan *chan)
705{
706 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
707
708 mxs_dma_enable_chan(mxs_chan);
709}
710
711static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) 696static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
712{ 697{
713 int ret; 698 int ret;
@@ -860,8 +845,14 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
860 mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status; 845 mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
861 mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg; 846 mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
862 mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic; 847 mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
863 mxs_dma->dma_device.device_control = mxs_dma_control; 848 mxs_dma->dma_device.device_pause = mxs_dma_pause_chan;
864 mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending; 849 mxs_dma->dma_device.device_resume = mxs_dma_resume_chan;
850 mxs_dma->dma_device.device_terminate_all = mxs_dma_terminate_all;
851 mxs_dma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
852 mxs_dma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
853 mxs_dma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
854 mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
855 mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan;
865 856
866 ret = dma_async_device_register(&mxs_dma->dma_device); 857 ret = dma_async_device_register(&mxs_dma->dma_device);
867 if (ret) { 858 if (ret) {
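Besides the callback split, mxs-dma now publishes its capabilities as plain bitmaps over the dmaengine enum values rather than through a callback. A quick user-space check of what those masks evaluate to (enum values assumed from dmaengine.h: DMA_SLAVE_BUSWIDTH_4_BYTES == 4, DMA_MEM_TO_DEV == 1, DMA_DEV_TO_MEM == 2):

#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
	unsigned int widths = BIT(4);           /* 4-byte bus width only  */
	unsigned int dirs   = BIT(2) | BIT(1);  /* DEV_TO_MEM | MEM_TO_DEV */

	printf("widths = 0x%x, dirs = 0x%x\n", widths, dirs); /* 0x10, 0x6 */
	return 0;
}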
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index d7d61e1a01c3..88b77c98365d 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -504,7 +504,7 @@ static int nbpf_prep_one(struct nbpf_link_desc *ldesc,
504 * pauses DMA and reads out data received via DMA as well as those left 504 * pauses DMA and reads out data received via DMA as well as those left
505 * in the Rx FIFO. For this to work with the RAM side using burst 505 * in the Rx FIFO. For this to work with the RAM side using burst
506 * transfers we enable the SBE bit and terminate the transfer in our 506 * transfers we enable the SBE bit and terminate the transfer in our
507 * DMA_PAUSE handler. 507 * .device_pause handler.
508 */ 508 */
509 mem_xfer = nbpf_xfer_ds(chan->nbpf, size); 509 mem_xfer = nbpf_xfer_ds(chan->nbpf, size);
510 510
@@ -565,13 +565,6 @@ static void nbpf_configure(struct nbpf_device *nbpf)
565 nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT); 565 nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT);
566} 566}
567 567
568static void nbpf_pause(struct nbpf_channel *chan)
569{
570 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS);
571 /* See comment in nbpf_prep_one() */
572 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
573}
574
575/* Generic part */ 568/* Generic part */
576 569
577/* DMA ENGINE functions */ 570/* DMA ENGINE functions */
@@ -837,54 +830,58 @@ static void nbpf_chan_idle(struct nbpf_channel *chan)
837 } 830 }
838} 831}
839 832
840static int nbpf_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, 833static int nbpf_pause(struct dma_chan *dchan)
841 unsigned long arg)
842{ 834{
843 struct nbpf_channel *chan = nbpf_to_chan(dchan); 835 struct nbpf_channel *chan = nbpf_to_chan(dchan);
844 struct dma_slave_config *config;
845 836
846 dev_dbg(dchan->device->dev, "Entry %s(%d)\n", __func__, cmd); 837 dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
847 838
848 switch (cmd) { 839 chan->paused = true;
849 case DMA_TERMINATE_ALL: 840 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS);
850 dev_dbg(dchan->device->dev, "Terminating\n"); 841 /* See comment in nbpf_prep_one() */
851 nbpf_chan_halt(chan); 842 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
852 nbpf_chan_idle(chan);
853 break;
854 843
855 case DMA_SLAVE_CONFIG: 844 return 0;
856 if (!arg) 845}
857 return -EINVAL;
858 config = (struct dma_slave_config *)arg;
859 846
860 /* 847static int nbpf_terminate_all(struct dma_chan *dchan)
861 * We could check config->slave_id to match chan->terminal here, 848{
862 * but with DT they would be coming from the same source, so 849 struct nbpf_channel *chan = nbpf_to_chan(dchan);
863 * such a check would be superfluous
864 */
865 850
866 chan->slave_dst_addr = config->dst_addr; 851 dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
867 chan->slave_dst_width = nbpf_xfer_size(chan->nbpf, 852 dev_dbg(dchan->device->dev, "Terminating\n");
868 config->dst_addr_width, 1);
869 chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
870 config->dst_addr_width,
871 config->dst_maxburst);
872 chan->slave_src_addr = config->src_addr;
873 chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
874 config->src_addr_width, 1);
875 chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
876 config->src_addr_width,
877 config->src_maxburst);
878 break;
879 853
880 case DMA_PAUSE: 854 nbpf_chan_halt(chan);
881 chan->paused = true; 855 nbpf_chan_idle(chan);
882 nbpf_pause(chan);
883 break;
884 856
885 default: 857 return 0;
886 return -ENXIO; 858}
887 } 859
860static int nbpf_config(struct dma_chan *dchan,
861 struct dma_slave_config *config)
862{
863 struct nbpf_channel *chan = nbpf_to_chan(dchan);
864
865 dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
866
867 /*
868 * We could check config->slave_id to match chan->terminal here,
869 * but with DT they would be coming from the same source, so
870 * such a check would be superfluous
871 */
872
873 chan->slave_dst_addr = config->dst_addr;
874 chan->slave_dst_width = nbpf_xfer_size(chan->nbpf,
875 config->dst_addr_width, 1);
876 chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
877 config->dst_addr_width,
878 config->dst_maxburst);
879 chan->slave_src_addr = config->src_addr;
880 chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
881 config->src_addr_width, 1);
882 chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
883 config->src_addr_width,
884 config->src_maxburst);
888 885
889 return 0; 886 return 0;
890} 887}
@@ -1072,18 +1069,6 @@ static void nbpf_free_chan_resources(struct dma_chan *dchan)
1072 } 1069 }
1073} 1070}
1074 1071
1075static int nbpf_slave_caps(struct dma_chan *dchan,
1076 struct dma_slave_caps *caps)
1077{
1078 caps->src_addr_widths = NBPF_DMA_BUSWIDTHS;
1079 caps->dstn_addr_widths = NBPF_DMA_BUSWIDTHS;
1080 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1081 caps->cmd_pause = false;
1082 caps->cmd_terminate = true;
1083
1084 return 0;
1085}
1086
1087static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec, 1072static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
1088 struct of_dma *ofdma) 1073 struct of_dma *ofdma)
1089{ 1074{
@@ -1414,7 +1399,6 @@ static int nbpf_probe(struct platform_device *pdev)
1414 dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy; 1399 dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy;
1415 dma_dev->device_tx_status = nbpf_tx_status; 1400 dma_dev->device_tx_status = nbpf_tx_status;
1416 dma_dev->device_issue_pending = nbpf_issue_pending; 1401 dma_dev->device_issue_pending = nbpf_issue_pending;
1417 dma_dev->device_slave_caps = nbpf_slave_caps;
1418 1402
1419 /* 1403 /*
1420 * If we drop support for unaligned MEMCPY buffer addresses and / or 1404 * If we drop support for unaligned MEMCPY buffer addresses and / or
@@ -1426,7 +1410,13 @@ static int nbpf_probe(struct platform_device *pdev)
1426 1410
1427 /* Compulsory for DMA_SLAVE fields */ 1411 /* Compulsory for DMA_SLAVE fields */
1428 dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg; 1412 dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg;
1429 dma_dev->device_control = nbpf_control; 1413 dma_dev->device_config = nbpf_config;
1414 dma_dev->device_pause = nbpf_pause;
1415 dma_dev->device_terminate_all = nbpf_terminate_all;
1416
1417 dma_dev->src_addr_widths = NBPF_DMA_BUSWIDTHS;
1418 dma_dev->dst_addr_widths = NBPF_DMA_BUSWIDTHS;
1419 dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1430 1420
1431 platform_set_drvdata(pdev, nbpf); 1421 platform_set_drvdata(pdev, nbpf);
1432 1422
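The nbpfaxi hunks make the same capability move explicit: the device_slave_caps callback is deleted and the driver publishes static fields that the core reads when a client queries the channel. A kernel-style sketch of the before/after (only the field names taken from this diff are real; the helper is hypothetical):

#include <linux/dmaengine.h>

static void publish_caps(struct dma_device *dma_dev, u32 buswidths)
{
	/* Old model:  dma_dev->device_slave_caps = nbpf_slave_caps;  */
	/* New model:  fill the fields once at probe, no callback:    */
	dma_dev->src_addr_widths = buswidths;
	dma_dev->dst_addr_widths = buswidths;
	dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
}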
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index d5fbeaa1e7ba..ca31f1b45366 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -159,6 +159,10 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
159 return ERR_PTR(-ENODEV); 159 return ERR_PTR(-ENODEV);
160 } 160 }
161 161
162 /* Silently fail if there is not even the "dmas" property */
163 if (!of_find_property(np, "dmas", NULL))
164 return ERR_PTR(-ENODEV);
165
162 count = of_property_count_strings(np, "dma-names"); 166 count = of_property_count_strings(np, "dma-names");
163 if (count < 0) { 167 if (count < 0) {
164 pr_err("%s: dma-names property of node '%s' missing or empty\n", 168 pr_err("%s: dma-names property of node '%s' missing or empty\n",
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index c0016a68b446..7dd6dd121681 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -948,8 +948,10 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
948 return vchan_tx_prep(&c->vc, &d->vd, flags); 948 return vchan_tx_prep(&c->vc, &d->vd, flags);
949} 949}
950 950
951static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg) 951static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
952{ 952{
953 struct omap_chan *c = to_omap_dma_chan(chan);
954
953 if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || 955 if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
954 cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) 956 cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
955 return -EINVAL; 957 return -EINVAL;
@@ -959,8 +961,9 @@ static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *c
959 return 0; 961 return 0;
960} 962}
961 963
962static int omap_dma_terminate_all(struct omap_chan *c) 964static int omap_dma_terminate_all(struct dma_chan *chan)
963{ 965{
966 struct omap_chan *c = to_omap_dma_chan(chan);
964 struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device); 967 struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
965 unsigned long flags; 968 unsigned long flags;
966 LIST_HEAD(head); 969 LIST_HEAD(head);
@@ -996,8 +999,10 @@ static int omap_dma_terminate_all(struct omap_chan *c)
996 return 0; 999 return 0;
997} 1000}
998 1001
999static int omap_dma_pause(struct omap_chan *c) 1002static int omap_dma_pause(struct dma_chan *chan)
1000{ 1003{
1004 struct omap_chan *c = to_omap_dma_chan(chan);
1005
1001 /* Pause/Resume only allowed with cyclic mode */ 1006 /* Pause/Resume only allowed with cyclic mode */
1002 if (!c->cyclic) 1007 if (!c->cyclic)
1003 return -EINVAL; 1008 return -EINVAL;
@@ -1010,8 +1015,10 @@ static int omap_dma_pause(struct omap_chan *c)
1010 return 0; 1015 return 0;
1011} 1016}
1012 1017
1013static int omap_dma_resume(struct omap_chan *c) 1018static int omap_dma_resume(struct dma_chan *chan)
1014{ 1019{
1020 struct omap_chan *c = to_omap_dma_chan(chan);
1021
1015 /* Pause/Resume only allowed with cyclic mode */ 1022 /* Pause/Resume only allowed with cyclic mode */
1016 if (!c->cyclic) 1023 if (!c->cyclic)
1017 return -EINVAL; 1024 return -EINVAL;
@@ -1029,37 +1036,6 @@ static int omap_dma_resume(struct omap_chan *c)
1029 return 0; 1036 return 0;
1030} 1037}
1031 1038
1032static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1033 unsigned long arg)
1034{
1035 struct omap_chan *c = to_omap_dma_chan(chan);
1036 int ret;
1037
1038 switch (cmd) {
1039 case DMA_SLAVE_CONFIG:
1040 ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
1041 break;
1042
1043 case DMA_TERMINATE_ALL:
1044 ret = omap_dma_terminate_all(c);
1045 break;
1046
1047 case DMA_PAUSE:
1048 ret = omap_dma_pause(c);
1049 break;
1050
1051 case DMA_RESUME:
1052 ret = omap_dma_resume(c);
1053 break;
1054
1055 default:
1056 ret = -ENXIO;
1057 break;
1058 }
1059
1060 return ret;
1061}
1062
1063static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig) 1039static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
1064{ 1040{
1065 struct omap_chan *c; 1041 struct omap_chan *c;
@@ -1094,19 +1070,6 @@ static void omap_dma_free(struct omap_dmadev *od)
1094 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 1070 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
1095 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) 1071 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
1096 1072
1097static int omap_dma_device_slave_caps(struct dma_chan *dchan,
1098 struct dma_slave_caps *caps)
1099{
1100 caps->src_addr_widths = OMAP_DMA_BUSWIDTHS;
1101 caps->dstn_addr_widths = OMAP_DMA_BUSWIDTHS;
1102 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1103 caps->cmd_pause = true;
1104 caps->cmd_terminate = true;
1105 caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1106
1107 return 0;
1108}
1109
1110static int omap_dma_probe(struct platform_device *pdev) 1073static int omap_dma_probe(struct platform_device *pdev)
1111{ 1074{
1112 struct omap_dmadev *od; 1075 struct omap_dmadev *od;
@@ -1136,8 +1099,14 @@ static int omap_dma_probe(struct platform_device *pdev)
1136 od->ddev.device_issue_pending = omap_dma_issue_pending; 1099 od->ddev.device_issue_pending = omap_dma_issue_pending;
1137 od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; 1100 od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
1138 od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; 1101 od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
1139 od->ddev.device_control = omap_dma_control; 1102 od->ddev.device_config = omap_dma_slave_config;
1140 od->ddev.device_slave_caps = omap_dma_device_slave_caps; 1103 od->ddev.device_pause = omap_dma_pause;
1104 od->ddev.device_resume = omap_dma_resume;
1105 od->ddev.device_terminate_all = omap_dma_terminate_all;
1106 od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
1107 od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
1108 od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1109 od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1141 od->ddev.dev = &pdev->dev; 1110 od->ddev.dev = &pdev->dev;
1142 INIT_LIST_HEAD(&od->ddev.channels); 1111 INIT_LIST_HEAD(&od->ddev.channels);
1143 INIT_LIST_HEAD(&od->pending); 1112 INIT_LIST_HEAD(&od->pending);
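omap-dma keeps its own policy inside the new callbacks: pause and resume stay legal only for cyclic transfers and reject everything else with -EINVAL. A user-space sketch of that guard:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct chan_state { bool cyclic; bool paused; };

static int omap_style_pause(struct chan_state *c)
{
	if (!c->cyclic)
		return -EINVAL;       /* pause only allowed in cyclic mode */
	if (!c->paused) {
		/* the hardware channel would be stopped here */
		c->paused = true;
	}
	return 0;
}

int main(void)
{
	struct chan_state slave = { .cyclic = false };
	struct chan_state audio = { .cyclic = true };

	printf("%d\n", omap_style_pause(&slave)); /* -EINVAL: not cyclic */
	printf("%d\n", omap_style_pause(&audio)); /* 0: paused           */
	return 0;
}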
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 6e0e47d76b23..35c143cb88da 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -665,16 +665,12 @@ err_desc_get:
665 return NULL; 665 return NULL;
666} 666}
667 667
668static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 668static int pd_device_terminate_all(struct dma_chan *chan)
669 unsigned long arg)
670{ 669{
671 struct pch_dma_chan *pd_chan = to_pd_chan(chan); 670 struct pch_dma_chan *pd_chan = to_pd_chan(chan);
672 struct pch_dma_desc *desc, *_d; 671 struct pch_dma_desc *desc, *_d;
673 LIST_HEAD(list); 672 LIST_HEAD(list);
674 673
675 if (cmd != DMA_TERMINATE_ALL)
676 return -ENXIO;
677
678 spin_lock_irq(&pd_chan->lock); 674 spin_lock_irq(&pd_chan->lock);
679 675
680 pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE); 676 pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);
@@ -932,7 +928,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
932 pd->dma.device_tx_status = pd_tx_status; 928 pd->dma.device_tx_status = pd_tx_status;
933 pd->dma.device_issue_pending = pd_issue_pending; 929 pd->dma.device_issue_pending = pd_issue_pending;
934 pd->dma.device_prep_slave_sg = pd_prep_slave_sg; 930 pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
935 pd->dma.device_control = pd_device_control; 931 pd->dma.device_terminate_all = pd_device_terminate_all;
936 932
937 err = dma_async_device_register(&pd->dma); 933 err = dma_async_device_register(&pd->dma);
938 if (err) { 934 if (err) {
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index bdf40b530032..0e1f56772855 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -504,6 +504,9 @@ struct dma_pl330_desc {
504 504
505 enum desc_status status; 505 enum desc_status status;
506 506
507 int bytes_requested;
508 bool last;
509
507 /* The channel which currently holds this desc */ 510 /* The channel which currently holds this desc */
508 struct dma_pl330_chan *pchan; 511 struct dma_pl330_chan *pchan;
509 512
@@ -1048,6 +1051,10 @@ static bool _trigger(struct pl330_thread *thrd)
1048 if (!req) 1051 if (!req)
1049 return true; 1052 return true;
1050 1053
1054 /* Return if req is running */
1055 if (idx == thrd->req_running)
1056 return true;
1057
1051 desc = req->desc; 1058 desc = req->desc;
1052 1059
1053 ns = desc->rqcfg.nonsecure ? 1 : 0; 1060 ns = desc->rqcfg.nonsecure ? 1 : 0;
@@ -1587,6 +1594,8 @@ static int pl330_update(struct pl330_dmac *pl330)
1587 descdone = thrd->req[active].desc; 1594 descdone = thrd->req[active].desc;
1588 thrd->req[active].desc = NULL; 1595 thrd->req[active].desc = NULL;
1589 1596
1597 thrd->req_running = -1;
1598
1590 /* Get going again ASAP */ 1599 /* Get going again ASAP */
1591 _start(thrd); 1600 _start(thrd);
1592 1601
@@ -2086,77 +2095,89 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
2086 return 1; 2095 return 1;
2087} 2096}
2088 2097
2089static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) 2098static int pl330_config(struct dma_chan *chan,
2099 struct dma_slave_config *slave_config)
2100{
2101 struct dma_pl330_chan *pch = to_pchan(chan);
2102
2103 if (slave_config->direction == DMA_MEM_TO_DEV) {
2104 if (slave_config->dst_addr)
2105 pch->fifo_addr = slave_config->dst_addr;
2106 if (slave_config->dst_addr_width)
2107 pch->burst_sz = __ffs(slave_config->dst_addr_width);
2108 if (slave_config->dst_maxburst)
2109 pch->burst_len = slave_config->dst_maxburst;
2110 } else if (slave_config->direction == DMA_DEV_TO_MEM) {
2111 if (slave_config->src_addr)
2112 pch->fifo_addr = slave_config->src_addr;
2113 if (slave_config->src_addr_width)
2114 pch->burst_sz = __ffs(slave_config->src_addr_width);
2115 if (slave_config->src_maxburst)
2116 pch->burst_len = slave_config->src_maxburst;
2117 }
2118
2119 return 0;
2120}
2121
2122static int pl330_terminate_all(struct dma_chan *chan)
2090{ 2123{
2091 struct dma_pl330_chan *pch = to_pchan(chan); 2124 struct dma_pl330_chan *pch = to_pchan(chan);
2092 struct dma_pl330_desc *desc; 2125 struct dma_pl330_desc *desc;
2093 unsigned long flags; 2126 unsigned long flags;
2094 struct pl330_dmac *pl330 = pch->dmac; 2127 struct pl330_dmac *pl330 = pch->dmac;
2095 struct dma_slave_config *slave_config;
2096 LIST_HEAD(list); 2128 LIST_HEAD(list);
2097 2129
2098 switch (cmd) { 2130 spin_lock_irqsave(&pch->lock, flags);
2099 case DMA_TERMINATE_ALL: 2131 spin_lock(&pl330->lock);
2100 pm_runtime_get_sync(pl330->ddma.dev); 2132 _stop(pch->thread);
2101 spin_lock_irqsave(&pch->lock, flags); 2133 spin_unlock(&pl330->lock);
2134
2135 pch->thread->req[0].desc = NULL;
2136 pch->thread->req[1].desc = NULL;
2137 pch->thread->req_running = -1;
2138
2139 /* Mark all desc done */
2140 list_for_each_entry(desc, &pch->submitted_list, node) {
2141 desc->status = FREE;
2142 dma_cookie_complete(&desc->txd);
2143 }
2102 2144
2103 spin_lock(&pl330->lock); 2145 list_for_each_entry(desc, &pch->work_list , node) {
2104 _stop(pch->thread); 2146 desc->status = FREE;
2105 spin_unlock(&pl330->lock); 2147 dma_cookie_complete(&desc->txd);
2148 }
2106 2149
2107 pch->thread->req[0].desc = NULL; 2150 list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
2108 pch->thread->req[1].desc = NULL; 2151 list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
2109 pch->thread->req_running = -1; 2152 list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
2153 spin_unlock_irqrestore(&pch->lock, flags);
2110 2154
2111 /* Mark all desc done */ 2155 return 0;
2112 list_for_each_entry(desc, &pch->submitted_list, node) { 2156}
2113 desc->status = FREE;
2114 dma_cookie_complete(&desc->txd);
2115 }
2116 2157
2117 list_for_each_entry(desc, &pch->work_list , node) { 2158/*
2118 desc->status = FREE; 2159 * We don't support the DMA_RESUME command because of a hardware
2119 dma_cookie_complete(&desc->txd); 2160 * limitation: once paused, a channel cannot be restored to the
2120 } 2161 * active state; the transfer must be terminated and set up again.
2162 * This pause feature was implemented to allow the residue to be
2163 * read safely before the channel is terminated.
2164 */
2165int pl330_pause(struct dma_chan *chan)
2166{
2167 struct dma_pl330_chan *pch = to_pchan(chan);
2168 struct pl330_dmac *pl330 = pch->dmac;
2169 unsigned long flags;
2121 2170
2122 list_for_each_entry(desc, &pch->completed_list , node) { 2171 pm_runtime_get_sync(pl330->ddma.dev);
2123 desc->status = FREE; 2172 spin_lock_irqsave(&pch->lock, flags);
2124 dma_cookie_complete(&desc->txd);
2125 }
2126 2173
2127 if (!list_empty(&pch->work_list)) 2174 spin_lock(&pl330->lock);
2128 pm_runtime_put(pl330->ddma.dev); 2175 _stop(pch->thread);
2176 spin_unlock(&pl330->lock);
2129 2177
2130 list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool); 2178 spin_unlock_irqrestore(&pch->lock, flags);
2131 list_splice_tail_init(&pch->work_list, &pl330->desc_pool); 2179 pm_runtime_mark_last_busy(pl330->ddma.dev);
2132 list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); 2180 pm_runtime_put_autosuspend(pl330->ddma.dev);
2133 spin_unlock_irqrestore(&pch->lock, flags);
2134 pm_runtime_mark_last_busy(pl330->ddma.dev);
2135 pm_runtime_put_autosuspend(pl330->ddma.dev);
2136 break;
2137 case DMA_SLAVE_CONFIG:
2138 slave_config = (struct dma_slave_config *)arg;
2139
2140 if (slave_config->direction == DMA_MEM_TO_DEV) {
2141 if (slave_config->dst_addr)
2142 pch->fifo_addr = slave_config->dst_addr;
2143 if (slave_config->dst_addr_width)
2144 pch->burst_sz = __ffs(slave_config->dst_addr_width);
2145 if (slave_config->dst_maxburst)
2146 pch->burst_len = slave_config->dst_maxburst;
2147 } else if (slave_config->direction == DMA_DEV_TO_MEM) {
2148 if (slave_config->src_addr)
2149 pch->fifo_addr = slave_config->src_addr;
2150 if (slave_config->src_addr_width)
2151 pch->burst_sz = __ffs(slave_config->src_addr_width);
2152 if (slave_config->src_maxburst)
2153 pch->burst_len = slave_config->src_maxburst;
2154 }
2155 break;
2156 default:
2157 dev_err(pch->dmac->ddma.dev, "Not supported command.\n");
2158 return -ENXIO;
2159 }
2160 2181
2161 return 0; 2182 return 0;
2162} 2183}
@@ -2182,11 +2203,74 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
2182 pm_runtime_put_autosuspend(pch->dmac->ddma.dev); 2203 pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
2183} 2204}
2184 2205
2206int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
2207 struct dma_pl330_desc *desc)
2208{
2209 struct pl330_thread *thrd = pch->thread;
2210 struct pl330_dmac *pl330 = pch->dmac;
2211 void __iomem *regs = thrd->dmac->base;
2212 u32 val, addr;
2213
2214 pm_runtime_get_sync(pl330->ddma.dev);
2215 val = addr = 0;
2216 if (desc->rqcfg.src_inc) {
2217 val = readl(regs + SA(thrd->id));
2218 addr = desc->px.src_addr;
2219 } else {
2220 val = readl(regs + DA(thrd->id));
2221 addr = desc->px.dst_addr;
2222 }
2223 pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
2224 pm_runtime_put_autosuspend(pl330->ddma.dev);
2225 return val - addr;
2226}
2227
2185static enum dma_status 2228static enum dma_status
2186pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, 2229pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
2187 struct dma_tx_state *txstate) 2230 struct dma_tx_state *txstate)
2188{ 2231{
2189	return dma_cookie_status(chan, cookie, txstate);
2232	enum dma_status ret;
2233 unsigned long flags;
2234 struct dma_pl330_desc *desc, *running = NULL;
2235 struct dma_pl330_chan *pch = to_pchan(chan);
2236 unsigned int transferred, residual = 0;
2237
2238 ret = dma_cookie_status(chan, cookie, txstate);
2239
2240 if (!txstate)
2241 return ret;
2242
2243 if (ret == DMA_COMPLETE)
2244 goto out;
2245
2246 spin_lock_irqsave(&pch->lock, flags);
2247
2248 if (pch->thread->req_running != -1)
2249 running = pch->thread->req[pch->thread->req_running].desc;
2250
2251 /* Check in pending list */
2252 list_for_each_entry(desc, &pch->work_list, node) {
2253 if (desc->status == DONE)
2254 transferred = desc->bytes_requested;
2255 else if (running && desc == running)
2256 transferred =
2257 pl330_get_current_xferred_count(pch, desc);
2258 else
2259 transferred = 0;
2260 residual += desc->bytes_requested - transferred;
2261 if (desc->txd.cookie == cookie) {
2262 ret = desc->status;
2263 break;
2264 }
2265 if (desc->last)
2266 residual = 0;
2267 }
2268 spin_unlock_irqrestore(&pch->lock, flags);
2269
2270out:
2271 dma_set_residue(txstate, residual);
2272
2273 return ret;
2190} 2274}
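To make the accumulation loop above concrete, a worked example with illustrative numbers: with three 1024-byte descriptors on work_list, the first DONE, the second running with pl330_get_current_xferred_count() reporting 256 bytes, and the third not yet started, a status query on the third descriptor's cookie accumulates (1024 - 1024) + (1024 - 256) + (1024 - 0) = 1792 bytes of residue before breaking out of the walk. The desc->last test then zeroes the running total at transfer boundaries, so residue from one completed transfer does not leak into the next queued one.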
2191 2275
2192static void pl330_issue_pending(struct dma_chan *chan) 2276static void pl330_issue_pending(struct dma_chan *chan)
@@ -2231,12 +2315,14 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
2231 desc->txd.callback = last->txd.callback; 2315 desc->txd.callback = last->txd.callback;
2232 desc->txd.callback_param = last->txd.callback_param; 2316 desc->txd.callback_param = last->txd.callback_param;
2233 } 2317 }
2318 last->last = false;
2234 2319
2235 dma_cookie_assign(&desc->txd); 2320 dma_cookie_assign(&desc->txd);
2236 2321
2237 list_move_tail(&desc->node, &pch->submitted_list); 2322 list_move_tail(&desc->node, &pch->submitted_list);
2238 } 2323 }
2239 2324
2325 last->last = true;
2240 cookie = dma_cookie_assign(&last->txd); 2326 cookie = dma_cookie_assign(&last->txd);
2241 list_add_tail(&last->node, &pch->submitted_list); 2327 list_add_tail(&last->node, &pch->submitted_list);
2242 spin_unlock_irqrestore(&pch->lock, flags); 2328 spin_unlock_irqrestore(&pch->lock, flags);
@@ -2459,6 +2545,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
2459 desc->rqtype = direction; 2545 desc->rqtype = direction;
2460 desc->rqcfg.brst_size = pch->burst_sz; 2546 desc->rqcfg.brst_size = pch->burst_sz;
2461 desc->rqcfg.brst_len = 1; 2547 desc->rqcfg.brst_len = 1;
2548 desc->bytes_requested = period_len;
2462 fill_px(&desc->px, dst, src, period_len); 2549 fill_px(&desc->px, dst, src, period_len);
2463 2550
2464 if (!first) 2551 if (!first)
@@ -2601,6 +2688,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2601 desc->rqcfg.brst_size = pch->burst_sz; 2688 desc->rqcfg.brst_size = pch->burst_sz;
2602 desc->rqcfg.brst_len = 1; 2689 desc->rqcfg.brst_len = 1;
2603 desc->rqtype = direction; 2690 desc->rqtype = direction;
2691 desc->bytes_requested = sg_dma_len(sg);
2604 } 2692 }
2605 2693
2606 /* Return the last desc in the chain */ 2694 /* Return the last desc in the chain */
@@ -2623,19 +2711,6 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
2623 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ 2711 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
2624 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) 2712 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
2625 2713
2626static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
2627 struct dma_slave_caps *caps)
2628{
2629 caps->src_addr_widths = PL330_DMA_BUSWIDTHS;
2630 caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS;
2631 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2632 caps->cmd_pause = false;
2633 caps->cmd_terminate = true;
2634 caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
2635
2636 return 0;
2637}
2638
2639/* 2714/*
2640 * Runtime PM callbacks are provided by amba/bus.c driver. 2715 * Runtime PM callbacks are provided by amba/bus.c driver.
2641 * 2716 *
@@ -2793,9 +2868,14 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2793 pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic; 2868 pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
2794 pd->device_tx_status = pl330_tx_status; 2869 pd->device_tx_status = pl330_tx_status;
2795 pd->device_prep_slave_sg = pl330_prep_slave_sg; 2870 pd->device_prep_slave_sg = pl330_prep_slave_sg;
2796	pd->device_control = pl330_control;
2871	pd->device_config = pl330_config;
2872 pd->device_pause = pl330_pause;
2873 pd->device_terminate_all = pl330_terminate_all;
2797 pd->device_issue_pending = pl330_issue_pending; 2874 pd->device_issue_pending = pl330_issue_pending;
2798	pd->device_slave_caps = pl330_dma_device_slave_caps;
2875	pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
2876 pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
2877 pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2878 pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2799 2879
2800 ret = dma_async_device_register(pd); 2880 ret = dma_async_device_register(pd);
2801 if (ret) { 2881 if (ret) {
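With device_slave_caps() removed, the same capability information is published through the dma_device fields assigned above and retrieved through the core. A hedged sketch of the consumer side (helper name invented for illustration):

	#include <linux/dmaengine.h>

	static bool chan_can_pause(struct dma_chan *chan)
	{
		struct dma_slave_caps caps;

		if (dma_get_slave_caps(chan, &caps) < 0)
			return false;

		/* the core derives cmd_pause from device_pause being set */
		return caps.cmd_pause;
	}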
@@ -2847,7 +2927,7 @@ probe_err3:
2847 2927
2848 /* Flush the channel */ 2928 /* Flush the channel */
2849 if (pch->thread) { 2929 if (pch->thread) {
2850		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
2930		pl330_terminate_all(&pch->chan);
2851 pl330_free_chan_resources(&pch->chan); 2931 pl330_free_chan_resources(&pch->chan);
2852 } 2932 }
2853 } 2933 }
@@ -2878,7 +2958,7 @@ static int pl330_remove(struct amba_device *adev)
2878 2958
2879 /* Flush the channel */ 2959 /* Flush the channel */
2880 if (pch->thread) { 2960 if (pch->thread) {
2881		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
2961		pl330_terminate_all(&pch->chan);
2882 pl330_free_chan_resources(&pch->chan); 2962 pl330_free_chan_resources(&pch->chan);
2883 } 2963 }
2884 } 2964 }
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c
index 3122a99ec06b..d7a33b3ac466 100644
--- a/drivers/dma/qcom_bam_dma.c
+++ b/drivers/dma/qcom_bam_dma.c
@@ -530,11 +530,18 @@ static void bam_free_chan(struct dma_chan *chan)
530 * Sets slave configuration for channel 530 * Sets slave configuration for channel
531 * 531 *
532 */ 532 */
533static void bam_slave_config(struct bam_chan *bchan,
534	struct dma_slave_config *cfg)
535{
536	memcpy(&bchan->slave, cfg, sizeof(*cfg));
537	bchan->reconfigure = 1;
538}
533static int bam_slave_config(struct dma_chan *chan,
534	struct dma_slave_config *cfg)
535{
536	struct bam_chan *bchan = to_bam_chan(chan);
537	unsigned long flag;
538
539	spin_lock_irqsave(&bchan->vc.lock, flag);
540	memcpy(&bchan->slave, cfg, sizeof(*cfg));
541	bchan->reconfigure = 1;
542	spin_unlock_irqrestore(&bchan->vc.lock, flag);
543
544	return 0;
545}
539 546
540/** 547/**
@@ -627,8 +634,9 @@ err_out:
627 * No callbacks are done 634 * No callbacks are done
628 * 635 *
629 */ 636 */
630static void bam_dma_terminate_all(struct bam_chan *bchan)
637static int bam_dma_terminate_all(struct dma_chan *chan)
631{ 638{
639 struct bam_chan *bchan = to_bam_chan(chan);
632 unsigned long flag; 640 unsigned long flag;
633 LIST_HEAD(head); 641 LIST_HEAD(head);
634 642
@@ -643,56 +651,46 @@ static void bam_dma_terminate_all(struct bam_chan *bchan)
643 spin_unlock_irqrestore(&bchan->vc.lock, flag); 651 spin_unlock_irqrestore(&bchan->vc.lock, flag);
644 652
645 vchan_dma_desc_free_list(&bchan->vc, &head); 653 vchan_dma_desc_free_list(&bchan->vc, &head);
654
655 return 0;
646} 656}
647 657
648/**
649 * bam_control - DMA device control
650 * @chan: dma channel
651 * @cmd: control cmd
652 * @arg: cmd argument
653 *
654 * Perform DMA control command
655 *
656 */
657static int bam_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
658	unsigned long arg)
659{
660	struct bam_chan *bchan = to_bam_chan(chan);
661	struct bam_device *bdev = bchan->bdev;
662	int ret = 0;
663	unsigned long flag;
664
665	switch (cmd) {
666	case DMA_PAUSE:
667		spin_lock_irqsave(&bchan->vc.lock, flag);
668		writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
669		bchan->paused = 1;
670		spin_unlock_irqrestore(&bchan->vc.lock, flag);
671		break;
672
673	case DMA_RESUME:
674		spin_lock_irqsave(&bchan->vc.lock, flag);
675		writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
676		bchan->paused = 0;
677		spin_unlock_irqrestore(&bchan->vc.lock, flag);
678		break;
679
680	case DMA_TERMINATE_ALL:
681		bam_dma_terminate_all(bchan);
682		break;
683
684	case DMA_SLAVE_CONFIG:
685		spin_lock_irqsave(&bchan->vc.lock, flag);
686		bam_slave_config(bchan, (struct dma_slave_config *)arg);
687		spin_unlock_irqrestore(&bchan->vc.lock, flag);
688		break;
689
690	default:
691		ret = -ENXIO;
692		break;
693	}
694
695	return ret;
696}
658/**
659 * bam_pause - Pause DMA channel
660 * @chan: dma channel
661 *
662 */
663static int bam_pause(struct dma_chan *chan)
664{
665	struct bam_chan *bchan = to_bam_chan(chan);
666	struct bam_device *bdev = bchan->bdev;
667	unsigned long flag;
668
669	spin_lock_irqsave(&bchan->vc.lock, flag);
670	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
671	bchan->paused = 1;
672	spin_unlock_irqrestore(&bchan->vc.lock, flag);
673
674	return 0;
675}
676
677/**
678 * bam_resume - Resume DMA channel operations
679 * @chan: dma channel
680 *
681 */
682static int bam_resume(struct dma_chan *chan)
683{
684	struct bam_chan *bchan = to_bam_chan(chan);
685	struct bam_device *bdev = bchan->bdev;
686	unsigned long flag;
687
688	spin_lock_irqsave(&bchan->vc.lock, flag);
689	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
690	bchan->paused = 0;
691	spin_unlock_irqrestore(&bchan->vc.lock, flag);
692
693	return 0;
694}
697 695
698/** 696/**
@@ -1148,7 +1146,10 @@ static int bam_dma_probe(struct platform_device *pdev)
1148 bdev->common.device_alloc_chan_resources = bam_alloc_chan; 1146 bdev->common.device_alloc_chan_resources = bam_alloc_chan;
1149 bdev->common.device_free_chan_resources = bam_free_chan; 1147 bdev->common.device_free_chan_resources = bam_free_chan;
1150 bdev->common.device_prep_slave_sg = bam_prep_slave_sg; 1148 bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
1151	bdev->common.device_control = bam_control;
1149	bdev->common.device_config = bam_slave_config;
1150 bdev->common.device_pause = bam_pause;
1151 bdev->common.device_resume = bam_resume;
1152 bdev->common.device_terminate_all = bam_dma_terminate_all;
1152 bdev->common.device_issue_pending = bam_issue_pending; 1153 bdev->common.device_issue_pending = bam_issue_pending;
1153 bdev->common.device_tx_status = bam_tx_status; 1154 bdev->common.device_tx_status = bam_tx_status;
1154 bdev->common.dev = bdev->dev; 1155 bdev->common.dev = bdev->dev;
@@ -1187,7 +1188,7 @@ static int bam_dma_remove(struct platform_device *pdev)
1187 devm_free_irq(bdev->dev, bdev->irq, bdev); 1188 devm_free_irq(bdev->dev, bdev->irq, bdev);
1188 1189
1189 for (i = 0; i < bdev->num_channels; i++) { 1190 for (i = 0; i < bdev->num_channels; i++) {
1190		bam_dma_terminate_all(&bdev->channels[i]);
1191		bam_dma_terminate_all(&bdev->channels[i].vc.chan);
1191 tasklet_kill(&bdev->channels[i].vc.task); 1192 tasklet_kill(&bdev->channels[i].vc.task);
1192 1193
1193 dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, 1194 dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 6941a77521c3..2f91da3db836 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -384,20 +384,30 @@ static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan)
384 return tc * txd->width; 384 return tc * txd->width;
385} 385}
386 386
387static int s3c24xx_dma_set_runtime_config(struct s3c24xx_dma_chan *s3cchan,
388			struct dma_slave_config *config)
389{
390	if (!s3cchan->slave)
391		return -EINVAL;
392
393	/* Reject definitely invalid configurations */
394	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
395	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
396		return -EINVAL;
397
398	s3cchan->cfg = *config;
399
400	return 0;
401}
387static int s3c24xx_dma_set_runtime_config(struct dma_chan *chan,
388			struct dma_slave_config *config)
389{
390	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
391	unsigned long flags;
392	int ret = 0;
393
394	/* Reject definitely invalid configurations */
395	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
396	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
397		return -EINVAL;
398
399	spin_lock_irqsave(&s3cchan->vc.lock, flags);
400
401	if (!s3cchan->slave) {
402		ret = -EINVAL;
403		goto out;
404	}
405
406	s3cchan->cfg = *config;
407
408out:
409	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
410	return ret;
411}
402 412
403/* 413/*
@@ -703,8 +713,7 @@ static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
703 * The DMA ENGINE API 713 * The DMA ENGINE API
704 */ 714 */
705 715
706static int s3c24xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
707		unsigned long arg)
716static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
708{ 717{
709 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); 718 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
710 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; 719 struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
@@ -713,40 +722,28 @@ static int s3c24xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
713
714	spin_lock_irqsave(&s3cchan->vc.lock, flags);
715
716	switch (cmd) {
717	case DMA_SLAVE_CONFIG:
718		ret = s3c24xx_dma_set_runtime_config(s3cchan,
719			(struct dma_slave_config *)arg);
720		break;
721	case DMA_TERMINATE_ALL:
722		if (!s3cchan->phy && !s3cchan->at) {
723			dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
724				s3cchan->id);
725			ret = -EINVAL;
726			break;
727		}
728
729		s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
730
731		/* Mark physical channel as free */
732		if (s3cchan->phy)
733			s3c24xx_dma_phy_free(s3cchan);
734
735		/* Dequeue current job */
736		if (s3cchan->at) {
737			s3c24xx_dma_desc_free(&s3cchan->at->vd);
738			s3cchan->at = NULL;
739		}
740
741		/* Dequeue jobs not yet fired as well */
742		s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
743		break;
744	default:
745		/* Unknown command */
746		ret = -ENXIO;
747		break;
748	}
749
750	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
751
752	return ret;
722
723	spin_lock_irqsave(&s3cchan->vc.lock, flags);
724
725	if (!s3cchan->phy && !s3cchan->at) {
726		dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
727			s3cchan->id);
728		ret = -EINVAL;
729		goto unlock;
730	}
731
732	s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
733
734	/* Mark physical channel as free */
735	if (s3cchan->phy)
736		s3c24xx_dma_phy_free(s3cchan);
737
738	/* Dequeue current job */
739	if (s3cchan->at) {
740		s3c24xx_dma_desc_free(&s3cchan->at->vd);
741		s3cchan->at = NULL;
742	}
743
744	/* Dequeue jobs not yet fired as well */
745	s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
746unlock:
747	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
748
749	return ret;
@@ -1300,7 +1297,8 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
1300 s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy; 1297 s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
1301 s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status; 1298 s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status;
1302 s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending; 1299 s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
1303	s3cdma->memcpy.device_control = s3c24xx_dma_control;
1300	s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config;
1301 s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all;
1304 1302
1305 /* Initialize slave engine for SoC internal dedicated peripherals */ 1303 /* Initialize slave engine for SoC internal dedicated peripherals */
1306 dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask); 1304 dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
@@ -1315,7 +1313,8 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
1315 s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending; 1313 s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
1316 s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg; 1314 s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
1317 s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic; 1315 s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
1318	s3cdma->slave.device_control = s3c24xx_dma_control;
1316	s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
1317 s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;
1319 1318
1320 /* Register as many memcpy channels as there are physical channels */ 1319 /* Register as many memcpy channels as there are physical channels */
1321 ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy, 1320 ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 96bb62c39c41..5adf5407a8cb 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -669,8 +669,10 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
669 return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 669 return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
670} 670}
671 671
672static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
672static int sa11x0_dma_device_config(struct dma_chan *chan,
673		struct dma_slave_config *cfg)
673{ 674{
675 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
674 u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW); 676 u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
675 dma_addr_t addr; 677 dma_addr_t addr;
676 enum dma_slave_buswidth width; 678 enum dma_slave_buswidth width;
@@ -704,99 +706,101 @@ static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_c
704 return 0; 706 return 0;
705} 707}
706 708
707static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
708	unsigned long arg)
709{
710	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
711	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
712	struct sa11x0_dma_phy *p;
713	LIST_HEAD(head);
714	unsigned long flags;
715	int ret;
716
717	switch (cmd) {
718	case DMA_SLAVE_CONFIG:
719		return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);
720
721	case DMA_TERMINATE_ALL:
722		dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
723		/* Clear the tx descriptor lists */
724		spin_lock_irqsave(&c->vc.lock, flags);
725		vchan_get_all_descriptors(&c->vc, &head);
726
727		p = c->phy;
728		if (p) {
729			dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
730			/* vchan is assigned to a pchan - stop the channel */
731			writel(DCSR_RUN | DCSR_IE |
732			       DCSR_STRTA | DCSR_DONEA |
733			       DCSR_STRTB | DCSR_DONEB,
734			       p->base + DMA_DCSR_C);
735
736			if (p->txd_load) {
737				if (p->txd_load != p->txd_done)
738					list_add_tail(&p->txd_load->vd.node, &head);
739				p->txd_load = NULL;
740			}
741			if (p->txd_done) {
742				list_add_tail(&p->txd_done->vd.node, &head);
743				p->txd_done = NULL;
744			}
745			c->phy = NULL;
746			spin_lock(&d->lock);
747			p->vchan = NULL;
748			spin_unlock(&d->lock);
749			tasklet_schedule(&d->task);
750		}
751		spin_unlock_irqrestore(&c->vc.lock, flags);
752		vchan_dma_desc_free_list(&c->vc, &head);
753		ret = 0;
754		break;
755
756	case DMA_PAUSE:
757		dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
758		spin_lock_irqsave(&c->vc.lock, flags);
759		if (c->status == DMA_IN_PROGRESS) {
760			c->status = DMA_PAUSED;
761
762			p = c->phy;
763			if (p) {
764				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
765			} else {
766				spin_lock(&d->lock);
767				list_del_init(&c->node);
768				spin_unlock(&d->lock);
769			}
770		}
771		spin_unlock_irqrestore(&c->vc.lock, flags);
772		ret = 0;
773		break;
774
775	case DMA_RESUME:
776		dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
777		spin_lock_irqsave(&c->vc.lock, flags);
778		if (c->status == DMA_PAUSED) {
779			c->status = DMA_IN_PROGRESS;
780
781			p = c->phy;
782			if (p) {
783				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
784			} else if (!list_empty(&c->vc.desc_issued)) {
785				spin_lock(&d->lock);
786				list_add_tail(&c->node, &d->chan_pending);
787				spin_unlock(&d->lock);
788			}
789		}
790		spin_unlock_irqrestore(&c->vc.lock, flags);
791		ret = 0;
792		break;
793
794	default:
795		ret = -ENXIO;
796		break;
797	}
798
799	return ret;
800}
709static int sa11x0_dma_device_pause(struct dma_chan *chan)
710{
711	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
712	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
713	struct sa11x0_dma_phy *p;
714	LIST_HEAD(head);
715	unsigned long flags;
716
717	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
718	spin_lock_irqsave(&c->vc.lock, flags);
719	if (c->status == DMA_IN_PROGRESS) {
720		c->status = DMA_PAUSED;
721
722		p = c->phy;
723		if (p) {
724			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
725		} else {
726			spin_lock(&d->lock);
727			list_del_init(&c->node);
728			spin_unlock(&d->lock);
729		}
730	}
731	spin_unlock_irqrestore(&c->vc.lock, flags);
732
733	return 0;
734}
735
736static int sa11x0_dma_device_resume(struct dma_chan *chan)
737{
738	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
739	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
740	struct sa11x0_dma_phy *p;
741	LIST_HEAD(head);
742	unsigned long flags;
743
744	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
745	spin_lock_irqsave(&c->vc.lock, flags);
746	if (c->status == DMA_PAUSED) {
747		c->status = DMA_IN_PROGRESS;
748
749		p = c->phy;
750		if (p) {
751			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
752		} else if (!list_empty(&c->vc.desc_issued)) {
753			spin_lock(&d->lock);
754			list_add_tail(&c->node, &d->chan_pending);
755			spin_unlock(&d->lock);
756		}
757	}
758	spin_unlock_irqrestore(&c->vc.lock, flags);
759
760	return 0;
761}
762
763static int sa11x0_dma_device_terminate_all(struct dma_chan *chan)
764{
765	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
766	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
767	struct sa11x0_dma_phy *p;
768	LIST_HEAD(head);
769	unsigned long flags;
770
771	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
772	/* Clear the tx descriptor lists */
773	spin_lock_irqsave(&c->vc.lock, flags);
774	vchan_get_all_descriptors(&c->vc, &head);
775
776	p = c->phy;
777	if (p) {
778		dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
779		/* vchan is assigned to a pchan - stop the channel */
780		writel(DCSR_RUN | DCSR_IE |
781		       DCSR_STRTA | DCSR_DONEA |
782		       DCSR_STRTB | DCSR_DONEB,
783		       p->base + DMA_DCSR_C);
784
785		if (p->txd_load) {
786			if (p->txd_load != p->txd_done)
787				list_add_tail(&p->txd_load->vd.node, &head);
788			p->txd_load = NULL;
789		}
790		if (p->txd_done) {
791			list_add_tail(&p->txd_done->vd.node, &head);
792			p->txd_done = NULL;
793		}
794		c->phy = NULL;
795		spin_lock(&d->lock);
796		p->vchan = NULL;
797		spin_unlock(&d->lock);
798		tasklet_schedule(&d->task);
799	}
800	spin_unlock_irqrestore(&c->vc.lock, flags);
801	vchan_dma_desc_free_list(&c->vc, &head);
802
803	return 0;
804}
801 805
802struct sa11x0_dma_channel_desc { 806struct sa11x0_dma_channel_desc {
@@ -833,7 +837,10 @@ static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
833 dmadev->dev = dev; 837 dmadev->dev = dev;
834 dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources; 838 dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
835 dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources; 839 dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
836	dmadev->device_control = sa11x0_dma_control;
840	dmadev->device_config = sa11x0_dma_device_config;
841 dmadev->device_pause = sa11x0_dma_device_pause;
842 dmadev->device_resume = sa11x0_dma_device_resume;
843 dmadev->device_terminate_all = sa11x0_dma_device_terminate_all;
837 dmadev->device_tx_status = sa11x0_dma_tx_status; 844 dmadev->device_tx_status = sa11x0_dma_tx_status;
838 dmadev->device_issue_pending = sa11x0_dma_issue_pending; 845 dmadev->device_issue_pending = sa11x0_dma_issue_pending;
839 846
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 0349125a2e20..8190ad225a1b 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -2,6 +2,10 @@
2# DMA engine configuration for sh 2# DMA engine configuration for sh
3# 3#
4 4
5config RENESAS_DMA
6 bool
7 select DMA_ENGINE
8
5# 9#
6# DMA Engine Helpers 10# DMA Engine Helpers
7# 11#
@@ -12,7 +16,7 @@ config SH_DMAE_BASE
12 depends on !SUPERH || SH_DMA 16 depends on !SUPERH || SH_DMA
13 depends on !SH_DMA_API 17 depends on !SH_DMA_API
14 default y 18 default y
15	select DMA_ENGINE
19	select RENESAS_DMA
16 help 20 help
17 Enable support for the Renesas SuperH DMA controllers. 21 Enable support for the Renesas SuperH DMA controllers.
18 22
@@ -52,3 +56,11 @@ config RCAR_AUDMAC_PP
52 depends on SH_DMAE_BASE 56 depends on SH_DMAE_BASE
53 help 57 help
54 Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers. 58 Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers.
59
60config RCAR_DMAC
61 tristate "Renesas R-Car Gen2 DMA Controller"
62 depends on ARCH_SHMOBILE || COMPILE_TEST
63 select RENESAS_DMA
64 help
65 This driver supports the general purpose DMA controller found in the
66 Renesas R-Car second generation SoCs.
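For reference, a hedged sketch of a .config fragment that enables the new driver (CONFIG_DMADEVICES is the usual top-level gate for drivers/dma; =m assumes the platform allows modules, and RENESAS_DMA/DMA_ENGINE follow automatically from the 'select' lines above):

	CONFIG_DMADEVICES=y
	CONFIG_RCAR_DMAC=m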
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index 0a5cfdb76e45..2852f9db61a4 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -16,3 +16,4 @@ obj-$(CONFIG_SH_DMAE) += shdma.o
16obj-$(CONFIG_SUDMAC) += sudmac.o 16obj-$(CONFIG_SUDMAC) += sudmac.o
17obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o 17obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
18obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o 18obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o
19obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
new file mode 100644
index 000000000000..a18d16cc4795
--- /dev/null
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -0,0 +1,1770 @@
1/*
2 * Renesas R-Car Gen2 DMA Controller Driver
3 *
4 * Copyright (C) 2014 Renesas Electronics Inc.
5 *
6 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
7 *
8 * This is free software; you can redistribute it and/or modify
9 * it under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/dma-mapping.h>
14#include <linux/dmaengine.h>
15#include <linux/interrupt.h>
16#include <linux/list.h>
17#include <linux/module.h>
18#include <linux/mutex.h>
19#include <linux/of.h>
20#include <linux/of_dma.h>
21#include <linux/of_platform.h>
22#include <linux/platform_device.h>
23#include <linux/pm_runtime.h>
24#include <linux/slab.h>
25#include <linux/spinlock.h>
26
27#include "../dmaengine.h"
28
29/*
30 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
31 * @node: entry in the parent's chunks list
32 * @src_addr: device source address
33 * @dst_addr: device destination address
34 * @size: transfer size in bytes
35 */
36struct rcar_dmac_xfer_chunk {
37 struct list_head node;
38
39 dma_addr_t src_addr;
40 dma_addr_t dst_addr;
41 u32 size;
42};
43
44/*
45 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
46 * @sar: value of the SAR register (source address)
47 * @dar: value of the DAR register (destination address)
48 * @tcr: value of the TCR register (transfer count)
49 */
50struct rcar_dmac_hw_desc {
51 u32 sar;
52 u32 dar;
53 u32 tcr;
54 u32 reserved;
55} __attribute__((__packed__));
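The DMAC walks these entries directly in memory, so the struct layout must match the hardware view exactly; the reserved word pads each entry to 16 bytes. An illustrative compile-time check, not part of the patch, that would pin this down:

	#include <linux/bug.h>

	static inline void rcar_dmac_hw_desc_layout_check(void)
	{
		/* SAR + DAR + TCR + pad = four u32s = 16 bytes per entry */
		BUILD_BUG_ON(sizeof(struct rcar_dmac_hw_desc) != 16);
	}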
56
57/*
58 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
59 * @async_tx: base DMA asynchronous transaction descriptor
60 * @direction: direction of the DMA transfer
61 * @xfer_shift: log2 of the transfer size
62 * @chcr: value of the channel configuration register for this transfer
63 * @node: entry in the channel's descriptors lists
64 * @chunks: list of transfer chunks for this transfer
65 * @running: the transfer chunk being currently processed
66 * @nchunks: number of transfer chunks for this transfer
67 * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
68 * @hwdescs.mem: hardware descriptors memory for the transfer
69 * @hwdescs.dma: device address of the hardware descriptors memory
70 * @hwdescs.size: size of the hardware descriptors in bytes
71 * @size: transfer size in bytes
72 * @cyclic: when set indicates that the DMA transfer is cyclic
73 */
74struct rcar_dmac_desc {
75 struct dma_async_tx_descriptor async_tx;
76 enum dma_transfer_direction direction;
77 unsigned int xfer_shift;
78 u32 chcr;
79
80 struct list_head node;
81 struct list_head chunks;
82 struct rcar_dmac_xfer_chunk *running;
83 unsigned int nchunks;
84
85 struct {
86 bool use;
87 struct rcar_dmac_hw_desc *mem;
88 dma_addr_t dma;
89 size_t size;
90 } hwdescs;
91
92 unsigned int size;
93 bool cyclic;
94};
95
96#define to_rcar_dmac_desc(d) container_of(d, struct rcar_dmac_desc, async_tx)
97
98/*
99 * struct rcar_dmac_desc_page - One page worth of descriptors
100 * @node: entry in the channel's pages list
101 * @descs: array of DMA descriptors
102 * @chunks: array of transfer chunk descriptors
103 */
104struct rcar_dmac_desc_page {
105 struct list_head node;
106
107 union {
108 struct rcar_dmac_desc descs[0];
109 struct rcar_dmac_xfer_chunk chunks[0];
110 };
111};
112
113#define RCAR_DMAC_DESCS_PER_PAGE \
114 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) / \
115 sizeof(struct rcar_dmac_desc))
116#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE \
117 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \
118 sizeof(struct rcar_dmac_xfer_chunk))
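A worked instance of these macros with illustrative sizes, since the structure layouts are arch-dependent: on a 64-bit build with 4096-byte pages, the leading list_head puts offsetof(struct rcar_dmac_desc_page, chunks) at 16, and if sizeof(struct rcar_dmac_xfer_chunk) comes to 40 bytes, RCAR_DMAC_XFER_CHUNKS_PER_PAGE evaluates to (4096 - 16) / 40 = 102 chunks per page.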
119
120/*
121 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
122 * @chan: base DMA channel object
123 * @iomem: channel I/O memory base
124 * @index: index of this channel in the controller
125 * @src_xfer_size: size (in bytes) of hardware transfers on the source side
126 * @dst_xfer_size: size (in bytes) of hardware transfers on the destination side
127 * @src_slave_addr: slave source memory address
128 * @dst_slave_addr: slave destination memory address
129 * @mid_rid: hardware MID/RID for the DMA client using this channel
130 * @lock: protects the channel CHCR register and the desc members
131 * @desc.free: list of free descriptors
132 * @desc.pending: list of pending descriptors (submitted with tx_submit)
133 * @desc.active: list of active descriptors (activated with issue_pending)
134 * @desc.done: list of completed descriptors
135 * @desc.wait: list of descriptors waiting for an ack
136 * @desc.running: the descriptor being processed (a member of the active list)
137 * @desc.chunks_free: list of free transfer chunk descriptors
138 * @desc.pages: list of pages used by allocated descriptors
139 */
140struct rcar_dmac_chan {
141 struct dma_chan chan;
142 void __iomem *iomem;
143 unsigned int index;
144
145 unsigned int src_xfer_size;
146 unsigned int dst_xfer_size;
147 dma_addr_t src_slave_addr;
148 dma_addr_t dst_slave_addr;
149 int mid_rid;
150
151 spinlock_t lock;
152
153 struct {
154 struct list_head free;
155 struct list_head pending;
156 struct list_head active;
157 struct list_head done;
158 struct list_head wait;
159 struct rcar_dmac_desc *running;
160
161 struct list_head chunks_free;
162
163 struct list_head pages;
164 } desc;
165};
166
167#define to_rcar_dmac_chan(c) container_of(c, struct rcar_dmac_chan, chan)
168
169/*
170 * struct rcar_dmac - R-Car Gen2 DMA Controller
171 * @engine: base DMA engine object
172 * @dev: the hardware device
173 * @iomem: remapped I/O memory base
174 * @n_channels: number of available channels
175 * @channels: array of DMAC channels
176 * @modules: bitmask of client modules in use
177 */
178struct rcar_dmac {
179 struct dma_device engine;
180 struct device *dev;
181 void __iomem *iomem;
182
183 unsigned int n_channels;
184 struct rcar_dmac_chan *channels;
185
186 unsigned long modules[256 / BITS_PER_LONG];
187};
188
189#define to_rcar_dmac(d) container_of(d, struct rcar_dmac, engine)
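The modules member reserves one bit per possible MID/RID value, so the array is 256 / BITS_PER_LONG words: four unsigned longs on 64-bit builds, eight on 32-bit. A hedged sketch of how a bit might be claimed (helper invented for illustration; the driver's own release side is the clear_bit() in rcar_dmac_free_chan_resources() further down):

	#include <linux/bitops.h>
	#include <linux/errno.h>

	static int rcar_dmac_claim_mid_rid(struct rcar_dmac *dmac, int mid_rid)
	{
		if (mid_rid < 0 || mid_rid >= 256)
			return -EINVAL;
		if (test_and_set_bit(mid_rid, dmac->modules))
			return -EBUSY;	/* MID/RID already bound to a channel */
		return 0;
	}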
190
191/* -----------------------------------------------------------------------------
192 * Registers
193 */
194
195#define RCAR_DMAC_CHAN_OFFSET(i) (0x8000 + 0x80 * (i))
196
197#define RCAR_DMAISTA 0x0020
198#define RCAR_DMASEC 0x0030
199#define RCAR_DMAOR 0x0060
200#define RCAR_DMAOR_PRI_FIXED (0 << 8)
201#define RCAR_DMAOR_PRI_ROUND_ROBIN (3 << 8)
202#define RCAR_DMAOR_AE (1 << 2)
203#define RCAR_DMAOR_DME (1 << 0)
204#define RCAR_DMACHCLR 0x0080
205#define RCAR_DMADPSEC 0x00a0
206
207#define RCAR_DMASAR 0x0000
208#define RCAR_DMADAR 0x0004
209#define RCAR_DMATCR 0x0008
210#define RCAR_DMATCR_MASK 0x00ffffff
211#define RCAR_DMATSR 0x0028
212#define RCAR_DMACHCR 0x000c
213#define RCAR_DMACHCR_CAE (1 << 31)
214#define RCAR_DMACHCR_CAIE (1 << 30)
215#define RCAR_DMACHCR_DPM_DISABLED (0 << 28)
216#define RCAR_DMACHCR_DPM_ENABLED (1 << 28)
217#define RCAR_DMACHCR_DPM_REPEAT (2 << 28)
218#define RCAR_DMACHCR_DPM_INFINITE (3 << 28)
219#define RCAR_DMACHCR_RPT_SAR (1 << 27)
220#define RCAR_DMACHCR_RPT_DAR (1 << 26)
221#define RCAR_DMACHCR_RPT_TCR (1 << 25)
222#define RCAR_DMACHCR_DPB (1 << 22)
223#define RCAR_DMACHCR_DSE (1 << 19)
224#define RCAR_DMACHCR_DSIE (1 << 18)
225#define RCAR_DMACHCR_TS_1B ((0 << 20) | (0 << 3))
226#define RCAR_DMACHCR_TS_2B ((0 << 20) | (1 << 3))
227#define RCAR_DMACHCR_TS_4B ((0 << 20) | (2 << 3))
228#define RCAR_DMACHCR_TS_16B ((0 << 20) | (3 << 3))
229#define RCAR_DMACHCR_TS_32B ((1 << 20) | (0 << 3))
230#define RCAR_DMACHCR_TS_64B ((1 << 20) | (1 << 3))
231#define RCAR_DMACHCR_TS_8B ((1 << 20) | (3 << 3))
232#define RCAR_DMACHCR_DM_FIXED (0 << 14)
233#define RCAR_DMACHCR_DM_INC (1 << 14)
234#define RCAR_DMACHCR_DM_DEC (2 << 14)
235#define RCAR_DMACHCR_SM_FIXED (0 << 12)
236#define RCAR_DMACHCR_SM_INC (1 << 12)
237#define RCAR_DMACHCR_SM_DEC (2 << 12)
238#define RCAR_DMACHCR_RS_AUTO (4 << 8)
239#define RCAR_DMACHCR_RS_DMARS (8 << 8)
240#define RCAR_DMACHCR_IE (1 << 2)
241#define RCAR_DMACHCR_TE (1 << 1)
242#define RCAR_DMACHCR_DE (1 << 0)
243#define RCAR_DMATCRB 0x0018
244#define RCAR_DMATSRB 0x0038
245#define RCAR_DMACHCRB 0x001c
246#define RCAR_DMACHCRB_DCNT(n) ((n) << 24)
247#define RCAR_DMACHCRB_DPTR_MASK (0xff << 16)
248#define RCAR_DMACHCRB_DPTR_SHIFT 16
249#define RCAR_DMACHCRB_DRST (1 << 15)
250#define RCAR_DMACHCRB_DTS (1 << 8)
251#define RCAR_DMACHCRB_SLM_NORMAL (0 << 4)
252#define RCAR_DMACHCRB_SLM_CLK(n) ((8 | (n)) << 4)
253#define RCAR_DMACHCRB_PRI(n) ((n) << 0)
254#define RCAR_DMARS 0x0040
255#define RCAR_DMABUFCR 0x0048
256#define RCAR_DMABUFCR_MBU(n) ((n) << 16)
257#define RCAR_DMABUFCR_ULB(n) ((n) << 0)
258#define RCAR_DMADPBASE 0x0050
259#define RCAR_DMADPBASE_MASK 0xfffffff0
260#define RCAR_DMADPBASE_SEL (1 << 0)
261#define RCAR_DMADPCR 0x0054
262#define RCAR_DMADPCR_DIPT(n) ((n) << 24)
263#define RCAR_DMAFIXSAR 0x0010
264#define RCAR_DMAFIXDAR 0x0014
265#define RCAR_DMAFIXDPBASE 0x0060
266
267/* Hardcode the MEMCPY transfer size to 4 bytes. */
268#define RCAR_DMAC_MEMCPY_XFER_SIZE 4
269
270/* -----------------------------------------------------------------------------
271 * Device access
272 */
273
274static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
275{
276 if (reg == RCAR_DMAOR)
277 writew(data, dmac->iomem + reg);
278 else
279 writel(data, dmac->iomem + reg);
280}
281
282static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
283{
284 if (reg == RCAR_DMAOR)
285 return readw(dmac->iomem + reg);
286 else
287 return readl(dmac->iomem + reg);
288}
289
290static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
291{
292 if (reg == RCAR_DMARS)
293 return readw(chan->iomem + reg);
294 else
295 return readl(chan->iomem + reg);
296}
297
298static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
299{
300 if (reg == RCAR_DMARS)
301 writew(data, chan->iomem + reg);
302 else
303 writel(data, chan->iomem + reg);
304}
305
306/* -----------------------------------------------------------------------------
307 * Initialization and configuration
308 */
309
310static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
311{
312 u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
313
314 return (chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)) == RCAR_DMACHCR_DE;
315}
316
317static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
318{
319 struct rcar_dmac_desc *desc = chan->desc.running;
320 u32 chcr = desc->chcr;
321
322 WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));
323
324 if (chan->mid_rid >= 0)
325 rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);
326
327 if (desc->hwdescs.use) {
328 struct rcar_dmac_xfer_chunk *chunk;
329
330 dev_dbg(chan->chan.device->dev,
331 "chan%u: queue desc %p: %u@%pad\n",
332 chan->index, desc, desc->nchunks, &desc->hwdescs.dma);
333
334#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
335 rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
336 desc->hwdescs.dma >> 32);
337#endif
338 rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
339 (desc->hwdescs.dma & 0xfffffff0) |
340 RCAR_DMADPBASE_SEL);
341 rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
342 RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
343 RCAR_DMACHCRB_DRST);
344
345 /*
346 * Errata: When descriptor memory is accessed through an IOMMU
347 * the DMADAR register isn't initialized automatically from the
348 * first descriptor at beginning of transfer by the DMAC like it
349 * should. Initialize it manually with the destination address
350 * of the first chunk.
351 */
352 chunk = list_first_entry(&desc->chunks,
353 struct rcar_dmac_xfer_chunk, node);
354 rcar_dmac_chan_write(chan, RCAR_DMADAR,
355 chunk->dst_addr & 0xffffffff);
356
357 /*
358 * Program the descriptor stage interrupt to occur after the end
359 * of the first stage.
360 */
361 rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));
362
363 chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
364 | RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;
365
366 /*
367 * If the descriptor isn't cyclic enable normal descriptor mode
368 * and the transfer completion interrupt.
369 */
370 if (!desc->cyclic)
371 chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
372 /*
373 * If the descriptor is cyclic and has a callback enable the
374 * descriptor stage interrupt in infinite repeat mode.
375 */
376 else if (desc->async_tx.callback)
377 chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
378 /*
379 * Otherwise just select infinite repeat mode without any
380 * interrupt.
381 */
382 else
383 chcr |= RCAR_DMACHCR_DPM_INFINITE;
384 } else {
385 struct rcar_dmac_xfer_chunk *chunk = desc->running;
386
387 dev_dbg(chan->chan.device->dev,
388 "chan%u: queue chunk %p: %u@%pad -> %pad\n",
389 chan->index, chunk, chunk->size, &chunk->src_addr,
390 &chunk->dst_addr);
391
392#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
393 rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
394 chunk->src_addr >> 32);
395 rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
396 chunk->dst_addr >> 32);
397#endif
398 rcar_dmac_chan_write(chan, RCAR_DMASAR,
399 chunk->src_addr & 0xffffffff);
400 rcar_dmac_chan_write(chan, RCAR_DMADAR,
401 chunk->dst_addr & 0xffffffff);
402 rcar_dmac_chan_write(chan, RCAR_DMATCR,
403 chunk->size >> desc->xfer_shift);
404
405 chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
406 }
407
408 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr | RCAR_DMACHCR_DE);
409}
410
411static int rcar_dmac_init(struct rcar_dmac *dmac)
412{
413 u16 dmaor;
414
415 /* Clear all channels and enable the DMAC globally. */
416 rcar_dmac_write(dmac, RCAR_DMACHCLR, 0x7fff);
417 rcar_dmac_write(dmac, RCAR_DMAOR,
418 RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
419
420 dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
421 if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
422 dev_warn(dmac->dev, "DMAOR initialization failed.\n");
423 return -EIO;
424 }
425
426 return 0;
427}
428
429/* -----------------------------------------------------------------------------
430 * Descriptors submission
431 */
432
433static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
434{
435 struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
436 struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
437 unsigned long flags;
438 dma_cookie_t cookie;
439
440 spin_lock_irqsave(&chan->lock, flags);
441
442 cookie = dma_cookie_assign(tx);
443
444 dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
445 chan->index, tx->cookie, desc);
446
447 list_add_tail(&desc->node, &chan->desc.pending);
448 desc->running = list_first_entry(&desc->chunks,
449 struct rcar_dmac_xfer_chunk, node);
450
451 spin_unlock_irqrestore(&chan->lock, flags);
452
453 return cookie;
454}
455
456/* -----------------------------------------------------------------------------
457 * Descriptors allocation and free
458 */
459
460/*
461 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
462 * @chan: the DMA channel
463 * @gfp: allocation flags
464 */
465static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
466{
467 struct rcar_dmac_desc_page *page;
468 LIST_HEAD(list);
469 unsigned int i;
470
471 page = (void *)get_zeroed_page(gfp);
472 if (!page)
473 return -ENOMEM;
474
475 for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
476 struct rcar_dmac_desc *desc = &page->descs[i];
477
478 dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
479 desc->async_tx.tx_submit = rcar_dmac_tx_submit;
480 INIT_LIST_HEAD(&desc->chunks);
481
482 list_add_tail(&desc->node, &list);
483 }
484
485 spin_lock_irq(&chan->lock);
486 list_splice_tail(&list, &chan->desc.free);
487 list_add_tail(&page->node, &chan->desc.pages);
488 spin_unlock_irq(&chan->lock);
489
490 return 0;
491}
492
493/*
494 * rcar_dmac_desc_put - Release a DMA transfer descriptor
495 * @chan: the DMA channel
496 * @desc: the descriptor
497 *
498 * Put the descriptor and its transfer chunk descriptors back in the channel's
499 * free descriptors lists. The descriptor's chunks list will be reinitialized to
500 * an empty list as a result.
501 *
502 * The descriptor must have been removed from the channel's lists before calling
503 * this function.
504 */
505static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
506 struct rcar_dmac_desc *desc)
507{
508 unsigned long flags;
509
510 spin_lock_irqsave(&chan->lock, flags);
511 list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
512 list_add_tail(&desc->node, &chan->desc.free);
513 spin_unlock_irqrestore(&chan->lock, flags);
514}
515
516static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
517{
518 struct rcar_dmac_desc *desc, *_desc;
519 LIST_HEAD(list);
520
521 /*
522 * We have to temporarily move all descriptors from the wait list to a
523 * local list as iterating over the wait list, even with
524 * list_for_each_entry_safe, isn't safe if we release the channel lock
525 * around the rcar_dmac_desc_put() call.
526 */
527 spin_lock_irq(&chan->lock);
528 list_splice_init(&chan->desc.wait, &list);
529 spin_unlock_irq(&chan->lock);
530
531 list_for_each_entry_safe(desc, _desc, &list, node) {
532 if (async_tx_test_ack(&desc->async_tx)) {
533 list_del(&desc->node);
534 rcar_dmac_desc_put(chan, desc);
535 }
536 }
537
538 if (list_empty(&list))
539 return;
540
541 /* Put the remaining descriptors back in the wait list. */
542 spin_lock_irq(&chan->lock);
543 list_splice(&list, &chan->desc.wait);
544 spin_unlock_irq(&chan->lock);
545}
546
547/*
548 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
549 * @chan: the DMA channel
550 *
551 * Locking: This function must be called in a non-atomic context.
552 *
553 * Return: A pointer to the allocated descriptor or NULL if no descriptor can
554 * be allocated.
555 */
556static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
557{
558 struct rcar_dmac_desc *desc;
559 int ret;
560
561 /* Recycle acked descriptors before attempting allocation. */
562 rcar_dmac_desc_recycle_acked(chan);
563
564 spin_lock_irq(&chan->lock);
565
566 while (list_empty(&chan->desc.free)) {
567 /*
568 * No free descriptors, allocate a page worth of them and try
569 * again, as someone else could race us to get the newly
570 * allocated descriptors. If the allocation fails return an
571 * error.
572 */
573 spin_unlock_irq(&chan->lock);
574 ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
575 if (ret < 0)
576 return NULL;
577 spin_lock_irq(&chan->lock);
578 }
579
580 desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
581 list_del(&desc->node);
582
583 spin_unlock_irq(&chan->lock);
584
585 return desc;
586}
587
588/*
589 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
590 * @chan: the DMA channel
591 * @gfp: allocation flags
592 */
593static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
594{
595 struct rcar_dmac_desc_page *page;
596 LIST_HEAD(list);
597 unsigned int i;
598
599 page = (void *)get_zeroed_page(gfp);
600 if (!page)
601 return -ENOMEM;
602
603 for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
604 struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];
605
606 list_add_tail(&chunk->node, &list);
607 }
608
609 spin_lock_irq(&chan->lock);
610 list_splice_tail(&list, &chan->desc.chunks_free);
611 list_add_tail(&page->node, &chan->desc.pages);
612 spin_unlock_irq(&chan->lock);
613
614 return 0;
615}
616
617/*
618 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
619 * @chan: the DMA channel
620 *
621 * Locking: This function must be called in a non-atomic context.
622 *
623 * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
624 * descriptor can be allocated.
625 */
626static struct rcar_dmac_xfer_chunk *
627rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
628{
629 struct rcar_dmac_xfer_chunk *chunk;
630 int ret;
631
632 spin_lock_irq(&chan->lock);
633
634 while (list_empty(&chan->desc.chunks_free)) {
635 /*
636 * No free descriptors, allocate a page worth of them and try
637 * again, as someone else could race us to get the newly
638 * allocated descriptors. If the allocation fails return an
639 * error.
640 */
641 spin_unlock_irq(&chan->lock);
642 ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
643 if (ret < 0)
644 return NULL;
645 spin_lock_irq(&chan->lock);
646 }
647
648 chunk = list_first_entry(&chan->desc.chunks_free,
649 struct rcar_dmac_xfer_chunk, node);
650 list_del(&chunk->node);
651
652 spin_unlock_irq(&chan->lock);
653
654 return chunk;
655}
656
657static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan,
658 struct rcar_dmac_desc *desc, size_t size)
659{
660 /*
661 * dma_alloc_coherent() allocates memory in page size increments. To
662 * avoid reallocating the hardware descriptors when the allocated size
663 * wouldn't change align the requested size to a multiple of the page
664 * size.
665 */
666 size = PAGE_ALIGN(size);
667
668 if (desc->hwdescs.size == size)
669 return;
670
671 if (desc->hwdescs.mem) {
672 dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size,
673 desc->hwdescs.mem, desc->hwdescs.dma);
674 desc->hwdescs.mem = NULL;
675 desc->hwdescs.size = 0;
676 }
677
678 if (!size)
679 return;
680
681 desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size,
682 &desc->hwdescs.dma, GFP_NOWAIT);
683 if (!desc->hwdescs.mem)
684 return;
685
686 desc->hwdescs.size = size;
687}
688
689static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
690 struct rcar_dmac_desc *desc)
691{
692 struct rcar_dmac_xfer_chunk *chunk;
693 struct rcar_dmac_hw_desc *hwdesc;
694
695 rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc));
696
697 hwdesc = desc->hwdescs.mem;
698 if (!hwdesc)
699 return -ENOMEM;
700
701 list_for_each_entry(chunk, &desc->chunks, node) {
702 hwdesc->sar = chunk->src_addr;
703 hwdesc->dar = chunk->dst_addr;
704 hwdesc->tcr = chunk->size >> desc->xfer_shift;
705 hwdesc++;
706 }
707
708 return 0;
709}
710
711/* -----------------------------------------------------------------------------
712 * Stop and reset
713 */
714
715static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
716{
717 u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
718
719 chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
720 RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
721 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
722}
723
724static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
725{
726 struct rcar_dmac_desc *desc, *_desc;
727 unsigned long flags;
728 LIST_HEAD(descs);
729
730 spin_lock_irqsave(&chan->lock, flags);
731
732 /* Move all non-free descriptors to the local lists. */
733 list_splice_init(&chan->desc.pending, &descs);
734 list_splice_init(&chan->desc.active, &descs);
735 list_splice_init(&chan->desc.done, &descs);
736 list_splice_init(&chan->desc.wait, &descs);
737
738 chan->desc.running = NULL;
739
740 spin_unlock_irqrestore(&chan->lock, flags);
741
742 list_for_each_entry_safe(desc, _desc, &descs, node) {
743 list_del(&desc->node);
744 rcar_dmac_desc_put(chan, desc);
745 }
746}
747
748static void rcar_dmac_stop(struct rcar_dmac *dmac)
749{
750 rcar_dmac_write(dmac, RCAR_DMAOR, 0);
751}
752
753static void rcar_dmac_abort(struct rcar_dmac *dmac)
754{
755 unsigned int i;
756
757 /* Stop all channels. */
758 for (i = 0; i < dmac->n_channels; ++i) {
759 struct rcar_dmac_chan *chan = &dmac->channels[i];
760
761 /* Stop and reinitialize the channel. */
762 spin_lock(&chan->lock);
763 rcar_dmac_chan_halt(chan);
764 spin_unlock(&chan->lock);
765
766 rcar_dmac_chan_reinit(chan);
767 }
768}
769
770/* -----------------------------------------------------------------------------
771 * Descriptors preparation
772 */
773
774static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
775 struct rcar_dmac_desc *desc)
776{
777 static const u32 chcr_ts[] = {
778 RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
779 RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
780 RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
781 RCAR_DMACHCR_TS_64B,
782 };
783
784 unsigned int xfer_size;
785 u32 chcr;
786
787 switch (desc->direction) {
788 case DMA_DEV_TO_MEM:
789 chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
790 | RCAR_DMACHCR_RS_DMARS;
791 xfer_size = chan->src_xfer_size;
792 break;
793
794 case DMA_MEM_TO_DEV:
795 chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
796 | RCAR_DMACHCR_RS_DMARS;
797 xfer_size = chan->dst_xfer_size;
798 break;
799
800 case DMA_MEM_TO_MEM:
801 default:
802 chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
803 | RCAR_DMACHCR_RS_AUTO;
804 xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
805 break;
806 }
807
808 desc->xfer_shift = ilog2(xfer_size);
809 desc->chcr = chcr | chcr_ts[desc->xfer_shift];
810}
811
812/*
813 * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
814 *
815 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
816 * converted to scatter-gather to guarantee consistent locking and a correct
817 * list manipulation. For slave DMA direction carries the usual meaning, and,
818 * logically, the SG list is RAM and the addr variable contains slave address,
819 * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
820 * and the SG list contains only one element and points at the source buffer.
821 */
822static struct dma_async_tx_descriptor *
823rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
824 unsigned int sg_len, dma_addr_t dev_addr,
825 enum dma_transfer_direction dir, unsigned long dma_flags,
826 bool cyclic)
827{
828 struct rcar_dmac_xfer_chunk *chunk;
829 struct rcar_dmac_desc *desc;
830 struct scatterlist *sg;
831 unsigned int nchunks = 0;
832 unsigned int max_chunk_size;
833 unsigned int full_size = 0;
834 bool highmem = false;
835 unsigned int i;
836
837 desc = rcar_dmac_desc_get(chan);
838 if (!desc)
839 return NULL;
840
841 desc->async_tx.flags = dma_flags;
842 desc->async_tx.cookie = -EBUSY;
843
844 desc->cyclic = cyclic;
845 desc->direction = dir;
846
847 rcar_dmac_chan_configure_desc(chan, desc);
848
849 max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;
850
851 /*
852 * Allocate and fill the transfer chunk descriptors. We own the only
853 * reference to the DMA descriptor, there's no need for locking.
854 */
855 for_each_sg(sgl, sg, sg_len, i) {
856 dma_addr_t mem_addr = sg_dma_address(sg);
857 unsigned int len = sg_dma_len(sg);
858
859 full_size += len;
860
861 while (len) {
862 unsigned int size = min(len, max_chunk_size);
863
864#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
865 /*
866 * Prevent individual transfers from crossing 4GB
867 * boundaries.
868 */
869 if (dev_addr >> 32 != (dev_addr + size - 1) >> 32)
870 size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
871 if (mem_addr >> 32 != (mem_addr + size - 1) >> 32)
872 size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
873
874 /*
875 * Check if either of the source or destination address
876 * can't be expressed in 32 bits. If so we can't use
877 * hardware descriptor lists.
878 */
879 if (dev_addr >> 32 || mem_addr >> 32)
880 highmem = true;
881#endif
882
883 chunk = rcar_dmac_xfer_chunk_get(chan);
884 if (!chunk) {
885 rcar_dmac_desc_put(chan, desc);
886 return NULL;
887 }
888
889 if (dir == DMA_DEV_TO_MEM) {
890 chunk->src_addr = dev_addr;
891 chunk->dst_addr = mem_addr;
892 } else {
893 chunk->src_addr = mem_addr;
894 chunk->dst_addr = dev_addr;
895 }
896
897 chunk->size = size;
898
899 dev_dbg(chan->chan.device->dev,
900 "chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
901 chan->index, chunk, desc, i, sg, size, len,
902 &chunk->src_addr, &chunk->dst_addr);
903
904 mem_addr += size;
905 if (dir == DMA_MEM_TO_MEM)
906 dev_addr += size;
907
908 len -= size;
909
910 list_add_tail(&chunk->node, &desc->chunks);
911 nchunks++;
912 }
913 }
914
915 desc->nchunks = nchunks;
916 desc->size = full_size;
917
918 /*
919 * Use hardware descriptor lists if possible when more than one chunk
920 * needs to be transferred (otherwise they don't make much sense).
921 *
922 * The highmem check currently covers the whole transfer. As an
923 * optimization we could use descriptor lists for consecutive lowmem
924 * chunks and direct manual mode for highmem chunks. Whether the
925 * performance improvement would be significant enough compared to the
926 * additional complexity remains to be investigated.
927 */
928 desc->hwdescs.use = !highmem && nchunks > 1;
929 if (desc->hwdescs.use) {
930 if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
931 desc->hwdescs.use = false;
932 }
933
934 return &desc->async_tx;
935}
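To make the 4 GiB clamp above concrete with illustrative addresses: for mem_addr = 0xfffff000 and a 0x4000-byte request, mem_addr >> 32 is 0 while (mem_addr + size - 1) >> 32 is 1, so size is clamped to ALIGN(0xfffff000, 1ULL << 32) - 0xfffff000 = 0x1000. The chunk then ends exactly on the boundary, and the remaining 0x3000 bytes are picked up by the next iteration of the while (len) loop.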
936
937/* -----------------------------------------------------------------------------
938 * DMA engine operations
939 */
940
941static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
942{
943 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
944 int ret;
945
946 INIT_LIST_HEAD(&rchan->desc.chunks_free);
947 INIT_LIST_HEAD(&rchan->desc.pages);
948
949 /* Preallocate descriptors. */
950 ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
951 if (ret < 0)
952 return -ENOMEM;
953
954 ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
955 if (ret < 0)
956 return -ENOMEM;
957
958 return pm_runtime_get_sync(chan->device->dev);
959}
960
961static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
962{
963 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
964 struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
965 struct rcar_dmac_desc_page *page, *_page;
966 struct rcar_dmac_desc *desc;
967 LIST_HEAD(list);
968
969 /* Protect against ISR */
970 spin_lock_irq(&rchan->lock);
971 rcar_dmac_chan_halt(rchan);
972 spin_unlock_irq(&rchan->lock);
973
974 /* Now no new interrupts will occur */
975
976 if (rchan->mid_rid >= 0) {
977 /* The caller is holding dma_list_mutex */
978 clear_bit(rchan->mid_rid, dmac->modules);
979 rchan->mid_rid = -EINVAL;
980 }
981
982 list_splice_init(&rchan->desc.free, &list);
983 list_splice_init(&rchan->desc.pending, &list);
984 list_splice_init(&rchan->desc.active, &list);
985 list_splice_init(&rchan->desc.done, &list);
986 list_splice_init(&rchan->desc.wait, &list);
987
988 list_for_each_entry(desc, &list, node)
989 rcar_dmac_realloc_hwdesc(rchan, desc, 0);
990
991 list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
992 list_del(&page->node);
993 free_page((unsigned long)page);
994 }
995
996 pm_runtime_put(chan->device->dev);
997}
998
999static struct dma_async_tx_descriptor *
1000rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
1001 dma_addr_t dma_src, size_t len, unsigned long flags)
1002{
1003 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1004 struct scatterlist sgl;
1005
1006 if (!len)
1007 return NULL;
1008
1009 sg_init_table(&sgl, 1);
1010 sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
1011 offset_in_page(dma_src));
1012 sg_dma_address(&sgl) = dma_src;
1013 sg_dma_len(&sgl) = len;
1014
1015 return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
1016 DMA_MEM_TO_MEM, flags, false);
1017}
1018
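/*
 * Illustrative client-side sketch, not part of this driver: submitting a
 * memcpy through the prep callback above by calling the channel's ops
 * directly, as in-tree clients of this era do. Requires
 * <linux/dmaengine.h> and <linux/completion.h>; all "example_" names are
 * hypothetical.
 */
static void example_memcpy_done(void *param)
{
	complete(param);		/* wake up the submitter */
}

static int example_memcpy(struct dma_chan *chan, dma_addr_t dst,
			  dma_addr_t src, size_t len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_PREP_INTERRUPT |
						  DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	tx->callback = example_memcpy_done;
	tx->callback_param = &done;
	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);	/* nothing starts before this */
	wait_for_completion(&done);
	return 0;
}
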
1019static struct dma_async_tx_descriptor *
1020rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1021 unsigned int sg_len, enum dma_transfer_direction dir,
1022 unsigned long flags, void *context)
1023{
1024 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1025 dma_addr_t dev_addr;
1026
1027 /* Someone calling slave DMA on a generic channel? */
1028 if (rchan->mid_rid < 0 || !sg_len) {
1029 dev_warn(chan->device->dev,
1030 "%s: bad parameter: len=%d, id=%d\n",
1031 __func__, sg_len, rchan->mid_rid);
1032 return NULL;
1033 }
1034
1035 dev_addr = dir == DMA_DEV_TO_MEM
1036 ? rchan->src_slave_addr : rchan->dst_slave_addr;
1037 return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
1038 dir, flags, false);
1039}
1040
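/*
 * Illustrative client-side sketch, not part of this driver: how a slave
 * driver would reach rcar_dmac_prep_slave_sg() through the generic
 * dmaengine wrappers. Assumes the channel was already configured with
 * dmaengine_slave_config(); "dev" and "buf" are hypothetical.
 */
static dma_cookie_t example_tx_one_buffer(struct device *dev,
					  struct dma_chan *chan,
					  void *buf, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);
	if (dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE) != 1)
		return -ENOMEM;

	/* dir must match the slave address set via dmaengine_slave_config() */
	tx = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_unmap_sg(dev, &sg, 1, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	/* start it later with dma_async_issue_pending() */
	return dmaengine_submit(tx);
}
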
1041#define RCAR_DMAC_MAX_SG_LEN 32
1042
1043static struct dma_async_tx_descriptor *
1044rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
1045 size_t buf_len, size_t period_len,
1046 enum dma_transfer_direction dir, unsigned long flags)
1047{
1048 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1049 struct dma_async_tx_descriptor *desc;
1050 struct scatterlist *sgl;
1051 dma_addr_t dev_addr;
1052 unsigned int sg_len;
1053 unsigned int i;
1054
1055 /* Someone calling slave DMA on a generic channel? */
1056 if (rchan->mid_rid < 0 || buf_len < period_len) {
1057 dev_warn(chan->device->dev,
1058 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
1059 __func__, buf_len, period_len, rchan->mid_rid);
1060 return NULL;
1061 }
1062
1063 sg_len = buf_len / period_len;
1064 if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
1065 dev_err(chan->device->dev,
1066 "chan%u: sg length %d exceds limit %d",
1067 rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
1068 return NULL;
1069 }
1070
1071 /*
1072 * Allocate the sg list dynamically as it would consume too much stack
1073 * space.
1074 */
1075 sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
1076 if (!sgl)
1077 return NULL;
1078
1079 sg_init_table(sgl, sg_len);
1080
1081 for (i = 0; i < sg_len; ++i) {
1082 dma_addr_t src = buf_addr + (period_len * i);
1083
1084 sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
1085 offset_in_page(src));
1086 sg_dma_address(&sgl[i]) = src;
1087 sg_dma_len(&sgl[i]) = period_len;
1088 }
1089
1090 dev_addr = dir == DMA_DEV_TO_MEM
1091 ? rchan->src_slave_addr : rchan->dst_slave_addr;
1092 desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
1093 dir, flags, true);
1094
1095 kfree(sgl);
1096 return desc;
1097}
1098
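/*
 * Illustrative client-side sketch, not part of this driver: a cyclic
 * transfer as an ALSA-style driver would request it. The callback fires
 * once per period (see the DSE interrupt handling below); "example_"
 * names are hypothetical.
 */
static int example_start_ring(struct dma_chan *chan, dma_addr_t buf,
			      size_t buf_len, size_t period_len,
			      void (*period_cb)(void *), void *cb_arg)
{
	struct dma_async_tx_descriptor *tx;

	/* buf_len / period_len must not exceed RCAR_DMAC_MAX_SG_LEN */
	tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!tx)
		return -EINVAL;

	tx->callback = period_cb;
	tx->callback_param = cb_arg;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	return 0;	/* runs until dmaengine_terminate_all(chan) */
}
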
1099static int rcar_dmac_device_config(struct dma_chan *chan,
1100 struct dma_slave_config *cfg)
1101{
1102 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1103
1104 /*
1105 * We could lock this, but you shouldn't be configuring the
1106 * channel while using it...
1107 */
1108 rchan->src_slave_addr = cfg->src_addr;
1109 rchan->dst_slave_addr = cfg->dst_addr;
1110 rchan->src_xfer_size = cfg->src_addr_width;
1111 rchan->dst_xfer_size = cfg->dst_addr_width;
1112
1113 return 0;
1114}
1115
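/*
 * Illustrative client-side sketch, not part of this driver: the
 * dma_slave_config fields consumed by rcar_dmac_device_config() above.
 * The FIFO address and bus width are made up for the example; a real
 * slave driver would use its own register addresses.
 */
static int example_configure_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,	/* hypothetical device FIFO */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};

	/* ends up in rchan->dst_slave_addr / rchan->dst_xfer_size */
	return dmaengine_slave_config(chan, &cfg);
}
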
1116static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
1117{
1118 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1119 unsigned long flags;
1120
1121 spin_lock_irqsave(&rchan->lock, flags);
1122 rcar_dmac_chan_halt(rchan);
1123 spin_unlock_irqrestore(&rchan->lock, flags);
1124
1125 /*
1126 * FIXME: No new interrupt can occur now, but the IRQ thread might still
1127 * be running.
1128 */
1129
1130 rcar_dmac_chan_reinit(rchan);
1131
1132 return 0;
1133}
1134
1135static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
1136 dma_cookie_t cookie)
1137{
1138 struct rcar_dmac_desc *desc = chan->desc.running;
1139 struct rcar_dmac_xfer_chunk *running = NULL;
1140 struct rcar_dmac_xfer_chunk *chunk;
1141 unsigned int residue = 0;
1142 unsigned int dptr = 0;
1143
1144 if (!desc)
1145 return 0;
1146
1147 /*
1148 * If the cookie doesn't correspond to the currently running transfer
1149 * then the descriptor hasn't been processed yet, and the residue is
1150 * equal to the full descriptor size.
1151 */
1152 if (cookie != desc->async_tx.cookie)
1153 return desc->size;
1154
1155 /*
1156 * In descriptor mode the descriptor running pointer is not maintained
1157 * by the interrupt handler; find the running descriptor from the
1158 * descriptor pointer field in the CHCRB register. In non-descriptor
1159 * mode just use the running chunk pointer.
1160 */
1161 if (desc->hwdescs.use) {
1162 dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
1163 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
1164 WARN_ON(dptr >= desc->nchunks);
1165 } else {
1166 running = desc->running;
1167 }
1168
1169 /* Compute the size of all chunks still to be transferred. */
1170 list_for_each_entry_reverse(chunk, &desc->chunks, node) {
1171 if (chunk == running || ++dptr == desc->nchunks)
1172 break;
1173
1174 residue += chunk->size;
1175 }
1176
1177 /* Add the residue for the current chunk. */
1178 residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift;
1179
1180 return residue;
1181}
1182
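/*
 * Illustrative client-side sketch, not part of this driver: reading back
 * the residue computed above. With DMA_RESIDUE_GRANULARITY_BURST the
 * value can change while the transfer is still running, so callers poll
 * it under their own synchronization; "example_" names are hypothetical.
 */
static size_t example_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_COMPLETE)
		return 0;

	/* remaining chunks plus the current RCAR_DMATCR count */
	return state.residue;
}
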
1183static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
1184 dma_cookie_t cookie,
1185 struct dma_tx_state *txstate)
1186{
1187 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1188 enum dma_status status;
1189 unsigned long flags;
1190 unsigned int residue;
1191
1192 status = dma_cookie_status(chan, cookie, txstate);
1193 if (status == DMA_COMPLETE || !txstate)
1194 return status;
1195
1196 spin_lock_irqsave(&rchan->lock, flags);
1197 residue = rcar_dmac_chan_get_residue(rchan, cookie);
1198 spin_unlock_irqrestore(&rchan->lock, flags);
1199
1200 dma_set_residue(txstate, residue);
1201
1202 return status;
1203}
1204
1205static void rcar_dmac_issue_pending(struct dma_chan *chan)
1206{
1207 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1208 unsigned long flags;
1209
1210 spin_lock_irqsave(&rchan->lock, flags);
1211
1212 if (list_empty(&rchan->desc.pending))
1213 goto done;
1214
1215 /* Append the pending list to the active list. */
1216 list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);
1217
1218 /*
1219 * If no transfer is running pick the first descriptor from the active
1220 * list and start the transfer.
1221 */
1222 if (!rchan->desc.running) {
1223 struct rcar_dmac_desc *desc;
1224
1225 desc = list_first_entry(&rchan->desc.active,
1226 struct rcar_dmac_desc, node);
1227 rchan->desc.running = desc;
1228
1229 rcar_dmac_chan_start_xfer(rchan);
1230 }
1231
1232done:
1233 spin_unlock_irqrestore(&rchan->lock, flags);
1234}
1235
1236/* -----------------------------------------------------------------------------
1237 * IRQ handling
1238 */
1239
1240static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan)
1241{
1242 struct rcar_dmac_desc *desc = chan->desc.running;
1243 unsigned int stage;
1244
1245 if (WARN_ON(!desc || !desc->cyclic)) {
1246 /*
1247 * This should never happen; there should always be a running
1248 * cyclic descriptor when a descriptor stage end interrupt is
1249 * triggered. Warn and return.
1250 */
1251 return IRQ_NONE;
1252 }
1253
1254 /* Program the interrupt pointer to the next stage. */
1255 stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
1256 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
1257 rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage));
1258
1259 return IRQ_WAKE_THREAD;
1260}
1261
1262static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
1263{
1264 struct rcar_dmac_desc *desc = chan->desc.running;
1265 irqreturn_t ret = IRQ_WAKE_THREAD;
1266
1267 if (WARN_ON_ONCE(!desc)) {
1268 /*
1269 * This should never happen; there should always be a running
1270 * descriptor when a transfer end interrupt is triggered. Warn
1271 * and return.
1272 */
1273 return IRQ_NONE;
1274 }
1275
1276 /*
1277 * The transfer end interrupt isn't generated for each chunk when using
1278 * descriptor mode. Only update the running chunk pointer in
1279 * non-descriptor mode.
1280 */
1281 if (!desc->hwdescs.use) {
1282 /*
1283 * If we haven't completed the last transfer chunk simply move
1284 * to the next one. Only wake the IRQ thread if the transfer is
1285 * cyclic.
1286 */
1287 if (!list_is_last(&desc->running->node, &desc->chunks)) {
1288 desc->running = list_next_entry(desc->running, node);
1289 if (!desc->cyclic)
1290 ret = IRQ_HANDLED;
1291 goto done;
1292 }
1293
1294 /*
1295 * We've completed the last transfer chunk. If the transfer is
1296 * cyclic, move back to the first one.
1297 */
1298 if (desc->cyclic) {
1299 desc->running =
1300 list_first_entry(&desc->chunks,
1301 struct rcar_dmac_xfer_chunk,
1302 node);
1303 goto done;
1304 }
1305 }
1306
1307 /* The descriptor is complete, move it to the done list. */
1308 list_move_tail(&desc->node, &chan->desc.done);
1309
1310 /* Queue the next descriptor, if any. */
1311 if (!list_empty(&chan->desc.active))
1312 chan->desc.running = list_first_entry(&chan->desc.active,
1313 struct rcar_dmac_desc,
1314 node);
1315 else
1316 chan->desc.running = NULL;
1317
1318done:
1319 if (chan->desc.running)
1320 rcar_dmac_chan_start_xfer(chan);
1321
1322 return ret;
1323}
1324
1325static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
1326{
1327 u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
1328 struct rcar_dmac_chan *chan = dev;
1329 irqreturn_t ret = IRQ_NONE;
1330 u32 chcr;
1331
1332 spin_lock(&chan->lock);
1333
1334 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
1335 if (chcr & RCAR_DMACHCR_TE)
1336 mask |= RCAR_DMACHCR_DE;
1337 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
1338
1339 if (chcr & RCAR_DMACHCR_DSE)
1340 ret |= rcar_dmac_isr_desc_stage_end(chan);
1341
1342 if (chcr & RCAR_DMACHCR_TE)
1343 ret |= rcar_dmac_isr_transfer_end(chan);
1344
1345 spin_unlock(&chan->lock);
1346
1347 return ret;
1348}
1349
1350static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
1351{
1352 struct rcar_dmac_chan *chan = dev;
1353 struct rcar_dmac_desc *desc;
1354
1355 spin_lock_irq(&chan->lock);
1356
1357 /* For cyclic transfers notify the user after every chunk. */
1358 if (chan->desc.running && chan->desc.running->cyclic) {
1359 dma_async_tx_callback callback;
1360 void *callback_param;
1361
1362 desc = chan->desc.running;
1363 callback = desc->async_tx.callback;
1364 callback_param = desc->async_tx.callback_param;
1365
1366 if (callback) {
1367 spin_unlock_irq(&chan->lock);
1368 callback(callback_param);
1369 spin_lock_irq(&chan->lock);
1370 }
1371 }
1372
1373 /*
1374 * Call the callback function for all descriptors on the done list and
1375 * move them to the ack wait list.
1376 */
1377 while (!list_empty(&chan->desc.done)) {
1378 desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
1379 node);
1380 dma_cookie_complete(&desc->async_tx);
1381 list_del(&desc->node);
1382
1383 if (desc->async_tx.callback) {
1384 spin_unlock_irq(&chan->lock);
1385 /*
1386 * We own the only reference to this descriptor, so we can
1387 * safely dereference it without holding the channel
1388 * lock.
1389 */
1390 desc->async_tx.callback(desc->async_tx.callback_param);
1391 spin_lock_irq(&chan->lock);
1392 }
1393
1394 list_add_tail(&desc->node, &chan->desc.wait);
1395 }
1396
1397 spin_unlock_irq(&chan->lock);
1398
1399 /* Recycle all acked descriptors. */
1400 rcar_dmac_desc_recycle_acked(chan);
1401
1402 return IRQ_HANDLED;
1403}
1404
1405static irqreturn_t rcar_dmac_isr_error(int irq, void *data)
1406{
1407 struct rcar_dmac *dmac = data;
1408
1409 if (!(rcar_dmac_read(dmac, RCAR_DMAOR) & RCAR_DMAOR_AE))
1410 return IRQ_NONE;
1411
1412 /*
1413 * An unrecoverable error occurred on an unknown channel. Halt the DMAC,
1414 * abort transfers on all channels, and reinitialize the DMAC.
1415 */
1416 rcar_dmac_stop(dmac);
1417 rcar_dmac_abort(dmac);
1418 rcar_dmac_init(dmac);
1419
1420 return IRQ_HANDLED;
1421}
1422
1423/* -----------------------------------------------------------------------------
1424 * OF xlate and channel filter
1425 */
1426
1427static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
1428{
1429 struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
1430 struct of_phandle_args *dma_spec = arg;
1431
1432 /*
1433 * FIXME: Using a filter on OF platforms makes no sense. The OF xlate
1434 * function knows which device it wants to allocate a channel from,
1435 * and would be perfectly capable of selecting the channel it wants.
1436 * Forcing it to call dma_request_channel() and iterate through all
1437 * channels from all controllers is just pointless.
1438 */
1439 if (chan->device->device_config != rcar_dmac_device_config ||
1440 dma_spec->np != chan->device->dev->of_node)
1441 return false;
1442
1443 return !test_and_set_bit(dma_spec->args[0], dmac->modules);
1444}
1445
1446static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
1447 struct of_dma *ofdma)
1448{
1449 struct rcar_dmac_chan *rchan;
1450 struct dma_chan *chan;
1451 dma_cap_mask_t mask;
1452
1453 if (dma_spec->args_count != 1)
1454 return NULL;
1455
1456 /* Only slave DMA channels can be allocated via DT */
1457 dma_cap_zero(mask);
1458 dma_cap_set(DMA_SLAVE, mask);
1459
1460 chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec);
1461 if (!chan)
1462 return NULL;
1463
1464 rchan = to_rcar_dmac_chan(chan);
1465 rchan->mid_rid = dma_spec->args[0];
1466
1467 return chan;
1468}
1469
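/*
 * Illustrative client-side sketch, not part of this driver: how a slave
 * driver reaches the xlate function above. The "rx" name must match a
 * dma-names entry in the client's device tree node; it is an assumption
 * here, not something this file defines.
 */
static struct dma_chan *example_get_rx_chan(struct device *dev)
{
	/* parses the "dmas" phandle and ends up in rcar_dmac_of_xlate() */
	return dma_request_slave_channel(dev, "rx");
}
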
1470/* -----------------------------------------------------------------------------
1471 * Power management
1472 */
1473
1474#ifdef CONFIG_PM_SLEEP
1475static int rcar_dmac_sleep_suspend(struct device *dev)
1476{
1477 /*
1478 * TODO: Wait for the current transfer to complete and stop the device.
1479 */
1480 return 0;
1481}
1482
1483static int rcar_dmac_sleep_resume(struct device *dev)
1484{
1485 /* TODO: Resume transfers, if any. */
1486 return 0;
1487}
1488#endif
1489
1490#ifdef CONFIG_PM
1491static int rcar_dmac_runtime_suspend(struct device *dev)
1492{
1493 return 0;
1494}
1495
1496static int rcar_dmac_runtime_resume(struct device *dev)
1497{
1498 struct rcar_dmac *dmac = dev_get_drvdata(dev);
1499
1500 return rcar_dmac_init(dmac);
1501}
1502#endif
1503
1504static const struct dev_pm_ops rcar_dmac_pm = {
1505 SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume)
1506 SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
1507 NULL)
1508};
1509
1510/* -----------------------------------------------------------------------------
1511 * Probe and remove
1512 */
1513
1514static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
1515 struct rcar_dmac_chan *rchan,
1516 unsigned int index)
1517{
1518 struct platform_device *pdev = to_platform_device(dmac->dev);
1519 struct dma_chan *chan = &rchan->chan;
1520 char pdev_irqname[5];
1521 char *irqname;
1522 int irq;
1523 int ret;
1524
1525 rchan->index = index;
1526 rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index);
1527 rchan->mid_rid = -EINVAL;
1528
1529 spin_lock_init(&rchan->lock);
1530
1531 INIT_LIST_HEAD(&rchan->desc.free);
1532 INIT_LIST_HEAD(&rchan->desc.pending);
1533 INIT_LIST_HEAD(&rchan->desc.active);
1534 INIT_LIST_HEAD(&rchan->desc.done);
1535 INIT_LIST_HEAD(&rchan->desc.wait);
1536
1537 /* Request the channel interrupt. */
1538 sprintf(pdev_irqname, "ch%u", index);
1539 irq = platform_get_irq_byname(pdev, pdev_irqname);
1540 if (irq < 0) {
1541 dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
1542 return -ENODEV;
1543 }
1544
1545 irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
1546 dev_name(dmac->dev), index);
1547 if (!irqname)
1548 return -ENOMEM;
1549
1550 ret = devm_request_threaded_irq(dmac->dev, irq, rcar_dmac_isr_channel,
1551 rcar_dmac_isr_channel_thread, 0,
1552 irqname, rchan);
1553 if (ret) {
1554 dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", irq, ret);
1555 return ret;
1556 }
1557
1558 /*
1559 * Initialize the DMA engine channel and add it to the DMA engine
1560 * channels list.
1561 */
1562 chan->device = &dmac->engine;
1563 dma_cookie_init(chan);
1564
1565 list_add_tail(&chan->device_node, &dmac->engine.channels);
1566
1567 return 0;
1568}
1569
1570static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
1571{
1572 struct device_node *np = dev->of_node;
1573 int ret;
1574
1575 ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
1576 if (ret < 0) {
1577 dev_err(dev, "unable to read dma-channels property\n");
1578 return ret;
1579 }
1580
1581 if (dmac->n_channels == 0 || dmac->n_channels >= 100) {
1582 dev_err(dev, "invalid number of channels %u\n",
1583 dmac->n_channels);
1584 return -EINVAL;
1585 }
1586
1587 return 0;
1588}
1589
1590static int rcar_dmac_probe(struct platform_device *pdev)
1591{
1592 const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
1593 DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
1594 DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
1595 DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
1596 unsigned int channels_offset = 0;
1597 struct dma_device *engine;
1598 struct rcar_dmac *dmac;
1599 struct resource *mem;
1600 unsigned int i;
1601 char *irqname;
1602 int irq;
1603 int ret;
1604
1605 dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
1606 if (!dmac)
1607 return -ENOMEM;
1608
1609 dmac->dev = &pdev->dev;
1610 platform_set_drvdata(pdev, dmac);
1611
1612 ret = rcar_dmac_parse_of(&pdev->dev, dmac);
1613 if (ret < 0)
1614 return ret;
1615
1616 /*
1617 * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 from
1618 * being flushed correctly, resulting in memory corruption. DMAC 0 channel 0
1619 * is connected to microTLB 0 on currently supported platforms, so we
1620 * can't use it with the IPMMU. As the IOMMU API operates at the device
1621 * level we can't disable it selectively, so ignore channel 0 for now if
1622 * the device is part of an IOMMU group.
1623 */
1624 if (pdev->dev.iommu_group) {
1625 dmac->n_channels--;
1626 channels_offset = 1;
1627 }
1628
1629 dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
1630 sizeof(*dmac->channels), GFP_KERNEL);
1631 if (!dmac->channels)
1632 return -ENOMEM;
1633
1634 /* Request resources. */
1635 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1636 dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
1637 if (IS_ERR(dmac->iomem))
1638 return PTR_ERR(dmac->iomem);
1639
1640 irq = platform_get_irq_byname(pdev, "error");
1641 if (irq < 0) {
1642 dev_err(&pdev->dev, "no error IRQ specified\n");
1643 return -ENODEV;
1644 }
1645
1646 irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:error",
1647 dev_name(dmac->dev));
1648 if (!irqname)
1649 return -ENOMEM;
1650
1651 ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
1652 irqname, dmac);
1653 if (ret) {
1654 dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
1655 irq, ret);
1656 return ret;
1657 }
1658
1659 /* Enable runtime PM and initialize the device. */
1660 pm_runtime_enable(&pdev->dev);
1661 ret = pm_runtime_get_sync(&pdev->dev);
1662 if (ret < 0) {
1663 dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
1664 return ret;
1665 }
1666
1667 ret = rcar_dmac_init(dmac);
1668 pm_runtime_put(&pdev->dev);
1669
1670 if (ret) {
1671 dev_err(&pdev->dev, "failed to reset device\n");
1672 goto error;
1673 }
1674
1675 /* Initialize the channels. */
1676 INIT_LIST_HEAD(&dmac->engine.channels);
1677
1678 for (i = 0; i < dmac->n_channels; ++i) {
1679 ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
1680 i + channels_offset);
1681 if (ret < 0)
1682 goto error;
1683 }
1684
1685 /* Register the DMAC as a DMA provider for DT. */
1686 ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
1687 NULL);
1688 if (ret < 0)
1689 goto error;
1690
1691 /*
1692 * Register the DMA engine device.
1693 *
1694 * Default transfer size of 32 bytes requires 32-byte alignment.
1695 */
1696 engine = &dmac->engine;
1697 dma_cap_set(DMA_MEMCPY, engine->cap_mask);
1698 dma_cap_set(DMA_SLAVE, engine->cap_mask);
1699
1700 engine->dev = &pdev->dev;
1701 engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);
1702
1703 engine->src_addr_widths = widths;
1704 engine->dst_addr_widths = widths;
1705 engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
1706 engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1707
1708 engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
1709 engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
1710 engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
1711 engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
1712 engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
1713 engine->device_config = rcar_dmac_device_config;
1714 engine->device_terminate_all = rcar_dmac_chan_terminate_all;
1715 engine->device_tx_status = rcar_dmac_tx_status;
1716 engine->device_issue_pending = rcar_dmac_issue_pending;
1717
1718 ret = dma_async_device_register(engine);
1719 if (ret < 0)
1720 goto error;
1721
1722 return 0;
1723
1724error:
1725 of_dma_controller_free(pdev->dev.of_node);
1726 pm_runtime_disable(&pdev->dev);
1727 return ret;
1728}
1729
1730static int rcar_dmac_remove(struct platform_device *pdev)
1731{
1732 struct rcar_dmac *dmac = platform_get_drvdata(pdev);
1733
1734 of_dma_controller_free(pdev->dev.of_node);
1735 dma_async_device_unregister(&dmac->engine);
1736
1737 pm_runtime_disable(&pdev->dev);
1738
1739 return 0;
1740}
1741
1742static void rcar_dmac_shutdown(struct platform_device *pdev)
1743{
1744 struct rcar_dmac *dmac = platform_get_drvdata(pdev);
1745
1746 rcar_dmac_stop(dmac);
1747}
1748
1749static const struct of_device_id rcar_dmac_of_ids[] = {
1750 { .compatible = "renesas,rcar-dmac", },
1751 { /* Sentinel */ }
1752};
1753MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);
1754
1755static struct platform_driver rcar_dmac_driver = {
1756 .driver = {
1757 .pm = &rcar_dmac_pm,
1758 .name = "rcar-dmac",
1759 .of_match_table = rcar_dmac_of_ids,
1760 },
1761 .probe = rcar_dmac_probe,
1762 .remove = rcar_dmac_remove,
1763 .shutdown = rcar_dmac_shutdown,
1764};
1765
1766module_platform_driver(rcar_dmac_driver);
1767
1768MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
1769MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
1770MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
index 20a6f6f2a018..749f26ecd3b3 100644
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -534,6 +534,8 @@ static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
 
 static int hpb_dmae_probe(struct platform_device *pdev)
 {
+	const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
+		DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES;
 	struct hpb_dmae_pdata *pdata = pdev->dev.platform_data;
 	struct hpb_dmae_device *hpbdev;
 	struct dma_device *dma_dev;
@@ -595,6 +597,10 @@ static int hpb_dmae_probe(struct platform_device *pdev)
 
 	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
 	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+	dma_dev->src_addr_widths = widths;
+	dma_dev->dst_addr_widths = widths;
+	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
 
 	hpbdev->shdma_dev.ops = &hpb_dmae_ops;
 	hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 3a2adb131d46..8ee383d339a5 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -729,57 +729,50 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
 	return desc;
 }
 
-static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			 unsigned long arg)
+static int shdma_terminate_all(struct dma_chan *chan)
 {
 	struct shdma_chan *schan = to_shdma_chan(chan);
 	struct shdma_dev *sdev = to_shdma_dev(chan->device);
 	const struct shdma_ops *ops = sdev->ops;
-	struct dma_slave_config *config;
 	unsigned long flags;
-	int ret;
 
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		spin_lock_irqsave(&schan->chan_lock, flags);
-		ops->halt_channel(schan);
+	spin_lock_irqsave(&schan->chan_lock, flags);
+	ops->halt_channel(schan);
 
-		if (ops->get_partial && !list_empty(&schan->ld_queue)) {
-			/* Record partial transfer */
-			struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
-								   struct shdma_desc, node);
-			desc->partial = ops->get_partial(schan, desc);
-		}
+	if (ops->get_partial && !list_empty(&schan->ld_queue)) {
+		/* Record partial transfer */
+		struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
+							   struct shdma_desc, node);
+		desc->partial = ops->get_partial(schan, desc);
+	}
 
-		spin_unlock_irqrestore(&schan->chan_lock, flags);
+	spin_unlock_irqrestore(&schan->chan_lock, flags);
 
-		shdma_chan_ld_cleanup(schan, true);
-		break;
-	case DMA_SLAVE_CONFIG:
-		/*
-		 * So far only .slave_id is used, but the slave drivers are
-		 * encouraged to also set a transfer direction and an address.
-		 */
-		if (!arg)
-			return -EINVAL;
-		/*
-		 * We could lock this, but you shouldn't be configuring the
-		 * channel, while using it...
-		 */
-		config = (struct dma_slave_config *)arg;
-		ret = shdma_setup_slave(schan, config->slave_id,
-					config->direction == DMA_DEV_TO_MEM ?
-					config->src_addr : config->dst_addr);
-		if (ret < 0)
-			return ret;
-		break;
-	default:
-		return -ENXIO;
-	}
+	shdma_chan_ld_cleanup(schan, true);
 
 	return 0;
 }
 
+static int shdma_config(struct dma_chan *chan,
+			struct dma_slave_config *config)
+{
+	struct shdma_chan *schan = to_shdma_chan(chan);
+
+	/*
+	 * So far only .slave_id is used, but the slave drivers are
+	 * encouraged to also set a transfer direction and an address.
+	 */
+	if (!config)
+		return -EINVAL;
+	/*
+	 * We could lock this, but you shouldn't be configuring the
+	 * channel, while using it...
+	 */
+	return shdma_setup_slave(schan, config->slave_id,
+				 config->direction == DMA_DEV_TO_MEM ?
+				 config->src_addr : config->dst_addr);
+}
+
 static void shdma_issue_pending(struct dma_chan *chan)
 {
 	struct shdma_chan *schan = to_shdma_chan(chan);
@@ -1002,7 +995,8 @@ int shdma_init(struct device *dev, struct shdma_dev *sdev,
 	/* Compulsory for DMA_SLAVE fields */
 	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
 	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
-	dma_dev->device_control = shdma_control;
+	dma_dev->device_config = shdma_config;
+	dma_dev->device_terminate_all = shdma_terminate_all;
 
 	dma_dev->dev = dev;
 
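/*
 * Context for the conversion above (illustrative, not part of the patch):
 * with .device_control gone, the old DMA_SLAVE_CONFIG and
 * DMA_TERMINATE_ALL commands are dispatched through dedicated callbacks,
 * roughly as the generic dmaengine wrappers do. A minimal sketch,
 * assuming a valid channel; "example_" names are hypothetical.
 */
static inline int example_config(struct dma_chan *chan,
				 struct dma_slave_config *cfg)
{
	/* was: device_control(chan, DMA_SLAVE_CONFIG, (unsigned long)cfg) */
	return chan->device->device_config ?
	       chan->device->device_config(chan, cfg) : -ENOSYS;
}

static inline int example_terminate(struct dma_chan *chan)
{
	/* was: device_control(chan, DMA_TERMINATE_ALL, 0) */
	return chan->device->device_terminate_all ?
	       chan->device->device_terminate_all(chan) : -ENOSYS;
}
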
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index aec8a84784a4..b2431aa30033 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -588,6 +588,7 @@ static void sh_dmae_shutdown(struct platform_device *pdev)
 	sh_dmae_ctl_stop(shdev);
 }
 
+#ifdef CONFIG_PM
 static int sh_dmae_runtime_suspend(struct device *dev)
 {
 	return 0;
@@ -599,8 +600,9 @@ static int sh_dmae_runtime_resume(struct device *dev)
 
 	return sh_dmae_rst(shdev);
 }
+#endif
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int sh_dmae_suspend(struct device *dev)
 {
 	return 0;
@@ -632,16 +634,12 @@ static int sh_dmae_resume(struct device *dev)
 
 	return 0;
 }
-#else
-#define sh_dmae_suspend NULL
-#define sh_dmae_resume NULL
 #endif
 
 static const struct dev_pm_ops sh_dmae_pm = {
-	.suspend		= sh_dmae_suspend,
-	.resume			= sh_dmae_resume,
-	.runtime_suspend	= sh_dmae_runtime_suspend,
-	.runtime_resume		= sh_dmae_runtime_resume,
+	SET_SYSTEM_SLEEP_PM_OPS(sh_dmae_suspend, sh_dmae_resume)
+	SET_RUNTIME_PM_OPS(sh_dmae_runtime_suspend, sh_dmae_runtime_resume,
+			   NULL)
 };
 
 static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
@@ -684,6 +682,10 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
 
 static int sh_dmae_probe(struct platform_device *pdev)
 {
+	const enum dma_slave_buswidth widths =
+		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
+		DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES |
+		DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES;
 	const struct sh_dmae_pdata *pdata;
 	unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
 	int chan_irq[SH_DMAE_MAX_CHANNELS];
@@ -746,6 +748,11 @@ static int sh_dmae_probe(struct platform_device *pdev)
 		return PTR_ERR(shdev->dmars);
 	}
 
+	dma_dev->src_addr_widths = widths;
+	dma_dev->dst_addr_widths = widths;
+	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
 	if (!pdata->slave_only)
 		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
 	if (pdata->slave && pdata->slave_num)
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 3492a5f91d31..d0086e9f2082 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -281,9 +281,10 @@ static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
 	return cookie;
 }
 
-static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
+static int sirfsoc_dma_slave_config(struct dma_chan *chan,
 	struct dma_slave_config *config)
 {
+	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
 	unsigned long flags;
 
 	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
@@ -297,8 +298,9 @@ static int sirfsoc_dma_slave_config(struct dma_chan *chan,
 	return 0;
 }
 
-static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
+static int sirfsoc_dma_terminate_all(struct dma_chan *chan)
 {
+	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
 	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
 	int cid = schan->chan.chan_id;
 	unsigned long flags;
@@ -327,8 +329,9 @@ static int sirfsoc_dma_terminate_all(struct dma_chan *chan)
 	return 0;
 }
 
-static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
+static int sirfsoc_dma_pause_chan(struct dma_chan *chan)
 {
+	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
 	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
 	int cid = schan->chan.chan_id;
 	unsigned long flags;
@@ -348,8 +351,9 @@ static int sirfsoc_dma_pause_chan(struct dma_chan *chan)
 	return 0;
 }
 
-static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
+static int sirfsoc_dma_resume_chan(struct dma_chan *chan)
 {
+	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
 	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
 	int cid = schan->chan.chan_id;
 	unsigned long flags;
@@ -369,30 +373,6 @@ static int sirfsoc_dma_resume_chan(struct dma_chan *chan)
 	return 0;
 }
 
-static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-	unsigned long arg)
-{
-	struct dma_slave_config *config;
-	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
-
-	switch (cmd) {
-	case DMA_PAUSE:
-		return sirfsoc_dma_pause_chan(schan);
-	case DMA_RESUME:
-		return sirfsoc_dma_resume_chan(schan);
-	case DMA_TERMINATE_ALL:
-		return sirfsoc_dma_terminate_all(schan);
-	case DMA_SLAVE_CONFIG:
-		config = (struct dma_slave_config *)arg;
-		return sirfsoc_dma_slave_config(schan, config);
-
-	default:
-		break;
-	}
-
-	return -ENOSYS;
-}
-
 /* Alloc channel resources */
 static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
 {
@@ -648,18 +628,6 @@ EXPORT_SYMBOL(sirfsoc_dma_filter_id);
 	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
 	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
 
-static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan,
-	struct dma_slave_caps *caps)
-{
-	caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
-	caps->dstn_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
-	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	caps->cmd_pause = true;
-	caps->cmd_terminate = true;
-
-	return 0;
-}
-
 static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
 	struct of_dma *ofdma)
 {
@@ -739,11 +707,16 @@ static int sirfsoc_dma_probe(struct platform_device *op)
 	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
 	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
 	dma->device_issue_pending = sirfsoc_dma_issue_pending;
-	dma->device_control = sirfsoc_dma_control;
+	dma->device_config = sirfsoc_dma_slave_config;
+	dma->device_pause = sirfsoc_dma_pause_chan;
+	dma->device_resume = sirfsoc_dma_resume_chan;
+	dma->device_terminate_all = sirfsoc_dma_terminate_all;
 	dma->device_tx_status = sirfsoc_dma_tx_status;
 	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
 	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
-	dma->device_slave_caps = sirfsoc_dma_device_slave_caps;
+	dma->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
+	dma->dst_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
+	dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 
 	INIT_LIST_HEAD(&dma->channels);
 	dma_cap_set(DMA_SLAVE, dma->cap_mask);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 15d49461c0d2..68aca3334a17 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1429,11 +1429,17 @@ static bool d40_tx_is_linked(struct d40_chan *d40c)
 	return is_link;
 }
 
-static int d40_pause(struct d40_chan *d40c)
+static int d40_pause(struct dma_chan *chan)
 {
+	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
 	int res = 0;
 	unsigned long flags;
 
+	if (d40c->phy_chan == NULL) {
+		chan_err(d40c, "Channel is not allocated!\n");
+		return -EINVAL;
+	}
+
 	if (!d40c->busy)
 		return 0;
 
@@ -1448,11 +1454,17 @@ static int d40_pause(struct dma_chan *chan)
 	return res;
 }
 
-static int d40_resume(struct d40_chan *d40c)
+static int d40_resume(struct dma_chan *chan)
 {
+	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
 	int res = 0;
 	unsigned long flags;
 
+	if (d40c->phy_chan == NULL) {
+		chan_err(d40c, "Channel is not allocated!\n");
+		return -EINVAL;
+	}
+
 	if (!d40c->busy)
 		return 0;
 
@@ -2604,12 +2616,17 @@ static void d40_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&d40c->lock, flags);
 }
 
-static void d40_terminate_all(struct dma_chan *chan)
+static int d40_terminate_all(struct dma_chan *chan)
 {
 	unsigned long flags;
 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
 	int ret;
 
+	if (d40c->phy_chan == NULL) {
+		chan_err(d40c, "Channel is not allocated!\n");
+		return -EINVAL;
+	}
+
 	spin_lock_irqsave(&d40c->lock, flags);
 
 	pm_runtime_get_sync(d40c->base->dev);
@@ -2627,6 +2644,7 @@ static int d40_terminate_all(struct dma_chan *chan)
 	d40c->busy = false;
 
 	spin_unlock_irqrestore(&d40c->lock, flags);
+	return 0;
 }
 
 static int
@@ -2673,6 +2691,11 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 	u32 src_maxburst, dst_maxburst;
 	int ret;
 
+	if (d40c->phy_chan == NULL) {
+		chan_err(d40c, "Channel is not allocated!\n");
+		return -EINVAL;
+	}
+
 	src_addr_width = config->src_addr_width;
 	src_maxburst = config->src_maxburst;
 	dst_addr_width = config->dst_addr_width;
@@ -2781,35 +2804,6 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 	return 0;
 }
 
-static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		       unsigned long arg)
-{
-	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
-
-	if (d40c->phy_chan == NULL) {
-		chan_err(d40c, "Channel is not allocated!\n");
-		return -EINVAL;
-	}
-
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		d40_terminate_all(chan);
-		return 0;
-	case DMA_PAUSE:
-		return d40_pause(d40c);
-	case DMA_RESUME:
-		return d40_resume(d40c);
-	case DMA_SLAVE_CONFIG:
-		return d40_set_runtime_config(chan,
-			(struct dma_slave_config *) arg);
-	default:
-		break;
-	}
-
-	/* Other commands are unimplemented */
-	return -ENXIO;
-}
-
 /* Initialization functions */
 
 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
@@ -2870,7 +2864,10 @@ static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
 	dev->device_free_chan_resources = d40_free_chan_resources;
 	dev->device_issue_pending = d40_issue_pending;
 	dev->device_tx_status = d40_tx_status;
-	dev->device_control = d40_control;
+	dev->device_config = d40_set_runtime_config;
+	dev->device_pause = d40_pause;
+	dev->device_resume = d40_resume;
+	dev->device_terminate_all = d40_terminate_all;
 	dev->dev = base->dev;
 }
 
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 159f1736a16f..7ebcf9bec698 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -355,38 +355,6 @@ static void sun6i_dma_free_desc(struct virt_dma_desc *vd)
 	kfree(txd);
 }
 
-static int sun6i_dma_terminate_all(struct sun6i_vchan *vchan)
-{
-	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
-	struct sun6i_pchan *pchan = vchan->phy;
-	unsigned long flags;
-	LIST_HEAD(head);
-
-	spin_lock(&sdev->lock);
-	list_del_init(&vchan->node);
-	spin_unlock(&sdev->lock);
-
-	spin_lock_irqsave(&vchan->vc.lock, flags);
-
-	vchan_get_all_descriptors(&vchan->vc, &head);
-
-	if (pchan) {
-		writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE);
-		writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE);
-
-		vchan->phy = NULL;
-		pchan->vchan = NULL;
-		pchan->desc = NULL;
-		pchan->done = NULL;
-	}
-
-	spin_unlock_irqrestore(&vchan->vc.lock, flags);
-
-	vchan_dma_desc_free_list(&vchan->vc, &head);
-
-	return 0;
-}
-
 static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
 {
 	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
@@ -675,57 +643,92 @@ err_lli_free:
 	return NULL;
 }
 
-static int sun6i_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			     unsigned long arg)
+static int sun6i_dma_config(struct dma_chan *chan,
+			    struct dma_slave_config *config)
+{
+	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+
+	memcpy(&vchan->cfg, config, sizeof(*config));
+
+	return 0;
+}
+
+static int sun6i_dma_pause(struct dma_chan *chan)
+{
+	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
+	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+	struct sun6i_pchan *pchan = vchan->phy;
+
+	dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc);
+
+	if (pchan) {
+		writel(DMA_CHAN_PAUSE_PAUSE,
+		       pchan->base + DMA_CHAN_PAUSE);
+	} else {
+		spin_lock(&sdev->lock);
+		list_del_init(&vchan->node);
+		spin_unlock(&sdev->lock);
+	}
+
+	return 0;
+}
+
+static int sun6i_dma_resume(struct dma_chan *chan)
 {
 	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
 	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
 	struct sun6i_pchan *pchan = vchan->phy;
 	unsigned long flags;
-	int ret = 0;
 
-	switch (cmd) {
-	case DMA_RESUME:
-		dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);
+	dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);
 
-		spin_lock_irqsave(&vchan->vc.lock, flags);
+	spin_lock_irqsave(&vchan->vc.lock, flags);
 
-		if (pchan) {
-			writel(DMA_CHAN_PAUSE_RESUME,
-			       pchan->base + DMA_CHAN_PAUSE);
-		} else if (!list_empty(&vchan->vc.desc_issued)) {
-			spin_lock(&sdev->lock);
-			list_add_tail(&vchan->node, &sdev->pending);
-			spin_unlock(&sdev->lock);
-		}
+	if (pchan) {
+		writel(DMA_CHAN_PAUSE_RESUME,
+		       pchan->base + DMA_CHAN_PAUSE);
+	} else if (!list_empty(&vchan->vc.desc_issued)) {
+		spin_lock(&sdev->lock);
+		list_add_tail(&vchan->node, &sdev->pending);
+		spin_unlock(&sdev->lock);
+	}
 
-		spin_unlock_irqrestore(&vchan->vc.lock, flags);
-		break;
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
 
-	case DMA_PAUSE:
-		dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc);
+	return 0;
+}
 
-		if (pchan) {
-			writel(DMA_CHAN_PAUSE_PAUSE,
-			       pchan->base + DMA_CHAN_PAUSE);
-		} else {
-			spin_lock(&sdev->lock);
-			list_del_init(&vchan->node);
-			spin_unlock(&sdev->lock);
-		}
-		break;
-
-	case DMA_TERMINATE_ALL:
-		ret = sun6i_dma_terminate_all(vchan);
-		break;
-	case DMA_SLAVE_CONFIG:
-		memcpy(&vchan->cfg, (void *)arg, sizeof(struct dma_slave_config));
-		break;
-	default:
-		ret = -ENXIO;
-		break;
+static int sun6i_dma_terminate_all(struct dma_chan *chan)
+{
+	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
+	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+	struct sun6i_pchan *pchan = vchan->phy;
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock(&sdev->lock);
+	list_del_init(&vchan->node);
+	spin_unlock(&sdev->lock);
+
+	spin_lock_irqsave(&vchan->vc.lock, flags);
+
+	vchan_get_all_descriptors(&vchan->vc, &head);
+
+	if (pchan) {
+		writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE);
+		writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE);
+
+		vchan->phy = NULL;
+		pchan->vchan = NULL;
+		pchan->desc = NULL;
+		pchan->done = NULL;
 	}
-	return ret;
+
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+	vchan_dma_desc_free_list(&vchan->vc, &head);
+
+	return 0;
 }
 
 static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan,
@@ -960,9 +963,20 @@ static int sun6i_dma_probe(struct platform_device *pdev)
 	sdc->slave.device_issue_pending = sun6i_dma_issue_pending;
 	sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg;
 	sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy;
-	sdc->slave.device_control = sun6i_dma_control;
 	sdc->slave.copy_align = 4;
-
+	sdc->slave.device_config = sun6i_dma_config;
+	sdc->slave.device_pause = sun6i_dma_pause;
+	sdc->slave.device_resume = sun6i_dma_resume;
+	sdc->slave.device_terminate_all = sun6i_dma_terminate_all;
+	sdc->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+				     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+				     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	sdc->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+				     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+				     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	sdc->slave.directions = BIT(DMA_DEV_TO_MEM) |
+				BIT(DMA_MEM_TO_DEV);
+	sdc->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	sdc->slave.dev = &pdev->dev;
 
 	sdc->pchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_channels,
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index d8450c3f35f0..eaf585e8286b 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -723,7 +723,7 @@ end:
 	return;
 }
 
-static void tegra_dma_terminate_all(struct dma_chan *dc)
+static int tegra_dma_terminate_all(struct dma_chan *dc)
 {
 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
 	struct tegra_dma_sg_req *sgreq;
@@ -736,7 +736,7 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
 	spin_lock_irqsave(&tdc->lock, flags);
 	if (list_empty(&tdc->pending_sg_req)) {
 		spin_unlock_irqrestore(&tdc->lock, flags);
-		return;
+		return 0;
 	}
 
 	if (!tdc->busy)
@@ -777,6 +777,7 @@ skip_dma_stop:
 		dma_desc->cb_count = 0;
 	}
 	spin_unlock_irqrestore(&tdc->lock, flags);
+	return 0;
 }
 
 static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
@@ -827,25 +828,6 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
 	return ret;
 }
 
-static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd,
-	unsigned long arg)
-{
-	switch (cmd) {
-	case DMA_SLAVE_CONFIG:
-		return tegra_dma_slave_config(dc,
-				(struct dma_slave_config *)arg);
-
-	case DMA_TERMINATE_ALL:
-		tegra_dma_terminate_all(dc);
-		return 0;
-
-	default:
-		break;
-	}
-
-	return -ENXIO;
-}
-
 static inline int get_bus_width(struct tegra_dma_channel *tdc,
 				enum dma_slave_buswidth slave_bw)
 {
@@ -1443,7 +1425,23 @@ static int tegra_dma_probe(struct platform_device *pdev)
 		tegra_dma_free_chan_resources;
 	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
 	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
-	tdma->dma_dev.device_control = tegra_dma_device_control;
+	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	/*
+	 * XXX The hardware appears to support
+	 * DMA_RESIDUE_GRANULARITY_BURST-level reporting, but it's
+	 * only used by this driver during tegra_dma_terminate_all()
+	 */
+	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+	tdma->dma_dev.device_config = tegra_dma_slave_config;
+	tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
 	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
 	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
 
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 2407ccf1a64b..c4c3d93fdd1b 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -561,8 +561,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
 	return &td_desc->txd;
 }
 
-static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		      unsigned long arg)
+static int td_terminate_all(struct dma_chan *chan)
 {
 	struct timb_dma_chan *td_chan =
 		container_of(chan, struct timb_dma_chan, chan);
@@ -570,9 +569,6 @@ static int td_terminate_all(struct dma_chan *chan)
 
 	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
 
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
-
 	/* first the easy part, put the queue into the free list */
 	spin_lock_bh(&td_chan->lock);
 	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
@@ -697,7 +693,7 @@ static int td_probe(struct platform_device *pdev)
 	dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
 	dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
 	td->dma.device_prep_slave_sg = td_prep_slave_sg;
-	td->dma.device_control = td_control;
+	td->dma.device_terminate_all = td_terminate_all;
 
 	td->dma.dev = &pdev->dev;
 
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 0659ec9c4488..8849318b32b7 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -901,17 +901,12 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	return &first->txd;
 }
 
-static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			    unsigned long arg)
+static int txx9dmac_terminate_all(struct dma_chan *chan)
 {
 	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
 	struct txx9dmac_desc *desc, *_desc;
 	LIST_HEAD(list);
 
-	/* Only supports DMA_TERMINATE_ALL */
-	if (cmd != DMA_TERMINATE_ALL)
-		return -EINVAL;
-
 	dev_vdbg(chan2dev(chan), "terminate_all\n");
 	spin_lock_bh(&dc->lock);
 
@@ -1109,7 +1104,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
 	dc->dma.dev = &pdev->dev;
 	dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
 	dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
-	dc->dma.device_control = txx9dmac_control;
+	dc->dma.device_terminate_all = txx9dmac_terminate_all;
 	dc->dma.device_tx_status = txx9dmac_tx_status;
 	dc->dma.device_issue_pending = txx9dmac_issue_pending;
 	if (pdata && pdata->memcpy_chan == ch) {
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 4a3a8f3137b3..bdd2a5dd7220 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -1001,13 +1001,17 @@ error:
1001 * xilinx_vdma_terminate_all - Halt the channel and free descriptors 1001 * xilinx_vdma_terminate_all - Halt the channel and free descriptors
1002 * @chan: Driver specific VDMA Channel pointer 1002 * @chan: Driver specific VDMA Channel pointer
1003 */ 1003 */
1004static void xilinx_vdma_terminate_all(struct xilinx_vdma_chan *chan) 1004static int xilinx_vdma_terminate_all(struct dma_chan *dchan)
1005{ 1005{
1006 struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
1007
1006 /* Halt the DMA engine */ 1008 /* Halt the DMA engine */
1007 xilinx_vdma_halt(chan); 1009 xilinx_vdma_halt(chan);
1008 1010
1009 /* Remove and free all of the descriptors in the lists */ 1011 /* Remove and free all of the descriptors in the lists */
1010 xilinx_vdma_free_descriptors(chan); 1012 xilinx_vdma_free_descriptors(chan);
1013
1014 return 0;
1011} 1015}
1012 1016
1013/** 1017/**
@@ -1075,27 +1079,6 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
 }
 EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
 
-/**
- * xilinx_vdma_device_control - Configure DMA channel of the device
- * @dchan: DMA Channel pointer
- * @cmd: DMA control command
- * @arg: Channel configuration
- *
- * Return: '0' on success and failure value on error
- */
-static int xilinx_vdma_device_control(struct dma_chan *dchan,
-				      enum dma_ctrl_cmd cmd, unsigned long arg)
-{
-	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
-
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
-
-	xilinx_vdma_terminate_all(chan);
-
-	return 0;
-}
-
 /* -----------------------------------------------------------------------------
  * Probe and remove
  */
@@ -1300,7 +1283,7 @@ static int xilinx_vdma_probe(struct platform_device *pdev)
 		xilinx_vdma_free_chan_resources;
 	xdev->common.device_prep_interleaved_dma =
 		xilinx_vdma_dma_prep_interleaved;
-	xdev->common.device_control = xilinx_vdma_device_control;
+	xdev->common.device_terminate_all = xilinx_vdma_terminate_all;
 	xdev->common.device_tx_status = xilinx_vdma_tx_status;
 	xdev->common.device_issue_pending = xilinx_vdma_issue_pending;
 
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 17638d7cf5c2..5907c1718f8c 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2174,14 +2174,20 @@ static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
 
 static inline void decode_bus_error(int node_id, struct mce *m)
 {
-	struct mem_ctl_info *mci = mcis[node_id];
-	struct amd64_pvt *pvt = mci->pvt_info;
+	struct mem_ctl_info *mci;
+	struct amd64_pvt *pvt;
 	u8 ecc_type = (m->status >> 45) & 0x3;
 	u8 xec = XEC(m->status, 0x1f);
 	u16 ec = EC(m->status);
 	u64 sys_addr;
 	struct err_info err;
 
+	mci = edac_mc_find(node_id);
+	if (!mci)
+		return;
+
+	pvt = mci->pvt_info;
+
 	/* Bail out early if this was an 'observed' error */
 	if (PP(ec) == NBSL_PP_OBS)
 		return;
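The amd64_edac hunk above replaces a direct array dereference with a lookup that can fail, so a machine-check event on an unmanaged node is ignored rather than dereferencing NULL. A self-contained sketch of that lookup-then-bail pattern; the names here are illustrative, not the real EDAC API:

/* Resolve the controller through a find function and return early when it
 * is absent, instead of indexing a possibly-NULL array slot. */
#include <stdio.h>
#include <stddef.h>

struct mem_ctl_info { int node; const char *name; };

static struct mem_ctl_info *registry[4];	/* sparse: some slots NULL */

static struct mem_ctl_info *mc_find(int node_id)
{
	if (node_id < 0 || node_id >= 4)
		return NULL;
	return registry[node_id];
}

static void decode_error(int node_id)
{
	struct mem_ctl_info *mci = mc_find(node_id);

	if (!mci)	/* node not managed: ignore, don't crash */
		return;
	printf("error on %s\n", mci->name);
}

int main(void)
{
	static struct mem_ctl_info mc0 = { 0, "mc0" };

	registry[0] = &mc0;
	decode_error(0);	/* handled */
	decode_error(2);	/* silently skipped */
	return 0;
}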
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 63aa6730e89e..1acf57ba4c86 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -2447,7 +2447,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_ibridge_table);
 		type = IVY_BRIDGE;
 		break;
-	case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
+	case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
 		rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_sbridge_table);
 		type = SANDY_BRIDGE;
 		break;
@@ -2460,8 +2460,11 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		type = BROADWELL;
 		break;
 	}
-	if (unlikely(rc < 0))
+	if (unlikely(rc < 0)) {
+		edac_dbg(0, "couldn't get all devices for 0x%x\n", pdev->device);
 		goto fail0;
+	}
+
 	mc = 0;
 
 	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
@@ -2474,7 +2477,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto fail1;
 	}
 
-	sbridge_printk(KERN_INFO, "Driver loaded.\n");
+	sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);
 
 	mutex_unlock(&sbridge_edac_lock);
 	return 0;
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index eb6935c8ad94..d6a09b9cd8cc 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -1246,14 +1246,14 @@ static const u32 model_textual_descriptor[] = {
 
 static struct fw_descriptor vendor_id_descriptor = {
 	.length = ARRAY_SIZE(vendor_textual_descriptor),
-	.immediate = 0x03d00d1e,
+	.immediate = 0x03001f11,
 	.key = 0x81000000,
 	.data = vendor_textual_descriptor,
 };
 
 static struct fw_descriptor model_id_descriptor = {
 	.length = ARRAY_SIZE(model_textual_descriptor),
-	.immediate = 0x17000001,
+	.immediate = 0x17023901,
 	.key = 0x81000000,
 	.data = model_textual_descriptor,
 };
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index aff9018d0658..f51d376d10ba 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -718,11 +718,6 @@ static inline unsigned int ar_next_buffer_index(unsigned int index)
 	return (index + 1) % AR_BUFFERS;
 }
 
-static inline unsigned int ar_prev_buffer_index(unsigned int index)
-{
-	return (index - 1 + AR_BUFFERS) % AR_BUFFERS;
-}
-
 static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
 {
 	return ar_next_buffer_index(ctx->last_buffer_index);
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 64ac8f8f5098..c22606fe3d44 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1463,17 +1463,6 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
 	struct sbp2_command_orb *orb;
 	int generation, retval = SCSI_MLQUEUE_HOST_BUSY;
 
-	/*
-	 * Bidirectional commands are not yet implemented, and unknown
-	 * transfer direction not handled.
-	 */
-	if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
-		dev_err(lu_dev(lu), "cannot handle bidirectional command\n");
-		cmd->result = DID_ERROR << 16;
-		cmd->scsi_done(cmd);
-		return 0;
-	}
-
 	orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
 	if (orb == NULL)
 		return SCSI_MLQUEUE_HOST_BUSY;
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index af5d63c7cc53..2fe195002021 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -75,29 +75,25 @@ efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
 	unsigned long key;
 	u32 desc_version;
 
-	*map_size = 0;
-	*desc_size = 0;
-	key = 0;
-	status = efi_call_early(get_memory_map, map_size, NULL,
-				&key, desc_size, &desc_version);
-	if (status != EFI_BUFFER_TOO_SMALL)
-		return EFI_LOAD_ERROR;
-
+	*map_size = sizeof(*m) * 32;
+again:
 	/*
 	 * Add an additional efi_memory_desc_t because we're doing an
 	 * allocation which may be in a new descriptor region.
 	 */
-	*map_size += *desc_size;
+	*map_size += sizeof(*m);
 	status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
 				*map_size, (void **)&m);
 	if (status != EFI_SUCCESS)
 		goto fail;
 
+	*desc_size = 0;
+	key = 0;
 	status = efi_call_early(get_memory_map, map_size, m,
 				&key, desc_size, &desc_version);
 	if (status == EFI_BUFFER_TOO_SMALL) {
 		efi_call_early(free_pool, m);
-		return EFI_LOAD_ERROR;
+		goto again;
 	}
 
 	if (status != EFI_SUCCESS)
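The EFI stub change above replaces "probe once for the size, then allocate" with "guess a size, allocate, and retry while the firmware reports EFI_BUFFER_TOO_SMALL", which tolerates the memory map growing because of the allocation itself. A toy userspace model of that retry loop, with get_map() standing in for efi_call_early(get_memory_map, ...):

#include <stdio.h>
#include <stdlib.h>

#define TOO_SMALL 1
static const size_t real_size = 4096;	/* what the "firmware" needs */

static int get_map(void *buf, size_t *size)
{
	(void)buf;
	if (*size < real_size) {
		*size = real_size;	/* tell the caller what is needed */
		return TOO_SMALL;
	}
	return 0;
}

int main(void)
{
	size_t size = 32 * 48;	/* initial guess, like sizeof(*m) * 32 */
	void *buf;

again:
	size += 48;		/* headroom for the allocation itself */
	buf = malloc(size);
	if (!buf)
		return 1;
	if (get_map(buf, &size) == TOO_SMALL) {
		free(buf);
		goto again;	/* size was updated by the callee */
	}
	printf("map fits in %zu bytes\n", size);
	free(buf);
	return 0;
}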
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index 472fb5b8779f..9cdbc0c9cb2d 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -26,9 +26,12 @@ struct tps65912_gpio_data {
 	struct gpio_chip gpio_chip;
 };
 
+#define to_tgd(gc) container_of(gc, struct tps65912_gpio_data, gpio_chip)
+
 static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
 {
-	struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+	struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+	struct tps65912 *tps65912 = tps65912_gpio->tps65912;
 	int val;
 
 	val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset);
@@ -42,7 +45,8 @@ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
 static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
 			      int value)
 {
-	struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+	struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+	struct tps65912 *tps65912 = tps65912_gpio->tps65912;
 
 	if (value)
 		tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
@@ -55,7 +59,8 @@ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
 static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
 				int value)
 {
-	struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+	struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+	struct tps65912 *tps65912 = tps65912_gpio->tps65912;
 
 	/* Set the initial value */
 	tps65912_gpio_set(gc, offset, value);
@@ -66,7 +71,8 @@ static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
 
 static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset)
 {
-	struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+	struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+	struct tps65912 *tps65912 = tps65912_gpio->tps65912;
 
 	return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
 				   GPIO_CFG_MASK);
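All four tps65912 hunks fix the same bug: the gpio_chip is embedded in the driver's own tps65912_gpio_data, so container_of() must name that wrapper, not some other structure. A standalone illustration of the pattern with simplified types:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct gpio_chip { const char *label; };

struct tps_gpio_data {
	int priv;
	struct gpio_chip gpio_chip;	/* embedded member */
};

#define to_tgd(gc) container_of(gc, struct tps_gpio_data, gpio_chip)

static int gpio_get(struct gpio_chip *gc)
{
	struct tps_gpio_data *data = to_tgd(gc);	/* recover the owner */

	return data->priv;
}

int main(void)
{
	struct tps_gpio_data data = { .priv = 42, .gpio_chip = { "tps" } };

	printf("%d\n", gpio_get(&data.gpio_chip));	/* prints 42 */
	return 0;
}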
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 8cad8e400b44..4650bf830d6b 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -46,12 +46,13 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
 
 	ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
 	if (ret < 0) {
-		/* We've found the gpio chip, but the translation failed.
-		 * Return true to stop looking and return the translation
-		 * error via out_gpio
+		/* We've found a gpio chip, but the translation failed.
+		 * Store translation error in out_gpio.
+		 * Return false to keep looking, as more than one gpio chip
+		 * could be registered per of-node.
 		 */
 		gg_data->out_gpio = ERR_PTR(ret);
-		return true;
+		return false;
 	}
 
 	gg_data->out_gpio = gpiochip_get_desc(gc, ret);
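The gpiochip iterator contract here is that a match callback returning true stops the scan; recording the translation error and returning false instead lets other chips registered against the same OF node still be tried. A toy version of that contract change:

#include <stdio.h>
#include <stdbool.h>

struct chip { int xlate_ok; int desc; };

/* Returns true to stop the scan, false to keep looking. */
static bool match(struct chip *c, int *out, bool stop_on_error)
{
	if (!c->xlate_ok) {
		*out = -1;		/* remember the failure */
		return stop_on_error;	/* old behaviour: true, new: false */
	}
	*out = c->desc;
	return true;
}

int main(void)
{
	struct chip chips[] = { { 0, 0 }, { 1, 7 } };	/* one OF node, two chips */
	int out = 0;

	for (int i = 0; i < 2; i++)
		if (match(&chips[i], &out, false))	/* new behaviour */
			break;
	printf("result %d\n", out);	/* 7: the second chip matched */
	return 0;
}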
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index b3589d0e39b9..910ff8ab9c9c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -62,12 +62,18 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
 	return KFD_MQD_TYPE_CP;
 }
 
-static inline unsigned int get_first_pipe(struct device_queue_manager *dqm)
+unsigned int get_first_pipe(struct device_queue_manager *dqm)
 {
-	BUG_ON(!dqm);
+	BUG_ON(!dqm || !dqm->dev);
 	return dqm->dev->shared_resources.first_compute_pipe;
 }
 
+unsigned int get_pipes_num(struct device_queue_manager *dqm)
+{
+	BUG_ON(!dqm || !dqm->dev);
+	return dqm->dev->shared_resources.compute_pipe_count;
+}
+
 static inline unsigned int get_pipes_num_cpsch(void)
 {
 	return PIPE_PER_ME_CP_SCHEDULING;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index d64f86cda34f..488f51d19427 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -163,6 +163,8 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
 		struct qcm_process_device *qpd);
 int init_pipelines(struct device_queue_manager *dqm,
 		unsigned int pipes_num, unsigned int first_pipe);
+unsigned int get_first_pipe(struct device_queue_manager *dqm);
+unsigned int get_pipes_num(struct device_queue_manager *dqm);
 
 extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
 {
@@ -175,10 +177,4 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
 	return (pdd->lds_base >> 60) & 0x0E;
 }
 
-extern inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
-{
-	BUG_ON(!dqm || !dqm->dev);
-	return dqm->dev->shared_resources.compute_pipe_count;
-}
-
 #endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index 6b072466e2a6..5469efe0523e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -131,5 +131,5 @@ static int register_process_cik(struct device_queue_manager *dqm,
 
 static int initialize_cpsch_cik(struct device_queue_manager *dqm)
 {
-	return init_pipelines(dqm, get_pipes_num(dqm), 0);
+	return init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
 }
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 0409b907de5d..b3e3068c6ec0 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -153,7 +153,7 @@ static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c,
 		     (adj->crtc_hdisplay - 1) |
 		     ((adj->crtc_vdisplay - 1) << 16));
 
-	cfg = ATMEL_HLCDC_CLKPOL;
+	cfg = 0;
 
 	prate = clk_get_rate(crtc->dc->hlcdc->sys_clk);
 	mode_rate = mode->crtc_clock * 1000;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 7320a6c6613f..c1cb17493e0d 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -311,8 +311,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
 
 	pm_runtime_enable(dev->dev);
 
-	pm_runtime_put_sync(dev->dev);
-
 	ret = atmel_hlcdc_dc_modeset_init(dev);
 	if (ret < 0) {
 		dev_err(dev->dev, "failed to initialize mode setting\n");
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
index 063d2a7b941f..e79bd9ba474b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
@@ -311,7 +311,8 @@ int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
 
 	/* Disable the layer */
 	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
-		     ATMEL_HLCDC_LAYER_RST);
+		     ATMEL_HLCDC_LAYER_RST | ATMEL_HLCDC_LAYER_A2Q |
+		     ATMEL_HLCDC_LAYER_UPDATE);
 
 	/* Clear all pending interrupts */
 	regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 6b00173d1be4..6b6b07ff720b 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2127,7 +2127,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 	DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
 
 	mutex_lock(&dev->mode_config.mutex);
-	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 
 	connector = drm_connector_find(dev, out_resp->connector_id);
 	if (!connector) {
@@ -2157,6 +2156,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 	out_resp->mm_height = connector->display_info.height_mm;
 	out_resp->subpixel = connector->display_info.subpixel_order;
 	out_resp->connection = connector->status;
+
+	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 	encoder = drm_connector_get_encoder(connector);
 	if (encoder)
 		out_resp->encoder_id = encoder->base.id;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f2a825e39646..8727086cf48c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2114,6 +2114,9 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
  * number comparisons on buffer last_read|write_seqno. It also allows an
  * emission time to be associated with the request for tracking how far ahead
  * of the GPU the submission is.
+ *
+ * The requests are reference counted, so upon creation they should have an
+ * initial reference taken using kref_init
  */
 struct drm_i915_gem_request {
 	struct kref ref;
@@ -2137,7 +2140,16 @@ struct drm_i915_gem_request {
 	/** Position in the ringbuffer of the end of the whole request */
 	u32 tail;
 
-	/** Context related to this request */
+	/**
+	 * Context related to this request
+	 * Contexts are refcounted, so when this request is associated with a
+	 * context, we must increment the context's refcount, to guarantee that
+	 * it persists while any request is linked to it. Requests themselves
+	 * are also refcounted, so the request will only be freed when the last
+	 * reference to it is dismissed, and the code in
+	 * i915_gem_request_free() will then decrement the refcount on the
+	 * context.
+	 */
 	struct intel_context *ctx;
 
 	/** Batch buffer related to this request if any */
@@ -2374,6 +2386,7 @@ struct drm_i915_cmd_table {
 			(INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
 #define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
 				 ((INTEL_DEVID(dev) & 0xf) == 0x6 || \
+				 (INTEL_DEVID(dev) & 0xf) == 0xb || \
 				 (INTEL_DEVID(dev) & 0xf) == 0xe))
 #define IS_BDW_GT3(dev)		(IS_BROADWELL(dev) && \
 				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
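The comment blocks added above document the lifetime rule that the i915_gem.c and intel_lrc.c hunks below implement: a request takes a reference on its context at creation, and only the request's own release path drops it. A minimal refcount model of that rule, with kref reduced to a plain counter:

#include <stdio.h>
#include <stdlib.h>

struct ctx { int ref; };
struct request { int ref; struct ctx *ctx; };

static void ctx_put(struct ctx *c)
{
	if (--c->ref == 0)
		printf("context freed\n");
}

static void request_put(struct request *r)
{
	if (--r->ref == 0) {
		ctx_put(r->ctx);	/* release path drops the ctx ref */
		printf("request freed\n");
		free(r);
	}
}

int main(void)
{
	struct ctx c = { .ref = 1 };
	struct request *r = malloc(sizeof(*r));

	r->ref = 1;		/* kref_init at creation */
	r->ctx = &c;
	c.ref++;		/* request pins its context */
	request_put(r);		/* frees the request, then drops the ctx ref */
	ctx_put(&c);		/* original owner's reference */
	return 0;
}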
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c26d36cc4b31..e5daad5f75fb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2659,8 +2659,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 		if (submit_req->ctx != ring->default_context)
 			intel_lr_context_unpin(ring, submit_req->ctx);
 
-		i915_gem_context_unreference(submit_req->ctx);
-		kfree(submit_req);
+		i915_gem_request_unreference(submit_req);
 	}
 
 	/*
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index a2045848bd1a..9c6f93ec886b 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -485,10 +485,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 			stolen_offset, gtt_offset, size);
 
 	/* KISS and expect everything to be page-aligned */
-	BUG_ON(stolen_offset & 4095);
-	BUG_ON(size & 4095);
-
-	if (WARN_ON(size == 0))
+	if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
+	    WARN_ON(stolen_offset & 4095))
 		return NULL;
 
 	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 7a24bd1a51f6..6377b22269ad 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -335,9 +335,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
+	mutex_lock(&dev->struct_mutex);
 	if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
-		drm_gem_object_unreference_unlocked(&obj->base);
-		return -EBUSY;
+		ret = -EBUSY;
+		goto err;
 	}
 
 	if (args->tiling_mode == I915_TILING_NONE) {
@@ -369,7 +370,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		}
 	}
 
-	mutex_lock(&dev->struct_mutex);
 	if (args->tiling_mode != obj->tiling_mode ||
 	    args->stride != obj->stride) {
 		/* We need to rebind the object if its current allocation
@@ -424,6 +424,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		obj->bit_17 = NULL;
 	}
 
+err:
 	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4145d95902f5..ede5bbbd8a08 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1892,6 +1892,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 	u32 iir, gt_iir, pm_iir;
 	irqreturn_t ret = IRQ_NONE;
 
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
 	while (true) {
 		/* Find, clear, then process each source of interrupt */
 
@@ -1936,6 +1939,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 	u32 master_ctl, iir;
 	irqreturn_t ret = IRQ_NONE;
 
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
 	for (;;) {
 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
 		iir = I915_READ(VLV_IIR);
@@ -2208,6 +2214,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
 	irqreturn_t ret = IRQ_NONE;
 
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
 	/* We get interrupts on unclaimed registers, so check for this before we
 	 * do any I915_{READ,WRITE}. */
 	intel_uncore_check_errors(dev);
@@ -2279,6 +2288,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 	enum pipe pipe;
 	u32 aux_mask = GEN8_AUX_CHANNEL_A;
 
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
 	if (IS_GEN9(dev))
 		aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
 			GEN9_AUX_CHANNEL_D;
@@ -3771,6 +3783,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
 	iir = I915_READ16(IIR);
 	if (iir == 0)
 		return IRQ_NONE;
@@ -3951,6 +3966,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 	int pipe, ret = IRQ_NONE;
 
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
 	iir = I915_READ(IIR);
 	do {
 		bool irq_received = (iir & ~flip_mask) != 0;
@@ -4171,6 +4189,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
 	iir = I915_READ(IIR);
 
 	for (;;) {
@@ -4520,6 +4541,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
 {
 	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
 	dev_priv->pm.irqs_enabled = false;
+	synchronize_irq(dev_priv->dev->irq);
 }
 
 /**
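These i915_irq.c hunks all add the same guard: once pm.irqs_enabled is cleared the handlers return IRQ_NONE, and the disable path now calls synchronize_irq() so a handler that already passed the check finishes before power is cut. A rough userspace model of that handshake, with a mutex standing in for the in-flight-handler wait (compile with -lpthread); this simplifies the real synchronization considerably:

#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

static bool irqs_enabled = true;
static pthread_mutex_t handler_lock = PTHREAD_MUTEX_INITIALIZER;

static void irq_handler(void)
{
	if (!irqs_enabled)	/* guard: device may be powered down */
		return;
	pthread_mutex_lock(&handler_lock);
	printf("handling interrupt\n");
	pthread_mutex_unlock(&handler_lock);
}

static void disable_interrupts(void)
{
	irqs_enabled = false;
	/* wait for a handler that already passed the guard to finish:
	 * the moral equivalent of synchronize_irq() */
	pthread_mutex_lock(&handler_lock);
	pthread_mutex_unlock(&handler_lock);
}

int main(void)
{
	irq_handler();
	disable_interrupts();
	irq_handler();		/* returns immediately */
	return 0;
}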
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3d220a67f865..e730789b53b7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2371,13 +2371,19 @@ intel_alloc_plane_obj(struct intel_crtc *crtc,
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_gem_object *obj = NULL;
 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
-	u32 base = plane_config->base;
+	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
+	u32 size_aligned = round_up(plane_config->base + plane_config->size,
+				    PAGE_SIZE);
+
+	size_aligned -= base_aligned;
 
 	if (plane_config->size == 0)
 		return false;
 
-	obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
-							     plane_config->size);
+	obj = i915_gem_object_create_stolen_for_preallocated(dev,
+							     base_aligned,
+							     base_aligned,
+							     size_aligned);
 	if (!obj)
 		return false;
 
@@ -2725,10 +2731,19 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
 	case DRM_FORMAT_XRGB8888:
 		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
 		break;
+	case DRM_FORMAT_ARGB8888:
+		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+		plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+		break;
 	case DRM_FORMAT_XBGR8888:
 		plane_ctl |= PLANE_CTL_ORDER_RGBX;
 		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
 		break;
+	case DRM_FORMAT_ABGR8888:
+		plane_ctl |= PLANE_CTL_ORDER_RGBX;
+		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+		plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+		break;
 	case DRM_FORMAT_XRGB2101010:
 		plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
 		break;
@@ -6627,7 +6642,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
 	aligned_height = intel_fb_align_height(dev, fb->height,
 					       plane_config->tiling);
 
-	plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
+	plane_config->size = fb->pitches[0] * aligned_height;
 
 	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
 		      pipe_name(pipe), plane, fb->width, fb->height,
@@ -7664,7 +7679,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 	aligned_height = intel_fb_align_height(dev, fb->height,
 					       plane_config->tiling);
 
-	plane_config->size = ALIGN(fb->pitches[0] * aligned_height, PAGE_SIZE);
+	plane_config->size = fb->pitches[0] * aligned_height;
 
 	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
 		      pipe_name(pipe), fb->width, fb->height,
@@ -7755,7 +7770,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
 	aligned_height = intel_fb_align_height(dev, fb->height,
 					       plane_config->tiling);
 
-	plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
+	plane_config->size = fb->pitches[0] * aligned_height;
 
 	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
 		      pipe_name(pipe), fb->width, fb->height,
@@ -8698,6 +8713,7 @@ retry:
 		old->release_fb->funcs->destroy(old->release_fb);
 		goto fail;
 	}
+	crtc->primary->crtc = crtc;
 
 	/* let the connector get through one full cycle before testing */
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -12182,9 +12198,6 @@ intel_check_cursor_plane(struct drm_plane *plane,
 		return -ENOMEM;
 	}
 
-	if (fb == crtc->cursor->fb)
-		return 0;
-
 	/* we only need to pin inside GTT if cursor is non-phy */
 	mutex_lock(&dev->struct_mutex);
 	if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
@@ -13096,6 +13109,9 @@ static struct intel_quirk intel_quirks[] = {
 
 	/* HP Chromebook 14 (Celeron 2955U) */
 	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
+
+	/* Dell Chromebook 11 */
+	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0f358c5999ec..e8d3da9f3373 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -503,18 +503,19 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 		 * If there isn't a request associated with this submission,
 		 * create one as a temporary holder.
 		 */
-		WARN(1, "execlist context submission without request");
 		request = kzalloc(sizeof(*request), GFP_KERNEL);
 		if (request == NULL)
 			return -ENOMEM;
 		request->ring = ring;
 		request->ctx = to;
+		kref_init(&request->ref);
+		request->uniq = dev_priv->request_uniq++;
+		i915_gem_context_reference(request->ctx);
 	} else {
+		i915_gem_request_reference(request);
 		WARN_ON(to != request->ctx);
 	}
 	request->tail = tail;
-	i915_gem_request_reference(request);
-	i915_gem_context_reference(request->ctx);
 
 	intel_runtime_pm_get(dev_priv);
 
@@ -731,7 +732,6 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
 		if (ctx_obj && (ctx != ring->default_context))
 			intel_lr_context_unpin(ring, ctx);
 		intel_runtime_pm_put(dev_priv);
-		i915_gem_context_unreference(ctx);
 		list_del(&req->execlist_link);
 		i915_gem_request_unreference(req);
 	}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 5bf825dfaa09..8d74de82456e 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -178,6 +178,13 @@ radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 	switch (msg->request & ~DP_AUX_I2C_MOT) {
 	case DP_AUX_NATIVE_WRITE:
 	case DP_AUX_I2C_WRITE:
+		/* The atom implementation only supports writes with a max payload of
+		 * 12 bytes since it uses 4 bits for the total count (header + payload)
+		 * in the parameter space. The atom interface supports 16 byte
+		 * payloads for reads. The hw itself supports up to 16 bytes of payload.
+		 */
+		if (WARN_ON_ONCE(msg->size > 12))
+			return -E2BIG;
 		/* tx_size needs to be 4 even for bare address packets since the atom
 		 * table needs the info in tx_buf[3].
 		 */
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 7c9df1eac065..7fe7b749e182 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -731,7 +731,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 		dig_connector = radeon_connector->con_priv;
 		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
 		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
-			if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
+			if (radeon_audio != 0 &&
+			    drm_detect_monitor_audio(radeon_connector_edid(connector)) &&
+			    ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
 				return ATOM_ENCODER_MODE_DP_AUDIO;
 			return ATOM_ENCODER_MODE_DP;
 		} else if (radeon_audio != 0) {
@@ -747,7 +749,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 		}
 		break;
 	case DRM_MODE_CONNECTOR_eDP:
-		if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
+		if (radeon_audio != 0 &&
+		    drm_detect_monitor_audio(radeon_connector_edid(connector)) &&
+		    ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
 			return ATOM_ENCODER_MODE_DP_AUDIO;
 		return ATOM_ENCODER_MODE_DP;
 	case DRM_MODE_CONNECTOR_DVIA:
@@ -1720,8 +1724,10 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
 	}
 
 	encoder_mode = atombios_get_encoder_mode(encoder);
-	if (radeon_audio != 0 &&
-	    (encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode)))
+	if (connector && (radeon_audio != 0) &&
+	    ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
+	     (ENCODER_MODE_IS_DP(encoder_mode) &&
+	      drm_detect_monitor_audio(radeon_connector_edid(connector)))))
 		radeon_audio_dpms(encoder, mode);
 }
 
@@ -2136,6 +2142,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 	int encoder_mode;
 
 	radeon_encoder->pixel_clock = adjusted_mode->clock;
@@ -2164,8 +2171,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
 		/* handled in dpms */
 		encoder_mode = atombios_get_encoder_mode(encoder);
-		if (radeon_audio != 0 &&
-		    (encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode)))
+		if (connector && (radeon_audio != 0) &&
+		    ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
+		     (ENCODER_MODE_IS_DP(encoder_mode) &&
+		      drm_detect_monitor_audio(radeon_connector_edid(connector)))))
 			radeon_audio_mode_set(encoder, adjusted_mode);
 		break;
 	case ENCODER_OBJECT_ID_INTERNAL_DDI:
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index e6a4ba236c70..0c993da9c8fb 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3613,6 +3613,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
 	}
 
 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+	WREG32(SRBM_INT_CNTL, 0x1);
+	WREG32(SRBM_INT_ACK, 0x1);
 
 	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
 
@@ -7230,6 +7232,8 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
 	WREG32(CP_ME2_PIPE3_INT_CNTL, 0);
 	/* grbm */
 	WREG32(GRBM_INT_CNTL, 0);
+	/* SRBM */
+	WREG32(SRBM_INT_CNTL, 0);
 	/* vline/vblank, etc. */
 	WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -8046,6 +8050,10 @@ restart_ih:
 			break;
 		}
 		break;
+	case 96:
+		DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+		WREG32(SRBM_INT_ACK, 0x1);
+		break;
 	case 124: /* UVD */
 		DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
 		radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 03003f8a6de6..c648e1996dab 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -482,6 +482,10 @@
 #define	SOFT_RESET_ORB			(1 << 23)
 #define	SOFT_RESET_VCE			(1 << 24)
 
+#define SRBM_READ_ERROR				0xE98
+#define SRBM_INT_CNTL				0xEA0
+#define SRBM_INT_ACK				0xEA8
+
 #define	VM_L2_CNTL				0x1400
 #define		ENABLE_L2_CACHE			(1 << 0)
 #define		ENABLE_L2_FRAGMENT_PROCESSING	(1 << 1)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 78600f534c80..4c0e24b3bb90 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3253,6 +3253,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 	}
 
 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+	WREG32(SRBM_INT_CNTL, 0x1);
+	WREG32(SRBM_INT_ACK, 0x1);
 
 	evergreen_fix_pci_max_read_req_size(rdev);
 
@@ -4324,6 +4326,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
 	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
 	WREG32(DMA_CNTL, tmp);
 	WREG32(GRBM_INT_CNTL, 0);
+	WREG32(SRBM_INT_CNTL, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
 	if (rdev->num_crtc >= 4) {
@@ -5066,6 +5069,10 @@ restart_ih:
 			DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
 			break;
 		}
+	case 96:
+		DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+		WREG32(SRBM_INT_ACK, 0x1);
+		break;
 	case 124: /* UVD */
 		DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
 		radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index ee83d2a88750..a8d1d5240fcb 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1191,6 +1191,10 @@
 #define	SOFT_RESET_REGBB		(1 << 22)
 #define	SOFT_RESET_ORB			(1 << 23)
 
+#define SRBM_READ_ERROR				0xE98
+#define SRBM_INT_CNTL				0xEA0
+#define SRBM_INT_ACK				0xEA8
+
 /* display watermarks */
 #define	DC_LB_MEMORY_SPLIT			0x6b0c
 #define	PRIORITY_A_CNT				0x6b18
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 24242a7f0ac3..dab00812abaa 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -962,6 +962,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 	}
 
 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+	WREG32(SRBM_INT_CNTL, 0x1);
+	WREG32(SRBM_INT_ACK, 0x1);
 
 	evergreen_fix_pci_max_read_req_size(rdev);
 
@@ -1086,12 +1088,12 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 
 	if ((rdev->config.cayman.max_backends_per_se == 1) &&
 	    (rdev->flags & RADEON_IS_IGP)) {
-		if ((disabled_rb_mask & 3) == 1) {
-			/* RB0 disabled, RB1 enabled */
-			tmp = 0x11111111;
-		} else {
+		if ((disabled_rb_mask & 3) == 2) {
 			/* RB1 disabled, RB0 enabled */
 			tmp = 0x00000000;
+		} else {
+			/* RB0 disabled, RB1 enabled */
+			tmp = 0x11111111;
 		}
 	} else {
 		tmp = gb_addr_config & NUM_PIPES_MASK;
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index ad7125486894..6b44580440d0 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -82,6 +82,10 @@
 #define	SOFT_RESET_REGBB		(1 << 22)
 #define	SOFT_RESET_ORB			(1 << 23)
 
+#define SRBM_READ_ERROR				0xE98
+#define SRBM_INT_CNTL				0xEA0
+#define SRBM_INT_ACK				0xEA8
+
 #define	SRBM_STATUS2				0x0EC4
 #define		DMA_BUSY			(1 << 5)
 #define		DMA1_BUSY			(1 << 6)
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 843b65f46ece..fa2154493cf1 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -188,7 +188,7 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		radeon_crtc = to_radeon_crtc(crtc);
 		if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
-			vrefresh = radeon_crtc->hw_mode.vrefresh;
+			vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
 			break;
 		}
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index c830863bc98a..a579ed379f20 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -715,6 +715,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
 	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
 	struct radeon_device *rdev = p->rdev;
 	uint32_t header;
+	int ret = 0, i;
 
 	if (idx >= ib_chunk->length_dw) {
 		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
@@ -743,14 +744,25 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
 		break;
 	default:
 		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto dump_ib;
 	}
 	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
 		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
 			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto dump_ib;
 	}
 	return 0;
+
+dump_ib:
+	for (i = 0; i < ib_chunk->length_dw; i++) {
+		if (i == idx)
+			printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
+		else
+			printk("\t0x%08x\n", radeon_get_ib_value(p, i));
+	}
+	return ret;
 }
 
 /**
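The dump_ib error path added above prints the whole indirect buffer with a marker at the failing word before returning the saved error code; the si.c hunks further down remove the equivalent SI-only dump in favour of this shared one. A self-contained analogue of the pattern:

#include <stdio.h>

static int parse_packet(const unsigned int *ib, int len, int idx)
{
	int ret = 0, i;

	if (ib[idx] == 0xdeadbeef) {	/* pretend this word is invalid */
		ret = -22;		/* -EINVAL */
		goto dump_ib;
	}
	return 0;

dump_ib:
	/* dump the whole buffer, pointing at the offending word */
	for (i = 0; i < len; i++) {
		if (i == idx)
			printf("\t0x%08x <---\n", ib[i]);
		else
			printf("\t0x%08x\n", ib[i]);
	}
	return ret;
}

int main(void)
{
	unsigned int ib[] = { 0x1, 0xdeadbeef, 0x3 };

	return parse_packet(ib, 3, 1) == -22 ? 0 : 1;
}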
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 6b670b0bc47b..3a297037cc17 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -179,9 +179,12 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
 		 (rdev->pdev->subsystem_vendor == 0x1734) &&
 		 (rdev->pdev->subsystem_device == 0x1107))
 		use_bl = false;
+/* Older PPC macs use on-GPU backlight controller */
+#ifndef CONFIG_PPC_PMAC
 	/* disable native backlight control on older asics */
 	else if (rdev->family < CHIP_R600)
 		use_bl = false;
+#endif
 	else
 		use_bl = true;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 9f758d39420d..33cf4108386d 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -852,6 +852,12 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
852 single_display = false; 852 single_display = false;
853 } 853 }
854 854
855 /* 120hz tends to be problematic even if they are under the
856 * vblank limit.
857 */
858 if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
859 single_display = false;
860
855 /* certain older asics have a separare 3D performance state, 861 /* certain older asics have a separare 3D performance state,
856 * so try that first if the user selected performance 862 * so try that first if the user selected performance
857 */ 863 */
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 73107fe9e46f..bcf516a8a2f1 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3162,6 +3162,8 @@ static void si_gpu_init(struct radeon_device *rdev)
 	}
 
 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+	WREG32(SRBM_INT_CNTL, 1);
+	WREG32(SRBM_INT_ACK, 1);
 
 	evergreen_fix_pci_max_read_req_size(rdev);
 
@@ -4699,12 +4701,6 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
 		switch (pkt.type) {
 		case RADEON_PACKET_TYPE0:
 			dev_err(rdev->dev, "Packet0 not allowed!\n");
-			for (i = 0; i < ib->length_dw; i++) {
-				if (i == idx)
-					printk("\t0x%08x <---\n", ib->ptr[i]);
-				else
-					printk("\t0x%08x\n", ib->ptr[i]);
-			}
 			ret = -EINVAL;
 			break;
 		case RADEON_PACKET_TYPE2:
@@ -4736,8 +4732,15 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
 			ret = -EINVAL;
 			break;
 		}
-		if (ret)
+		if (ret) {
+			for (i = 0; i < ib->length_dw; i++) {
+				if (i == idx)
+					printk("\t0x%08x <---\n", ib->ptr[i]);
+				else
+					printk("\t0x%08x\n", ib->ptr[i]);
+			}
 			break;
+		}
 	} while (idx < ib->length_dw);
 
 	return ret;
@@ -5910,6 +5913,7 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
 	tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
 	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
 	WREG32(GRBM_INT_CNTL, 0);
+	WREG32(SRBM_INT_CNTL, 0);
 	if (rdev->num_crtc >= 2) {
 		WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 		WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -6609,6 +6613,10 @@ restart_ih:
 			break;
 		}
 		break;
+	case 96:
+		DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+		WREG32(SRBM_INT_ACK, 0x1);
+		break;
 	case 124: /* UVD */
 		DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
 		radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index cbd91d226f3c..c27118cab16a 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -358,6 +358,10 @@
 #define	CC_SYS_RB_BACKEND_DISABLE		0xe80
 #define	GC_USER_SYS_RB_BACKEND_DISABLE		0xe84
 
+#define SRBM_READ_ERROR				0xE98
+#define SRBM_INT_CNTL				0xEA0
+#define SRBM_INT_ACK				0xEA8
+
 #define	SRBM_STATUS2				0x0EC4
 #define		DMA_BUSY			(1 << 5)
 #define		DMA1_BUSY			(1 << 6)
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 3aaa84ae2681..1a52522f5da7 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -997,8 +997,10 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
997 crtc->state = NULL; 997 crtc->state = NULL;
998 998
999 state = kzalloc(sizeof(*state), GFP_KERNEL); 999 state = kzalloc(sizeof(*state), GFP_KERNEL);
1000 if (state) 1000 if (state) {
1001 crtc->state = &state->base; 1001 crtc->state = &state->base;
1002 crtc->state->crtc = crtc;
1003 }
1002} 1004}
1003 1005
1004static struct drm_crtc_state * 1006static struct drm_crtc_state *
@@ -1012,6 +1014,7 @@ tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
1012 return NULL; 1014 return NULL;
1013 1015
1014 copy->base.mode_changed = false; 1016 copy->base.mode_changed = false;
1017 copy->base.active_changed = false;
1015 copy->base.planes_changed = false; 1018 copy->base.planes_changed = false;
1016 copy->base.event = NULL; 1019 copy->base.event = NULL;
1017 1020
@@ -1227,9 +1230,6 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
1227 /* program display mode */ 1230 /* program display mode */
1228 tegra_dc_set_timings(dc, mode); 1231 tegra_dc_set_timings(dc, mode);
1229 1232
1230 if (dc->soc->supports_border_color)
1231 tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
1232
1233 /* interlacing isn't supported yet, so disable it */ 1233 /* interlacing isn't supported yet, so disable it */
1234 if (dc->soc->supports_interlacing) { 1234 if (dc->soc->supports_interlacing) {
1235 value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL); 1235 value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL);
@@ -1252,42 +1252,7 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
1252 1252
1253static void tegra_crtc_prepare(struct drm_crtc *crtc) 1253static void tegra_crtc_prepare(struct drm_crtc *crtc)
1254{ 1254{
1255 struct tegra_dc *dc = to_tegra_dc(crtc);
1256 unsigned int syncpt;
1257 unsigned long value;
1258
1259 drm_crtc_vblank_off(crtc); 1255 drm_crtc_vblank_off(crtc);
1260
1261 if (dc->pipe)
1262 syncpt = SYNCPT_VBLANK1;
1263 else
1264 syncpt = SYNCPT_VBLANK0;
1265
1266 /* initialize display controller */
1267 tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
1268 tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
1269
1270 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
1271 tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
1272
1273 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
1274 WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
1275 tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
1276
1277 /* initialize timer */
1278 value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
1279 WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
1280 tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
1281
1282 value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
1283 WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
1284 tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
1285
1286 value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
1287 tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
1288
1289 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
1290 tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
1291} 1256}
1292 1257
1293static void tegra_crtc_commit(struct drm_crtc *crtc) 1258static void tegra_crtc_commit(struct drm_crtc *crtc)
@@ -1664,6 +1629,8 @@ static int tegra_dc_init(struct host1x_client *client)
1664 struct tegra_drm *tegra = drm->dev_private; 1629 struct tegra_drm *tegra = drm->dev_private;
1665 struct drm_plane *primary = NULL; 1630 struct drm_plane *primary = NULL;
1666 struct drm_plane *cursor = NULL; 1631 struct drm_plane *cursor = NULL;
1632 unsigned int syncpt;
1633 u32 value;
1667 int err; 1634 int err;
1668 1635
1669 if (tegra->domain) { 1636 if (tegra->domain) {
@@ -1730,6 +1697,40 @@ static int tegra_dc_init(struct host1x_client *client)
1730 goto cleanup; 1697 goto cleanup;
1731 } 1698 }
1732 1699
1700 /* initialize display controller */
1701 if (dc->pipe)
1702 syncpt = SYNCPT_VBLANK1;
1703 else
1704 syncpt = SYNCPT_VBLANK0;
1705
1706 tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
1707 tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
1708
1709 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
1710 tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
1711
1712 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
1713 WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
1714 tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
1715
1716 /* initialize timer */
1717 value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
1718 WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
1719 tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
1720
1721 value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
1722 WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
1723 tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
1724
1725 value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
1726 tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
1727
1728 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
1729 tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
1730
1731 if (dc->soc->supports_border_color)
1732 tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
1733
1733 return 0; 1734 return 0;
1734 1735
1735cleanup: 1736cleanup:
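The tegra/dc.c changes above make two related corrections: the ->reset() hook now sets the state's backpointer to its CRTC (the atomic helpers dereference state->crtc, so leaving it NULL breaks the first modeset), and the one-time controller setup moves out of the per-modeset prepare hook into tegra_dc_init(), where it runs exactly once at registration. A minimal sketch of the reset shape, assuming a subclassed state struct with illustrative names:

struct example_crtc_state {
	struct drm_crtc_state base;
	/* driver-private fields would follow */
};

static void example_crtc_reset(struct drm_crtc *crtc)
{
	struct example_crtc_state *state;

	kfree(crtc->state);	/* assumption: freeing the subclass directly is safe here */
	crtc->state = NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state) {
		crtc->state = &state->base;
		crtc->state->crtc = crtc;	/* the backpointer this patch adds */
	}
}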
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 7e06657ae58b..7eaaee74a039 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -851,6 +851,14 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
851 h_back_porch = mode->htotal - mode->hsync_end; 851 h_back_porch = mode->htotal - mode->hsync_end;
852 h_front_porch = mode->hsync_start - mode->hdisplay; 852 h_front_porch = mode->hsync_start - mode->hdisplay;
853 853
854 err = clk_set_rate(hdmi->clk, pclk);
855 if (err < 0) {
856 dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n",
857 err);
858 }
859
860 DRM_DEBUG_KMS("HDMI clock rate: %lu Hz\n", clk_get_rate(hdmi->clk));
861
854 /* power up sequence */ 862 /* power up sequence */
855 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0); 863 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0);
856 value &= ~SOR_PLL_PDBG; 864 value &= ~SOR_PLL_PDBG;
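The hdmi.c hunk above programs the pixel clock before the power-up sequence rather than assuming it is already correct. clk_set_rate() can fail or round to the nearest supported rate, so the error is reported and the achieved rate is read back for the log. A minimal sketch of that idiom, with generic clk and device handles:

static void example_set_pixel_clock(struct clk *clk, struct device *dev,
				    unsigned long pclk)
{
	int err = clk_set_rate(clk, pclk);

	if (err < 0)
		dev_err(dev, "failed to set clock rate: %d\n", err);

	/* log what the clock actually runs at after rounding */
	dev_dbg(dev, "clock now runs at %lu Hz\n", clk_get_rate(clk));
}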
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index db4fb6e1cc5b..7c669c328c4c 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1872,6 +1872,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1872 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) }, 1872 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
1873 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) }, 1873 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
1874 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) }, 1874 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) },
1875 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K) },
1875 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) }, 1876 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
1876 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) }, 1877 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
1877 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) }, 1878 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
@@ -1926,6 +1927,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1926#endif 1927#endif
1927#if IS_ENABLED(CONFIG_HID_SAITEK) 1928#if IS_ENABLED(CONFIG_HID_SAITEK)
1928 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) }, 1929 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
1930 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD) },
1929 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) }, 1931 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) },
1930 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) }, 1932 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
1931 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) }, 1933 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 46edb4d3ed28..204312bfab2c 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -654,6 +654,7 @@
654#define USB_DEVICE_ID_MS_LK6K 0x00f9 654#define USB_DEVICE_ID_MS_LK6K 0x00f9
655#define USB_DEVICE_ID_MS_PRESENTER_8K_BT 0x0701 655#define USB_DEVICE_ID_MS_PRESENTER_8K_BT 0x0701
656#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713 656#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713
657#define USB_DEVICE_ID_MS_NE7K 0x071d
657#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730 658#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730
658#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c 659#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c
659#define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799 660#define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799
@@ -802,6 +803,7 @@
802#define USB_VENDOR_ID_SAITEK 0x06a3 803#define USB_VENDOR_ID_SAITEK 0x06a3
803#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17 804#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
804#define USB_DEVICE_ID_SAITEK_PS1000 0x0621 805#define USB_DEVICE_ID_SAITEK_PS1000 0x0621
806#define USB_DEVICE_ID_SAITEK_RAT7_OLD 0x0ccb
805#define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7 807#define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7
806#define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0 808#define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
807 809
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index fbaea6eb882e..af935eb198c9 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -264,6 +264,8 @@ static const struct hid_device_id ms_devices[] = {
264 .driver_data = MS_ERGONOMY }, 264 .driver_data = MS_ERGONOMY },
265 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP), 265 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP),
266 .driver_data = MS_ERGONOMY }, 266 .driver_data = MS_ERGONOMY },
267 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K),
268 .driver_data = MS_ERGONOMY },
267 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K), 269 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K),
268 .driver_data = MS_ERGONOMY | MS_RDESC }, 270 .driver_data = MS_ERGONOMY | MS_RDESC },
269 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB), 271 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB),
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
index 5632c54eadf0..a014f21275d8 100644
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -177,6 +177,8 @@ static int saitek_event(struct hid_device *hdev, struct hid_field *field,
177static const struct hid_device_id saitek_devices[] = { 177static const struct hid_device_id saitek_devices[] = {
178 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000), 178 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000),
179 .driver_data = SAITEK_FIX_PS1000 }, 179 .driver_data = SAITEK_FIX_PS1000 },
180 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD),
181 .driver_data = SAITEK_RELEASE_MODE_RAT7 },
180 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7), 182 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
181 .driver_data = SAITEK_RELEASE_MODE_RAT7 }, 183 .driver_data = SAITEK_RELEASE_MODE_RAT7 },
182 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9), 184 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
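The NE7K and RAT7_OLD additions above show the usual three-step recipe for a new HID device: name the product ID in hid-ids.h, list it in hid_have_special_driver[] in hid-core.c so hid-generic leaves it alone, and add it to the specific driver's match table with the appropriate driver_data. A self-contained sketch of such a table, with hypothetical IDs and quirk value:

#include <linux/hid.h>

#define USB_VENDOR_ID_EXAMPLE	0xabcd	/* hypothetical vendor */
#define USB_DEVICE_ID_EXAMPLE	0x1234	/* hypothetical product */
#define EXAMPLE_QUIRK		0x01	/* hypothetical driver_data flag */

static const struct hid_device_id example_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_EXAMPLE, USB_DEVICE_ID_EXAMPLE),
		.driver_data = EXAMPLE_QUIRK },
	{ }	/* zeroed sentinel terminates the table */
};
MODULE_DEVICE_TABLE(hid, example_devices);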
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 6a58b6c723aa..e54ce1097e2c 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -135,8 +135,9 @@ static struct hid_sensor_hub_callbacks *sensor_hub_get_callback(
135{ 135{
136 struct hid_sensor_hub_callbacks_list *callback; 136 struct hid_sensor_hub_callbacks_list *callback;
137 struct sensor_hub_data *pdata = hid_get_drvdata(hdev); 137 struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
138 unsigned long flags;
138 139
139 spin_lock(&pdata->dyn_callback_lock); 140 spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
140 list_for_each_entry(callback, &pdata->dyn_callback_list, list) 141 list_for_each_entry(callback, &pdata->dyn_callback_list, list)
141 if (callback->usage_id == usage_id && 142 if (callback->usage_id == usage_id &&
142 (collection_index >= 143 (collection_index >=
@@ -145,10 +146,11 @@ static struct hid_sensor_hub_callbacks *sensor_hub_get_callback(
145 callback->hsdev->end_collection_index)) { 146 callback->hsdev->end_collection_index)) {
146 *priv = callback->priv; 147 *priv = callback->priv;
147 *hsdev = callback->hsdev; 148 *hsdev = callback->hsdev;
148 spin_unlock(&pdata->dyn_callback_lock); 149 spin_unlock_irqrestore(&pdata->dyn_callback_lock,
150 flags);
149 return callback->usage_callback; 151 return callback->usage_callback;
150 } 152 }
151 spin_unlock(&pdata->dyn_callback_lock); 153 spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
152 154
153 return NULL; 155 return NULL;
154} 156}
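The sensor-hub change above is the classic spin_lock to spin_lock_irqsave conversion: once a lock can also be taken from interrupt context, the process-context side must disable local interrupts while holding it, or it can deadlock against its own IRQ handler; the flags argument preserves and restores the previous interrupt state. A minimal sketch of the pattern with illustrative types, mirroring the unlock-inside-the-loop shape of the hunk:

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_item {
	int id;
	struct list_head node;
};

struct example_ctx {
	spinlock_t lock;
	struct list_head list;
};

static struct example_item *example_lookup(struct example_ctx *ctx, int id)
{
	struct example_item *it;
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	list_for_each_entry(it, &ctx->list, node) {
		if (it->id == id) {
			spin_unlock_irqrestore(&ctx->lock, flags);
			return it;
		}
	}
	spin_unlock_irqrestore(&ctx->lock, flags);

	return NULL;
}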
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 31e9d2561106..1896c019e302 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -804,7 +804,7 @@ union sixaxis_output_report_01 {
804#define DS4_REPORT_0x81_SIZE 7 804#define DS4_REPORT_0x81_SIZE 7
805#define SIXAXIS_REPORT_0xF2_SIZE 18 805#define SIXAXIS_REPORT_0xF2_SIZE 18
806 806
807static spinlock_t sony_dev_list_lock; 807static DEFINE_SPINLOCK(sony_dev_list_lock);
808static LIST_HEAD(sony_device_list); 808static LIST_HEAD(sony_device_list);
809static DEFINE_IDA(sony_device_id_allocator); 809static DEFINE_IDA(sony_device_id_allocator);
810 810
@@ -1944,6 +1944,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
1944 return -ENOMEM; 1944 return -ENOMEM;
1945 } 1945 }
1946 1946
1947 spin_lock_init(&sc->lock);
1948
1947 sc->quirks = quirks; 1949 sc->quirks = quirks;
1948 hid_set_drvdata(hdev, sc); 1950 hid_set_drvdata(hdev, sc);
1949 sc->hdev = hdev; 1951 sc->hdev = hdev;
@@ -2147,8 +2149,8 @@ static void __exit sony_exit(void)
2147{ 2149{
2148 dbg_hid("Sony:%s\n", __func__); 2150 dbg_hid("Sony:%s\n", __func__);
2149 2151
2150 ida_destroy(&sony_device_id_allocator);
2151 hid_unregister_driver(&sony_driver); 2152 hid_unregister_driver(&sony_driver);
2153 ida_destroy(&sony_device_id_allocator);
2152} 2154}
2153module_init(sony_init); 2155module_init(sony_init);
2154module_exit(sony_exit); 2156module_exit(sony_exit);
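The three hid-sony fixes above follow standard lifetime rules: a file-scope spinlock must be statically initialized with DEFINE_SPINLOCK() (a bare spinlock_t is unusable), a per-device lock must pass through spin_lock_init() before anything can take it, and an IDA may only be destroyed after hid_unregister_driver() guarantees no code path can still allocate from it. Sketch, with illustrative names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_list_lock);	/* initialized at compile time */

struct example_ctx {
	spinlock_t lock;
};

static void example_probe_init(struct example_ctx *ctx)
{
	spin_lock_init(&ctx->lock);	/* before anything can contend on it */
}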
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index d43e967e7533..36053f33d6d9 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -370,7 +370,10 @@ static int i2c_hid_hwreset(struct i2c_client *client)
370static void i2c_hid_get_input(struct i2c_hid *ihid) 370static void i2c_hid_get_input(struct i2c_hid *ihid)
371{ 371{
372 int ret, ret_size; 372 int ret, ret_size;
373 int size = ihid->bufsize; 373 int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
374
375 if (size > ihid->bufsize)
376 size = ihid->bufsize;
374 377
375 ret = i2c_master_recv(ihid->client, ihid->inbuf, size); 378 ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
376 if (ret != size) { 379 if (ret != size) {
@@ -785,7 +788,7 @@ static int i2c_hid_init_irq(struct i2c_client *client)
785 dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq); 788 dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
786 789
787 ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq, 790 ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
788 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 791 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
789 client->name, ihid); 792 client->name, ihid);
790 if (ret < 0) { 793 if (ret < 0) {
791 dev_warn(&client->dev, 794 dev_warn(&client->dev,
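The first i2c-hid hunk above clamps each input read to the wMaxInputLength the device advertises in its HID descriptor instead of always requesting the full buffer; the second switches the IRQ to level-triggered, matching the HID-over-I2C convention that the device holds the interrupt line asserted while reports are pending. A minimal sketch of the clamp, with an illustrative helper name:

static int example_input_size(__le16 wMaxInputLength, int bufsize)
{
	int size = le16_to_cpu(wMaxInputLength);	/* descriptor fields are little-endian */

	if (size > bufsize)
		size = bufsize;		/* never read past our own buffer */

	return size;
}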
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 1a6507999a65..046351cf17f3 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -778,6 +778,11 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
778 input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[4])); 778 input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[4]));
779 input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[6])); 779 input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[6]));
780 input_report_abs(input, ABS_Z, be16_to_cpup((__be16 *)&data[8])); 780 input_report_abs(input, ABS_Z, be16_to_cpup((__be16 *)&data[8]));
781 if ((data[2] & 0x07) | data[4] | data[5] | data[6] | data[7] | data[8] | data[9]) {
782 input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
783 } else {
784 input_report_abs(input, ABS_MISC, 0);
785 }
781 } else if (features->type == CINTIQ_HYBRID) { 786 } else if (features->type == CINTIQ_HYBRID) {
782 /* 787 /*
783 * Do not send hardware buttons under Android. They 788 * Do not send hardware buttons under Android. They
@@ -2725,9 +2730,9 @@ static const struct wacom_features wacom_features_0xF6 =
2725 .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10, 2730 .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10,
2726 .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; 2731 .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
2727static const struct wacom_features wacom_features_0x32A = 2732static const struct wacom_features wacom_features_0x32A =
2728 { "Wacom Cintiq 27QHD", 119740, 67520, 2047, 2733 { "Wacom Cintiq 27QHD", 119740, 67520, 2047, 63,
2729 63, WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 2734 WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
2730 WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; 2735 WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
2731static const struct wacom_features wacom_features_0x32B = 2736static const struct wacom_features wacom_features_0x32B =
2732 { "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63, 2737 { "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63,
2733 WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 2738 WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
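The wacom_intuos_irq() hunk above reports the pad tool only when some pad-related payload byte is non-zero, so userspace can tell an idle pad from an active one. A condensed sketch of that check (PAD_DEVICE_ID is the driver's existing pad tool ID; the helper name is illustrative):

static void example_report_pad(struct input_dev *input, const u8 *data)
{
	bool active = (data[2] & 0x07) || data[4] || data[5] ||
		      data[6] || data[7] || data[8] || data[9];

	/* advertise the pad tool only while it carries data */
	input_report_abs(input, ABS_MISC, active ? PAD_DEVICE_ID : 0);
}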
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index d931cbbed240..110fade9cb74 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1606,7 +1606,7 @@ config SENSORS_W83795
1606 will be called w83795. 1606 will be called w83795.
1607 1607
1608config SENSORS_W83795_FANCTRL 1608config SENSORS_W83795_FANCTRL
1609 boolean "Include automatic fan control support (DANGEROUS)" 1609 bool "Include automatic fan control support (DANGEROUS)"
1610 depends on SENSORS_W83795 1610 depends on SENSORS_W83795
1611 default n 1611 default n
1612 help 1612 help
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index bce4e9ff21bf..6c99ee7bafa3 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -147,6 +147,9 @@ static int ads7828_probe(struct i2c_client *client,
147 &ads2830_regmap_config); 147 &ads2830_regmap_config);
148 } 148 }
149 149
150 if (IS_ERR(data->regmap))
151 return PTR_ERR(data->regmap);
152
150 data->cmd_byte = ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3; 153 data->cmd_byte = ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3;
151 if (!diff_input) 154 if (!diff_input)
152 data->cmd_byte |= ADS7828_CMD_SD_SE; 155 data->cmd_byte |= ADS7828_CMD_SD_SE;
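The ads7828 fix above is the standard regmap-init check: devm_regmap_init_i2c() returns an ERR_PTR()-encoded errno on failure, never NULL, so the result must be tested with IS_ERR() before first use. A minimal sketch with an illustrative helper:

static int example_init_regmap(struct i2c_client *client,
			       const struct regmap_config *cfg,
			       struct regmap **out)
{
	struct regmap *map = devm_regmap_init_i2c(client, cfg);

	if (IS_ERR(map))
		return PTR_ERR(map);	/* propagate the encoded errno */

	*out = map;
	return 0;
}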
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index a674cd83a4e2..9f7dbd189c97 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -57,7 +57,7 @@ config SENSORS_LTC2978
57 be called ltc2978. 57 be called ltc2978.
58 58
59config SENSORS_LTC2978_REGULATOR 59config SENSORS_LTC2978_REGULATOR
60 boolean "Regulator support for LTC2978 and compatibles" 60 bool "Regulator support for LTC2978 and compatibles"
61 depends on SENSORS_LTC2978 && REGULATOR 61 depends on SENSORS_LTC2978 && REGULATOR
62 help 62 help
63 If you say yes here you get regulator support for Linear 63 If you say yes here you get regulator support for Linear
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 8c9e619f3026..78fbee463628 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -35,11 +35,11 @@ config ACPI_I2C_OPREGION
35if I2C 35if I2C
36 36
37config I2C_BOARDINFO 37config I2C_BOARDINFO
38 boolean 38 bool
39 default y 39 default y
40 40
41config I2C_COMPAT 41config I2C_COMPAT
42 boolean "Enable compatibility bits for old user-space" 42 bool "Enable compatibility bits for old user-space"
43 default y 43 default y
44 help 44 help
45 Say Y here if you intend to run lm-sensors 3.1.1 or older, or any 45 Say Y here if you intend to run lm-sensors 3.1.1 or older, or any
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index ab838d9e28b6..22da9c2ffa22 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -79,7 +79,7 @@ config I2C_AMD8111
79 79
80config I2C_HIX5HD2 80config I2C_HIX5HD2
81 tristate "Hix5hd2 high-speed I2C driver" 81 tristate "Hix5hd2 high-speed I2C driver"
82 depends on ARCH_HIX5HD2 82 depends on ARCH_HIX5HD2 || COMPILE_TEST
83 help 83 help
84 Say Y here to include support for high-speed I2C controller in the 84 Say Y here to include support for high-speed I2C controller in the
85 Hisilicon based hix5hd2 SoCs. 85 Hisilicon based hix5hd2 SoCs.
@@ -372,6 +372,16 @@ config I2C_BCM2835
372 This support is also available as a module. If so, the module 372 This support is also available as a module. If so, the module
373 will be called i2c-bcm2835. 373 will be called i2c-bcm2835.
374 374
375config I2C_BCM_IPROC
376 tristate "Broadcom iProc I2C controller"
377 depends on ARCH_BCM_IPROC || COMPILE_TEST
378 default ARCH_BCM_IPROC
379 help
380 If you say yes to this option, support will be included for the
381 Broadcom iProc I2C controller.
382
383 If you don't know what to do here, say N.
384
375config I2C_BCM_KONA 385config I2C_BCM_KONA
376 tristate "BCM Kona I2C adapter" 386 tristate "BCM Kona I2C adapter"
377 depends on ARCH_BCM_MOBILE 387 depends on ARCH_BCM_MOBILE
@@ -465,6 +475,16 @@ config I2C_DESIGNWARE_PCI
465 This driver can also be built as a module. If so, the module 475 This driver can also be built as a module. If so, the module
466 will be called i2c-designware-pci. 476 will be called i2c-designware-pci.
467 477
478config I2C_DESIGNWARE_BAYTRAIL
479 bool "Intel Baytrail I2C semaphore support"
480 depends on I2C_DESIGNWARE_PLATFORM && IOSF_MBI=y && ACPI
481 help
482 This driver enables managed host access to the PMIC I2C bus on select
483 Intel BayTrail platforms using the X-Powers AXP288 PMIC. It allows
484 the host to request uninterrupted access to the PMIC's I2C bus from
485 the platform firmware controlling it. You should say Y if running on
486 a BayTrail system using the AXP288.
487
468config I2C_EFM32 488config I2C_EFM32
469 tristate "EFM32 I2C controller" 489 tristate "EFM32 I2C controller"
470 depends on ARCH_EFM32 || COMPILE_TEST 490 depends on ARCH_EFM32 || COMPILE_TEST
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 56388f658d2f..3638feb6677e 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_I2C_AT91) += i2c-at91.o
33obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o 33obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
34obj-$(CONFIG_I2C_AXXIA) += i2c-axxia.o 34obj-$(CONFIG_I2C_AXXIA) += i2c-axxia.o
35obj-$(CONFIG_I2C_BCM2835) += i2c-bcm2835.o 35obj-$(CONFIG_I2C_BCM2835) += i2c-bcm2835.o
36obj-$(CONFIG_I2C_BCM_IPROC) += i2c-bcm-iproc.o
36obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o 37obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
37obj-$(CONFIG_I2C_CADENCE) += i2c-cadence.o 38obj-$(CONFIG_I2C_CADENCE) += i2c-cadence.o
38obj-$(CONFIG_I2C_CBUS_GPIO) += i2c-cbus-gpio.o 39obj-$(CONFIG_I2C_CBUS_GPIO) += i2c-cbus-gpio.o
@@ -41,6 +42,7 @@ obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o
41obj-$(CONFIG_I2C_DESIGNWARE_CORE) += i2c-designware-core.o 42obj-$(CONFIG_I2C_DESIGNWARE_CORE) += i2c-designware-core.o
42obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM) += i2c-designware-platform.o 43obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM) += i2c-designware-platform.o
43i2c-designware-platform-objs := i2c-designware-platdrv.o 44i2c-designware-platform-objs := i2c-designware-platdrv.o
45i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-baytrail.o
44obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o 46obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o
45i2c-designware-pci-objs := i2c-designware-pcidrv.o 47i2c-designware-pci-objs := i2c-designware-pcidrv.o
46obj-$(CONFIG_I2C_EFM32) += i2c-efm32.o 48obj-$(CONFIG_I2C_EFM32) += i2c-efm32.o
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
new file mode 100644
index 000000000000..d3c89157b337
--- /dev/null
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -0,0 +1,461 @@
1/*
2 * Copyright (C) 2014 Broadcom Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation version 2.
7 *
8 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
9 * kind, whether express or implied; without even the implied warranty
10 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/delay.h>
15#include <linux/i2c.h>
16#include <linux/interrupt.h>
17#include <linux/io.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/platform_device.h>
21#include <linux/slab.h>
22
23#define CFG_OFFSET 0x00
24#define CFG_RESET_SHIFT 31
25#define CFG_EN_SHIFT 30
26#define CFG_M_RETRY_CNT_SHIFT 16
27#define CFG_M_RETRY_CNT_MASK 0x0f
28
29#define TIM_CFG_OFFSET 0x04
30#define TIM_CFG_MODE_400_SHIFT 31
31
32#define M_FIFO_CTRL_OFFSET 0x0c
33#define M_FIFO_RX_FLUSH_SHIFT 31
34#define M_FIFO_TX_FLUSH_SHIFT 30
35#define M_FIFO_RX_CNT_SHIFT 16
36#define M_FIFO_RX_CNT_MASK 0x7f
37#define M_FIFO_RX_THLD_SHIFT 8
38#define M_FIFO_RX_THLD_MASK 0x3f
39
40#define M_CMD_OFFSET 0x30
41#define M_CMD_START_BUSY_SHIFT 31
42#define M_CMD_STATUS_SHIFT 25
43#define M_CMD_STATUS_MASK 0x07
44#define M_CMD_STATUS_SUCCESS 0x0
45#define M_CMD_STATUS_LOST_ARB 0x1
46#define M_CMD_STATUS_NACK_ADDR 0x2
47#define M_CMD_STATUS_NACK_DATA 0x3
48#define M_CMD_STATUS_TIMEOUT 0x4
49#define M_CMD_PROTOCOL_SHIFT 9
50#define M_CMD_PROTOCOL_MASK 0xf
51#define M_CMD_PROTOCOL_BLK_WR 0x7
52#define M_CMD_PROTOCOL_BLK_RD 0x8
53#define M_CMD_PEC_SHIFT 8
54#define M_CMD_RD_CNT_SHIFT 0
55#define M_CMD_RD_CNT_MASK 0xff
56
57#define IE_OFFSET 0x38
58#define IE_M_RX_FIFO_FULL_SHIFT 31
59#define IE_M_RX_THLD_SHIFT 30
60#define IE_M_START_BUSY_SHIFT 28
61
62#define IS_OFFSET 0x3c
63#define IS_M_RX_FIFO_FULL_SHIFT 31
64#define IS_M_RX_THLD_SHIFT 30
65#define IS_M_START_BUSY_SHIFT 28
66
67#define M_TX_OFFSET 0x40
68#define M_TX_WR_STATUS_SHIFT 31
69#define M_TX_DATA_SHIFT 0
70#define M_TX_DATA_MASK 0xff
71
72#define M_RX_OFFSET 0x44
73#define M_RX_STATUS_SHIFT 30
74#define M_RX_STATUS_MASK 0x03
75#define M_RX_PEC_ERR_SHIFT 29
76#define M_RX_DATA_SHIFT 0
77#define M_RX_DATA_MASK 0xff
78
 79#define I2C_TIMEOUT_MSEC 100
80#define M_TX_RX_FIFO_SIZE 64
81
82enum bus_speed_index {
83 I2C_SPD_100K = 0,
84 I2C_SPD_400K,
85};
86
87struct bcm_iproc_i2c_dev {
88 struct device *device;
89 int irq;
90
91 void __iomem *base;
92
93 struct i2c_adapter adapter;
94
95 struct completion done;
96 int xfer_is_done;
97};
98
99/*
100 * Can be expanded in the future if more interrupt status bits are utilized
101 */
102#define ISR_MASK (1 << IS_M_START_BUSY_SHIFT)
103
104static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
105{
106 struct bcm_iproc_i2c_dev *iproc_i2c = data;
107 u32 status = readl(iproc_i2c->base + IS_OFFSET);
108
109 status &= ISR_MASK;
110
111 if (!status)
112 return IRQ_NONE;
113
114 writel(status, iproc_i2c->base + IS_OFFSET);
115 iproc_i2c->xfer_is_done = 1;
116 complete_all(&iproc_i2c->done);
117
118 return IRQ_HANDLED;
119}
120
121static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
122 struct i2c_msg *msg)
123{
124 u32 val;
125
126 val = readl(iproc_i2c->base + M_CMD_OFFSET);
127 val = (val >> M_CMD_STATUS_SHIFT) & M_CMD_STATUS_MASK;
128
129 switch (val) {
130 case M_CMD_STATUS_SUCCESS:
131 return 0;
132
133 case M_CMD_STATUS_LOST_ARB:
134 dev_dbg(iproc_i2c->device, "lost bus arbitration\n");
135 return -EAGAIN;
136
137 case M_CMD_STATUS_NACK_ADDR:
138 dev_dbg(iproc_i2c->device, "NAK addr:0x%02x\n", msg->addr);
139 return -ENXIO;
140
141 case M_CMD_STATUS_NACK_DATA:
142 dev_dbg(iproc_i2c->device, "NAK data\n");
143 return -ENXIO;
144
145 case M_CMD_STATUS_TIMEOUT:
146 dev_dbg(iproc_i2c->device, "bus timeout\n");
147 return -ETIMEDOUT;
148
149 default:
150 dev_dbg(iproc_i2c->device, "unknown error code=%d\n", val);
151 return -EIO;
152 }
153}
154
155static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
156 struct i2c_msg *msg)
157{
158 int ret, i;
159 u8 addr;
160 u32 val;
 161 unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT_MSEC);
162
163 /* need to reserve one byte in the FIFO for the slave address */
164 if (msg->len > M_TX_RX_FIFO_SIZE - 1) {
165 dev_err(iproc_i2c->device,
166 "only support data length up to %u bytes\n",
167 M_TX_RX_FIFO_SIZE - 1);
168 return -EOPNOTSUPP;
169 }
170
171 /* check if bus is busy */
172 if (!!(readl(iproc_i2c->base + M_CMD_OFFSET) &
173 BIT(M_CMD_START_BUSY_SHIFT))) {
174 dev_warn(iproc_i2c->device, "bus is busy\n");
175 return -EBUSY;
176 }
177
178 /* format and load slave address into the TX FIFO */
179 addr = msg->addr << 1 | (msg->flags & I2C_M_RD ? 1 : 0);
180 writel(addr, iproc_i2c->base + M_TX_OFFSET);
181
182 /* for a write transaction, load data into the TX FIFO */
183 if (!(msg->flags & I2C_M_RD)) {
184 for (i = 0; i < msg->len; i++) {
185 val = msg->buf[i];
186
187 /* mark the last byte */
188 if (i == msg->len - 1)
189 val |= 1 << M_TX_WR_STATUS_SHIFT;
190
191 writel(val, iproc_i2c->base + M_TX_OFFSET);
192 }
193 }
194
195 /* mark as incomplete before starting the transaction */
196 reinit_completion(&iproc_i2c->done);
197 iproc_i2c->xfer_is_done = 0;
198
199 /*
200 * Enable the "start busy" interrupt, which will be triggered after the
 201 * transaction is done, i.e., when the internal start_busy bit transitions
202 * from 1 to 0.
203 */
204 writel(1 << IE_M_START_BUSY_SHIFT, iproc_i2c->base + IE_OFFSET);
205
206 /*
207 * Now we can activate the transfer. For a read operation, specify the
208 * number of bytes to read
209 */
210 val = 1 << M_CMD_START_BUSY_SHIFT;
211 if (msg->flags & I2C_M_RD) {
212 val |= (M_CMD_PROTOCOL_BLK_RD << M_CMD_PROTOCOL_SHIFT) |
213 (msg->len << M_CMD_RD_CNT_SHIFT);
214 } else {
215 val |= (M_CMD_PROTOCOL_BLK_WR << M_CMD_PROTOCOL_SHIFT);
216 }
217 writel(val, iproc_i2c->base + M_CMD_OFFSET);
218
219 time_left = wait_for_completion_timeout(&iproc_i2c->done, time_left);
220
221 /* disable all interrupts */
222 writel(0, iproc_i2c->base + IE_OFFSET);
223 /* read it back to flush the write */
224 readl(iproc_i2c->base + IE_OFFSET);
225
226 /* make sure the interrupt handler isn't running */
227 synchronize_irq(iproc_i2c->irq);
228
229 if (!time_left && !iproc_i2c->xfer_is_done) {
230 dev_err(iproc_i2c->device, "transaction timed out\n");
231
232 /* flush FIFOs */
233 val = (1 << M_FIFO_RX_FLUSH_SHIFT) |
234 (1 << M_FIFO_TX_FLUSH_SHIFT);
235 writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
236 return -ETIMEDOUT;
237 }
238
239 ret = bcm_iproc_i2c_check_status(iproc_i2c, msg);
240 if (ret) {
241 /* flush both TX/RX FIFOs */
242 val = (1 << M_FIFO_RX_FLUSH_SHIFT) |
243 (1 << M_FIFO_TX_FLUSH_SHIFT);
244 writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
245 return ret;
246 }
247
248 /*
249 * For a read operation, we now need to load the data from FIFO
250 * into the memory buffer
251 */
252 if (msg->flags & I2C_M_RD) {
253 for (i = 0; i < msg->len; i++) {
254 msg->buf[i] = (readl(iproc_i2c->base + M_RX_OFFSET) >>
255 M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
256 }
257 }
258
259 return 0;
260}
261
262static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
263 struct i2c_msg msgs[], int num)
264{
265 struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(adapter);
266 int ret, i;
267
268 /* go through all messages */
269 for (i = 0; i < num; i++) {
270 ret = bcm_iproc_i2c_xfer_single_msg(iproc_i2c, &msgs[i]);
271 if (ret) {
272 dev_dbg(iproc_i2c->device, "xfer failed\n");
273 return ret;
274 }
275 }
276
277 return num;
278}
279
280static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
281{
282 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
283}
284
285static const struct i2c_algorithm bcm_iproc_algo = {
286 .master_xfer = bcm_iproc_i2c_xfer,
287 .functionality = bcm_iproc_i2c_functionality,
288};
289
290static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
291{
292 unsigned int bus_speed;
293 u32 val;
294 int ret = of_property_read_u32(iproc_i2c->device->of_node,
295 "clock-frequency", &bus_speed);
296 if (ret < 0) {
297 dev_info(iproc_i2c->device,
298 "unable to interpret clock-frequency DT property\n");
299 bus_speed = 100000;
300 }
301
302 if (bus_speed < 100000) {
303 dev_err(iproc_i2c->device, "%d Hz bus speed not supported\n",
304 bus_speed);
305 dev_err(iproc_i2c->device,
306 "valid speeds are 100khz and 400khz\n");
307 return -EINVAL;
308 } else if (bus_speed < 400000) {
309 bus_speed = 100000;
310 } else {
311 bus_speed = 400000;
312 }
313
314 val = readl(iproc_i2c->base + TIM_CFG_OFFSET);
315 val &= ~(1 << TIM_CFG_MODE_400_SHIFT);
316 val |= (bus_speed == 400000) << TIM_CFG_MODE_400_SHIFT;
317 writel(val, iproc_i2c->base + TIM_CFG_OFFSET);
318
319 dev_info(iproc_i2c->device, "bus set to %u Hz\n", bus_speed);
320
321 return 0;
322}
323
324static int bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
325{
326 u32 val;
327
328 /* put controller in reset */
329 val = readl(iproc_i2c->base + CFG_OFFSET);
330 val |= 1 << CFG_RESET_SHIFT;
331 val &= ~(1 << CFG_EN_SHIFT);
332 writel(val, iproc_i2c->base + CFG_OFFSET);
333
334 /* wait 100 usec per spec */
335 udelay(100);
336
337 /* bring controller out of reset */
338 val &= ~(1 << CFG_RESET_SHIFT);
339 writel(val, iproc_i2c->base + CFG_OFFSET);
340
341 /* flush TX/RX FIFOs and set RX FIFO threshold to zero */
342 val = (1 << M_FIFO_RX_FLUSH_SHIFT) | (1 << M_FIFO_TX_FLUSH_SHIFT);
343 writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
344
345 /* disable all interrupts */
346 writel(0, iproc_i2c->base + IE_OFFSET);
347
348 /* clear all pending interrupts */
349 writel(0xffffffff, iproc_i2c->base + IS_OFFSET);
350
351 return 0;
352}
353
354static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
355 bool enable)
356{
357 u32 val;
358
359 val = readl(iproc_i2c->base + CFG_OFFSET);
360 if (enable)
361 val |= BIT(CFG_EN_SHIFT);
362 else
363 val &= ~BIT(CFG_EN_SHIFT);
364 writel(val, iproc_i2c->base + CFG_OFFSET);
365}
366
367static int bcm_iproc_i2c_probe(struct platform_device *pdev)
368{
369 int irq, ret = 0;
370 struct bcm_iproc_i2c_dev *iproc_i2c;
371 struct i2c_adapter *adap;
372 struct resource *res;
373
374 iproc_i2c = devm_kzalloc(&pdev->dev, sizeof(*iproc_i2c),
375 GFP_KERNEL);
376 if (!iproc_i2c)
377 return -ENOMEM;
378
379 platform_set_drvdata(pdev, iproc_i2c);
380 iproc_i2c->device = &pdev->dev;
381 init_completion(&iproc_i2c->done);
382
383 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
384 iproc_i2c->base = devm_ioremap_resource(iproc_i2c->device, res);
385 if (IS_ERR(iproc_i2c->base))
386 return PTR_ERR(iproc_i2c->base);
387
388 ret = bcm_iproc_i2c_init(iproc_i2c);
389 if (ret)
390 return ret;
391
392 ret = bcm_iproc_i2c_cfg_speed(iproc_i2c);
393 if (ret)
394 return ret;
395
396 irq = platform_get_irq(pdev, 0);
397 if (irq <= 0) {
398 dev_err(iproc_i2c->device, "no irq resource\n");
399 return irq;
400 }
401 iproc_i2c->irq = irq;
402
403 ret = devm_request_irq(iproc_i2c->device, irq, bcm_iproc_i2c_isr, 0,
404 pdev->name, iproc_i2c);
405 if (ret < 0) {
406 dev_err(iproc_i2c->device, "unable to request irq %i\n", irq);
407 return ret;
408 }
409
410 bcm_iproc_i2c_enable_disable(iproc_i2c, true);
411
412 adap = &iproc_i2c->adapter;
413 i2c_set_adapdata(adap, iproc_i2c);
414 strlcpy(adap->name, "Broadcom iProc I2C adapter", sizeof(adap->name));
415 adap->algo = &bcm_iproc_algo;
416 adap->dev.parent = &pdev->dev;
417 adap->dev.of_node = pdev->dev.of_node;
418
419 ret = i2c_add_adapter(adap);
420 if (ret) {
421 dev_err(iproc_i2c->device, "failed to add adapter\n");
422 return ret;
423 }
424
425 return 0;
426}
427
428static int bcm_iproc_i2c_remove(struct platform_device *pdev)
429{
430 struct bcm_iproc_i2c_dev *iproc_i2c = platform_get_drvdata(pdev);
431
432 /* make sure there's no pending interrupt when we remove the adapter */
433 writel(0, iproc_i2c->base + IE_OFFSET);
434 readl(iproc_i2c->base + IE_OFFSET);
435 synchronize_irq(iproc_i2c->irq);
436
437 i2c_del_adapter(&iproc_i2c->adapter);
438 bcm_iproc_i2c_enable_disable(iproc_i2c, false);
439
440 return 0;
441}
442
443static const struct of_device_id bcm_iproc_i2c_of_match[] = {
444 { .compatible = "brcm,iproc-i2c" },
445 { /* sentinel */ }
446};
447MODULE_DEVICE_TABLE(of, bcm_iproc_i2c_of_match);
448
449static struct platform_driver bcm_iproc_i2c_driver = {
450 .driver = {
451 .name = "bcm-iproc-i2c",
452 .of_match_table = bcm_iproc_i2c_of_match,
453 },
454 .probe = bcm_iproc_i2c_probe,
455 .remove = bcm_iproc_i2c_remove,
456};
457module_platform_driver(bcm_iproc_i2c_driver);
458
459MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
460MODULE_DESCRIPTION("Broadcom iProc I2C Driver");
461MODULE_LICENSE("GPL v2");
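From a client driver's point of view, the adapter registered above is used through the normal i2c_transfer() interface; note that bcm_iproc_i2c_xfer() issues each message as its own transaction, and that each message may carry at most M_TX_RX_FIFO_SIZE - 1 bytes because one FIFO slot is reserved for the address byte. A hypothetical client-side sketch:

#include <linux/i2c.h>

static int example_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msgs[] = {
		{ .addr = client->addr, .flags = 0, .len = 1, .buf = &reg },
		{ .addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = val },
	};
	int ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));

	/* i2c_transfer() returns the number of messages completed */
	return ret == ARRAY_SIZE(msgs) ? 0 : (ret < 0 ? ret : -EIO);
}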
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 626f74ecd4be..7d7a14cdadfb 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -128,6 +128,7 @@
128 * @suspended: Flag holding the device's PM status 128 * @suspended: Flag holding the device's PM status
129 * @send_count: Number of bytes still expected to send 129 * @send_count: Number of bytes still expected to send
130 * @recv_count: Number of bytes still expected to receive 130 * @recv_count: Number of bytes still expected to receive
131 * @curr_recv_count: Number of bytes to be received in current transfer
131 * @irq: IRQ number 132 * @irq: IRQ number
132 * @input_clk: Input clock to I2C controller 133 * @input_clk: Input clock to I2C controller
133 * @i2c_clk: Maximum I2C clock speed 134 * @i2c_clk: Maximum I2C clock speed
@@ -146,6 +147,7 @@ struct cdns_i2c {
146 u8 suspended; 147 u8 suspended;
147 unsigned int send_count; 148 unsigned int send_count;
148 unsigned int recv_count; 149 unsigned int recv_count;
150 unsigned int curr_recv_count;
149 int irq; 151 int irq;
150 unsigned long input_clk; 152 unsigned long input_clk;
151 unsigned int i2c_clk; 153 unsigned int i2c_clk;
@@ -182,14 +184,15 @@ static void cdns_i2c_clear_bus_hold(struct cdns_i2c *id)
182 */ 184 */
183static irqreturn_t cdns_i2c_isr(int irq, void *ptr) 185static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
184{ 186{
185 unsigned int isr_status, avail_bytes; 187 unsigned int isr_status, avail_bytes, updatetx;
186 unsigned int bytes_to_recv, bytes_to_send; 188 unsigned int bytes_to_send;
187 struct cdns_i2c *id = ptr; 189 struct cdns_i2c *id = ptr;
188 /* Signal completion only after everything is updated */ 190 /* Signal completion only after everything is updated */
189 int done_flag = 0; 191 int done_flag = 0;
190 irqreturn_t status = IRQ_NONE; 192 irqreturn_t status = IRQ_NONE;
191 193
192 isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET); 194 isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
195 cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
193 196
194 /* Handling nack and arbitration lost interrupt */ 197 /* Handling nack and arbitration lost interrupt */
195 if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_ARB_LOST)) { 198 if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_ARB_LOST)) {
@@ -197,89 +200,112 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
197 status = IRQ_HANDLED; 200 status = IRQ_HANDLED;
198 } 201 }
199 202
200 /* Handling Data interrupt */ 203 /*
201 if ((isr_status & CDNS_I2C_IXR_DATA) && 204 * Check if transfer size register needs to be updated again for a
202 (id->recv_count >= CDNS_I2C_DATA_INTR_DEPTH)) { 205 * large data receive operation.
203 /* Always read data interrupt threshold bytes */ 206 */
204 bytes_to_recv = CDNS_I2C_DATA_INTR_DEPTH; 207 updatetx = 0;
205 id->recv_count -= CDNS_I2C_DATA_INTR_DEPTH; 208 if (id->recv_count > id->curr_recv_count)
206 avail_bytes = cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET); 209 updatetx = 1;
207 210
208 /* 211 /* When receiving, handle data interrupt and completion interrupt */
209 * if the tranfer size register value is zero, then 212 if (id->p_recv_buf &&
210 * check for the remaining bytes and update the 213 ((isr_status & CDNS_I2C_IXR_COMP) ||
211 * transfer size register. 214 (isr_status & CDNS_I2C_IXR_DATA))) {
212 */ 215 /* Read data if receive data valid is set */
213 if (!avail_bytes) { 216 while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
214 if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) 217 CDNS_I2C_SR_RXDV) {
215 cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE, 218 /*
216 CDNS_I2C_XFER_SIZE_OFFSET); 219 * Clear hold bit that was set for FIFO control if
217 else 220 * RX data left is less than FIFO depth, unless
218 cdns_i2c_writereg(id->recv_count, 221 * repeated start is selected.
219 CDNS_I2C_XFER_SIZE_OFFSET); 222 */
220 } 223 if ((id->recv_count < CDNS_I2C_FIFO_DEPTH) &&
224 !id->bus_hold_flag)
225 cdns_i2c_clear_bus_hold(id);
221 226
222 /* Process the data received */
223 while (bytes_to_recv--)
224 *(id->p_recv_buf)++ = 227 *(id->p_recv_buf)++ =
225 cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET); 228 cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
229 id->recv_count--;
230 id->curr_recv_count--;
226 231
227 if (!id->bus_hold_flag && 232 if (updatetx &&
228 (id->recv_count <= CDNS_I2C_FIFO_DEPTH)) 233 (id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1))
229 cdns_i2c_clear_bus_hold(id); 234 break;
235 }
230 236
231 status = IRQ_HANDLED; 237 /*
232 } 238 * The controller sends NACK to the slave when transfer size
239 * register reaches zero without considering the HOLD bit.
240 * This workaround is implemented for large data transfers to
241 * maintain transfer size non-zero while performing a large
242 * receive operation.
243 */
244 if (updatetx &&
245 (id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1)) {
246 /* wait while fifo is full */
247 while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) !=
248 (id->curr_recv_count - CDNS_I2C_FIFO_DEPTH))
249 ;
233 250
234 /* Handling Transfer Complete interrupt */
235 if (isr_status & CDNS_I2C_IXR_COMP) {
236 if (!id->p_recv_buf) {
237 /* 251 /*
238 * If the device is sending data If there is further 252 * Check number of bytes to be received against maximum
239 * data to be sent. Calculate the available space 253 * transfer size and update register accordingly.
240 * in FIFO and fill the FIFO with that many bytes.
241 */ 254 */
242 if (id->send_count) { 255 if (((int)(id->recv_count) - CDNS_I2C_FIFO_DEPTH) >
243 avail_bytes = CDNS_I2C_FIFO_DEPTH - 256 CDNS_I2C_TRANSFER_SIZE) {
244 cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET); 257 cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
245 if (id->send_count > avail_bytes) 258 CDNS_I2C_XFER_SIZE_OFFSET);
246 bytes_to_send = avail_bytes; 259 id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE +
247 else 260 CDNS_I2C_FIFO_DEPTH;
248 bytes_to_send = id->send_count;
249
250 while (bytes_to_send--) {
251 cdns_i2c_writereg(
252 (*(id->p_send_buf)++),
253 CDNS_I2C_DATA_OFFSET);
254 id->send_count--;
255 }
256 } else { 261 } else {
257 /* 262 cdns_i2c_writereg(id->recv_count -
258 * Signal the completion of transaction and 263 CDNS_I2C_FIFO_DEPTH,
259 * clear the hold bus bit if there are no 264 CDNS_I2C_XFER_SIZE_OFFSET);
260 * further messages to be processed. 265 id->curr_recv_count = id->recv_count;
261 */
262 done_flag = 1;
263 } 266 }
264 if (!id->send_count && !id->bus_hold_flag) 267 }
265 cdns_i2c_clear_bus_hold(id); 268
266 } else { 269 /* Clear hold (if not repeated start) and signal completion */
270 if ((isr_status & CDNS_I2C_IXR_COMP) && !id->recv_count) {
267 if (!id->bus_hold_flag) 271 if (!id->bus_hold_flag)
268 cdns_i2c_clear_bus_hold(id); 272 cdns_i2c_clear_bus_hold(id);
273 done_flag = 1;
274 }
275
276 status = IRQ_HANDLED;
277 }
278
279 /* When sending, handle transfer complete interrupt */
280 if ((isr_status & CDNS_I2C_IXR_COMP) && !id->p_recv_buf) {
281 /*
282 * If there is more data to be sent, calculate the
283 * space available in FIFO and fill with that many bytes.
284 */
285 if (id->send_count) {
286 avail_bytes = CDNS_I2C_FIFO_DEPTH -
287 cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
288 if (id->send_count > avail_bytes)
289 bytes_to_send = avail_bytes;
290 else
291 bytes_to_send = id->send_count;
292
293 while (bytes_to_send--) {
294 cdns_i2c_writereg(
295 (*(id->p_send_buf)++),
296 CDNS_I2C_DATA_OFFSET);
297 id->send_count--;
298 }
299 } else {
269 /* 300 /*
270 * If the device is receiving data, then signal 301 * Signal the completion of transaction and
271 * the completion of transaction and read the data 302 * clear the hold bus bit if there are no
272 * present in the FIFO. Signal the completion of 303 * further messages to be processed.
273 * transaction.
274 */ 304 */
275 while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
276 CDNS_I2C_SR_RXDV) {
277 *(id->p_recv_buf)++ =
278 cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
279 id->recv_count--;
280 }
281 done_flag = 1; 305 done_flag = 1;
282 } 306 }
307 if (!id->send_count && !id->bus_hold_flag)
308 cdns_i2c_clear_bus_hold(id);
283 309
284 status = IRQ_HANDLED; 310 status = IRQ_HANDLED;
285 } 311 }
@@ -289,8 +315,6 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
289 if (id->err_status) 315 if (id->err_status)
290 status = IRQ_HANDLED; 316 status = IRQ_HANDLED;
291 317
292 cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
293
294 if (done_flag) 318 if (done_flag)
295 complete(&id->xfer_done); 319 complete(&id->xfer_done);
296 320
@@ -316,6 +340,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
316 if (id->p_msg->flags & I2C_M_RECV_LEN) 340 if (id->p_msg->flags & I2C_M_RECV_LEN)
317 id->recv_count = I2C_SMBUS_BLOCK_MAX + 1; 341 id->recv_count = I2C_SMBUS_BLOCK_MAX + 1;
318 342
343 id->curr_recv_count = id->recv_count;
344
319 /* 345 /*
320 * Check for the message size against FIFO depth and set the 346 * Check for the message size against FIFO depth and set the
321 * 'hold bus' bit if it is greater than FIFO depth. 347 * 'hold bus' bit if it is greater than FIFO depth.
@@ -335,11 +361,14 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
335 * receive if it is less than transfer size and transfer size if 361 * receive if it is less than transfer size and transfer size if
336 * it is more. Enable the interrupts. 362 * it is more. Enable the interrupts.
337 */ 363 */
338 if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) 364 if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) {
339 cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE, 365 cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
340 CDNS_I2C_XFER_SIZE_OFFSET); 366 CDNS_I2C_XFER_SIZE_OFFSET);
341 else 367 id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE;
368 } else {
342 cdns_i2c_writereg(id->recv_count, CDNS_I2C_XFER_SIZE_OFFSET); 369 cdns_i2c_writereg(id->recv_count, CDNS_I2C_XFER_SIZE_OFFSET);
370 }
371
343 /* Clear the bus hold flag if bytes to receive is less than FIFO size */ 372 /* Clear the bus hold flag if bytes to receive is less than FIFO size */
344 if (!id->bus_hold_flag && 373 if (!id->bus_hold_flag &&
345 ((id->p_msg->flags & I2C_M_RECV_LEN) != I2C_M_RECV_LEN) && 374 ((id->p_msg->flags & I2C_M_RECV_LEN) != I2C_M_RECV_LEN) &&
@@ -516,6 +545,20 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
516 * processed with a repeated start. 545 * processed with a repeated start.
517 */ 546 */
518 if (num > 1) { 547 if (num > 1) {
548 /*
 549 * This controller does not give a completion interrupt after a
 550 * master receive message if the HOLD bit is set (repeated start),
 551 * resulting in a SW timeout. Hence, if a receive message is
552 * followed by any other message, an error is returned
553 * indicating that this sequence is not supported.
554 */
555 for (count = 0; count < num - 1; count++) {
556 if (msgs[count].flags & I2C_M_RD) {
557 dev_warn(adap->dev.parent,
558 "Can't do repeated start after a receive message\n");
559 return -EOPNOTSUPP;
560 }
561 }
519 id->bus_hold_flag = 1; 562 id->bus_hold_flag = 1;
520 reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); 563 reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
521 reg |= CDNS_I2C_CR_HOLD; 564 reg |= CDNS_I2C_CR_HOLD;
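The core of the cadence change above is the large-receive workaround: the controller NACKs the slave as soon as its transfer-size register reaches zero, so for receives larger than the register can express, the ISR drains the FIFO down to a watermark (FIFO depth + 1 bytes left in the current window) and re-arms the register before it can hit zero. A condensed sketch of just the re-arm decision, using the driver's names for illustration:

static void example_rearm_xfer_size(struct cdns_i2c *id)
{
	if (id->recv_count <= id->curr_recv_count)
		return;	/* everything left already fits the current window */

	if (id->curr_recv_count != CDNS_I2C_FIFO_DEPTH + 1)
		return;	/* not yet at the re-arm watermark */

	if ((int)id->recv_count - CDNS_I2C_FIFO_DEPTH > CDNS_I2C_TRANSFER_SIZE) {
		/* open another maximum-size window */
		cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
				  CDNS_I2C_XFER_SIZE_OFFSET);
		id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE +
				      CDNS_I2C_FIFO_DEPTH;
	} else {
		/* final window covers whatever remains */
		cdns_i2c_writereg(id->recv_count - CDNS_I2C_FIFO_DEPTH,
				  CDNS_I2C_XFER_SIZE_OFFSET);
		id->curr_recv_count = id->recv_count;
	}
}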
diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c
new file mode 100644
index 000000000000..5f1ff4cc5c34
--- /dev/null
+++ b/drivers/i2c/busses/i2c-designware-baytrail.c
@@ -0,0 +1,160 @@
1/*
 2 * Intel BayTrail PMIC I2C bus semaphore implementation
3 * Copyright (c) 2014, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14#include <linux/module.h>
15#include <linux/delay.h>
16#include <linux/device.h>
17#include <linux/acpi.h>
18#include <linux/i2c.h>
19#include <linux/interrupt.h>
20#include <asm/iosf_mbi.h>
21#include "i2c-designware-core.h"
22
23#define SEMAPHORE_TIMEOUT 100
24#define PUNIT_SEMAPHORE 0x7
25
26static unsigned long acquired;
27
28static int get_sem(struct device *dev, u32 *sem)
29{
30 u32 reg_val;
31 int ret;
32
33 ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, PUNIT_SEMAPHORE,
34 &reg_val);
35 if (ret) {
36 dev_err(dev, "iosf failed to read punit semaphore\n");
37 return ret;
38 }
39
40 *sem = reg_val & 0x1;
41
42 return 0;
43}
44
45static void reset_semaphore(struct device *dev)
46{
47 u32 data;
48
49 if (iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
50 PUNIT_SEMAPHORE, &data)) {
51 dev_err(dev, "iosf failed to reset punit semaphore during read\n");
52 return;
53 }
54
55 data = data & 0xfffffffe;
56 if (iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
57 PUNIT_SEMAPHORE, data))
58 dev_err(dev, "iosf failed to reset punit semaphore during write\n");
59}
60
61int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
62{
63 u32 sem = 0;
64 int ret;
65 unsigned long start, end;
66
67 if (!dev || !dev->dev)
68 return -ENODEV;
69
70 if (!dev->acquire_lock)
71 return 0;
72
73 /* host driver writes 0x2 to side band semaphore register */
74 ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
75 PUNIT_SEMAPHORE, 0x2);
76 if (ret) {
77 dev_err(dev->dev, "iosf punit semaphore request failed\n");
78 return ret;
79 }
80
81 /* host driver waits for bit 0 to be set in semaphore register */
82 start = jiffies;
83 end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT);
84 while (!time_after(jiffies, end)) {
85 ret = get_sem(dev->dev, &sem);
86 if (!ret && sem) {
87 acquired = jiffies;
88 dev_dbg(dev->dev, "punit semaphore acquired after %ums\n",
89 jiffies_to_msecs(jiffies - start));
90 return 0;
91 }
92
93 usleep_range(1000, 2000);
94 }
95
96 dev_err(dev->dev, "punit semaphore timed out, resetting\n");
97 reset_semaphore(dev->dev);
98
99 ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
100 PUNIT_SEMAPHORE, &sem);
 101 if (ret)
 102 dev_err(dev->dev, "iosf failed to read punit semaphore\n");
 103 else
 104 dev_err(dev->dev, "PUNIT SEM: %d\n", sem);
105
106 WARN_ON(1);
107
108 return -ETIMEDOUT;
109}
110EXPORT_SYMBOL(baytrail_i2c_acquire);
111
112void baytrail_i2c_release(struct dw_i2c_dev *dev)
113{
114 if (!dev || !dev->dev)
115 return;
116
117 if (!dev->acquire_lock)
118 return;
119
120 reset_semaphore(dev->dev);
121 dev_dbg(dev->dev, "punit semaphore held for %ums\n",
122 jiffies_to_msecs(jiffies - acquired));
123}
124EXPORT_SYMBOL(baytrail_i2c_release);
125
126int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
127{
128 acpi_status status;
129 unsigned long long shared_host = 0;
130 acpi_handle handle;
131
132 if (!dev || !dev->dev)
133 return 0;
134
135 handle = ACPI_HANDLE(dev->dev);
136 if (!handle)
137 return 0;
138
139 status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
140
141 if (ACPI_FAILURE(status))
142 return 0;
143
144 if (shared_host) {
145 dev_info(dev->dev, "I2C bus managed by PUNIT\n");
146 dev->acquire_lock = baytrail_i2c_acquire;
147 dev->release_lock = baytrail_i2c_release;
148 dev->pm_runtime_disabled = true;
149 }
150
151 if (!iosf_mbi_available())
152 return -EPROBE_DEFER;
153
154 return 0;
155}
156EXPORT_SYMBOL(i2c_dw_eval_lock_support);
157
158MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
159MODULE_DESCRIPTION("Baytrail I2C Semaphore driver");
160MODULE_LICENSE("GPL v2");
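baytrail_i2c_acquire() above uses the standard jiffies-bounded polling idiom: compute an end time, retry with a sleep between attempts so the CPU is not burned, and report -ETIMEDOUT once the deadline passes. A generic sketch, where try_acquire() stands in for get_sem() and returns 0 once the resource is held:

static int example_poll_until(struct device *dev,
			      int (*try_acquire)(struct device *dev),
			      unsigned int timeout_ms)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

	while (!time_after(jiffies, end)) {
		if (!try_acquire(dev))
			return 0;		/* got it */
		usleep_range(1000, 2000);	/* back off, don't busy-spin */
	}

	return -ETIMEDOUT;
}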
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 23628b7bfb8d..6e25c010e690 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -170,10 +170,10 @@ u32 dw_readl(struct dw_i2c_dev *dev, int offset)
 	u32 value;
 
 	if (dev->accessor_flags & ACCESS_16BIT)
-		value = readw(dev->base + offset) |
-			(readw(dev->base + offset + 2) << 16);
+		value = readw_relaxed(dev->base + offset) |
+			(readw_relaxed(dev->base + offset + 2) << 16);
 	else
-		value = readl(dev->base + offset);
+		value = readl_relaxed(dev->base + offset);
 
 	if (dev->accessor_flags & ACCESS_SWAP)
 		return swab32(value);
@@ -187,10 +187,10 @@ void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
 		b = swab32(b);
 
 	if (dev->accessor_flags & ACCESS_16BIT) {
-		writew((u16)b, dev->base + offset);
-		writew((u16)(b >> 16), dev->base + offset + 2);
+		writew_relaxed((u16)b, dev->base + offset);
+		writew_relaxed((u16)(b >> 16), dev->base + offset + 2);
 	} else {
-		writel(b, dev->base + offset);
+		writel_relaxed(b, dev->base + offset);
 	}
 }
 
@@ -285,6 +285,15 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
 	u32 hcnt, lcnt;
 	u32 reg;
 	u32 sda_falling_time, scl_falling_time;
+	int ret;
+
+	if (dev->acquire_lock) {
+		ret = dev->acquire_lock(dev);
+		if (ret) {
+			dev_err(dev->dev, "couldn't acquire bus ownership\n");
+			return ret;
+		}
+	}
 
 	input_clock_khz = dev->get_clk_rate_khz(dev);
 
@@ -298,6 +307,8 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
 	} else if (reg != DW_IC_COMP_TYPE_VALUE) {
 		dev_err(dev->dev, "Unknown Synopsys component type: "
 			"0x%08x\n", reg);
+		if (dev->release_lock)
+			dev->release_lock(dev);
 		return -ENODEV;
 	}
 
@@ -309,40 +320,39 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
 	sda_falling_time = dev->sda_falling_time ?: 300; /* ns */
 	scl_falling_time = dev->scl_falling_time ?: 300; /* ns */
 
-	/* Standard-mode */
-	hcnt = i2c_dw_scl_hcnt(input_clock_khz,
-				4000,	/* tHD;STA = tHIGH = 4.0 us */
-				sda_falling_time,
-				0,	/* 0: DW default, 1: Ideal */
-				0);	/* No offset */
-	lcnt = i2c_dw_scl_lcnt(input_clock_khz,
-				4700,	/* tLOW = 4.7 us */
-				scl_falling_time,
-				0);	/* No offset */
-
-	/* Allow platforms to specify the ideal HCNT and LCNT values */
+	/* Set SCL timing parameters for standard-mode */
 	if (dev->ss_hcnt && dev->ss_lcnt) {
 		hcnt = dev->ss_hcnt;
 		lcnt = dev->ss_lcnt;
+	} else {
+		hcnt = i2c_dw_scl_hcnt(input_clock_khz,
+					4000,	/* tHD;STA = tHIGH = 4.0 us */
+					sda_falling_time,
+					0,	/* 0: DW default, 1: Ideal */
+					0);	/* No offset */
+		lcnt = i2c_dw_scl_lcnt(input_clock_khz,
+					4700,	/* tLOW = 4.7 us */
+					scl_falling_time,
+					0);	/* No offset */
 	}
 	dw_writel(dev, hcnt, DW_IC_SS_SCL_HCNT);
 	dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT);
 	dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
 
-	/* Fast-mode */
-	hcnt = i2c_dw_scl_hcnt(input_clock_khz,
-				600,	/* tHD;STA = tHIGH = 0.6 us */
-				sda_falling_time,
-				0,	/* 0: DW default, 1: Ideal */
-				0);	/* No offset */
-	lcnt = i2c_dw_scl_lcnt(input_clock_khz,
-				1300,	/* tLOW = 1.3 us */
-				scl_falling_time,
-				0);	/* No offset */
-
+	/* Set SCL timing parameters for fast-mode */
 	if (dev->fs_hcnt && dev->fs_lcnt) {
 		hcnt = dev->fs_hcnt;
 		lcnt = dev->fs_lcnt;
+	} else {
+		hcnt = i2c_dw_scl_hcnt(input_clock_khz,
+					600,	/* tHD;STA = tHIGH = 0.6 us */
+					sda_falling_time,
+					0,	/* 0: DW default, 1: Ideal */
+					0);	/* No offset */
+		lcnt = i2c_dw_scl_lcnt(input_clock_khz,
+					1300,	/* tLOW = 1.3 us */
+					scl_falling_time,
+					0);	/* No offset */
 	}
 	dw_writel(dev, hcnt, DW_IC_FS_SCL_HCNT);
 	dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT);
@@ -364,6 +374,9 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
 
 	/* configure the i2c master */
 	dw_writel(dev, dev->master_cfg , DW_IC_CON);
+
+	if (dev->release_lock)
+		dev->release_lock(dev);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(i2c_dw_init);
@@ -627,6 +640,14 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
 	dev->abort_source = 0;
 	dev->rx_outstanding = 0;
 
+	if (dev->acquire_lock) {
+		ret = dev->acquire_lock(dev);
+		if (ret) {
+			dev_err(dev->dev, "couldn't acquire bus ownership\n");
+			goto done_nolock;
+		}
+	}
+
 	ret = i2c_dw_wait_bus_not_busy(dev);
 	if (ret < 0)
 		goto done;
@@ -672,6 +693,10 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
 		ret = -EIO;
 
 done:
+	if (dev->release_lock)
+		dev->release_lock(dev);
+
+done_nolock:
 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
 	mutex_unlock(&dev->lock);
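The hunks above thread a pair of optional callbacks through the core code: every path that touches the controller takes the platform's hardware semaphore first and drops it on every exit, including the error returns. A condensed sketch of that bracket; dw_program_registers() is a hypothetical stand-in for the register accesses:

/* Condensed form of the acquire/release bracket added above. */
static int dw_locked_access(struct dw_i2c_dev *dev)
{
	int ret;

	if (dev->acquire_lock) {
		ret = dev->acquire_lock(dev);
		if (ret) {
			dev_err(dev->dev, "couldn't acquire bus ownership\n");
			return ret;		/* nothing held yet */
		}
	}

	ret = dw_program_registers(dev);	/* hypothetical body */

	if (dev->release_lock)
		dev->release_lock(dev);		/* drop on every exit path */
	return ret;
}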
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 5a410ef17abd..9630222abf32 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -61,6 +61,9 @@
61 * @ss_lcnt: standard speed LCNT value 61 * @ss_lcnt: standard speed LCNT value
62 * @fs_hcnt: fast speed HCNT value 62 * @fs_hcnt: fast speed HCNT value
63 * @fs_lcnt: fast speed LCNT value 63 * @fs_lcnt: fast speed LCNT value
64 * @acquire_lock: function to acquire a hardware lock on the bus
65 * @release_lock: function to release a hardware lock on the bus
66 * @pm_runtime_disabled: true if pm runtime is disabled
64 * 67 *
65 * HCNT and LCNT parameters can be used if the platform knows more accurate 68 * HCNT and LCNT parameters can be used if the platform knows more accurate
66 * values than the one computed based only on the input clock frequency. 69 * values than the one computed based only on the input clock frequency.
@@ -101,6 +104,9 @@ struct dw_i2c_dev {
101 u16 ss_lcnt; 104 u16 ss_lcnt;
102 u16 fs_hcnt; 105 u16 fs_hcnt;
103 u16 fs_lcnt; 106 u16 fs_lcnt;
107 int (*acquire_lock)(struct dw_i2c_dev *dev);
108 void (*release_lock)(struct dw_i2c_dev *dev);
109 bool pm_runtime_disabled;
104}; 110};
105 111
106#define ACCESS_SWAP 0x00000001 112#define ACCESS_SWAP 0x00000001
@@ -119,3 +125,9 @@ extern void i2c_dw_disable(struct dw_i2c_dev *dev);
119extern void i2c_dw_clear_int(struct dw_i2c_dev *dev); 125extern void i2c_dw_clear_int(struct dw_i2c_dev *dev);
120extern void i2c_dw_disable_int(struct dw_i2c_dev *dev); 126extern void i2c_dw_disable_int(struct dw_i2c_dev *dev);
121extern u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev); 127extern u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev);
128
129#if IS_ENABLED(CONFIG_I2C_DESIGNWARE_BAYTRAIL)
130extern int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev);
131#else
132static inline int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) { return 0; }
133#endif
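The #if IS_ENABLED(...) block is the standard compile-out idiom: when the Kconfig symbol is built in (y or m) callers get the real function, otherwise a static inline stub that returns success, so call sites need no #ifdef of their own. The generic form, with CONFIG_FOO and foo_setup() as hypothetical names:

/* Compile-out idiom in generic form; CONFIG_FOO and foo_setup() are
 * hypothetical. */
struct foo_dev;

#if IS_ENABLED(CONFIG_FOO)
extern int foo_setup(struct foo_dev *dev);
#else
static inline int foo_setup(struct foo_dev *dev) { return 0; }
#endif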
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index acb40f95db78..6643d2dc0b25 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -6,7 +6,7 @@
  * Copyright (C) 2006 Texas Instruments.
  * Copyright (C) 2007 MontaVista Software Inc.
  * Copyright (C) 2009 Provigent Ltd.
- * Copyright (C) 2011 Intel corporation.
+ * Copyright (C) 2011, 2015 Intel Corporation.
  *
  * ----------------------------------------------------------------------------
  *
@@ -40,10 +40,6 @@
 #define DRIVER_NAME "i2c-designware-pci"
 
 enum dw_pci_ctl_id_t {
-	moorestown_0,
-	moorestown_1,
-	moorestown_2,
-
 	medfield_0,
 	medfield_1,
 	medfield_2,
@@ -101,28 +97,7 @@ static struct dw_scl_sda_cfg hsw_config = {
 	.sda_hold = 0x9,
 };
 
 static struct dw_pci_controller dw_pci_controllers[] = {
-	[moorestown_0] = {
-		.bus_num = 0,
-		.bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-		.tx_fifo_depth = 32,
-		.rx_fifo_depth = 32,
-		.clk_khz = 25000,
-	},
-	[moorestown_1] = {
-		.bus_num = 1,
-		.bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-		.tx_fifo_depth = 32,
-		.rx_fifo_depth = 32,
-		.clk_khz = 25000,
-	},
-	[moorestown_2] = {
-		.bus_num = 2,
-		.bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-		.tx_fifo_depth = 32,
-		.rx_fifo_depth = 32,
-		.clk_khz = 25000,
-	},
 	[medfield_0] = {
 		.bus_num = 0,
 		.bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
@@ -170,7 +145,6 @@ static struct dw_pci_controller dw_pci_controllers[] = {
 		.bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
 		.tx_fifo_depth = 32,
 		.rx_fifo_depth = 32,
-		.clk_khz = 100000,
 		.functionality = I2C_FUNC_10BIT_ADDR,
 		.scl_sda_cfg = &byt_config,
 	},
@@ -179,7 +153,6 @@ static struct dw_pci_controller dw_pci_controllers[] = {
 		.bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
 		.tx_fifo_depth = 32,
 		.rx_fifo_depth = 32,
-		.clk_khz = 100000,
 		.functionality = I2C_FUNC_10BIT_ADDR,
 		.scl_sda_cfg = &hsw_config,
 	},
@@ -259,7 +232,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
 	dev->functionality = controller->functionality |
 				DW_DEFAULT_FUNCTIONALITY;
 
-	dev->master_cfg =  controller->bus_cfg;
+	dev->master_cfg = controller->bus_cfg;
 	if (controller->scl_sda_cfg) {
 		cfg = controller->scl_sda_cfg;
 		dev->ss_hcnt = cfg->ss_hcnt;
@@ -325,12 +298,8 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev)
 MODULE_ALIAS("i2c_designware-pci");
 
 static const struct pci_device_id i2_designware_pci_ids[] = {
-	/* Moorestown */
-	{ PCI_VDEVICE(INTEL, 0x0802), moorestown_0 },
-	{ PCI_VDEVICE(INTEL, 0x0803), moorestown_1 },
-	{ PCI_VDEVICE(INTEL, 0x0804), moorestown_2 },
 	/* Medfield */
-	{ PCI_VDEVICE(INTEL, 0x0817), medfield_3,},
+	{ PCI_VDEVICE(INTEL, 0x0817), medfield_3 },
 	{ PCI_VDEVICE(INTEL, 0x0818), medfield_4 },
 	{ PCI_VDEVICE(INTEL, 0x0819), medfield_5 },
 	{ PCI_VDEVICE(INTEL, 0x082C), medfield_0 },
@@ -348,7 +317,7 @@ static const struct pci_device_id i2_designware_pci_ids[] = {
 	{ PCI_VDEVICE(INTEL, 0x9c61), haswell },
 	{ PCI_VDEVICE(INTEL, 0x9c62), haswell },
 	/* Braswell / Cherrytrail */
-	{ PCI_VDEVICE(INTEL, 0x22C1), baytrail,},
+	{ PCI_VDEVICE(INTEL, 0x22C1), baytrail },
 	{ PCI_VDEVICE(INTEL, 0x22C2), baytrail },
 	{ PCI_VDEVICE(INTEL, 0x22C3), baytrail },
 	{ PCI_VDEVICE(INTEL, 0x22C4), baytrail },
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 2b463c313e4e..c270f5f9a8f9 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -195,6 +195,10 @@ static int dw_i2c_probe(struct platform_device *pdev)
 		clk_freq = pdata->i2c_scl_freq;
 	}
 
+	r = i2c_dw_eval_lock_support(dev);
+	if (r)
+		return r;
+
 	dev->functionality =
 		I2C_FUNC_I2C |
 		I2C_FUNC_10BIT_ADDR |
@@ -257,10 +261,14 @@ static int dw_i2c_probe(struct platform_device *pdev)
 		return r;
 	}
 
-	pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
-	pm_runtime_use_autosuspend(&pdev->dev);
-	pm_runtime_set_active(&pdev->dev);
-	pm_runtime_enable(&pdev->dev);
+	if (dev->pm_runtime_disabled) {
+		pm_runtime_forbid(&pdev->dev);
+	} else {
+		pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
+		pm_runtime_use_autosuspend(&pdev->dev);
+		pm_runtime_set_active(&pdev->dev);
+		pm_runtime_enable(&pdev->dev);
+	}
 
 	return 0;
 }
@@ -310,7 +318,9 @@ static int dw_i2c_resume(struct device *dev)
 	struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
 
 	clk_prepare_enable(i_dev->clk);
-	i2c_dw_init(i_dev);
+
+	if (!i_dev->pm_runtime_disabled)
+		i2c_dw_init(i_dev);
 
 	return 0;
 }
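The probe hunk above now takes one of two runtime-PM paths: pm_runtime_forbid() pins the device active when the platform (Baytrail with the shared semaphore) cannot tolerate runtime suspend, while the normal path opts into autosuspend with a 1 s idle window. A sketch isolating the two paths; the wrapper name is hypothetical:

/* Sketch of the two runtime-PM paths taken in the probe hunk above;
 * my_setup_runtime_pm() is a hypothetical wrapper. */
static void my_setup_runtime_pm(struct device *dev, bool disabled)
{
	if (disabled) {
		pm_runtime_forbid(dev);		/* keep the device active */
		return;
	}
	pm_runtime_set_autosuspend_delay(dev, 1000);	/* ms of idleness */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
}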
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 7f3a9fe9bf4e..d7b26fc6f432 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -201,7 +201,7 @@ struct imx_i2c_struct {
 	void __iomem		*base;
 	wait_queue_head_t	queue;
 	unsigned long		i2csr;
-	unsigned int 		disable_delay;
+	unsigned int		disable_delay;
 	int			stopped;
 	unsigned int		ifdr; /* IMX_I2C_IFDR */
 	unsigned int		cur_clk;
@@ -295,7 +295,6 @@ static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
 	dma->chan_tx = dma_request_slave_channel(dev, "tx");
 	if (!dma->chan_tx) {
 		dev_dbg(dev, "can't request DMA tx channel\n");
-		ret = -ENODEV;
 		goto fail_al;
 	}
 
@@ -313,7 +312,6 @@ static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
 	dma->chan_rx = dma_request_slave_channel(dev, "rx");
 	if (!dma->chan_rx) {
 		dev_dbg(dev, "can't request DMA rx channel\n");
-		ret = -ENODEV;
 		goto fail_tx;
 	}
 
@@ -481,8 +479,8 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx)
 	i2c_clk_rate = clk_get_rate(i2c_imx->clk);
 	if (i2c_imx->cur_clk == i2c_clk_rate)
 		return;
-	else
-		i2c_imx->cur_clk = i2c_clk_rate;
+
+	i2c_imx->cur_clk = i2c_clk_rate;
 
 	div = (i2c_clk_rate + i2c_imx->bitrate - 1) / i2c_imx->bitrate;
 	if (div < i2c_clk_div[0].div)
@@ -490,7 +488,8 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx)
 	else if (div > i2c_clk_div[i2c_imx->hwdata->ndivs - 1].div)
 		i = i2c_imx->hwdata->ndivs - 1;
 	else
-		for (i = 0; i2c_clk_div[i].div < div; i++);
+		for (i = 0; i2c_clk_div[i].div < div; i++)
+			;
 
 	/* Store divider value */
 	i2c_imx->ifdr = i2c_clk_div[i].val;
@@ -628,9 +627,9 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
 	result = wait_for_completion_timeout(
 				&i2c_imx->dma->cmd_complete,
 				msecs_to_jiffies(DMA_TIMEOUT));
-	if (result <= 0) {
+	if (result == 0) {
 		dmaengine_terminate_all(dma->chan_using);
-		return result ?: -ETIMEDOUT;
+		return -ETIMEDOUT;
 	}
 
 	/* Waiting for transfer complete. */
@@ -686,9 +685,9 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
 	result = wait_for_completion_timeout(
 				&i2c_imx->dma->cmd_complete,
 				msecs_to_jiffies(DMA_TIMEOUT));
-	if (result <= 0) {
+	if (result == 0) {
 		dmaengine_terminate_all(dma->chan_using);
-		return result ?: -ETIMEDOUT;
+		return -ETIMEDOUT;
 	}
 
 	/* waiting for transfer complete. */
@@ -822,6 +821,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bool is_lastmsg)
 	/* read data */
 	for (i = 0; i < msgs->len; i++) {
 		u8 len = 0;
+
 		result = i2c_imx_trx_complete(i2c_imx);
 		if (result)
 			return result;
@@ -917,15 +917,16 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
 	/* write/read data */
 #ifdef CONFIG_I2C_DEBUG_BUS
 	temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
-	dev_dbg(&i2c_imx->adapter.dev, "<%s> CONTROL: IEN=%d, IIEN=%d, "
-		"MSTA=%d, MTX=%d, TXAK=%d, RSTA=%d\n", __func__,
+	dev_dbg(&i2c_imx->adapter.dev,
+		"<%s> CONTROL: IEN=%d, IIEN=%d, MSTA=%d, MTX=%d, TXAK=%d, RSTA=%d\n",
+		__func__,
 		(temp & I2CR_IEN ? 1 : 0), (temp & I2CR_IIEN ? 1 : 0),
 		(temp & I2CR_MSTA ? 1 : 0), (temp & I2CR_MTX ? 1 : 0),
 		(temp & I2CR_TXAK ? 1 : 0), (temp & I2CR_RSTA ? 1 : 0));
 	temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
 	dev_dbg(&i2c_imx->adapter.dev,
-		"<%s> STATUS: ICF=%d, IAAS=%d, IBB=%d, "
-		"IAL=%d, SRW=%d, IIF=%d, RXAK=%d\n", __func__,
+		"<%s> STATUS: ICF=%d, IAAS=%d, IBB=%d, IAL=%d, SRW=%d, IIF=%d, RXAK=%d\n",
+		__func__,
 		(temp & I2SR_ICF ? 1 : 0), (temp & I2SR_IAAS ? 1 : 0),
 		(temp & I2SR_IBB ? 1 : 0), (temp & I2SR_IAL ? 1 : 0),
 		(temp & I2SR_SRW ? 1 : 0), (temp & I2SR_IIF ? 1 : 0),
@@ -1004,7 +1005,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
 	i2c_imx->adapter.owner		= THIS_MODULE;
 	i2c_imx->adapter.algo		= &i2c_imx_algo;
 	i2c_imx->adapter.dev.parent	= &pdev->dev;
-	i2c_imx->adapter.nr 		= pdev->id;
+	i2c_imx->adapter.nr		= pdev->id;
 	i2c_imx->adapter.dev.of_node	= pdev->dev.of_node;
 	i2c_imx->base			= base;
 
@@ -1063,7 +1064,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
 			i2c_imx->adapter.name);
 	dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
 
-	/* Init DMA config if support*/
+	/* Init DMA config if supported */
 	i2c_imx_dma_request(i2c_imx, phy_addr);
 
 	return 0;   /* Return OK */
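The two DMA hunks fix a sign bug: wait_for_completion_timeout() returns an unsigned long, 0 on timeout and otherwise the jiffies remaining, so the old `result <= 0` test and the `result ?: -ETIMEDOUT` fallback guarded against a negative value that can never occur. The corrected idiom as a fragment (variable names taken from the patch):

/* wait_for_completion_timeout() returns unsigned: 0 only on timeout. */
unsigned long left;

left = wait_for_completion_timeout(&i2c_imx->dma->cmd_complete,
				   msecs_to_jiffies(DMA_TIMEOUT));
if (left == 0)
	return -ETIMEDOUT;	/* the only failure this call reports */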
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 7249b5b1e5d0..abf5db7e441e 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -12,6 +12,7 @@
  * kind, whether express or implied.
  */
 
+#include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -35,7 +36,9 @@ struct ocores_i2c {
 	int pos;
 	int nmsgs;
 	int state; /* see STATE_ */
-	int clock_khz;
+	struct clk *clk;
+	int ip_clock_khz;
+	int bus_clock_khz;
 	void (*setreg)(struct ocores_i2c *i2c, int reg, u8 value);
 	u8 (*getreg)(struct ocores_i2c *i2c, int reg);
 };
@@ -215,21 +218,34 @@ static int ocores_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 	return -ETIMEDOUT;
 }
 
-static void ocores_init(struct ocores_i2c *i2c)
+static int ocores_init(struct device *dev, struct ocores_i2c *i2c)
 {
 	int prescale;
+	int diff;
 	u8 ctrl = oc_getreg(i2c, OCI2C_CONTROL);
 
 	/* make sure the device is disabled */
 	oc_setreg(i2c, OCI2C_CONTROL, ctrl & ~(OCI2C_CTRL_EN|OCI2C_CTRL_IEN));
 
-	prescale = (i2c->clock_khz / (5*100)) - 1;
+	prescale = (i2c->ip_clock_khz / (5 * i2c->bus_clock_khz)) - 1;
+	prescale = clamp(prescale, 0, 0xffff);
+
+	diff = i2c->ip_clock_khz / (5 * (prescale + 1)) - i2c->bus_clock_khz;
+	if (abs(diff) > i2c->bus_clock_khz / 10) {
+		dev_err(dev,
+			"Unsupported clock settings: core: %d KHz, bus: %d KHz\n",
+			i2c->ip_clock_khz, i2c->bus_clock_khz);
+		return -EINVAL;
+	}
+
 	oc_setreg(i2c, OCI2C_PRELOW, prescale & 0xff);
 	oc_setreg(i2c, OCI2C_PREHIGH, prescale >> 8);
 
 	/* Init the device */
 	oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
 	oc_setreg(i2c, OCI2C_CONTROL, ctrl | OCI2C_CTRL_IEN | OCI2C_CTRL_EN);
+
+	return 0;
 }
 
 
@@ -304,6 +320,8 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
 	struct device_node *np = pdev->dev.of_node;
 	const struct of_device_id *match;
 	u32 val;
+	u32 clock_frequency;
+	bool clock_frequency_present;
 
 	if (of_property_read_u32(np, "reg-shift", &i2c->reg_shift)) {
 		/* no 'reg-shift', check for deprecated 'regstep' */
@@ -319,12 +337,42 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
 		}
 	}
 
-	if (of_property_read_u32(np, "clock-frequency", &val)) {
-		dev_err(&pdev->dev,
-			"Missing required parameter 'clock-frequency'\n");
-		return -ENODEV;
+	clock_frequency_present = !of_property_read_u32(np, "clock-frequency",
+							&clock_frequency);
+	i2c->bus_clock_khz = 100;
+
+	i2c->clk = devm_clk_get(&pdev->dev, NULL);
+
+	if (!IS_ERR(i2c->clk)) {
+		int ret = clk_prepare_enable(i2c->clk);
+
+		if (ret) {
+			dev_err(&pdev->dev,
+				"clk_prepare_enable failed: %d\n", ret);
+			return ret;
+		}
+		i2c->ip_clock_khz = clk_get_rate(i2c->clk) / 1000;
+		if (clock_frequency_present)
+			i2c->bus_clock_khz = clock_frequency / 1000;
+	}
+
+	if (i2c->ip_clock_khz == 0) {
+		if (of_property_read_u32(np, "opencores,ip-clock-frequency",
+					 &val)) {
+			if (!clock_frequency_present) {
+				dev_err(&pdev->dev,
+					"Missing required parameter 'opencores,ip-clock-frequency'\n");
+				return -ENODEV;
+			}
+			i2c->ip_clock_khz = clock_frequency / 1000;
+			dev_warn(&pdev->dev,
+				 "Deprecated usage of the 'clock-frequency' property, please update to 'opencores,ip-clock-frequency'\n");
+		} else {
+			i2c->ip_clock_khz = val / 1000;
+			if (clock_frequency_present)
+				i2c->bus_clock_khz = clock_frequency / 1000;
+		}
 	}
-	i2c->clock_khz = val / 1000;
 
 	of_property_read_u32(pdev->dev.of_node, "reg-io-width",
 				&i2c->reg_io_width);
@@ -368,7 +416,8 @@ static int ocores_i2c_probe(struct platform_device *pdev)
 	if (pdata) {
 		i2c->reg_shift = pdata->reg_shift;
 		i2c->reg_io_width = pdata->reg_io_width;
-		i2c->clock_khz = pdata->clock_khz;
+		i2c->ip_clock_khz = pdata->clock_khz;
+		i2c->bus_clock_khz = 100;
 	} else {
 		ret = ocores_i2c_of_probe(pdev, i2c);
 		if (ret)
@@ -402,7 +451,9 @@ static int ocores_i2c_probe(struct platform_device *pdev)
 		}
 	}
 
-	ocores_init(i2c);
+	ret = ocores_init(&pdev->dev, i2c);
+	if (ret)
+		return ret;
 
 	init_waitqueue_head(&i2c->wait);
 	ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0,
@@ -446,6 +497,9 @@ static int ocores_i2c_remove(struct platform_device *pdev)
 	/* remove adapter & data */
 	i2c_del_adapter(&i2c->adap);
 
+	if (!IS_ERR(i2c->clk))
+		clk_disable_unprepare(i2c->clk);
+
 	return 0;
 }
 
@@ -458,6 +512,8 @@ static int ocores_i2c_suspend(struct device *dev)
 	/* make sure the device is disabled */
 	oc_setreg(i2c, OCI2C_CONTROL, ctrl & ~(OCI2C_CTRL_EN|OCI2C_CTRL_IEN));
 
+	if (!IS_ERR(i2c->clk))
+		clk_disable_unprepare(i2c->clk);
 	return 0;
 }
 
@@ -465,9 +521,20 @@ static int ocores_i2c_resume(struct device *dev)
 {
 	struct ocores_i2c *i2c = dev_get_drvdata(dev);
 
-	ocores_init(i2c);
+	if (!IS_ERR(i2c->clk)) {
+		unsigned long rate;
+		int ret = clk_prepare_enable(i2c->clk);
 
-	return 0;
+		if (ret) {
+			dev_err(dev,
+				"clk_prepare_enable failed: %d\n", ret);
+			return ret;
+		}
+		rate = clk_get_rate(i2c->clk) / 1000;
+		if (rate)
+			i2c->ip_clock_khz = rate;
+	}
+	return ocores_init(dev, i2c);
 }
 
 static SIMPLE_DEV_PM_OPS(ocores_i2c_pm, ocores_i2c_suspend, ocores_i2c_resume);
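The new prescaler math follows the OpenCores formula prescale = Fcore / (5 * Fbus) - 1 and then rejects combinations where the achievable bus rate lands more than 10% off target. With illustrative numbers, a 62.5 MHz core clock and a 100 kHz bus give prescale = 62500 / (5 * 100) - 1 = 124, and the rate divides back out exactly:

#include <stdio.h>
#include <stdlib.h>

/* Worked example of the ocores prescaler check (numbers illustrative). */
int main(void)
{
	int ip_khz = 62500, bus_khz = 100;

	int prescale = ip_khz / (5 * bus_khz) - 1;	/* 124 */
	int actual = ip_khz / (5 * (prescale + 1));	/* 100 kHz */
	int diff = actual - bus_khz;			/* 0 */

	printf("prescale=%d actual=%d kHz diff=%d\n", prescale, actual, diff);
	return abs(diff) > bus_khz / 10;	/* nonzero = outside 10% */
}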
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 44f03eed00dd..d37d9db6681e 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -148,13 +148,6 @@ static inline u32 pmcmsptwi_clock_to_reg(
 	return ((clock->filter & 0xf) << 12) | (clock->clock & 0x03ff);
 }
 
-static inline void pmcmsptwi_reg_to_clock(
-			u32 reg, struct pmcmsptwi_clock *clock)
-{
-	clock->filter = (reg >> 12) & 0xf;
-	clock->clock = reg & 0x03ff;
-}
-
 static inline u32 pmcmsptwi_cfg_to_reg(const struct pmcmsptwi_cfg *cfg)
 {
 	return ((cfg->arbf & 0xf) << 12) |
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 92462843db66..5f96b1b3e3a5 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -102,6 +102,9 @@ struct rk3x_i2c {
 
 	/* Settings */
 	unsigned int scl_frequency;
+	unsigned int scl_rise_ns;
+	unsigned int scl_fall_ns;
+	unsigned int sda_fall_ns;
 
 	/* Synchronization & notification */
 	spinlock_t lock;
@@ -435,6 +438,9 @@ out:
 *
 * @clk_rate: I2C input clock rate
 * @scl_rate: Desired SCL rate
+ * @scl_rise_ns: How many ns it takes for SCL to rise.
+ * @scl_fall_ns: How many ns it takes for SCL to fall.
+ * @sda_fall_ns: How many ns it takes for SDA to fall.
 * @div_low: Divider output for low
 * @div_high: Divider output for high
 *
@@ -443,11 +449,16 @@ out:
 * too high, we silently use the highest possible rate.
 */
 static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
+			      unsigned long scl_rise_ns,
+			      unsigned long scl_fall_ns,
+			      unsigned long sda_fall_ns,
 			      unsigned long *div_low, unsigned long *div_high)
 {
-	unsigned long min_low_ns, min_high_ns;
-	unsigned long max_data_hold_ns;
+	unsigned long spec_min_low_ns, spec_min_high_ns;
+	unsigned long spec_setup_start, spec_max_data_hold_ns;
 	unsigned long data_hold_buffer_ns;
+
+	unsigned long min_low_ns, min_high_ns;
 	unsigned long max_low_ns, min_total_ns;
 
 	unsigned long clk_rate_khz, scl_rate_khz;
@@ -469,29 +480,50 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
 		scl_rate = 1000;
 
 	/*
-	 * min_low_ns:  The minimum number of ns we need to hold low
-	 *		to meet i2c spec
-	 * min_high_ns: The minimum number of ns we need to hold high
-	 *		to meet i2c spec
-	 * max_low_ns:  The maximum number of ns we can hold low
-	 *		to meet i2c spec
+	 * min_low_ns:  The minimum number of ns we need to hold low to
+	 *		meet I2C specification, should include fall time.
+	 * min_high_ns: The minimum number of ns we need to hold high to
+	 *		meet I2C specification, should include rise time.
+	 * max_low_ns:  The maximum number of ns we can hold low to meet
+	 *		I2C specification.
	 *
-	 * Note: max_low_ns should be (max data hold time * 2 - buffer)
+	 * Note: max_low_ns should be (maximum data hold time * 2 - buffer)
	 *       This is because the i2c host on Rockchip holds the data line
	 *       for half the low time.
	 */
 	if (scl_rate <= 100000) {
-		min_low_ns = 4700;
-		min_high_ns = 4000;
-		max_data_hold_ns = 3450;
+		/* Standard-mode */
+		spec_min_low_ns = 4700;
+		spec_setup_start = 4700;
+		spec_min_high_ns = 4000;
+		spec_max_data_hold_ns = 3450;
 		data_hold_buffer_ns = 50;
 	} else {
-		min_low_ns = 1300;
-		min_high_ns = 600;
-		max_data_hold_ns = 900;
+		/* Fast-mode */
+		spec_min_low_ns = 1300;
+		spec_setup_start = 600;
+		spec_min_high_ns = 600;
+		spec_max_data_hold_ns = 900;
 		data_hold_buffer_ns = 50;
 	}
-	max_low_ns = max_data_hold_ns * 2 - data_hold_buffer_ns;
+	min_high_ns = scl_rise_ns + spec_min_high_ns;
+
+	/*
+	 * Timings for repeated start:
+	 * - controller appears to drop SDA at .875x (7/8) programmed clk high.
+	 * - controller appears to keep SCL high for 2x programmed clk high.
+	 *
+	 * We need to account for those rules in picking our "high" time so
+	 * we meet tSU;STA and tHD;STA times.
+	 */
+	min_high_ns = max(min_high_ns,
+			  DIV_ROUND_UP((scl_rise_ns + spec_setup_start) * 1000, 875));
+	min_high_ns = max(min_high_ns,
+			  DIV_ROUND_UP((scl_rise_ns + spec_setup_start +
+					sda_fall_ns + spec_min_high_ns), 2));
+
+	min_low_ns = scl_fall_ns + spec_min_low_ns;
+	max_low_ns = spec_max_data_hold_ns * 2 - data_hold_buffer_ns;
 	min_total_ns = min_low_ns + min_high_ns;
 
 	/* Adjust to avoid overflow */
@@ -510,8 +542,8 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
 	min_div_for_hold = (min_low_div + min_high_div);
 
 	/*
-	 * This is the maximum divider so we don't go over the max.
-	 * We don't round up here (we round down) since this is a max.
+	 * This is the maximum divider so we don't go over the maximum.
+	 * We don't round up here (we round down) since this is a maximum.
	 */
 	max_low_div = clk_rate_khz * max_low_ns / (8 * 1000000);
 
@@ -544,7 +576,7 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
 	ideal_low_div = DIV_ROUND_UP(clk_rate_khz * min_low_ns,
 				     scl_rate_khz * 8 * min_total_ns);
 
-	/* Don't allow it to go over the max */
+	/* Don't allow it to go over the maximum */
 	if (ideal_low_div > max_low_div)
 		ideal_low_div = max_low_div;
 
@@ -588,9 +620,9 @@ static void rk3x_i2c_adapt_div(struct rk3x_i2c *i2c, unsigned long clk_rate)
 	u64 t_low_ns, t_high_ns;
 	int ret;
 
-	ret = rk3x_i2c_calc_divs(clk_rate, i2c->scl_frequency, &div_low,
-				 &div_high);
-
+	ret = rk3x_i2c_calc_divs(clk_rate, i2c->scl_frequency, i2c->scl_rise_ns,
+				 i2c->scl_fall_ns, i2c->sda_fall_ns,
				 &div_low, &div_high);
 	WARN_ONCE(ret != 0, "Could not reach SCL freq %u", i2c->scl_frequency);
 
 	clk_enable(i2c->clk);
@@ -633,9 +665,10 @@ static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
 	switch (event) {
 	case PRE_RATE_CHANGE:
 		if (rk3x_i2c_calc_divs(ndata->new_rate, i2c->scl_frequency,
-				       &div_low, &div_high) != 0) {
+				       i2c->scl_rise_ns, i2c->scl_fall_ns,
+				       i2c->sda_fall_ns,
+				       &div_low, &div_high) != 0)
 			return NOTIFY_STOP;
-		}
 
 		/* scale up */
 		if (ndata->new_rate > ndata->old_rate)
@@ -859,6 +892,24 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
 		i2c->scl_frequency = DEFAULT_SCL_RATE;
 	}
 
+	/*
+	 * Read rise and fall time from device tree. If not available use
+	 * the default maximum timing from the specification.
+	 */
+	if (of_property_read_u32(pdev->dev.of_node, "i2c-scl-rising-time-ns",
+				 &i2c->scl_rise_ns)) {
+		if (i2c->scl_frequency <= 100000)
+			i2c->scl_rise_ns = 1000;
+		else
+			i2c->scl_rise_ns = 300;
+	}
+	if (of_property_read_u32(pdev->dev.of_node, "i2c-scl-falling-time-ns",
+				 &i2c->scl_fall_ns))
+		i2c->scl_fall_ns = 300;
+	if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns",
+				 &i2c->scl_fall_ns))
+		i2c->sda_fall_ns = i2c->scl_fall_ns;
+
 	strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
 	i2c->adap.owner = THIS_MODULE;
 	i2c->adap.algo = &rk3x_i2c_algorithm;
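With the standard-mode defaults the probe hunk installs (a 1000 ns rise time when the DT gives nothing), the repeated-start 7/8 rule dominates the SCL high time: ceil((1000 + 4700) * 1000 / 875) = 6515 ns, which exceeds both rise + spec high (5000 ns) and the averaged tHD;STA term ((1000 + 4700 + 300 + 4000) / 2 = 5000 ns). A standalone check of that arithmetic:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Worked standard-mode example of the min_high_ns selection above. */
int main(void)
{
	unsigned long scl_rise_ns = 1000, sda_fall_ns = 300;
	unsigned long spec_setup_start = 4700, spec_min_high_ns = 4000;
	unsigned long min_high_ns = scl_rise_ns + spec_min_high_ns; /* 5000 */
	unsigned long a, b;

	a = DIV_ROUND_UP((scl_rise_ns + spec_setup_start) * 1000, 875);	/* 6515 */
	b = DIV_ROUND_UP(scl_rise_ns + spec_setup_start +
			 sda_fall_ns + spec_min_high_ns, 2);		/* 5000 */
	if (a > min_high_ns)
		min_high_ns = a;
	if (b > min_high_ns)
		min_high_ns = b;

	printf("min_high_ns = %lu\n", min_high_ns);	/* prints 6515 */
	return 0;
}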
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 28b87e683503..29f14331dd9d 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -286,6 +286,7 @@ static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev)
 	if (rx_fifo_avail > 0 && buf_remaining > 0) {
 		BUG_ON(buf_remaining > 3);
 		val = i2c_readl(i2c_dev, I2C_RX_FIFO);
+		val = cpu_to_le32(val);
 		memcpy(buf, &val, buf_remaining);
 		buf_remaining = 0;
 		rx_fifo_avail--;
@@ -344,6 +345,7 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
 	if (tx_fifo_avail > 0 && buf_remaining > 0) {
 		BUG_ON(buf_remaining > 3);
 		memcpy(&val, buf, buf_remaining);
+		val = le32_to_cpu(val);
 
 		/* Again update before writing to FIFO to make sure isr sees. */
 		i2c_dev->msg_buf_remaining = 0;
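Both tegra hunks fix the same endianness hole: memcpy() copies memory order, so the 32-bit FIFO word must be laid out little-endian in memory before a partial copy of 1 to 3 bytes picks off the low lanes. On little-endian kernels cpu_to_le32()/le32_to_cpu() compile away; on big-endian they swap. The RX side in isolation, annotated:

/* RX tail handling from the hunk above, annotated. */
u32 val;

val = i2c_readl(i2c_dev, I2C_RX_FIFO);	/* CPU-endian register value */
val = cpu_to_le32(val);			/* force LE byte order in memory */
memcpy(buf, &val, buf_remaining);	/* copy only the low 1-3 bytes */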
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index e9eae57a2b50..210cf4874cb7 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -102,7 +102,7 @@ static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data)
 	struct acpi_resource_i2c_serialbus *sb;
 
 	sb = &ares->data.i2c_serial_bus;
-	if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+	if (!info->addr && sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
 		info->addr = sb->slave_address;
 		if (sb->access_mode == ACPI_I2C_10BIT_MODE)
 			info->flags |= I2C_CLIENT_TEN;
@@ -698,101 +698,6 @@ static void i2c_device_shutdown(struct device *dev)
 		driver->shutdown(client);
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int i2c_legacy_suspend(struct device *dev, pm_message_t mesg)
-{
-	struct i2c_client *client = i2c_verify_client(dev);
-	struct i2c_driver *driver;
-
-	if (!client || !dev->driver)
-		return 0;
-	driver = to_i2c_driver(dev->driver);
-	if (!driver->suspend)
-		return 0;
-	return driver->suspend(client, mesg);
-}
-
-static int i2c_legacy_resume(struct device *dev)
-{
-	struct i2c_client *client = i2c_verify_client(dev);
-	struct i2c_driver *driver;
-
-	if (!client || !dev->driver)
-		return 0;
-	driver = to_i2c_driver(dev->driver);
-	if (!driver->resume)
-		return 0;
-	return driver->resume(client);
-}
-
-static int i2c_device_pm_suspend(struct device *dev)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	if (pm)
-		return pm_generic_suspend(dev);
-	else
-		return i2c_legacy_suspend(dev, PMSG_SUSPEND);
-}
-
-static int i2c_device_pm_resume(struct device *dev)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	if (pm)
-		return pm_generic_resume(dev);
-	else
-		return i2c_legacy_resume(dev);
-}
-
-static int i2c_device_pm_freeze(struct device *dev)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	if (pm)
-		return pm_generic_freeze(dev);
-	else
-		return i2c_legacy_suspend(dev, PMSG_FREEZE);
-}
-
-static int i2c_device_pm_thaw(struct device *dev)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	if (pm)
-		return pm_generic_thaw(dev);
-	else
-		return i2c_legacy_resume(dev);
-}
-
-static int i2c_device_pm_poweroff(struct device *dev)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	if (pm)
-		return pm_generic_poweroff(dev);
-	else
-		return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
-}
-
-static int i2c_device_pm_restore(struct device *dev)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	if (pm)
-		return pm_generic_restore(dev);
-	else
-		return i2c_legacy_resume(dev);
-}
-#else /* !CONFIG_PM_SLEEP */
-#define i2c_device_pm_suspend	NULL
-#define i2c_device_pm_resume	NULL
-#define i2c_device_pm_freeze	NULL
-#define i2c_device_pm_thaw	NULL
-#define i2c_device_pm_poweroff	NULL
-#define i2c_device_pm_restore	NULL
-#endif /* !CONFIG_PM_SLEEP */
-
 static void i2c_client_dev_release(struct device *dev)
 {
 	kfree(to_i2c_client(dev));
@@ -804,6 +709,7 @@ show_name(struct device *dev, struct device_attribute *attr, char *buf)
 	return sprintf(buf, "%s\n", dev->type == &i2c_client_type ?
 		       to_i2c_client(dev)->name : to_i2c_adapter(dev)->name);
 }
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
 
 static ssize_t
 show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
@@ -817,8 +723,6 @@ show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
 
 	return sprintf(buf, "%s%s\n", I2C_MODULE_PREFIX, client->name);
 }
-
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
 static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL);
 
 static struct attribute *i2c_dev_attrs[] = {
@@ -827,29 +731,7 @@ static struct attribute *i2c_dev_attrs[] = {
 	&dev_attr_modalias.attr,
 	NULL
 };
-
-static struct attribute_group i2c_dev_attr_group = {
-	.attrs		= i2c_dev_attrs,
-};
-
-static const struct attribute_group *i2c_dev_attr_groups[] = {
-	&i2c_dev_attr_group,
-	NULL
-};
-
-static const struct dev_pm_ops i2c_device_pm_ops = {
-	.suspend = i2c_device_pm_suspend,
-	.resume = i2c_device_pm_resume,
-	.freeze = i2c_device_pm_freeze,
-	.thaw = i2c_device_pm_thaw,
-	.poweroff = i2c_device_pm_poweroff,
-	.restore = i2c_device_pm_restore,
-	SET_RUNTIME_PM_OPS(
-		pm_generic_runtime_suspend,
-		pm_generic_runtime_resume,
-		NULL
-	)
-};
+ATTRIBUTE_GROUPS(i2c_dev);
 
 struct bus_type i2c_bus_type = {
 	.name		= "i2c",
@@ -857,12 +739,11 @@ struct bus_type i2c_bus_type = {
 	.probe		= i2c_device_probe,
 	.remove		= i2c_device_remove,
 	.shutdown	= i2c_device_shutdown,
-	.pm		= &i2c_device_pm_ops,
 };
 EXPORT_SYMBOL_GPL(i2c_bus_type);
 
 static struct device_type i2c_client_type = {
-	.groups		= i2c_dev_attr_groups,
+	.groups		= i2c_dev_groups,
 	.uevent		= i2c_device_uevent,
 	.release	= i2c_client_dev_release,
 };
@@ -1261,6 +1142,7 @@ i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr,
 
 	return count;
 }
+static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
 
 /*
  * And of course let the users delete the devices they instantiated, if
@@ -1315,8 +1197,6 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr,
 				   "delete_device");
 	return res;
 }
-
-static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
 static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL,
 				  i2c_sysfs_delete_device);
 
@@ -1326,18 +1206,10 @@ static struct attribute *i2c_adapter_attrs[] = {
 	&dev_attr_delete_device.attr,
 	NULL
 };
-
-static struct attribute_group i2c_adapter_attr_group = {
-	.attrs		= i2c_adapter_attrs,
-};
-
-static const struct attribute_group *i2c_adapter_attr_groups[] = {
-	&i2c_adapter_attr_group,
-	NULL
-};
+ATTRIBUTE_GROUPS(i2c_adapter);
 
 struct device_type i2c_adapter_type = {
-	.groups		= i2c_adapter_attr_groups,
+	.groups		= i2c_adapter_groups,
 	.release	= i2c_adapter_dev_release,
 };
 EXPORT_SYMBOL_GPL(i2c_adapter_type);
@@ -1419,8 +1291,6 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
 	if (of_get_property(node, "wakeup-source", NULL))
 		info.flags |= I2C_CLIENT_WAKE;
 
-	request_module("%s%s", I2C_MODULE_PREFIX, info.type);
-
 	result = i2c_new_device(adap, &info);
 	if (result == NULL) {
 		dev_err(&adap->dev, "of_i2c: Failure registering %s\n",
@@ -1796,11 +1666,15 @@ void i2c_del_adapter(struct i2c_adapter *adap)
 	/* device name is gone after device_unregister */
 	dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
 
-	/* clean up the sysfs representation */
+	/* wait until all references to the device are gone
+	 *
+	 * FIXME: This is old code and should ideally be replaced by an
+	 * alternative which results in decoupling the lifetime of the struct
+	 * device from the i2c_adapter, like spi or netdev do. Any solution
+	 * should be throughly tested with DEBUG_KOBJECT_RELEASE enabled!
+	 */
 	init_completion(&adap->dev_released);
 	device_unregister(&adap->dev);
-
-	/* wait for sysfs to drop all references */
 	wait_for_completion(&adap->dev_released);
 
 	/* free bus id */
@@ -1859,14 +1733,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
 	if (res)
 		return res;
 
-	/* Drivers should switch to dev_pm_ops instead. */
-	if (driver->suspend)
-		pr_warn("i2c-core: driver [%s] using legacy suspend method\n",
-			driver->driver.name);
-	if (driver->resume)
-		pr_warn("i2c-core: driver [%s] using legacy resume method\n",
-			driver->driver.name);
-
 	pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
 
 	INIT_LIST_HEAD(&driver->clients);
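ATTRIBUTE_GROUPS(i2c_dev) generates the attribute_group plus the NULL-terminated *_groups[] array that the removed boilerplate spelled out by hand; the only visible difference at the use sites is the generated i2c_dev_groups name. Roughly what the macro expands to (simplified):

/* Approximate expansion of ATTRIBUTE_GROUPS(i2c_dev) (simplified). */
static const struct attribute_group i2c_dev_group = {
	.attrs = i2c_dev_attrs,
};

static const struct attribute_group *i2c_dev_groups[] = {
	&i2c_dev_group,
	NULL,
};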
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index ec11b404b433..3d8f4fe2e47e 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -41,6 +41,7 @@
 #include <linux/i2c-mux.h>
 #include <linux/i2c/pca954x.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/pm.h>
 #include <linux/slab.h>
 
@@ -186,6 +187,8 @@ static int pca954x_probe(struct i2c_client *client,
 {
 	struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent);
 	struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
+	struct device_node *of_node = client->dev.of_node;
+	bool idle_disconnect_dt;
 	struct gpio_desc *gpio;
 	int num, force, class;
 	struct pca954x *data;
@@ -217,8 +220,13 @@ static int pca954x_probe(struct i2c_client *client,
 	data->type = id->driver_data;
 	data->last_chan = 0;	/* force the first selection */
 
+	idle_disconnect_dt = of_node &&
+		of_property_read_bool(of_node, "i2c-mux-idle-disconnect");
+
 	/* Now create an adapter for each channel */
 	for (num = 0; num < chips[data->type].nchans; num++) {
+		bool idle_disconnect_pd = false;
+
 		force = 0;	/* dynamic adap number */
 		class = 0;	/* no class by default */
 		if (pdata) {
@@ -229,12 +237,13 @@ static int pca954x_probe(struct i2c_client *client,
 			} else
 				/* discard unconfigured channels */
 				break;
+			idle_disconnect_pd = pdata->modes[num].deselect_on_exit;
 		}
 
 		data->virt_adaps[num] =
 			i2c_add_mux_adapter(adap, &client->dev, client,
 				force, num, class, pca954x_select_chan,
-				(pdata && pdata->modes[num].deselect_on_exit)
+				(idle_disconnect_pd || idle_disconnect_dt)
 					? pca954x_deselect_mux : NULL);
 
 		if (data->virt_adaps[num] == NULL) {
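i2c-mux-idle-disconnect is a presence-only flag: of_property_read_bool() just reports whether the property exists, so the DT path and the legacy platform-data flag are OR-ed per channel when picking the deselect callback. The check in isolation:

/* Presence-only DT flag, as read in the probe hunk above. */
bool idle_disconnect_dt;

idle_disconnect_dt = of_node &&
	of_property_read_bool(of_node, "i2c-mux-idle-disconnect");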
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 4132935dc929..4011effe4c05 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -21,7 +21,7 @@ config IIO_BUFFER
 if IIO_BUFFER
 
 config IIO_BUFFER_CB
-boolean "IIO callback buffer used for push in-kernel interfaces"
+	bool "IIO callback buffer used for push in-kernel interfaces"
 	help
 	  Should be selected by any drivers that do in-kernel push
 	  usage. That is, those where the data is pushed to the consumer.
@@ -43,7 +43,7 @@ config IIO_TRIGGERED_BUFFER
 endif # IIO_BUFFER
 
 config IIO_TRIGGER
-	boolean "Enable triggered sampling support"
+	bool "Enable triggered sampling support"
 	help
 	  Provides IIO core support for triggers. Currently these
 	  are used to initialize capture of samples to push into
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 56a4b7ca7ee3..45d67e9228d7 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1124,6 +1124,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
 	if (!optlen)
 		return -EINVAL;
 
+	memset(&sa_path, 0, sizeof(sa_path));
+	sa_path.vlan_id = 0xffff;
+
 	ib_sa_unpack_path(path_data->path_rec, &sa_path);
 	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
 	if (ret)
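The memset() matters because ib_sa_unpack_path() only writes the fields present in the wire format: anything it skips would otherwise reach rdma_set_ib_paths() as uninitialized stack data, and vlan_id needs its 0xffff "no VLAN" sentinel. The general shape of the fix:

/* Initialize the whole struct before a partial unpack, so fields the
 * unpacker skips are well defined. */
struct ib_sa_path_rec sa_path;

memset(&sa_path, 0, sizeof(sa_path));
sa_path.vlan_id = 0xffff;		/* sentinel: no VLAN tag */
ib_sa_unpack_path(path_data->path_rec, &sa_path);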
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 6095872549e7..8b8cc6fa0ab0 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -294,7 +294,8 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
 	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
 		rbt_ib_umem_insert(&umem->odp_data->interval_tree,
 				   &context->umem_tree);
-	if (likely(!atomic_read(&context->notifier_count)))
+	if (likely(!atomic_read(&context->notifier_count)) ||
+	    context->odp_mrs_count == 1)
 		umem->odp_data->mn_counters_active = true;
 	else
 		list_add(&umem->odp_data->no_private_counters,
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 643c08a025a5..b716b0815644 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -258,5 +258,6 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
 
 IB_UVERBS_DECLARE_EX_CMD(create_flow);
 IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
+IB_UVERBS_DECLARE_EX_CMD(query_device);
 
 #endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index b7943ff16ed3..a9f048990dfc 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -400,6 +400,52 @@ err:
400 return ret; 400 return ret;
401} 401}
402 402
403static void copy_query_dev_fields(struct ib_uverbs_file *file,
404 struct ib_uverbs_query_device_resp *resp,
405 struct ib_device_attr *attr)
406{
407 resp->fw_ver = attr->fw_ver;
408 resp->node_guid = file->device->ib_dev->node_guid;
409 resp->sys_image_guid = attr->sys_image_guid;
410 resp->max_mr_size = attr->max_mr_size;
411 resp->page_size_cap = attr->page_size_cap;
412 resp->vendor_id = attr->vendor_id;
413 resp->vendor_part_id = attr->vendor_part_id;
414 resp->hw_ver = attr->hw_ver;
415 resp->max_qp = attr->max_qp;
416 resp->max_qp_wr = attr->max_qp_wr;
417 resp->device_cap_flags = attr->device_cap_flags;
418 resp->max_sge = attr->max_sge;
419 resp->max_sge_rd = attr->max_sge_rd;
420 resp->max_cq = attr->max_cq;
421 resp->max_cqe = attr->max_cqe;
422 resp->max_mr = attr->max_mr;
423 resp->max_pd = attr->max_pd;
424 resp->max_qp_rd_atom = attr->max_qp_rd_atom;
425 resp->max_ee_rd_atom = attr->max_ee_rd_atom;
426 resp->max_res_rd_atom = attr->max_res_rd_atom;
427 resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
428 resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
429 resp->atomic_cap = attr->atomic_cap;
430 resp->max_ee = attr->max_ee;
431 resp->max_rdd = attr->max_rdd;
432 resp->max_mw = attr->max_mw;
433 resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
434 resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
435 resp->max_mcast_grp = attr->max_mcast_grp;
436 resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
437 resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
438 resp->max_ah = attr->max_ah;
439 resp->max_fmr = attr->max_fmr;
440 resp->max_map_per_fmr = attr->max_map_per_fmr;
441 resp->max_srq = attr->max_srq;
442 resp->max_srq_wr = attr->max_srq_wr;
443 resp->max_srq_sge = attr->max_srq_sge;
444 resp->max_pkeys = attr->max_pkeys;
445 resp->local_ca_ack_delay = attr->local_ca_ack_delay;
446 resp->phys_port_cnt = file->device->ib_dev->phys_port_cnt;
447}
448
403ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, 449ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
404 const char __user *buf, 450 const char __user *buf,
405 int in_len, int out_len) 451 int in_len, int out_len)
@@ -420,47 +466,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
420 return ret; 466 return ret;
421 467
422 memset(&resp, 0, sizeof resp); 468 memset(&resp, 0, sizeof resp);
423 469 copy_query_dev_fields(file, &resp, &attr);
424 resp.fw_ver = attr.fw_ver;
425 resp.node_guid = file->device->ib_dev->node_guid;
426 resp.sys_image_guid = attr.sys_image_guid;
427 resp.max_mr_size = attr.max_mr_size;
428 resp.page_size_cap = attr.page_size_cap;
429 resp.vendor_id = attr.vendor_id;
430 resp.vendor_part_id = attr.vendor_part_id;
431 resp.hw_ver = attr.hw_ver;
432 resp.max_qp = attr.max_qp;
433 resp.max_qp_wr = attr.max_qp_wr;
434 resp.device_cap_flags = attr.device_cap_flags;
435 resp.max_sge = attr.max_sge;
436 resp.max_sge_rd = attr.max_sge_rd;
437 resp.max_cq = attr.max_cq;
438 resp.max_cqe = attr.max_cqe;
439 resp.max_mr = attr.max_mr;
440 resp.max_pd = attr.max_pd;
441 resp.max_qp_rd_atom = attr.max_qp_rd_atom;
442 resp.max_ee_rd_atom = attr.max_ee_rd_atom;
443 resp.max_res_rd_atom = attr.max_res_rd_atom;
444 resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
445 resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
446 resp.atomic_cap = attr.atomic_cap;
447 resp.max_ee = attr.max_ee;
448 resp.max_rdd = attr.max_rdd;
449 resp.max_mw = attr.max_mw;
450 resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
451 resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
452 resp.max_mcast_grp = attr.max_mcast_grp;
453 resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
454 resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
455 resp.max_ah = attr.max_ah;
456 resp.max_fmr = attr.max_fmr;
457 resp.max_map_per_fmr = attr.max_map_per_fmr;
458 resp.max_srq = attr.max_srq;
459 resp.max_srq_wr = attr.max_srq_wr;
460 resp.max_srq_sge = attr.max_srq_sge;
461 resp.max_pkeys = attr.max_pkeys;
462 resp.local_ca_ack_delay = attr.local_ca_ack_delay;
463 resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;
464 470
465 if (copy_to_user((void __user *) (unsigned long) cmd.response, 471 if (copy_to_user((void __user *) (unsigned long) cmd.response,
466 &resp, sizeof resp)) 472 &resp, sizeof resp))
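The hunk above replaces roughly forty open-coded field copies with a single call to the new copy_query_dev_fields() helper, so the legacy query path and the extended query added later in this patch fill the shared fields identically. A minimal userspace model of the refactor, with illustrative struct and field names rather than the real ib_uverbs ones:

#include <string.h>

struct dev_attr   { unsigned fw_ver, max_qp, max_cq; };
struct query_resp { unsigned fw_ver, max_qp, max_cq; };

/* One helper fills the common fields so every caller stays in sync. */
static void copy_query_dev_fields(struct query_resp *resp,
                                  const struct dev_attr *attr)
{
        resp->fw_ver = attr->fw_ver;
        resp->max_qp = attr->max_qp;
        resp->max_cq = attr->max_cq;
}

static void legacy_query(struct query_resp *resp, const struct dev_attr *attr)
{
        memset(resp, 0, sizeof(*resp));
        copy_query_dev_fields(resp, attr);      /* was ~40 open-coded copies */
}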
@@ -2091,20 +2097,21 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
2091 if (qp->real_qp == qp) { 2097 if (qp->real_qp == qp) {
2092 ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask); 2098 ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
2093 if (ret) 2099 if (ret)
2094 goto out; 2100 goto release_qp;
2095 ret = qp->device->modify_qp(qp, attr, 2101 ret = qp->device->modify_qp(qp, attr,
2096 modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata); 2102 modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
2097 } else { 2103 } else {
2098 ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask)); 2104 ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
2099 } 2105 }
2100 2106
2101 put_qp_read(qp);
2102
2103 if (ret) 2107 if (ret)
2104 goto out; 2108 goto release_qp;
2105 2109
2106 ret = in_len; 2110 ret = in_len;
2107 2111
2112release_qp:
2113 put_qp_read(qp);
2114
2108out: 2115out:
2109 kfree(attr); 2116 kfree(attr);
2110 2117
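Before this hunk, a failure in ib_resolve_eth_l2_attrs() jumped straight to out, skipping put_qp_read() and leaking the reference taken on the QP, while the success path dropped the reference before inspecting ret. The new release_qp label funnels every post-lookup exit through the put. A compact sketch of the idiom, with stand-in functions (get_ref/step_one/step_two are not driver calls):

static int get_ref(void)  { return 0; }
static void put_ref(void) { }
static int step_one(void) { return 0; }
static int step_two(void) { return 0; }

static int modify_object(void)
{
        int ret;

        ret = get_ref();        /* must be paired with put_ref() */
        if (ret)
                goto out;       /* nothing acquired yet */

        ret = step_one();
        if (ret)
                goto release;   /* was "goto out": skipped put_ref() */

        ret = step_two();

release:
        put_ref();              /* every post-acquire exit drops the ref */
out:
        return ret;
}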
@@ -3287,3 +3294,64 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
3287 3294
3288 return ret ? ret : in_len; 3295 return ret ? ret : in_len;
3289} 3296}
3297
3298int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
3299 struct ib_udata *ucore,
3300 struct ib_udata *uhw)
3301{
3302 struct ib_uverbs_ex_query_device_resp resp;
3303 struct ib_uverbs_ex_query_device cmd;
3304 struct ib_device_attr attr;
3305 struct ib_device *device;
3306 int err;
3307
3308 device = file->device->ib_dev;
3309 if (ucore->inlen < sizeof(cmd))
3310 return -EINVAL;
3311
3312 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3313 if (err)
3314 return err;
3315
3316 if (cmd.comp_mask)
3317 return -EINVAL;
3318
3319 if (cmd.reserved)
3320 return -EINVAL;
3321
3322 resp.response_length = offsetof(typeof(resp), odp_caps);
3323
3324 if (ucore->outlen < resp.response_length)
3325 return -ENOSPC;
3326
3327 err = device->query_device(device, &attr);
3328 if (err)
3329 return err;
3330
3331 copy_query_dev_fields(file, &resp.base, &attr);
3332 resp.comp_mask = 0;
3333
3334 if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
3335 goto end;
3336
3337#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
3338 resp.odp_caps.general_caps = attr.odp_caps.general_caps;
3339 resp.odp_caps.per_transport_caps.rc_odp_caps =
3340 attr.odp_caps.per_transport_caps.rc_odp_caps;
3341 resp.odp_caps.per_transport_caps.uc_odp_caps =
3342 attr.odp_caps.per_transport_caps.uc_odp_caps;
3343 resp.odp_caps.per_transport_caps.ud_odp_caps =
3344 attr.odp_caps.per_transport_caps.ud_odp_caps;
3345 resp.odp_caps.reserved = 0;
3346#else
3347 memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
3348#endif
3349 resp.response_length += sizeof(resp.odp_caps);
3350
3351end:
3352 err = ib_copy_to_udata(ucore, &resp, resp.response_length);
3353 if (err)
3354 return err;
3355
3356 return 0;
3357}
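ib_uverbs_ex_query_device() demonstrates the extended-uverbs response ABI: comp_mask and reserved must be zero, response_length starts at the size of the base response (offsetof of the first optional member), and each trailing member such as odp_caps is appended only when the caller's output buffer can hold it, with response_length bumped accordingly. Only response_length bytes are copied back, so older userspace keeps working. A userspace model of the pattern, with made-up field names:

#include <stddef.h>
#include <string.h>

struct ex_resp {
        unsigned comp_mask;
        unsigned response_length;   /* bytes the kernel actually filled */
        unsigned base_field;        /* stands in for the big base response */
        unsigned odp_caps;          /* optional trailer */
};

/* Fill the response; user_outlen is the size of the caller's buffer. */
static size_t fill_ex_resp(struct ex_resp *resp, size_t user_outlen)
{
        memset(resp, 0, sizeof(*resp));
        resp->response_length = offsetof(struct ex_resp, odp_caps);
        resp->base_field = 42;

        /* Append the trailer only when the caller can receive it. */
        if (user_outlen >= resp->response_length + sizeof(resp->odp_caps)) {
                resp->odp_caps = 1;
                resp->response_length += sizeof(resp->odp_caps);
        }
        return resp->response_length;   /* copy exactly this many bytes out */
}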
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 5db1a8cc388d..259dcc7779f5 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -123,6 +123,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
123 struct ib_udata *uhw) = { 123 struct ib_udata *uhw) = {
124 [IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow, 124 [IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow,
125 [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow, 125 [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow,
126 [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device,
126}; 127};
127 128
128static void ib_uverbs_add_one(struct ib_device *device); 129static void ib_uverbs_add_one(struct ib_device *device);
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 794555dc86a5..bdfac2ccb704 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -225,13 +225,20 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
225 struct c4iw_cq *chp; 225 struct c4iw_cq *chp;
226 unsigned long flag; 226 unsigned long flag;
227 227
228 spin_lock_irqsave(&dev->lock, flag);
228 chp = get_chp(dev, qid); 229 chp = get_chp(dev, qid);
229 if (chp) { 230 if (chp) {
231 atomic_inc(&chp->refcnt);
232 spin_unlock_irqrestore(&dev->lock, flag);
230 t4_clear_cq_armed(&chp->cq); 233 t4_clear_cq_armed(&chp->cq);
231 spin_lock_irqsave(&chp->comp_handler_lock, flag); 234 spin_lock_irqsave(&chp->comp_handler_lock, flag);
232 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); 235 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
233 spin_unlock_irqrestore(&chp->comp_handler_lock, flag); 236 spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
234 } else 237 if (atomic_dec_and_test(&chp->refcnt))
238 wake_up(&chp->wait);
239 } else {
235 PDBG("%s unknown cqid 0x%x\n", __func__, qid); 240 PDBG("%s unknown cqid 0x%x\n", __func__, qid);
241 spin_unlock_irqrestore(&dev->lock, flag);
242 }
236 return 0; 243 return 0;
237} 244}
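The c4iw_ev_handler() change closes a use-after-free window: previously the CQ looked up by get_chp() could be destroyed while the handler was still dereferencing it. Now the lookup and a refcount bump happen under dev->lock, and the destroyer sleeping on chp->wait is woken when the last reference drops. A userspace model of the pattern, using a pthread mutex and C11 atomics in place of the spinlock and kernel atomics (table_lookup/wake_destroyer are stand-ins):

#include <pthread.h>
#include <stdatomic.h>

struct cq { atomic_int refcnt; };

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cq *table_lookup(unsigned qid) { (void)qid; return 0; }
static void wake_destroyer(struct cq *chp)   { (void)chp; }

static void handle_event(unsigned qid)
{
        struct cq *chp;

        pthread_mutex_lock(&dev_lock);
        chp = table_lookup(qid);
        if (chp)
                atomic_fetch_add(&chp->refcnt, 1);  /* pin before dropping the lock */
        pthread_mutex_unlock(&dev_lock);

        if (!chp)
                return;                             /* unknown qid */

        /* ... deliver the completion outside dev_lock ... */

        if (atomic_fetch_sub(&chp->refcnt, 1) == 1)
                wake_destroyer(chp);                /* last reference gone */
}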
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index b5678ac97393..d87e1650f643 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -196,7 +196,7 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
196 return (int)(rdev->lldi.vr->stag.size >> 5); 196 return (int)(rdev->lldi.vr->stag.size >> 5);
197} 197}
198 198
199#define C4IW_WR_TO (30*HZ) 199#define C4IW_WR_TO (60*HZ)
200 200
201struct c4iw_wr_wait { 201struct c4iw_wr_wait {
202 struct completion completion; 202 struct completion completion;
@@ -220,22 +220,21 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
220 u32 hwtid, u32 qpid, 220 u32 hwtid, u32 qpid,
221 const char *func) 221 const char *func)
222{ 222{
223 unsigned to = C4IW_WR_TO;
224 int ret; 223 int ret;
225 224
226 do { 225 if (c4iw_fatal_error(rdev)) {
227 ret = wait_for_completion_timeout(&wr_waitp->completion, to); 226 wr_waitp->ret = -EIO;
228 if (!ret) { 227 goto out;
229 printk(KERN_ERR MOD "%s - Device %s not responding - " 228 }
230 "tid %u qpid %u\n", func, 229
231 pci_name(rdev->lldi.pdev), hwtid, qpid); 230 ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
232 if (c4iw_fatal_error(rdev)) { 231 if (!ret) {
233 wr_waitp->ret = -EIO; 232 PDBG("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
234 break; 233 func, pci_name(rdev->lldi.pdev), hwtid, qpid);
235 } 234 rdev->flags |= T4_FATAL_ERROR;
236 to = to << 2; 235 wr_waitp->ret = -EIO;
237 } 236 }
238 } while (!ret); 237out:
239 if (wr_waitp->ret) 238 if (wr_waitp->ret)
240 PDBG("%s: FW reply %d tid %u qpid %u\n", 239 PDBG("%s: FW reply %d tid %u qpid %u\n",
241 pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid); 240 pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
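c4iw_wait_for_reply() used to loop forever, quadrupling a 30-second timeout on every pass and only giving up if the device was already flagged fatal. The rewrite waits once for C4IW_WR_TO (now 60s); a timeout marks the device fatal (T4_FATAL_ERROR) so this waiter returns -EIO and subsequent callers fail fast via the up-front c4iw_fatal_error() check. The shape of the new logic, with stub types standing in for the driver's:

#include <errno.h>

#define WR_TIMEOUT_SECS 60      /* was 30s, retried forever with a growing timeout */

struct wr_dev    { int fatal; };
struct wr_waiter { int ret; };

/* Stand-in for wait_for_completion_timeout(): nonzero means completed. */
static int wait_with_timeout(struct wr_waiter *w, int secs)
{
        (void)w; (void)secs;
        return 1;
}

static int wait_for_reply(struct wr_dev *dev, struct wr_waiter *w)
{
        if (dev->fatal)                 /* fail fast once the device is dead */
                return -EIO;

        if (!wait_with_timeout(w, WR_TIMEOUT_SECS)) {
                dev->fatal = 1;         /* first timeout disables the device */
                return -EIO;
        }
        return w->ret;
}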
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 4977082e081f..33c45dfcbd88 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -277,7 +277,7 @@ static int remove_file(struct dentry *parent, char *name)
277 } 277 }
278 278
279 spin_lock(&tmp->d_lock); 279 spin_lock(&tmp->d_lock);
280 if (!(d_unhashed(tmp) && tmp->d_inode)) { 280 if (!d_unhashed(tmp) && tmp->d_inode) {
281 dget_dlock(tmp); 281 dget_dlock(tmp);
282 __d_drop(tmp); 282 __d_drop(tmp);
283 spin_unlock(&tmp->d_lock); 283 spin_unlock(&tmp->d_lock);
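The small-looking change in remove_file() is a real logic fix: !(d_unhashed(tmp) && tmp->d_inode) is true whenever the dentry is hashed or lacks an inode, while the intent is to drop it only when it is hashed and has an inode. A small self-checking program makes the De Morgan difference explicit:

#include <assert.h>

static int old_cond(int unhashed, int has_inode) { return !(unhashed && has_inode); }
static int new_cond(int unhashed, int has_inode) { return !unhashed && has_inode; }

int main(void)
{
        /* Only a hashed dentry that has an inode should be dropped. */
        assert(new_cond(0, 1) == 1 && old_cond(0, 1) == 1);  /* agree */
        assert(new_cond(1, 1) == 0 && old_cond(1, 1) == 0);  /* agree */
        assert(new_cond(0, 0) == 0 && old_cond(0, 0) == 1);  /* old was wrong */
        assert(new_cond(1, 0) == 0 && old_cond(1, 0) == 1);  /* old was wrong */
        return 0;
}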
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 6559af60bffd..e08db7020cd4 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -908,9 +908,6 @@ void ipath_chip_cleanup(struct ipath_devdata *);
908/* clean up any chip type-specific stuff */ 908/* clean up any chip type-specific stuff */
909void ipath_chip_done(void); 909void ipath_chip_done(void);
910 910
911/* check to see if we have to force ordering for write combining */
912int ipath_unordered_wc(void);
913
914void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first, 911void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
915 unsigned cnt); 912 unsigned cnt);
916void ipath_cancel_sends(struct ipath_devdata *, int); 913void ipath_cancel_sends(struct ipath_devdata *, int);
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
index 1d7bd82a1fb1..1a7e20a75149 100644
--- a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
+++ b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
@@ -47,16 +47,3 @@ int ipath_enable_wc(struct ipath_devdata *dd)
47{ 47{
48 return 0; 48 return 0;
49} 49}
50
51/**
52 * ipath_unordered_wc - indicate whether write combining is unordered
53 *
54 * Because our performance depends on our ability to do write
55 * combining mmio writes in the most efficient way, we need to
56 * know if we are on a processor that may reorder stores when
57 * write combining.
58 */
59int ipath_unordered_wc(void)
60{
61 return 1;
62}
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
index 3428acb0868c..4ad0b932df1f 100644
--- a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
+++ b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
@@ -167,18 +167,3 @@ void ipath_disable_wc(struct ipath_devdata *dd)
167 dd->ipath_wc_cookie = 0; /* even on failure */ 167 dd->ipath_wc_cookie = 0; /* even on failure */
168 } 168 }
169} 169}
170
171/**
172 * ipath_unordered_wc - indicate whether write combining is ordered
173 *
174 * Because our performance depends on our ability to do write combining mmio
175 * writes in the most efficient way, we need to know if we are on an Intel
176 * or AMD x86_64 processor. AMD x86_64 processors flush WC buffers out in
177 * the order completed, and so no special flushing is required to get
178 * correct ordering. Intel processors, however, will flush write buffers
179 * out in "random" orders, and so explicit ordering is needed at times.
180 */
181int ipath_unordered_wc(void)
182{
183 return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
184}
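The ipath hunks above are dead-code removal: ipath_unordered_wc() evidently has no remaining callers, so the prototype in ipath_kernel.h and both per-architecture definitions go away together (the ppc64 variant always reported unordered write combining; the x86_64 variant keyed off whether the CPU vendor was AMD).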
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 56a593e0ae5d..39a488889fc7 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -372,7 +372,7 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
372 *slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id); 372 *slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
373 if (*slave < 0) { 373 if (*slave < 0) {
374 mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n", 374 mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
375 gid.global.interface_id); 375 be64_to_cpu(gid.global.interface_id));
376 return -ENOENT; 376 return -ENOENT;
377 } 377 }
378 return 0; 378 return 0;
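gid.global.interface_id is a big-endian (__be64) on-wire value, so printing it raw produced a byte-swapped GID in the warning on little-endian hosts; be64_to_cpu() fixes the log output. A userspace model of the conversion as it behaves on a little-endian machine (be64_to_host is a stand-in, not the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Byte-reverse a value loaded from big-endian storage; on a
 * little-endian host this is what be64_to_cpu() boils down to. */
static uint64_t be64_to_host(uint64_t be)
{
        uint64_t out = 0;
        for (int i = 0; i < 8; i++)
                out = (out << 8) | ((be >> (i * 8)) & 0xff);
        return out;
}

int main(void)
{
        uint64_t wire_gid = 0x0807060504030201ULL;  /* as loaded on LE hardware */

        printf("raw:       0x%016llx\n", (unsigned long long)wire_gid);
        printf("converted: 0x%016llx\n", (unsigned long long)be64_to_host(wire_gid));
        return 0;
}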
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 543ecdd8667b..0176caa5792c 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -369,8 +369,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
369 int err; 369 int err;
370 370
371 mutex_lock(&cq->resize_mutex); 371 mutex_lock(&cq->resize_mutex);
372 372 if (entries < 1 || entries > dev->dev->caps.max_cqes) {
373 if (entries < 1) {
374 err = -EINVAL; 373 err = -EINVAL;
375 goto out; 374 goto out;
376 } 375 }
@@ -381,7 +380,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
381 goto out; 380 goto out;
382 } 381 }
383 382
384 if (entries > dev->dev->caps.max_cqes) { 383 if (entries > dev->dev->caps.max_cqes + 1) {
385 err = -EINVAL; 384 err = -EINVAL;
386 goto out; 385 goto out;
387 } 386 }
@@ -394,7 +393,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
394 /* Can't be smaller than the number of outstanding CQEs */ 393 /* Can't be smaller than the number of outstanding CQEs */
395 outst_cqe = mlx4_ib_get_outstanding_cqes(cq); 394 outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
396 if (entries < outst_cqe + 1) { 395 if (entries < outst_cqe + 1) {
397 err = 0; 396 err = -EINVAL;
398 goto out; 397 goto out;
399 } 398 }
400 399
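Three fixes to the mlx4_ib_resize_cq() validation: the upper bound is now checked together with the lower one before any work is done, the later check allows max_cqes + 1 because the driver reserves one extra CQE internally, and shrinking below the number of outstanding CQEs now fails with -EINVAL instead of returning 0 and silently not resizing. A sketch of the resulting checks (the power-of-two roundup the driver performs is elided, and the names are illustrative):

#include <errno.h>

static int validate_resize(int entries, int max_cqes, int outstanding)
{
        if (entries < 1 || entries > max_cqes)
                return -EINVAL;          /* both bounds rejected up front */

        entries += 1;                    /* one CQE is reserved internally */
        if (entries > max_cqes + 1)
                return -EINVAL;          /* hence the "+ 1" allowance */

        if (entries < outstanding + 1)
                return -EINVAL;          /* was err = 0: a silent no-op */
        return 0;
}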
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index eb8e215f1613..ac6e2b710ea6 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1269,8 +1269,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1269 struct mlx4_dev *dev = mdev->dev; 1269 struct mlx4_dev *dev = mdev->dev;
1270 struct mlx4_ib_qp *mqp = to_mqp(ibqp); 1270 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1271 struct mlx4_ib_steering *ib_steering = NULL; 1271 struct mlx4_ib_steering *ib_steering = NULL;
1272 enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ? 1272 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1273 MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
1274 struct mlx4_flow_reg_id reg_id; 1273 struct mlx4_flow_reg_id reg_id;
1275 1274
1276 if (mdev->dev->caps.steering_mode == 1275 if (mdev->dev->caps.steering_mode ==
@@ -1284,8 +1283,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1284 !!(mqp->flags & 1283 !!(mqp->flags &
1285 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), 1284 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1286 prot, &reg_id.id); 1285 prot, &reg_id.id);
1287 if (err) 1286 if (err) {
1287 pr_err("multicast attach op failed, err %d\n", err);
1288 goto err_malloc; 1288 goto err_malloc;
1289 }
1289 1290
1290 reg_id.mirror = 0; 1291 reg_id.mirror = 0;
1291 if (mlx4_is_bonded(dev)) { 1292 if (mlx4_is_bonded(dev)) {
@@ -1348,9 +1349,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1348 struct net_device *ndev; 1349 struct net_device *ndev;
1349 struct mlx4_ib_gid_entry *ge; 1350 struct mlx4_ib_gid_entry *ge;
1350 struct mlx4_flow_reg_id reg_id = {0, 0}; 1351 struct mlx4_flow_reg_id reg_id = {0, 0};
1351 1352 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1352 enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
1353 MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
1354 1353
1355 if (mdev->dev->caps.steering_mode == 1354 if (mdev->dev->caps.steering_mode ==
1356 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1355 MLX4_STEERING_MODE_DEVICE_MANAGED) {
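In both mcg_attach and mcg_detach above, the protocol was previously guessed from the GID's second byte (0x0e selecting MLX4_PROT_IB_IPV4); both paths now always use MLX4_PROT_IB_IPV6, and the attach path additionally logs the error code when the firmware rejects the multicast attach.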
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index dfc6ca128a7e..ed2bd6701f9b 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1696,8 +1696,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1696 qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI || 1696 qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
1697 qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) { 1697 qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
1698 err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context); 1698 err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
1699 if (err) 1699 if (err) {
1700 return -EINVAL; 1700 err = -EINVAL;
1701 goto out;
1702 }
1701 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) 1703 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
1702 dev->qp1_proxy[qp->port - 1] = qp; 1704 dev->qp1_proxy[qp->port - 1] = qp;
1703 } 1705 }
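This is the same cleanup-label idiom as the uverbs release_qp hunk earlier in the patch: returning -EINVAL from deep inside __mlx4_ib_modify_qp() skipped the function's common exit path, so the failure is now recorded in err and routed through goto out.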
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 03bf81211a54..cc4ac1e583b2 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -997,7 +997,7 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
997 struct ib_device_attr *dprops = NULL; 997 struct ib_device_attr *dprops = NULL;
998 struct ib_port_attr *pprops = NULL; 998 struct ib_port_attr *pprops = NULL;
999 struct mlx5_general_caps *gen; 999 struct mlx5_general_caps *gen;
1000 int err = 0; 1000 int err = -ENOMEM;
1001 int port; 1001 int port;
1002 1002
1003 gen = &dev->mdev->caps.gen; 1003 gen = &dev->mdev->caps.gen;
@@ -1331,6 +1331,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
1331 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | 1331 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
1332 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | 1332 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
1333 (1ull << IB_USER_VERBS_CMD_OPEN_QP); 1333 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
1334 dev->ib_dev.uverbs_ex_cmd_mask =
1335 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
1334 1336
1335 dev->ib_dev.query_device = mlx5_ib_query_device; 1337 dev->ib_dev.query_device = mlx5_ib_query_device;
1336 dev->ib_dev.query_port = mlx5_ib_query_port; 1338 dev->ib_dev.query_port = mlx5_ib_query_port;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 32a28bd50b20..cd9822eeacae 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1012,6 +1012,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
1012 goto err_2; 1012 goto err_2;
1013 } 1013 }
1014 mr->umem = umem; 1014 mr->umem = umem;
1015 mr->dev = dev;
1015 mr->live = 1; 1016 mr->live = 1;
1016 kvfree(in); 1017 kvfree(in);
1017 1018
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index b43456ae124b..c9780d919769 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -40,7 +40,7 @@
40#include <be_roce.h> 40#include <be_roce.h>
41#include "ocrdma_sli.h" 41#include "ocrdma_sli.h"
42 42
43#define OCRDMA_ROCE_DRV_VERSION "10.2.287.0u" 43#define OCRDMA_ROCE_DRV_VERSION "10.4.205.0u"
44 44
45#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver" 45#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
46#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" 46#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
@@ -55,12 +55,19 @@
55#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) 55#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
56 56
57#define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo) 57#define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo)
58#define EQ_INTR_PER_SEC_THRSH_HI 150000
59#define EQ_INTR_PER_SEC_THRSH_LOW 100000
60#define EQ_AIC_MAX_EQD 20
61#define EQ_AIC_MIN_EQD 0
62
63void ocrdma_eqd_set_task(struct work_struct *work);
58 64
59struct ocrdma_dev_attr { 65struct ocrdma_dev_attr {
60 u8 fw_ver[32]; 66 u8 fw_ver[32];
61 u32 vendor_id; 67 u32 vendor_id;
62 u32 device_id; 68 u32 device_id;
63 u16 max_pd; 69 u16 max_pd;
70 u16 max_dpp_pds;
64 u16 max_cq; 71 u16 max_cq;
65 u16 max_cqe; 72 u16 max_cqe;
66 u16 max_qp; 73 u16 max_qp;
@@ -116,12 +123,19 @@ struct ocrdma_queue_info {
116 bool created; 123 bool created;
117}; 124};
118 125
126struct ocrdma_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
127 u32 prev_eqd;
128 u64 eq_intr_cnt;
129 u64 prev_eq_intr_cnt;
130};
131
119struct ocrdma_eq { 132struct ocrdma_eq {
120 struct ocrdma_queue_info q; 133 struct ocrdma_queue_info q;
121 u32 vector; 134 u32 vector;
122 int cq_cnt; 135 int cq_cnt;
123 struct ocrdma_dev *dev; 136 struct ocrdma_dev *dev;
124 char irq_name[32]; 137 char irq_name[32];
138 struct ocrdma_aic_obj aic_obj;
125}; 139};
126 140
127struct ocrdma_mq { 141struct ocrdma_mq {
@@ -171,6 +185,21 @@ struct ocrdma_stats {
171 struct ocrdma_dev *dev; 185 struct ocrdma_dev *dev;
172}; 186};
173 187
188struct ocrdma_pd_resource_mgr {
189 u32 pd_norm_start;
190 u16 pd_norm_count;
191 u16 pd_norm_thrsh;
192 u16 max_normal_pd;
193 u32 pd_dpp_start;
194 u16 pd_dpp_count;
195 u16 pd_dpp_thrsh;
196 u16 max_dpp_pd;
197 u16 dpp_page_index;
198 unsigned long *pd_norm_bitmap;
199 unsigned long *pd_dpp_bitmap;
200 bool pd_prealloc_valid;
201};
202
174struct stats_mem { 203struct stats_mem {
175 struct ocrdma_mqe mqe; 204 struct ocrdma_mqe mqe;
176 void *va; 205 void *va;
@@ -198,6 +227,7 @@ struct ocrdma_dev {
198 227
199 struct ocrdma_eq *eq_tbl; 228 struct ocrdma_eq *eq_tbl;
200 int eq_cnt; 229 int eq_cnt;
230 struct delayed_work eqd_work;
201 u16 base_eqid; 231 u16 base_eqid;
202 u16 max_eq; 232 u16 max_eq;
203 233
@@ -255,7 +285,12 @@ struct ocrdma_dev {
255 struct ocrdma_stats rx_qp_err_stats; 285 struct ocrdma_stats rx_qp_err_stats;
256 struct ocrdma_stats tx_dbg_stats; 286 struct ocrdma_stats tx_dbg_stats;
257 struct ocrdma_stats rx_dbg_stats; 287 struct ocrdma_stats rx_dbg_stats;
288 struct ocrdma_stats driver_stats;
289 struct ocrdma_stats reset_stats;
258 struct dentry *dir; 290 struct dentry *dir;
291 atomic_t async_err_stats[OCRDMA_MAX_ASYNC_ERRORS];
292 atomic_t cqe_err_stats[OCRDMA_MAX_CQE_ERR];
293 struct ocrdma_pd_resource_mgr *pd_mgr;
259}; 294};
260 295
261struct ocrdma_cq { 296struct ocrdma_cq {
@@ -335,7 +370,6 @@ struct ocrdma_srq {
335 370
336struct ocrdma_qp { 371struct ocrdma_qp {
337 struct ib_qp ibqp; 372 struct ib_qp ibqp;
338 struct ocrdma_dev *dev;
339 373
340 u8 __iomem *sq_db; 374 u8 __iomem *sq_db;
341 struct ocrdma_qp_hwq_info sq; 375 struct ocrdma_qp_hwq_info sq;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index f3cc8c9e65ae..d812904f3984 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -29,19 +29,22 @@
29#include <net/netevent.h> 29#include <net/netevent.h>
30 30
31#include <rdma/ib_addr.h> 31#include <rdma/ib_addr.h>
32#include <rdma/ib_mad.h>
32 33
33#include "ocrdma.h" 34#include "ocrdma.h"
34#include "ocrdma_verbs.h" 35#include "ocrdma_verbs.h"
35#include "ocrdma_ah.h" 36#include "ocrdma_ah.h"
36#include "ocrdma_hw.h" 37#include "ocrdma_hw.h"
38#include "ocrdma_stats.h"
37 39
38#define OCRDMA_VID_PCP_SHIFT 0xD 40#define OCRDMA_VID_PCP_SHIFT 0xD
39 41
40static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, 42static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
41 struct ib_ah_attr *attr, union ib_gid *sgid, int pdid) 43 struct ib_ah_attr *attr, union ib_gid *sgid,
44 int pdid, bool *isvlan)
42{ 45{
43 int status = 0; 46 int status = 0;
44 u16 vlan_tag; bool vlan_enabled = false; 47 u16 vlan_tag;
45 struct ocrdma_eth_vlan eth; 48 struct ocrdma_eth_vlan eth;
46 struct ocrdma_grh grh; 49 struct ocrdma_grh grh;
47 int eth_sz; 50 int eth_sz;
@@ -59,7 +62,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
59 vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT; 62 vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
60 eth.vlan_tag = cpu_to_be16(vlan_tag); 63 eth.vlan_tag = cpu_to_be16(vlan_tag);
61 eth_sz = sizeof(struct ocrdma_eth_vlan); 64 eth_sz = sizeof(struct ocrdma_eth_vlan);
62 vlan_enabled = true; 65 *isvlan = true;
63 } else { 66 } else {
64 eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); 67 eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
65 eth_sz = sizeof(struct ocrdma_eth_basic); 68 eth_sz = sizeof(struct ocrdma_eth_basic);
@@ -82,7 +85,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
82 /* Eth HDR */ 85 /* Eth HDR */
83 memcpy(&ah->av->eth_hdr, &eth, eth_sz); 86 memcpy(&ah->av->eth_hdr, &eth, eth_sz);
84 memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh)); 87 memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
85 if (vlan_enabled) 88 if (*isvlan)
86 ah->av->valid |= OCRDMA_AV_VLAN_VALID; 89 ah->av->valid |= OCRDMA_AV_VLAN_VALID;
87 ah->av->valid = cpu_to_le32(ah->av->valid); 90 ah->av->valid = cpu_to_le32(ah->av->valid);
88 return status; 91 return status;
@@ -91,6 +94,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
91struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) 94struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
92{ 95{
93 u32 *ahid_addr; 96 u32 *ahid_addr;
97 bool isvlan = false;
94 int status; 98 int status;
95 struct ocrdma_ah *ah; 99 struct ocrdma_ah *ah;
96 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); 100 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
@@ -127,15 +131,20 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
127 } 131 }
128 } 132 }
129 133
130 status = set_av_attr(dev, ah, attr, &sgid, pd->id); 134 status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan);
131 if (status) 135 if (status)
132 goto av_conf_err; 136 goto av_conf_err;
133 137
134 /* if pd is for the user process, pass the ah_id to user space */ 138 /* if pd is for the user process, pass the ah_id to user space */
135 if ((pd->uctx) && (pd->uctx->ah_tbl.va)) { 139 if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
136 ahid_addr = pd->uctx->ah_tbl.va + attr->dlid; 140 ahid_addr = pd->uctx->ah_tbl.va + attr->dlid;
137 *ahid_addr = ah->id; 141 *ahid_addr = 0;
142 *ahid_addr |= ah->id & OCRDMA_AH_ID_MASK;
143 if (isvlan)
144 *ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK <<
145 OCRDMA_AH_VLAN_VALID_SHIFT);
138 } 146 }
147
139 return &ah->ibah; 148 return &ah->ibah;
140 149
141av_conf_err: 150av_conf_err:
@@ -191,5 +200,20 @@ int ocrdma_process_mad(struct ib_device *ibdev,
191 struct ib_grh *in_grh, 200 struct ib_grh *in_grh,
192 struct ib_mad *in_mad, struct ib_mad *out_mad) 201 struct ib_mad *in_mad, struct ib_mad *out_mad)
193{ 202{
194 return IB_MAD_RESULT_SUCCESS; 203 int status;
204 struct ocrdma_dev *dev;
205
206 switch (in_mad->mad_hdr.mgmt_class) {
207 case IB_MGMT_CLASS_PERF_MGMT:
208 dev = get_ocrdma_dev(ibdev);
209 if (!ocrdma_pma_counters(dev, out_mad))
210 status = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
211 else
212 status = IB_MAD_RESULT_SUCCESS;
213 break;
214 default:
215 status = IB_MAD_RESULT_SUCCESS;
216 break;
217 }
218 return status;
195} 219}
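ocrdma_process_mad() previously reported success for every MAD without ever replying. It now recognizes the performance-management class and, when ocrdma_pma_counters() can fill out_mad, ORs in IB_MAD_RESULT_REPLY so the counter response is actually sent back to the requester; all other classes keep the old behavior.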
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 8ac49e7f96d1..726a87cf22dc 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -28,6 +28,12 @@
28#ifndef __OCRDMA_AH_H__ 28#ifndef __OCRDMA_AH_H__
29#define __OCRDMA_AH_H__ 29#define __OCRDMA_AH_H__
30 30
31enum {
32 OCRDMA_AH_ID_MASK = 0x3FF,
33 OCRDMA_AH_VLAN_VALID_MASK = 0x01,
34 OCRDMA_AH_VLAN_VALID_SHIFT = 0x1F
35};
36
31struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *); 37struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *);
32int ocrdma_destroy_ah(struct ib_ah *); 38int ocrdma_destroy_ah(struct ib_ah *);
33int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *); 39int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);
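These masks define the 32-bit value ocrdma_create_ah() now publishes to userspace: the AH id occupies the low 10 bits and bit 31 flags that the address vector carries a VLAN tag, which is what lets the user library request VLAN insertion on sends (compare the new OCRDMA_FLAG_AH_VLAN_PR work-request flag later in this patch). A sketch of the encoding plus a plausible decode (the decode side is an assumption based on the masks, not code from this patch):

#include <stdint.h>

#define AH_ID_MASK          0x3FFu   /* OCRDMA_AH_ID_MASK */
#define AH_VLAN_VALID_SHIFT 0x1F     /* OCRDMA_AH_VLAN_VALID_SHIFT */

static uint32_t encode_ahid(uint32_t ah_id, int vlan)
{
        uint32_t v = ah_id & AH_ID_MASK;        /* id lives in the low 10 bits */

        if (vlan)
                v |= 1u << AH_VLAN_VALID_SHIFT; /* bit 31: AV carries a VLAN */
        return v;
}

static uint32_t decode_id(uint32_t v)   { return v & AH_ID_MASK; }
static int      decode_vlan(uint32_t v) { return (v >> AH_VLAN_VALID_SHIFT) & 1; }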
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 638bff1ffc6c..0c9e95909a64 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -734,6 +734,9 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
734 break; 734 break;
735 } 735 }
736 736
737 if (type < OCRDMA_MAX_ASYNC_ERRORS)
738 atomic_inc(&dev->async_err_stats[type]);
739
737 if (qp_event) { 740 if (qp_event) {
738 if (qp->ibqp.event_handler) 741 if (qp->ibqp.event_handler)
739 qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context); 742 qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
@@ -831,20 +834,20 @@ static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
831 return 0; 834 return 0;
832} 835}
833 836
834static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev, 837static struct ocrdma_cq *_ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
835 struct ocrdma_cq *cq) 838 struct ocrdma_cq *cq, bool sq)
836{ 839{
837 unsigned long flags;
838 struct ocrdma_qp *qp; 840 struct ocrdma_qp *qp;
839 bool buddy_cq_found = false; 841 struct list_head *cur;
840 /* Go through list of QPs in error state which are using this CQ 842 struct ocrdma_cq *bcq = NULL;
841 * and invoke its callback handler to trigger CQE processing for 843 struct list_head *head = sq?(&cq->sq_head):(&cq->rq_head);
842 * error/flushed CQE. It is rare to find more than few entries in 844
843 * this list as most consumers stops after getting error CQE. 845 list_for_each(cur, head) {
844 * List is traversed only once when a matching buddy cq found for a QP. 846 if (sq)
845 */ 847 qp = list_entry(cur, struct ocrdma_qp, sq_entry);
846 spin_lock_irqsave(&dev->flush_q_lock, flags); 848 else
847 list_for_each_entry(qp, &cq->sq_head, sq_entry) { 849 qp = list_entry(cur, struct ocrdma_qp, rq_entry);
850
848 if (qp->srq) 851 if (qp->srq)
849 continue; 852 continue;
850 /* if wq and rq share the same cq, than comp_handler 853 /* if wq and rq share the same cq, than comp_handler
@@ -856,19 +859,41 @@ static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
856 * if completion came on rq, sq's cq is buddy cq. 859 * if completion came on rq, sq's cq is buddy cq.
857 */ 860 */
858 if (qp->sq_cq == cq) 861 if (qp->sq_cq == cq)
859 cq = qp->rq_cq; 862 bcq = qp->rq_cq;
860 else 863 else
861 cq = qp->sq_cq; 864 bcq = qp->sq_cq;
862 buddy_cq_found = true; 865 return bcq;
863 break;
864 } 866 }
867 return NULL;
868}
869
870static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
871 struct ocrdma_cq *cq)
872{
873 unsigned long flags;
874 struct ocrdma_cq *bcq = NULL;
875
876 /* Go through list of QPs in error state which are using this CQ
877 * and invoke its callback handler to trigger CQE processing for
 878 * error/flushed CQE. It is rare to find more than a few entries in
 879 * this list as most consumers stop after getting an error CQE.
 880 * The list is traversed only once, when a matching buddy cq is found for a QP.
881 */
882 spin_lock_irqsave(&dev->flush_q_lock, flags);
883 /* Check if buddy CQ is present.
884 * true - Check for SQ CQ
885 * false - Check for RQ CQ
886 */
887 bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true);
888 if (bcq == NULL)
889 bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false);
865 spin_unlock_irqrestore(&dev->flush_q_lock, flags); 890 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
866 if (buddy_cq_found == false) 891
867 return; 892 /* if there is valid buddy cq, look for its completion handler */
868 if (cq->ibcq.comp_handler) { 893 if (bcq && bcq->ibcq.comp_handler) {
869 spin_lock_irqsave(&cq->comp_handler_lock, flags); 894 spin_lock_irqsave(&bcq->comp_handler_lock, flags);
870 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); 895 (*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context);
871 spin_unlock_irqrestore(&cq->comp_handler_lock, flags); 896 spin_unlock_irqrestore(&bcq->comp_handler_lock, flags);
872 } 897 }
873} 898}
874 899
@@ -935,6 +960,7 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
935 960
936 } while (budget); 961 } while (budget);
937 962
963 eq->aic_obj.eq_intr_cnt++;
938 ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0); 964 ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
939 return IRQ_HANDLED; 965 return IRQ_HANDLED;
940} 966}
@@ -1050,6 +1076,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
1050 attr->max_pd = 1076 attr->max_pd =
1051 (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >> 1077 (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
1052 OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT; 1078 OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
1079 attr->max_dpp_pds =
1080 (rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >>
1081 OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET;
1053 attr->max_qp = 1082 attr->max_qp =
1054 (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >> 1083 (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
1055 OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT; 1084 OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
@@ -1396,6 +1425,122 @@ int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
1396 return status; 1425 return status;
1397} 1426}
1398 1427
1428
1429static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
1430{
1431 int status = -ENOMEM;
1432 size_t pd_bitmap_size;
1433 struct ocrdma_alloc_pd_range *cmd;
1434 struct ocrdma_alloc_pd_range_rsp *rsp;
1435
 1436 /* Preallocate the DPP PDs */
1437 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
1438 if (!cmd)
1439 return -ENOMEM;
1440 cmd->pd_count = dev->attr.max_dpp_pds;
1441 cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
1442 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1443 if (status)
1444 goto mbx_err;
1445 rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
1446
1447 if ((rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && rsp->pd_count) {
1448 dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
1449 OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
1450 dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
1451 OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
1452 dev->pd_mgr->max_dpp_pd = rsp->pd_count;
1453 pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
1454 dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
1455 GFP_KERNEL);
1456 }
1457 kfree(cmd);
1458
1459 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
1460 if (!cmd)
1461 return -ENOMEM;
1462
1463 cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
1464 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1465 if (status)
1466 goto mbx_err;
1467 rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
1468 if (rsp->pd_count) {
1469 dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
1470 OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
1471 dev->pd_mgr->max_normal_pd = rsp->pd_count;
1472 pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
1473 dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
1474 GFP_KERNEL);
1475 }
1476
1477 if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
1478 /* Enable PD resource manager */
1479 dev->pd_mgr->pd_prealloc_valid = true;
1480 } else {
1481 return -ENOMEM;
1482 }
1483mbx_err:
1484 kfree(cmd);
1485 return status;
1486}
1487
1488static void ocrdma_mbx_dealloc_pd_range(struct ocrdma_dev *dev)
1489{
1490 struct ocrdma_dealloc_pd_range *cmd;
1491
1492 /* return normal PDs to firmware */
1493 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, sizeof(*cmd));
1494 if (!cmd)
1495 goto mbx_err;
1496
1497 if (dev->pd_mgr->max_normal_pd) {
1498 cmd->start_pd_id = dev->pd_mgr->pd_norm_start;
1499 cmd->pd_count = dev->pd_mgr->max_normal_pd;
1500 ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1501 }
1502
1503 if (dev->pd_mgr->max_dpp_pd) {
1504 kfree(cmd);
1505 /* return DPP PDs to firmware */
1506 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE,
1507 sizeof(*cmd));
1508 if (!cmd)
1509 goto mbx_err;
1510
1511 cmd->start_pd_id = dev->pd_mgr->pd_dpp_start;
1512 cmd->pd_count = dev->pd_mgr->max_dpp_pd;
1513 ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1514 }
1515mbx_err:
1516 kfree(cmd);
1517}
1518
1519void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
1520{
1521 int status;
1522
1523 dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
1524 GFP_KERNEL);
1525 if (!dev->pd_mgr) {
1526 pr_err("%s(%d)Memory allocation failure.\n", __func__, dev->id);
1527 return;
1528 }
1529 status = ocrdma_mbx_alloc_pd_range(dev);
1530 if (status) {
1531 pr_err("%s(%d) Unable to initialize PD pool, using default.\n",
1532 __func__, dev->id);
1533 }
1534}
1535
1536static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
1537{
1538 ocrdma_mbx_dealloc_pd_range(dev);
1539 kfree(dev->pd_mgr->pd_norm_bitmap);
1540 kfree(dev->pd_mgr->pd_dpp_bitmap);
1541 kfree(dev->pd_mgr);
1542}
1543
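The PD resource manager above preallocates two PD ranges from firmware at init time, one DPP and one normal, and then serves allocations from per-range bitmaps instead of issuing a mailbox command per ib_alloc_pd(); if neither bitmap could be set up, pd_prealloc_valid stays false and the driver falls back to the old per-PD mailbox path. A userspace model of handing ids out of such a range (the kernel would use find_first_zero_bit()/set_bit() on the same idea; names here are illustrative):

#include <stdint.h>

#define RANGE_START 100u
#define RANGE_COUNT 64u

static uint64_t pd_bitmap;   /* bit i set => PD (RANGE_START + i) in use */

static int alloc_pd_id(uint32_t *id)
{
        for (unsigned i = 0; i < RANGE_COUNT; i++) {
                if (!(pd_bitmap & (1ull << i))) {
                        pd_bitmap |= 1ull << i;
                        *id = RANGE_START + i;
                        return 0;
                }
        }
        return -1;   /* range exhausted: fall back to a mailbox alloc */
}

static void free_pd_id(uint32_t id)
{
        pd_bitmap &= ~(1ull << (id - RANGE_START));
}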
1399static int ocrdma_build_q_conf(u32 *num_entries, int entry_size, 1544static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
1400 int *num_pages, int *page_size) 1545 int *num_pages, int *page_size)
1401{ 1546{
@@ -1896,8 +2041,9 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
1896{ 2041{
1897 bool found; 2042 bool found;
1898 unsigned long flags; 2043 unsigned long flags;
2044 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
1899 2045
1900 spin_lock_irqsave(&qp->dev->flush_q_lock, flags); 2046 spin_lock_irqsave(&dev->flush_q_lock, flags);
1901 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp); 2047 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1902 if (!found) 2048 if (!found)
1903 list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head); 2049 list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
@@ -1906,7 +2052,7 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
1906 if (!found) 2052 if (!found)
1907 list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head); 2053 list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
1908 } 2054 }
1909 spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags); 2055 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1910} 2056}
1911 2057
1912static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp) 2058static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
@@ -1972,7 +2118,8 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
1972 int status; 2118 int status;
1973 u32 len, hw_pages, hw_page_size; 2119 u32 len, hw_pages, hw_page_size;
1974 dma_addr_t pa; 2120 dma_addr_t pa;
1975 struct ocrdma_dev *dev = qp->dev; 2121 struct ocrdma_pd *pd = qp->pd;
2122 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
1976 struct pci_dev *pdev = dev->nic_info.pdev; 2123 struct pci_dev *pdev = dev->nic_info.pdev;
1977 u32 max_wqe_allocated; 2124 u32 max_wqe_allocated;
1978 u32 max_sges = attrs->cap.max_send_sge; 2125 u32 max_sges = attrs->cap.max_send_sge;
@@ -2027,7 +2174,8 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
2027 int status; 2174 int status;
2028 u32 len, hw_pages, hw_page_size; 2175 u32 len, hw_pages, hw_page_size;
2029 dma_addr_t pa = 0; 2176 dma_addr_t pa = 0;
2030 struct ocrdma_dev *dev = qp->dev; 2177 struct ocrdma_pd *pd = qp->pd;
2178 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
2031 struct pci_dev *pdev = dev->nic_info.pdev; 2179 struct pci_dev *pdev = dev->nic_info.pdev;
2032 u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1; 2180 u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
2033 2181
@@ -2086,7 +2234,8 @@ static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
2086static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd, 2234static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
2087 struct ocrdma_qp *qp) 2235 struct ocrdma_qp *qp)
2088{ 2236{
2089 struct ocrdma_dev *dev = qp->dev; 2237 struct ocrdma_pd *pd = qp->pd;
2238 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
2090 struct pci_dev *pdev = dev->nic_info.pdev; 2239 struct pci_dev *pdev = dev->nic_info.pdev;
2091 dma_addr_t pa = 0; 2240 dma_addr_t pa = 0;
2092 int ird_page_size = dev->attr.ird_page_size; 2241 int ird_page_size = dev->attr.ird_page_size;
@@ -2157,8 +2306,8 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
2157{ 2306{
2158 int status = -ENOMEM; 2307 int status = -ENOMEM;
2159 u32 flags = 0; 2308 u32 flags = 0;
2160 struct ocrdma_dev *dev = qp->dev;
2161 struct ocrdma_pd *pd = qp->pd; 2309 struct ocrdma_pd *pd = qp->pd;
2310 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
2162 struct pci_dev *pdev = dev->nic_info.pdev; 2311 struct pci_dev *pdev = dev->nic_info.pdev;
2163 struct ocrdma_cq *cq; 2312 struct ocrdma_cq *cq;
2164 struct ocrdma_create_qp_req *cmd; 2313 struct ocrdma_create_qp_req *cmd;
@@ -2281,11 +2430,12 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
2281 union ib_gid sgid, zgid; 2430 union ib_gid sgid, zgid;
2282 u32 vlan_id; 2431 u32 vlan_id;
2283 u8 mac_addr[6]; 2432 u8 mac_addr[6];
2433 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2284 2434
2285 if ((ah_attr->ah_flags & IB_AH_GRH) == 0) 2435 if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
2286 return -EINVAL; 2436 return -EINVAL;
2287 if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0)) 2437 if (atomic_cmpxchg(&dev->update_sl, 1, 0))
2288 ocrdma_init_service_level(qp->dev); 2438 ocrdma_init_service_level(dev);
2289 cmd->params.tclass_sq_psn |= 2439 cmd->params.tclass_sq_psn |=
2290 (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT); 2440 (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
2291 cmd->params.rnt_rc_sl_fl |= 2441 cmd->params.rnt_rc_sl_fl |=
@@ -2296,7 +2446,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
2296 cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID; 2446 cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
2297 memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0], 2447 memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
2298 sizeof(cmd->params.dgid)); 2448 sizeof(cmd->params.dgid));
2299 status = ocrdma_query_gid(&qp->dev->ibdev, 1, 2449 status = ocrdma_query_gid(&dev->ibdev, 1,
2300 ah_attr->grh.sgid_index, &sgid); 2450 ah_attr->grh.sgid_index, &sgid);
2301 if (status) 2451 if (status)
2302 return status; 2452 return status;
@@ -2307,7 +2457,9 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
2307 2457
2308 qp->sgid_idx = ah_attr->grh.sgid_index; 2458 qp->sgid_idx = ah_attr->grh.sgid_index;
2309 memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid)); 2459 memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
2310 ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]); 2460 status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
2461 if (status)
2462 return status;
2311 cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) | 2463 cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
2312 (mac_addr[2] << 16) | (mac_addr[3] << 24); 2464 (mac_addr[2] << 16) | (mac_addr[3] << 24);
2313 /* convert them to LE format. */ 2465 /* convert them to LE format. */
@@ -2320,7 +2472,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
2320 vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT; 2472 vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
2321 cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID; 2473 cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
2322 cmd->params.rnt_rc_sl_fl |= 2474 cmd->params.rnt_rc_sl_fl |=
2323 (qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT; 2475 (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
2324 } 2476 }
2325 return 0; 2477 return 0;
2326} 2478}
@@ -2330,6 +2482,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
2330 struct ib_qp_attr *attrs, int attr_mask) 2482 struct ib_qp_attr *attrs, int attr_mask)
2331{ 2483{
2332 int status = 0; 2484 int status = 0;
2485 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2333 2486
2334 if (attr_mask & IB_QP_PKEY_INDEX) { 2487 if (attr_mask & IB_QP_PKEY_INDEX) {
2335 cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index & 2488 cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
@@ -2347,12 +2500,12 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
2347 return status; 2500 return status;
2348 } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) { 2501 } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
2349 /* set the default mac address for UD, GSI QPs */ 2502 /* set the default mac address for UD, GSI QPs */
2350 cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] | 2503 cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] |
2351 (qp->dev->nic_info.mac_addr[1] << 8) | 2504 (dev->nic_info.mac_addr[1] << 8) |
2352 (qp->dev->nic_info.mac_addr[2] << 16) | 2505 (dev->nic_info.mac_addr[2] << 16) |
2353 (qp->dev->nic_info.mac_addr[3] << 24); 2506 (dev->nic_info.mac_addr[3] << 24);
2354 cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] | 2507 cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] |
2355 (qp->dev->nic_info.mac_addr[5] << 8); 2508 (dev->nic_info.mac_addr[5] << 8);
2356 } 2509 }
2357 if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) && 2510 if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
2358 attrs->en_sqd_async_notify) { 2511 attrs->en_sqd_async_notify) {
@@ -2409,7 +2562,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
2409 cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID; 2562 cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
2410 } 2563 }
2411 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { 2564 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2412 if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) { 2565 if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) {
2413 status = -EINVAL; 2566 status = -EINVAL;
2414 goto pmtu_err; 2567 goto pmtu_err;
2415 } 2568 }
@@ -2417,7 +2570,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
2417 cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID; 2570 cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
2418 } 2571 }
2419 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { 2572 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2420 if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) { 2573 if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) {
2421 status = -EINVAL; 2574 status = -EINVAL;
2422 goto pmtu_err; 2575 goto pmtu_err;
2423 } 2576 }
@@ -2870,6 +3023,82 @@ done:
2870 return status; 3023 return status;
2871} 3024}
2872 3025
3026static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
3027 int num)
3028{
3029 int i, status = -ENOMEM;
3030 struct ocrdma_modify_eqd_req *cmd;
3031
3032 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd));
3033 if (!cmd)
3034 return status;
3035
3036 ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY,
3037 OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
3038
3039 cmd->cmd.num_eq = num;
3040 for (i = 0; i < num; i++) {
3041 cmd->cmd.set_eqd[i].eq_id = eq[i].q.id;
3042 cmd->cmd.set_eqd[i].phase = 0;
3043 cmd->cmd.set_eqd[i].delay_multiplier =
3044 (eq[i].aic_obj.prev_eqd * 65)/100;
3045 }
3046 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
3047 if (status)
3048 goto mbx_err;
3049mbx_err:
3050 kfree(cmd);
3051 return status;
3052}
3053
3054static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
3055 int num)
3056{
3057 int num_eqs, i = 0;
3058 if (num > 8) {
3059 while (num) {
3060 num_eqs = min(num, 8);
3061 ocrdma_mbx_modify_eqd(dev, &eq[i], num_eqs);
3062 i += num_eqs;
3063 num -= num_eqs;
3064 }
3065 } else {
3066 ocrdma_mbx_modify_eqd(dev, eq, num);
3067 }
3068 return 0;
3069}
3070
3071void ocrdma_eqd_set_task(struct work_struct *work)
3072{
3073 struct ocrdma_dev *dev =
3074 container_of(work, struct ocrdma_dev, eqd_work.work);
3075 struct ocrdma_eq *eq = 0;
3076 int i, num = 0, status = -EINVAL;
3077 u64 eq_intr;
3078
3079 for (i = 0; i < dev->eq_cnt; i++) {
3080 eq = &dev->eq_tbl[i];
3081 if (eq->aic_obj.eq_intr_cnt > eq->aic_obj.prev_eq_intr_cnt) {
3082 eq_intr = eq->aic_obj.eq_intr_cnt -
3083 eq->aic_obj.prev_eq_intr_cnt;
3084 if ((eq_intr > EQ_INTR_PER_SEC_THRSH_HI) &&
3085 (eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)) {
3086 eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD;
3087 num++;
3088 } else if ((eq_intr < EQ_INTR_PER_SEC_THRSH_LOW) &&
3089 (eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)) {
3090 eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD;
3091 num++;
3092 }
3093 }
3094 eq->aic_obj.prev_eq_intr_cnt = eq->aic_obj.eq_intr_cnt;
3095 }
3096
3097 if (num)
3098 status = ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
3099 schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
3100}
3101
2873int ocrdma_init_hw(struct ocrdma_dev *dev) 3102int ocrdma_init_hw(struct ocrdma_dev *dev)
2874{ 3103{
2875 int status; 3104 int status;
@@ -2915,6 +3144,7 @@ qpeq_err:
2915 3144
2916void ocrdma_cleanup_hw(struct ocrdma_dev *dev) 3145void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
2917{ 3146{
3147 ocrdma_free_pd_pool(dev);
2918 ocrdma_mbx_delete_ah_tbl(dev); 3148 ocrdma_mbx_delete_ah_tbl(dev);
2919 3149
2920 /* cleanup the eqs */ 3150 /* cleanup the eqs */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index 6eed8f191322..e905972fceb7 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -136,5 +136,7 @@ int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq);
136int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset); 136int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset);
137char *port_speed_string(struct ocrdma_dev *dev); 137char *port_speed_string(struct ocrdma_dev *dev);
138void ocrdma_init_service_level(struct ocrdma_dev *); 138void ocrdma_init_service_level(struct ocrdma_dev *);
139void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev);
140void ocrdma_free_pd_range(struct ocrdma_dev *dev);
139 141
140#endif /* __OCRDMA_HW_H__ */ 142#endif /* __OCRDMA_HW_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index b0b2257b8e04..7a2b59aca004 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -239,7 +239,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
239 239
240 dev->ibdev.node_type = RDMA_NODE_IB_CA; 240 dev->ibdev.node_type = RDMA_NODE_IB_CA;
241 dev->ibdev.phys_port_cnt = 1; 241 dev->ibdev.phys_port_cnt = 1;
242 dev->ibdev.num_comp_vectors = 1; 242 dev->ibdev.num_comp_vectors = dev->eq_cnt;
243 243
244 /* mandatory verbs. */ 244 /* mandatory verbs. */
245 dev->ibdev.query_device = ocrdma_query_device; 245 dev->ibdev.query_device = ocrdma_query_device;
@@ -329,6 +329,8 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
329 if (dev->stag_arr == NULL) 329 if (dev->stag_arr == NULL)
330 goto alloc_err; 330 goto alloc_err;
331 331
332 ocrdma_alloc_pd_pool(dev);
333
332 spin_lock_init(&dev->av_tbl.lock); 334 spin_lock_init(&dev->av_tbl.lock);
333 spin_lock_init(&dev->flush_q_lock); 335 spin_lock_init(&dev->flush_q_lock);
334 return 0; 336 return 0;
@@ -491,6 +493,9 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
491 spin_unlock(&ocrdma_devlist_lock); 493 spin_unlock(&ocrdma_devlist_lock);
492 /* Init stats */ 494 /* Init stats */
493 ocrdma_add_port_stats(dev); 495 ocrdma_add_port_stats(dev);
496 /* Interrupt Moderation */
497 INIT_DELAYED_WORK(&dev->eqd_work, ocrdma_eqd_set_task);
498 schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
494 499
495 pr_info("%s %s: %s \"%s\" port %d\n", 500 pr_info("%s %s: %s \"%s\" port %d\n",
496 dev_name(&dev->nic_info.pdev->dev), hca_name(dev), 501 dev_name(&dev->nic_info.pdev->dev), hca_name(dev),
@@ -528,11 +533,12 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
528 /* first unregister with stack to stop all the active traffic 533 /* first unregister with stack to stop all the active traffic
529 * of the registered clients. 534 * of the registered clients.
530 */ 535 */
531 ocrdma_rem_port_stats(dev); 536 cancel_delayed_work_sync(&dev->eqd_work);
532 ocrdma_remove_sysfiles(dev); 537 ocrdma_remove_sysfiles(dev);
533
534 ib_unregister_device(&dev->ibdev); 538 ib_unregister_device(&dev->ibdev);
535 539
540 ocrdma_rem_port_stats(dev);
541
536 spin_lock(&ocrdma_devlist_lock); 542 spin_lock(&ocrdma_devlist_lock);
537 list_del_rcu(&dev->entry); 543 list_del_rcu(&dev->entry);
538 spin_unlock(&ocrdma_devlist_lock); 544 spin_unlock(&ocrdma_devlist_lock);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 4e036480c1a8..243c87c8bd65 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -75,6 +75,8 @@ enum {
75 OCRDMA_CMD_DESTROY_RBQ = 26, 75 OCRDMA_CMD_DESTROY_RBQ = 26,
76 76
77 OCRDMA_CMD_GET_RDMA_STATS = 27, 77 OCRDMA_CMD_GET_RDMA_STATS = 27,
78 OCRDMA_CMD_ALLOC_PD_RANGE = 28,
79 OCRDMA_CMD_DEALLOC_PD_RANGE = 29,
78 80
79 OCRDMA_CMD_MAX 81 OCRDMA_CMD_MAX
80}; 82};
@@ -87,6 +89,7 @@ enum {
87 OCRDMA_CMD_CREATE_MQ = 21, 89 OCRDMA_CMD_CREATE_MQ = 21,
88 OCRDMA_CMD_GET_CTRL_ATTRIBUTES = 32, 90 OCRDMA_CMD_GET_CTRL_ATTRIBUTES = 32,
89 OCRDMA_CMD_GET_FW_VER = 35, 91 OCRDMA_CMD_GET_FW_VER = 35,
92 OCRDMA_CMD_MODIFY_EQ_DELAY = 41,
90 OCRDMA_CMD_DELETE_MQ = 53, 93 OCRDMA_CMD_DELETE_MQ = 53,
91 OCRDMA_CMD_DELETE_CQ = 54, 94 OCRDMA_CMD_DELETE_CQ = 54,
92 OCRDMA_CMD_DELETE_EQ = 55, 95 OCRDMA_CMD_DELETE_EQ = 55,
@@ -101,7 +104,7 @@ enum {
101 QTYPE_MCCQ = 3 104 QTYPE_MCCQ = 3
102}; 105};
103 106
104#define OCRDMA_MAX_SGID 8 107#define OCRDMA_MAX_SGID 16
105 108
106#define OCRDMA_MAX_QP 2048 109#define OCRDMA_MAX_QP 2048
107#define OCRDMA_MAX_CQ 2048 110#define OCRDMA_MAX_CQ 2048
@@ -314,6 +317,29 @@ struct ocrdma_create_eq_rsp {
314 317
315#define OCRDMA_EQ_MINOR_OTHER 0x1 318#define OCRDMA_EQ_MINOR_OTHER 0x1
316 319
320struct ocrmda_set_eqd {
321 u32 eq_id;
322 u32 phase;
323 u32 delay_multiplier;
324};
325
326struct ocrdma_modify_eqd_cmd {
327 struct ocrdma_mbx_hdr req;
328 u32 num_eq;
329 struct ocrmda_set_eqd set_eqd[8];
330} __packed;
331
332struct ocrdma_modify_eqd_req {
333 struct ocrdma_mqe_hdr hdr;
334 struct ocrdma_modify_eqd_cmd cmd;
335};
336
337
338struct ocrdma_modify_eq_delay_rsp {
339 struct ocrdma_mbx_rsp hdr;
340 u32 rsvd0;
341} __packed;
342
317enum { 343enum {
318 OCRDMA_MCQE_STATUS_SHIFT = 0, 344 OCRDMA_MCQE_STATUS_SHIFT = 0,
319 OCRDMA_MCQE_STATUS_MASK = 0xFFFF, 345 OCRDMA_MCQE_STATUS_MASK = 0xFFFF,
@@ -441,7 +467,9 @@ enum OCRDMA_ASYNC_EVENT_TYPE {
441 OCRDMA_DEVICE_FATAL_EVENT = 0x08, 467 OCRDMA_DEVICE_FATAL_EVENT = 0x08,
442 OCRDMA_SRQCAT_ERROR = 0x0E, 468 OCRDMA_SRQCAT_ERROR = 0x0E,
443 OCRDMA_SRQ_LIMIT_EVENT = 0x0F, 469 OCRDMA_SRQ_LIMIT_EVENT = 0x0F,
444 OCRDMA_QP_LAST_WQE_EVENT = 0x10 470 OCRDMA_QP_LAST_WQE_EVENT = 0x10,
471
472 OCRDMA_MAX_ASYNC_ERRORS
445}; 473};
446 474
447/* mailbox command request and responses */ 475/* mailbox command request and responses */
@@ -1297,6 +1325,37 @@ struct ocrdma_dealloc_pd_rsp {
1297 struct ocrdma_mbx_rsp rsp; 1325 struct ocrdma_mbx_rsp rsp;
1298}; 1326};
1299 1327
1328struct ocrdma_alloc_pd_range {
1329 struct ocrdma_mqe_hdr hdr;
1330 struct ocrdma_mbx_hdr req;
1331 u32 enable_dpp_rsvd;
1332 u32 pd_count;
1333};
1334
1335struct ocrdma_alloc_pd_range_rsp {
1336 struct ocrdma_mqe_hdr hdr;
1337 struct ocrdma_mbx_rsp rsp;
1338 u32 dpp_page_pdid;
1339 u32 pd_count;
1340};
1341
1342enum {
1343 OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK = 0xFFFF,
1344};
1345
1346struct ocrdma_dealloc_pd_range {
1347 struct ocrdma_mqe_hdr hdr;
1348 struct ocrdma_mbx_hdr req;
1349 u32 start_pd_id;
1350 u32 pd_count;
1351};
1352
1353struct ocrdma_dealloc_pd_range_rsp {
1354 struct ocrdma_mqe_hdr hdr;
1355 struct ocrdma_mbx_hdr req;
1356 u32 rsvd;
1357};
1358
1300enum { 1359enum {
1301 OCRDMA_ADDR_CHECK_ENABLE = 1, 1360 OCRDMA_ADDR_CHECK_ENABLE = 1,
1302 OCRDMA_ADDR_CHECK_DISABLE = 0 1361 OCRDMA_ADDR_CHECK_DISABLE = 0
@@ -1597,7 +1656,9 @@ enum OCRDMA_CQE_STATUS {
1597 OCRDMA_CQE_INV_EEC_STATE_ERR, 1656 OCRDMA_CQE_INV_EEC_STATE_ERR,
1598 OCRDMA_CQE_FATAL_ERR, 1657 OCRDMA_CQE_FATAL_ERR,
1599 OCRDMA_CQE_RESP_TIMEOUT_ERR, 1658 OCRDMA_CQE_RESP_TIMEOUT_ERR,
1600 OCRDMA_CQE_GENERAL_ERR 1659 OCRDMA_CQE_GENERAL_ERR,
1660
1661 OCRDMA_MAX_CQE_ERR
1601}; 1662};
1602 1663
1603enum { 1664enum {
@@ -1673,6 +1734,7 @@ enum {
1673 OCRDMA_FLAG_FENCE_R = 0x8, 1734 OCRDMA_FLAG_FENCE_R = 0x8,
1674 OCRDMA_FLAG_SOLICIT = 0x10, 1735 OCRDMA_FLAG_SOLICIT = 0x10,
1675 OCRDMA_FLAG_IMM = 0x20, 1736 OCRDMA_FLAG_IMM = 0x20,
1737 OCRDMA_FLAG_AH_VLAN_PR = 0x40,
1676 1738
1677 /* Stag flags */ 1739 /* Stag flags */
1678 OCRDMA_LKEY_FLAG_LOCAL_WR = 0x1, 1740 OCRDMA_LKEY_FLAG_LOCAL_WR = 0x1,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 41a9aec9998d..48d7ef51aa0c 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -26,6 +26,7 @@
26 *******************************************************************/ 26 *******************************************************************/
27 27
28#include <rdma/ib_addr.h> 28#include <rdma/ib_addr.h>
29#include <rdma/ib_pma.h>
29#include "ocrdma_stats.h" 30#include "ocrdma_stats.h"
30 31
31static struct dentry *ocrdma_dbgfs_dir; 32static struct dentry *ocrdma_dbgfs_dir;
@@ -249,6 +250,27 @@ static char *ocrdma_rx_stats(struct ocrdma_dev *dev)
249 return stats; 250 return stats;
250} 251}
251 252
253static u64 ocrdma_sysfs_rcv_pkts(struct ocrdma_dev *dev)
254{
255 struct ocrdma_rdma_stats_resp *rdma_stats =
256 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
257 struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
258
259 return convert_to_64bit(rx_stats->roce_frames_lo,
260 rx_stats->roce_frames_hi) + (u64)rx_stats->roce_frame_icrc_drops
261 + (u64)rx_stats->roce_frame_payload_len_drops;
262}
263
264static u64 ocrdma_sysfs_rcv_data(struct ocrdma_dev *dev)
265{
266 struct ocrdma_rdma_stats_resp *rdma_stats =
267 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
268 struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
269
270 return (convert_to_64bit(rx_stats->roce_frame_bytes_lo,
271 rx_stats->roce_frame_bytes_hi))/4;
272}
273
252static char *ocrdma_tx_stats(struct ocrdma_dev *dev) 274static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
253{ 275{
254 char *stats = dev->stats_mem.debugfs_mem, *pcur; 276 char *stats = dev->stats_mem.debugfs_mem, *pcur;
@@ -292,6 +314,37 @@ static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
292 return stats; 314 return stats;
293} 315}
294 316
317static u64 ocrdma_sysfs_xmit_pkts(struct ocrdma_dev *dev)
318{
319 struct ocrdma_rdma_stats_resp *rdma_stats =
320 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
321 struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
322
323 return (convert_to_64bit(tx_stats->send_pkts_lo,
324 tx_stats->send_pkts_hi) +
325 convert_to_64bit(tx_stats->write_pkts_lo, tx_stats->write_pkts_hi) +
326 convert_to_64bit(tx_stats->read_pkts_lo, tx_stats->read_pkts_hi) +
327 convert_to_64bit(tx_stats->read_rsp_pkts_lo,
328 tx_stats->read_rsp_pkts_hi) +
329 convert_to_64bit(tx_stats->ack_pkts_lo, tx_stats->ack_pkts_hi));
330}
331
332static u64 ocrdma_sysfs_xmit_data(struct ocrdma_dev *dev)
333{
334 struct ocrdma_rdma_stats_resp *rdma_stats =
335 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
336 struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
337
338 return (convert_to_64bit(tx_stats->send_bytes_lo,
339 tx_stats->send_bytes_hi) +
340 convert_to_64bit(tx_stats->write_bytes_lo,
341 tx_stats->write_bytes_hi) +
342 convert_to_64bit(tx_stats->read_req_bytes_lo,
343 tx_stats->read_req_bytes_hi) +
344 convert_to_64bit(tx_stats->read_rsp_bytes_lo,
345 tx_stats->read_rsp_bytes_hi))/4;
346}
347
295static char *ocrdma_wqe_stats(struct ocrdma_dev *dev) 348static char *ocrdma_wqe_stats(struct ocrdma_dev *dev)
296{ 349{
297 char *stats = dev->stats_mem.debugfs_mem, *pcur; 350 char *stats = dev->stats_mem.debugfs_mem, *pcur;
@@ -432,10 +485,118 @@ static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev)
432 return dev->stats_mem.debugfs_mem; 485 return dev->stats_mem.debugfs_mem;
433} 486}
434 487
488static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev)
489{
490 char *stats = dev->stats_mem.debugfs_mem, *pcur;
491
492
493 memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
494
495 pcur = stats;
496 pcur += ocrdma_add_stat(stats, pcur, "async_cq_err",
497 (u64)(dev->async_err_stats
498 [OCRDMA_CQ_ERROR].counter));
499 pcur += ocrdma_add_stat(stats, pcur, "async_cq_overrun_err",
500 (u64)dev->async_err_stats
501 [OCRDMA_CQ_OVERRUN_ERROR].counter);
502 pcur += ocrdma_add_stat(stats, pcur, "async_cq_qpcat_err",
503 (u64)dev->async_err_stats
504 [OCRDMA_CQ_QPCAT_ERROR].counter);
505 pcur += ocrdma_add_stat(stats, pcur, "async_qp_access_err",
506 (u64)dev->async_err_stats
507 [OCRDMA_QP_ACCESS_ERROR].counter);
508 pcur += ocrdma_add_stat(stats, pcur, "async_qp_commm_est_evt",
509 (u64)dev->async_err_stats
510 [OCRDMA_QP_COMM_EST_EVENT].counter);
511 pcur += ocrdma_add_stat(stats, pcur, "async_sq_drained_evt",
512 (u64)dev->async_err_stats
513 [OCRDMA_SQ_DRAINED_EVENT].counter);
514 pcur += ocrdma_add_stat(stats, pcur, "async_dev_fatal_evt",
515 (u64)dev->async_err_stats
516 [OCRDMA_DEVICE_FATAL_EVENT].counter);
517 pcur += ocrdma_add_stat(stats, pcur, "async_srqcat_err",
518 (u64)dev->async_err_stats
519 [OCRDMA_SRQCAT_ERROR].counter);
520 pcur += ocrdma_add_stat(stats, pcur, "async_srq_limit_evt",
521 (u64)dev->async_err_stats
522 [OCRDMA_SRQ_LIMIT_EVENT].counter);
523 pcur += ocrdma_add_stat(stats, pcur, "async_qp_last_wqe_evt",
524 (u64)dev->async_err_stats
525 [OCRDMA_QP_LAST_WQE_EVENT].counter);
526
527 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_len_err",
528 (u64)dev->cqe_err_stats
529 [OCRDMA_CQE_LOC_LEN_ERR].counter);
530 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_qp_op_err",
531 (u64)dev->cqe_err_stats
532 [OCRDMA_CQE_LOC_QP_OP_ERR].counter);
533 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_eec_op_err",
534 (u64)dev->cqe_err_stats
535 [OCRDMA_CQE_LOC_EEC_OP_ERR].counter);
536 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_prot_err",
537 (u64)dev->cqe_err_stats
538 [OCRDMA_CQE_LOC_PROT_ERR].counter);
539 pcur += ocrdma_add_stat(stats, pcur, "cqe_wr_flush_err",
540 (u64)dev->cqe_err_stats
541 [OCRDMA_CQE_WR_FLUSH_ERR].counter);
542 pcur += ocrdma_add_stat(stats, pcur, "cqe_mw_bind_err",
543 (u64)dev->cqe_err_stats
544 [OCRDMA_CQE_MW_BIND_ERR].counter);
545 pcur += ocrdma_add_stat(stats, pcur, "cqe_bad_resp_err",
546 (u64)dev->cqe_err_stats
547 [OCRDMA_CQE_BAD_RESP_ERR].counter);
548 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_access_err",
549 (u64)dev->cqe_err_stats
550 [OCRDMA_CQE_LOC_ACCESS_ERR].counter);
551 pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_req_err",
552 (u64)dev->cqe_err_stats
553 [OCRDMA_CQE_REM_INV_REQ_ERR].counter);
554 pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_access_err",
555 (u64)dev->cqe_err_stats
556 [OCRDMA_CQE_REM_ACCESS_ERR].counter);
557 pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_op_err",
558 (u64)dev->cqe_err_stats
559 [OCRDMA_CQE_REM_OP_ERR].counter);
560 pcur += ocrdma_add_stat(stats, pcur, "cqe_retry_exc_err",
561 (u64)dev->cqe_err_stats
562 [OCRDMA_CQE_RETRY_EXC_ERR].counter);
563 pcur += ocrdma_add_stat(stats, pcur, "cqe_rnr_retry_exc_err",
564 (u64)dev->cqe_err_stats
565 [OCRDMA_CQE_RNR_RETRY_EXC_ERR].counter);
566 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_rdd_viol_err",
567 (u64)dev->cqe_err_stats
568 [OCRDMA_CQE_LOC_RDD_VIOL_ERR].counter);
569 pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_rd_req_err",
570 (u64)dev->cqe_err_stats
571 [OCRDMA_CQE_REM_INV_RD_REQ_ERR].counter);
572 pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_abort_err",
573 (u64)dev->cqe_err_stats
574 [OCRDMA_CQE_REM_ABORT_ERR].counter);
575 pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eecn_err",
576 (u64)dev->cqe_err_stats
577 [OCRDMA_CQE_INV_EECN_ERR].counter);
578 pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eec_state_err",
579 (u64)dev->cqe_err_stats
580 [OCRDMA_CQE_INV_EEC_STATE_ERR].counter);
581 pcur += ocrdma_add_stat(stats, pcur, "cqe_fatal_err",
582 (u64)dev->cqe_err_stats
583 [OCRDMA_CQE_FATAL_ERR].counter);
584 pcur += ocrdma_add_stat(stats, pcur, "cqe_resp_timeout_err",
585 (u64)dev->cqe_err_stats
586 [OCRDMA_CQE_RESP_TIMEOUT_ERR].counter);
587 pcur += ocrdma_add_stat(stats, pcur, "cqe_general_err",
588 (u64)dev->cqe_err_stats
589 [OCRDMA_CQE_GENERAL_ERR].counter);
590 return stats;
591}
592
435static void ocrdma_update_stats(struct ocrdma_dev *dev) 593static void ocrdma_update_stats(struct ocrdma_dev *dev)
436{ 594{
437 ulong now = jiffies, secs; 595 ulong now = jiffies, secs;
438 int status = 0; 596 int status = 0;
597 struct ocrdma_rdma_stats_resp *rdma_stats =
598 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
599 struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;
439 600
440 secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U; 601 secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U;
441 if (secs) { 602 if (secs) {
@@ -444,10 +605,74 @@ static void ocrdma_update_stats(struct ocrdma_dev *dev)
444 if (status) 605 if (status)
445 pr_err("%s: stats mbox failed with status = %d\n", 606 pr_err("%s: stats mbox failed with status = %d\n",
446 __func__, status); 607 __func__, status);
608 /* Update PD counters from PD resource manager */
609 if (dev->pd_mgr->pd_prealloc_valid) {
610 rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_count;
611 rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_count;
 612		/* Threshold stats */
613 rsrc_stats = &rdma_stats->th_rsrc_stats;
614 rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_thrsh;
615 rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_thrsh;
616 }
447 dev->last_stats_time = jiffies; 617 dev->last_stats_time = jiffies;
448 } 618 }
449} 619}
450 620
621static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
622 const char __user *buffer,
623 size_t count, loff_t *ppos)
624{
625 char tmp_str[32];
626 long reset;
627 int status = 0;
628 struct ocrdma_stats *pstats = filp->private_data;
629 struct ocrdma_dev *dev = pstats->dev;
630
631 if (count > 32)
632 goto err;
633
634 if (copy_from_user(tmp_str, buffer, count))
635 goto err;
636
637 tmp_str[count-1] = '\0';
638 if (kstrtol(tmp_str, 10, &reset))
639 goto err;
640
641 switch (pstats->type) {
642 case OCRDMA_RESET_STATS:
643 if (reset) {
644 status = ocrdma_mbx_rdma_stats(dev, true);
645 if (status) {
646 pr_err("Failed to reset stats = %d", status);
647 goto err;
648 }
649 }
650 break;
651 default:
652 goto err;
653 }
654
655 return count;
656err:
657 return -EFAULT;
658}
659
660int ocrdma_pma_counters(struct ocrdma_dev *dev,
661 struct ib_mad *out_mad)
662{
663 struct ib_pma_portcounters *pma_cnt;
664
665 memset(out_mad->data, 0, sizeof out_mad->data);
666 pma_cnt = (void *)(out_mad->data + 40);
667 ocrdma_update_stats(dev);
668
669 pma_cnt->port_xmit_data = cpu_to_be32(ocrdma_sysfs_xmit_data(dev));
670 pma_cnt->port_rcv_data = cpu_to_be32(ocrdma_sysfs_rcv_data(dev));
671 pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev));
672 pma_cnt->port_rcv_packets = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev));
673 return 0;
674}
675
451static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer, 676static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
452 size_t usr_buf_len, loff_t *ppos) 677 size_t usr_buf_len, loff_t *ppos)
453{ 678{
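The ocrdma_dbgfs_ops_write() handler added above gives the debugfs nodes a reset path: up to 32 bytes of input are copied in, NUL-terminated, and parsed as a decimal value with kstrtol(); a nonzero value written to an OCRDMA_RESET_STATS node issues ocrdma_mbx_rdma_stats(dev, true) to clear the firmware counters. Oversized input, a parse failure, a failed mailbox command, or a write to any other node type all land on the err label and report -EFAULT. Note that reset_stats is nonetheless created with mode S_IRUSR in ocrdma_add_port_stats() below, so the write handler is only reachable if that mode is relaxed.
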
@@ -492,6 +717,9 @@ static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
492 case OCRDMA_RX_DBG_STATS: 717 case OCRDMA_RX_DBG_STATS:
493 data = ocrdma_rx_dbg_stats(dev); 718 data = ocrdma_rx_dbg_stats(dev);
494 break; 719 break;
720 case OCRDMA_DRV_STATS:
721 data = ocrdma_driver_dbg_stats(dev);
722 break;
495 723
496 default: 724 default:
497 status = -EFAULT; 725 status = -EFAULT;
@@ -514,6 +742,7 @@ static const struct file_operations ocrdma_dbg_ops = {
514 .owner = THIS_MODULE, 742 .owner = THIS_MODULE,
515 .open = simple_open, 743 .open = simple_open,
516 .read = ocrdma_dbgfs_ops_read, 744 .read = ocrdma_dbgfs_ops_read,
745 .write = ocrdma_dbgfs_ops_write,
517}; 746};
518 747
519void ocrdma_add_port_stats(struct ocrdma_dev *dev) 748void ocrdma_add_port_stats(struct ocrdma_dev *dev)
@@ -582,6 +811,18 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
582 &dev->rx_dbg_stats, &ocrdma_dbg_ops)) 811 &dev->rx_dbg_stats, &ocrdma_dbg_ops))
583 goto err; 812 goto err;
584 813
814 dev->driver_stats.type = OCRDMA_DRV_STATS;
815 dev->driver_stats.dev = dev;
816 if (!debugfs_create_file("driver_dbg_stats", S_IRUSR, dev->dir,
817 &dev->driver_stats, &ocrdma_dbg_ops))
818 goto err;
819
820 dev->reset_stats.type = OCRDMA_RESET_STATS;
821 dev->reset_stats.dev = dev;
822 if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir,
823 &dev->reset_stats, &ocrdma_dbg_ops))
824 goto err;
825
585 /* Now create dma_mem for stats mbx command */ 826 /* Now create dma_mem for stats mbx command */
586 if (!ocrdma_alloc_stats_mem(dev)) 827 if (!ocrdma_alloc_stats_mem(dev))
587 goto err; 828 goto err;
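ocrdma_sysfs_rcv_data() and ocrdma_sysfs_xmit_data() divide the byte counters by 4 because the IB PMA PortRcvData/PortXmitData counters are specified in units of 32-bit words, and ocrdma_pma_counters() feeds these values straight into struct ib_pma_portcounters. convert_to_64bit() itself is not shown in this diff; a sketch of what such a helper presumably does with the lo/hi register halves:

	/* Assumed shape of convert_to_64bit(): join the two 32-bit
	 * halves of a firmware counter into one 64-bit value. */
	static inline u64 convert_to_64bit(u32 lo, u32 hi)
	{
		return ((u64)hi << 32) | (u64)lo;
	}
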
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
index 5f5e20c46d7c..091edd68a8a3 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -43,12 +43,16 @@ enum OCRDMA_STATS_TYPE {
43 OCRDMA_RXQP_ERRSTATS, 43 OCRDMA_RXQP_ERRSTATS,
44 OCRDMA_TXQP_ERRSTATS, 44 OCRDMA_TXQP_ERRSTATS,
45 OCRDMA_TX_DBG_STATS, 45 OCRDMA_TX_DBG_STATS,
46 OCRDMA_RX_DBG_STATS 46 OCRDMA_RX_DBG_STATS,
47 OCRDMA_DRV_STATS,
48 OCRDMA_RESET_STATS
47}; 49};
48 50
49void ocrdma_rem_debugfs(void); 51void ocrdma_rem_debugfs(void);
50void ocrdma_init_debugfs(void); 52void ocrdma_init_debugfs(void);
51void ocrdma_rem_port_stats(struct ocrdma_dev *dev); 53void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
52void ocrdma_add_port_stats(struct ocrdma_dev *dev); 54void ocrdma_add_port_stats(struct ocrdma_dev *dev);
55int ocrdma_pma_counters(struct ocrdma_dev *dev,
56 struct ib_mad *out_mad);
53 57
54#endif /* __OCRDMA_STATS_H__ */ 58#endif /* __OCRDMA_STATS_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index fb8d8c4dfbb9..877175563634 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
53 53
54 dev = get_ocrdma_dev(ibdev); 54 dev = get_ocrdma_dev(ibdev);
55 memset(sgid, 0, sizeof(*sgid)); 55 memset(sgid, 0, sizeof(*sgid));
56 if (index > OCRDMA_MAX_SGID) 56 if (index >= OCRDMA_MAX_SGID)
57 return -EINVAL; 57 return -EINVAL;
58 58
59 memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid)); 59 memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
@@ -253,6 +253,107 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
253 return found; 253 return found;
254} 254}
255 255
256
257static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
258{
259 u16 pd_bitmap_idx = 0;
260 const unsigned long *pd_bitmap;
261
262 if (dpp_pool) {
263 pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
264 pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
265 dev->pd_mgr->max_dpp_pd);
266 __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
267 dev->pd_mgr->pd_dpp_count++;
268 if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
269 dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
270 } else {
271 pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
272 pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
273 dev->pd_mgr->max_normal_pd);
274 __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
275 dev->pd_mgr->pd_norm_count++;
276 if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
277 dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
278 }
279 return pd_bitmap_idx;
280}
281
282static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
283 bool dpp_pool)
284{
285 u16 pd_count;
286 u16 pd_bit_index;
287
288 pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
289 dev->pd_mgr->pd_norm_count;
290 if (pd_count == 0)
291 return -EINVAL;
292
293 if (dpp_pool) {
294 pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
295 if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
296 return -EINVAL;
297 } else {
298 __clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
299 dev->pd_mgr->pd_dpp_count--;
300 }
301 } else {
302 pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
303 if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
304 return -EINVAL;
305 } else {
306 __clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
307 dev->pd_mgr->pd_norm_count--;
308 }
309 }
310
311 return 0;
312}
313
314static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
315 bool dpp_pool)
316{
317 int status;
318
319 mutex_lock(&dev->dev_lock);
320 status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
321 mutex_unlock(&dev->dev_lock);
322 return status;
323}
324
325static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
326{
327 u16 pd_idx = 0;
328 int status = 0;
329
330 mutex_lock(&dev->dev_lock);
331 if (pd->dpp_enabled) {
332 /* try allocating DPP PD, if not available then normal PD */
333 if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
334 pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
335 pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
336 pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
337 } else if (dev->pd_mgr->pd_norm_count <
338 dev->pd_mgr->max_normal_pd) {
339 pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
340 pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
341 pd->dpp_enabled = false;
342 } else {
343 status = -EINVAL;
344 }
345 } else {
346 if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
347 pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
348 pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
349 } else {
350 status = -EINVAL;
351 }
352 }
353 mutex_unlock(&dev->dev_lock);
354 return status;
355}
356
256static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev, 357static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
257 struct ocrdma_ucontext *uctx, 358 struct ocrdma_ucontext *uctx,
258 struct ib_udata *udata) 359 struct ib_udata *udata)
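The block above implements a small preallocated-PD pool: _ocrdma_pd_mgr_get_bitmap() takes the first free slot with find_first_zero_bit() and tracks a high-water threshold (pd_dpp_thrsh/pd_norm_thrsh) that the stats code reports, while _ocrdma_pd_mgr_put_bitmap() range-checks the id and clears the bit. Exhaustion is guarded by the callers, which compare the in-use count against the pool maximum before allocating. A condensed, illustrative model of the same logic (names are made up):

	/* Illustrative model of the PD pool above. As in
	 * ocrdma_get_pd_num(), the caller checks count < max
	 * before calling pd_pool_get(). */
	struct pd_pool {
		unsigned long *bitmap;
		u16 start, max, count, thrsh;
	};

	static u16 pd_pool_get(struct pd_pool *p)
	{
		u16 idx = find_first_zero_bit(p->bitmap, p->max);

		__set_bit(idx, p->bitmap);
		if (++p->count > p->thrsh)
			p->thrsh = p->count;	/* high-water mark */
		return p->start + idx;		/* PD id */
	}

	static int pd_pool_put(struct pd_pool *p, u16 pd_id)
	{
		u16 idx = pd_id - p->start;

		if (!p->count || idx >= p->max)
			return -EINVAL;
		__clear_bit(idx, p->bitmap);
		p->count--;
		return 0;
	}
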
@@ -272,6 +373,11 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
272 dev->attr.wqe_size) : 0; 373 dev->attr.wqe_size) : 0;
273 } 374 }
274 375
376 if (dev->pd_mgr->pd_prealloc_valid) {
377 status = ocrdma_get_pd_num(dev, pd);
378 return (status == 0) ? pd : ERR_PTR(status);
379 }
380
275retry: 381retry:
276 status = ocrdma_mbx_alloc_pd(dev, pd); 382 status = ocrdma_mbx_alloc_pd(dev, pd);
277 if (status) { 383 if (status) {
@@ -299,7 +405,11 @@ static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
299{ 405{
300 int status = 0; 406 int status = 0;
301 407
302 status = ocrdma_mbx_dealloc_pd(dev, pd); 408 if (dev->pd_mgr->pd_prealloc_valid)
409 status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
410 else
411 status = ocrdma_mbx_dealloc_pd(dev, pd);
412
303 kfree(pd); 413 kfree(pd);
304 return status; 414 return status;
305} 415}
@@ -325,7 +435,6 @@ err:
325 435
326static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx) 436static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
327{ 437{
328 int status = 0;
329 struct ocrdma_pd *pd = uctx->cntxt_pd; 438 struct ocrdma_pd *pd = uctx->cntxt_pd;
330 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); 439 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
331 440
@@ -334,8 +443,8 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
334 __func__, dev->id, pd->id); 443 __func__, dev->id, pd->id);
335 } 444 }
336 uctx->cntxt_pd = NULL; 445 uctx->cntxt_pd = NULL;
337 status = _ocrdma_dealloc_pd(dev, pd); 446 (void)_ocrdma_dealloc_pd(dev, pd);
338 return status; 447 return 0;
339} 448}
340 449
341static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx) 450static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
@@ -569,7 +678,7 @@ err:
569 if (is_uctx_pd) { 678 if (is_uctx_pd) {
570 ocrdma_release_ucontext_pd(uctx); 679 ocrdma_release_ucontext_pd(uctx);
571 } else { 680 } else {
572 status = ocrdma_mbx_dealloc_pd(dev, pd); 681 status = _ocrdma_dealloc_pd(dev, pd);
573 kfree(pd); 682 kfree(pd);
574 } 683 }
575exit: 684exit:
@@ -837,9 +946,8 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
837{ 946{
838 struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr); 947 struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
839 struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device); 948 struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
840 int status;
841 949
842 status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey); 950 (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
843 951
844 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); 952 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
845 953
@@ -850,11 +958,10 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
850 958
851 /* Don't stop cleanup, in case FW is unresponsive */ 959 /* Don't stop cleanup, in case FW is unresponsive */
852 if (dev->mqe_ctx.fw_error_state) { 960 if (dev->mqe_ctx.fw_error_state) {
853 status = 0;
854 pr_err("%s(%d) fw not responding.\n", 961 pr_err("%s(%d) fw not responding.\n",
855 __func__, dev->id); 962 __func__, dev->id);
856 } 963 }
857 return status; 964 return 0;
858} 965}
859 966
860static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq, 967static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
@@ -986,7 +1093,6 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq)
986 1093
987int ocrdma_destroy_cq(struct ib_cq *ibcq) 1094int ocrdma_destroy_cq(struct ib_cq *ibcq)
988{ 1095{
989 int status;
990 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); 1096 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
991 struct ocrdma_eq *eq = NULL; 1097 struct ocrdma_eq *eq = NULL;
992 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); 1098 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
@@ -1003,7 +1109,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
1003 synchronize_irq(irq); 1109 synchronize_irq(irq);
1004 ocrdma_flush_cq(cq); 1110 ocrdma_flush_cq(cq);
1005 1111
1006 status = ocrdma_mbx_destroy_cq(dev, cq); 1112 (void)ocrdma_mbx_destroy_cq(dev, cq);
1007 if (cq->ucontext) { 1113 if (cq->ucontext) {
1008 pdid = cq->ucontext->cntxt_pd->id; 1114 pdid = cq->ucontext->cntxt_pd->id;
1009 ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, 1115 ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
@@ -1014,7 +1120,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
1014 } 1120 }
1015 1121
1016 kfree(cq); 1122 kfree(cq);
1017 return status; 1123 return 0;
1018} 1124}
1019 1125
1020static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) 1126static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
@@ -1113,8 +1219,8 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
1113 int status = 0; 1219 int status = 0;
1114 u64 usr_db; 1220 u64 usr_db;
1115 struct ocrdma_create_qp_uresp uresp; 1221 struct ocrdma_create_qp_uresp uresp;
1116 struct ocrdma_dev *dev = qp->dev;
1117 struct ocrdma_pd *pd = qp->pd; 1222 struct ocrdma_pd *pd = qp->pd;
1223 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
1118 1224
1119 memset(&uresp, 0, sizeof(uresp)); 1225 memset(&uresp, 0, sizeof(uresp));
1120 usr_db = dev->nic_info.unmapped_db + 1226 usr_db = dev->nic_info.unmapped_db +
@@ -1253,7 +1359,6 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
1253 status = -ENOMEM; 1359 status = -ENOMEM;
1254 goto gen_err; 1360 goto gen_err;
1255 } 1361 }
1256 qp->dev = dev;
1257 ocrdma_set_qp_init_params(qp, pd, attrs); 1362 ocrdma_set_qp_init_params(qp, pd, attrs);
1258 if (udata == NULL) 1363 if (udata == NULL)
1259 qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 | 1364 qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
@@ -1312,7 +1417,7 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1312 enum ib_qp_state old_qps; 1417 enum ib_qp_state old_qps;
1313 1418
1314 qp = get_ocrdma_qp(ibqp); 1419 qp = get_ocrdma_qp(ibqp);
1315 dev = qp->dev; 1420 dev = get_ocrdma_dev(ibqp->device);
1316 if (attr_mask & IB_QP_STATE) 1421 if (attr_mask & IB_QP_STATE)
1317 status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps); 1422 status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
1318 /* if new and previous states are same hw doesn't need to 1423 /* if new and previous states are same hw doesn't need to
@@ -1335,7 +1440,7 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1335 enum ib_qp_state old_qps, new_qps; 1440 enum ib_qp_state old_qps, new_qps;
1336 1441
1337 qp = get_ocrdma_qp(ibqp); 1442 qp = get_ocrdma_qp(ibqp);
1338 dev = qp->dev; 1443 dev = get_ocrdma_dev(ibqp->device);
1339 1444
1340 /* syncronize with multiple context trying to change, retrive qps */ 1445 /* syncronize with multiple context trying to change, retrive qps */
1341 mutex_lock(&dev->dev_lock); 1446 mutex_lock(&dev->dev_lock);
@@ -1402,7 +1507,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
1402 u32 qp_state; 1507 u32 qp_state;
1403 struct ocrdma_qp_params params; 1508 struct ocrdma_qp_params params;
1404 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); 1509 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1405 struct ocrdma_dev *dev = qp->dev; 1510 struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
1406 1511
1407 memset(&params, 0, sizeof(params)); 1512 memset(&params, 0, sizeof(params));
1408 mutex_lock(&dev->dev_lock); 1513 mutex_lock(&dev->dev_lock);
@@ -1412,8 +1517,6 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
1412 goto mbx_err; 1517 goto mbx_err;
1413 if (qp->qp_type == IB_QPT_UD) 1518 if (qp->qp_type == IB_QPT_UD)
1414 qp_attr->qkey = params.qkey; 1519 qp_attr->qkey = params.qkey;
1415 qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
1416 qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
1417 qp_attr->path_mtu = 1520 qp_attr->path_mtu =
1418 ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx & 1521 ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
1419 OCRDMA_QP_PARAMS_PATH_MTU_MASK) >> 1522 OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
@@ -1468,6 +1571,8 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
1468 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr)); 1571 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1469 qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >> 1572 qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
1470 OCRDMA_QP_PARAMS_STATE_SHIFT; 1573 OCRDMA_QP_PARAMS_STATE_SHIFT;
1574 qp_attr->qp_state = get_ibqp_state(qp_state);
1575 qp_attr->cur_qp_state = qp_attr->qp_state;
1471 qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0; 1576 qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1472 qp_attr->max_dest_rd_atomic = 1577 qp_attr->max_dest_rd_atomic =
1473 params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT; 1578 params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
@@ -1475,19 +1580,18 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
1475 params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK; 1580 params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1476 qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags & 1581 qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1477 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0; 1582 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
1583 /* Sync driver QP state with FW */
1584 ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
1478mbx_err: 1585mbx_err:
1479 return status; 1586 return status;
1480} 1587}
1481 1588
1482static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx) 1589static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
1483{ 1590{
1484 int i = idx / 32; 1591 unsigned int i = idx / 32;
1485 unsigned int mask = (1 << (idx % 32)); 1592 u32 mask = (1U << (idx % 32));
1486 1593
1487 if (srq->idx_bit_fields[i] & mask) 1594 srq->idx_bit_fields[i] ^= mask;
1488 srq->idx_bit_fields[i] &= ~mask;
1489 else
1490 srq->idx_bit_fields[i] |= mask;
1491} 1595}
1492 1596
1493static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q) 1597static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
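ocrdma_srq_toggle_bit() is reduced above from a test-and-branch to a single XOR; for the single-bit masks it is called with, (w & m) ? (w & ~m) : (w | m) and w ^ m are the same operation. A quick userspace check of that equivalence (illustrative, not part of the patch):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t w = 0xdeadbeef;
		int i;

		for (i = 0; i < 32; i++) {
			uint32_t m = UINT32_C(1) << i;	/* single-bit mask */
			uint32_t branchy = (w & m) ? (w & ~m) : (w | m);

			assert(branchy == (w ^ m));	/* toggle is XOR */
		}
		return 0;
	}

The switch to unsigned types and a 1U shift also avoids shifting a signed 1 into the sign bit when idx % 32 == 31.
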
@@ -1596,7 +1700,7 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1596{ 1700{
1597 int found = false; 1701 int found = false;
1598 unsigned long flags; 1702 unsigned long flags;
1599 struct ocrdma_dev *dev = qp->dev; 1703 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
1600 /* sync with any active CQ poll */ 1704 /* sync with any active CQ poll */
1601 1705
1602 spin_lock_irqsave(&dev->flush_q_lock, flags); 1706 spin_lock_irqsave(&dev->flush_q_lock, flags);
@@ -1613,7 +1717,6 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1613 1717
1614int ocrdma_destroy_qp(struct ib_qp *ibqp) 1718int ocrdma_destroy_qp(struct ib_qp *ibqp)
1615{ 1719{
1616 int status;
1617 struct ocrdma_pd *pd; 1720 struct ocrdma_pd *pd;
1618 struct ocrdma_qp *qp; 1721 struct ocrdma_qp *qp;
1619 struct ocrdma_dev *dev; 1722 struct ocrdma_dev *dev;
@@ -1622,7 +1725,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
1622 unsigned long flags; 1725 unsigned long flags;
1623 1726
1624 qp = get_ocrdma_qp(ibqp); 1727 qp = get_ocrdma_qp(ibqp);
1625 dev = qp->dev; 1728 dev = get_ocrdma_dev(ibqp->device);
1626 1729
1627 attrs.qp_state = IB_QPS_ERR; 1730 attrs.qp_state = IB_QPS_ERR;
1628 pd = qp->pd; 1731 pd = qp->pd;
@@ -1635,7 +1738,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
1635 * discarded until the old CQEs are discarded. 1738 * discarded until the old CQEs are discarded.
1636 */ 1739 */
1637 mutex_lock(&dev->dev_lock); 1740 mutex_lock(&dev->dev_lock);
1638 status = ocrdma_mbx_destroy_qp(dev, qp); 1741 (void) ocrdma_mbx_destroy_qp(dev, qp);
1639 1742
1640 /* 1743 /*
1641 * acquire CQ lock while destroy is in progress, in order to 1744 * acquire CQ lock while destroy is in progress, in order to
@@ -1670,7 +1773,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
1670 kfree(qp->wqe_wr_id_tbl); 1773 kfree(qp->wqe_wr_id_tbl);
1671 kfree(qp->rqe_wr_id_tbl); 1774 kfree(qp->rqe_wr_id_tbl);
1672 kfree(qp); 1775 kfree(qp);
1673 return status; 1776 return 0;
1674} 1777}
1675 1778
1676static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq, 1779static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
@@ -1831,6 +1934,8 @@ static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1831 else 1934 else
1832 ud_hdr->qkey = wr->wr.ud.remote_qkey; 1935 ud_hdr->qkey = wr->wr.ud.remote_qkey;
1833 ud_hdr->rsvd_ahid = ah->id; 1936 ud_hdr->rsvd_ahid = ah->id;
1937 if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
1938 hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
1834} 1939}
1835 1940
1836static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr, 1941static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
@@ -2007,11 +2112,12 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2007 u64 fbo; 2112 u64 fbo;
2008 struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1); 2113 struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
2009 struct ocrdma_mr *mr; 2114 struct ocrdma_mr *mr;
2115 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2010 u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr); 2116 u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
2011 2117
2012 wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES); 2118 wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
2013 2119
2014 if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr) 2120 if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr)
2015 return -EINVAL; 2121 return -EINVAL;
2016 2122
2017 hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT); 2123 hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
@@ -2039,7 +2145,7 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2039 fast_reg->size_sge = 2145 fast_reg->size_sge =
2040 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift); 2146 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
2041 mr = (struct ocrdma_mr *) (unsigned long) 2147 mr = (struct ocrdma_mr *) (unsigned long)
2042 qp->dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)]; 2148 dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
2043 build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr); 2149 build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
2044 return 0; 2150 return 0;
2045} 2151}
@@ -2112,8 +2218,6 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2112 hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT); 2218 hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2113 status = ocrdma_build_write(qp, hdr, wr); 2219 status = ocrdma_build_write(qp, hdr, wr);
2114 break; 2220 break;
2115 case IB_WR_RDMA_READ_WITH_INV:
2116 hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
2117 case IB_WR_RDMA_READ: 2221 case IB_WR_RDMA_READ:
2118 ocrdma_build_read(qp, hdr, wr); 2222 ocrdma_build_read(qp, hdr, wr);
2119 break; 2223 break;
@@ -2484,8 +2588,11 @@ static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2484 bool *polled, bool *stop) 2588 bool *polled, bool *stop)
2485{ 2589{
2486 bool expand; 2590 bool expand;
2591 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2487 int status = (le32_to_cpu(cqe->flags_status_srcqpn) & 2592 int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2488 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; 2593 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2594 if (status < OCRDMA_MAX_CQE_ERR)
2595 atomic_inc(&dev->cqe_err_stats[status]);
2489 2596
2490 /* when the hw sq is empty but the rq is not, keep the cqe 2596 /* when the hw sq is empty but the rq is not, keep the cqe
2491 * in order to get the cq event again. 2597 * in order to get the cq event again.
@@ -2604,6 +2711,10 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2604 int status) 2711 int status)
2605{ 2712{
2606 bool expand; 2713 bool expand;
2714 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2715
2716 if (status < OCRDMA_MAX_CQE_ERR)
2717 atomic_inc(&dev->cqe_err_stats[status]);
2607 2718
2608 /* when the hw_rq is empty but the wq is not, keep the cqe 2719 /* when the hw_rq is empty but the wq is not, keep the cqe
2609 * in order to get the cq event again. 2720 * in order to get the cq event again.
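Both poll-error paths above use the same pattern: the OCRDMA_CQE_STATUS enum gained a trailing OCRDMA_MAX_CQE_ERR sentinel (see the ocrdma_sls.h hunk earlier), which both sizes the per-device cqe_err_stats[] array and bounds the index before atomic_inc(). A condensed model of the pattern:

	/* Illustrative: a trailing enum sentinel sizes the counter
	 * array and guards the increment against out-of-range codes
	 * (status is extracted from masked CQE bits, so it is
	 * non-negative here). */
	atomic_t cqe_err_stats[OCRDMA_MAX_CQE_ERR];

	static void count_cqe_err(int status)
	{
		if (status < OCRDMA_MAX_CQE_ERR)
			atomic_inc(&cqe_err_stats[status]);
	}
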
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index c00ae093b6f8..ffd48bfc4923 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1082,12 +1082,6 @@ struct qib_devdata {
1082 /* control high-level access to EEPROM */ 1082 /* control high-level access to EEPROM */
1083 struct mutex eep_lock; 1083 struct mutex eep_lock;
1084 uint64_t traffic_wds; 1084 uint64_t traffic_wds;
1085 /* active time is kept in seconds, but logged in hours */
1086 atomic_t active_time;
1087 /* Below are nominal shadow of EEPROM, new since last EEPROM update */
1088 uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
1089 uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
1090 uint16_t eep_hrs;
1091 /* 1085 /*
1092 * masks for which bits of errs, hwerrs that cause 1086 * masks for which bits of errs, hwerrs that cause
1093 * each of the counters to increment. 1087 * each of the counters to increment.
@@ -1309,8 +1303,7 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
1309int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr, 1303int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
1310 const void *buffer, int len); 1304 const void *buffer, int len);
1311void qib_get_eeprom_info(struct qib_devdata *); 1305void qib_get_eeprom_info(struct qib_devdata *);
1312int qib_update_eeprom_log(struct qib_devdata *dd); 1306#define qib_inc_eeprom_err(dd, eidx, incr)
1313void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
1314void qib_dump_lookup_output_queue(struct qib_devdata *); 1307void qib_dump_lookup_output_queue(struct qib_devdata *);
1315void qib_force_pio_avail_update(struct qib_devdata *); 1308void qib_force_pio_avail_update(struct qib_devdata *);
1316void qib_clear_symerror_on_linkup(unsigned long opaque); 1309void qib_clear_symerror_on_linkup(unsigned long opaque);
@@ -1467,11 +1460,14 @@ const char *qib_get_unit_name(int unit);
1467 * Flush write combining store buffers (if present) and perform a write 1460 * Flush write combining store buffers (if present) and perform a write
1468 * barrier. 1461 * barrier.
1469 */ 1462 */
1463static inline void qib_flush_wc(void)
1464{
1470#if defined(CONFIG_X86_64) 1465#if defined(CONFIG_X86_64)
1471#define qib_flush_wc() asm volatile("sfence" : : : "memory") 1466 asm volatile("sfence" : : : "memory");
1472#else 1467#else
1473#define qib_flush_wc() wmb() /* no reorder around wc flush */ 1468 wmb(); /* no reorder around wc flush */
1474#endif 1469#endif
1470}
1475 1471
1476/* global module parameter variables */ 1472/* global module parameter variables */
1477extern unsigned qib_ibmtu; 1473extern unsigned qib_ibmtu;
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
index 5670ace27c63..4fb78abd8ba1 100644
--- a/drivers/infiniband/hw/qib/qib_common.h
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -257,7 +257,7 @@ struct qib_base_info {
257 257
258 /* shared memory page for send buffer disarm status */ 258 /* shared memory page for send buffer disarm status */
259 __u64 spi_sendbuf_status; 259 __u64 spi_sendbuf_status;
260} __attribute__ ((aligned(8))); 260} __aligned(8);
261 261
262/* 262/*
263 * This version number is given to the driver by the user code during 263 * This version number is given to the driver by the user code during
@@ -361,7 +361,7 @@ struct qib_user_info {
361 */ 361 */
362 __u64 spu_base_info; 362 __u64 spu_base_info;
363 363
364} __attribute__ ((aligned(8))); 364} __aligned(8);
365 365
366/* User commands. */ 366/* User commands. */
367 367
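The qib_common.h change swaps the open-coded GCC attribute for the kernel's __aligned() shorthand from the compiler headers; the two spellings are equivalent:

	/* Equivalent declarations; __aligned(n) expands to the
	 * attribute form via linux/compiler.h. */
	struct demo_a { __u64 v; } __attribute__ ((aligned(8)));
	struct demo_b { __u64 v; } __aligned(8);
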
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c
index 6abd3ed3cd51..5e75b43c596b 100644
--- a/drivers/infiniband/hw/qib/qib_debugfs.c
+++ b/drivers/infiniband/hw/qib/qib_debugfs.c
@@ -255,7 +255,6 @@ void qib_dbg_ibdev_init(struct qib_ibdev *ibd)
255 DEBUGFS_FILE_CREATE(opcode_stats); 255 DEBUGFS_FILE_CREATE(opcode_stats);
256 DEBUGFS_FILE_CREATE(ctx_stats); 256 DEBUGFS_FILE_CREATE(ctx_stats);
257 DEBUGFS_FILE_CREATE(qp_stats); 257 DEBUGFS_FILE_CREATE(qp_stats);
258 return;
259} 258}
260 259
261void qib_dbg_ibdev_exit(struct qib_ibdev *ibd) 260void qib_dbg_ibdev_exit(struct qib_ibdev *ibd)
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
index 5dfda4c5cc9c..8c34b23e5bf6 100644
--- a/drivers/infiniband/hw/qib/qib_diag.c
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -85,7 +85,7 @@ static struct qib_diag_client *get_client(struct qib_devdata *dd)
85 client_pool = dc->next; 85 client_pool = dc->next;
86 else 86 else
87 /* None in pool, alloc and init */ 87 /* None in pool, alloc and init */
88 dc = kmalloc(sizeof *dc, GFP_KERNEL); 88 dc = kmalloc(sizeof(*dc), GFP_KERNEL);
89 89
90 if (dc) { 90 if (dc) {
91 dc->next = NULL; 91 dc->next = NULL;
@@ -257,6 +257,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
257 if (dd->userbase) { 257 if (dd->userbase) {
258 /* If user regs mapped, they are after send, so set limit. */ 258 /* If user regs mapped, they are after send, so set limit. */
259 u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase; 259 u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
260
260 if (!dd->piovl15base) 261 if (!dd->piovl15base)
261 snd_lim = dd->uregbase; 262 snd_lim = dd->uregbase;
262 krb32 = (u32 __iomem *)dd->userbase; 263 krb32 = (u32 __iomem *)dd->userbase;
@@ -280,6 +281,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
280 snd_bottom = dd->pio2k_bufbase; 281 snd_bottom = dd->pio2k_bufbase;
281 if (snd_lim == 0) { 282 if (snd_lim == 0) {
282 u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign); 283 u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
284
283 snd_lim = snd_bottom + tot2k; 285 snd_lim = snd_bottom + tot2k;
284 } 286 }
285 /* If 4k buffers exist, account for them by bumping 287 /* If 4k buffers exist, account for them by bumping
@@ -398,6 +400,7 @@ static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
398 /* not very efficient, but it works for now */ 400 /* not very efficient, but it works for now */
399 while (reg_addr < reg_end) { 401 while (reg_addr < reg_end) {
400 u64 data; 402 u64 data;
403
401 if (copy_from_user(&data, uaddr, sizeof(data))) { 404 if (copy_from_user(&data, uaddr, sizeof(data))) {
402 ret = -EFAULT; 405 ret = -EFAULT;
403 goto bail; 406 goto bail;
@@ -698,7 +701,7 @@ int qib_register_observer(struct qib_devdata *dd,
698 701
699 if (!dd || !op) 702 if (!dd || !op)
700 return -EINVAL; 703 return -EINVAL;
701 olp = vmalloc(sizeof *olp); 704 olp = vmalloc(sizeof(*olp));
702 if (!olp) { 705 if (!olp) {
703 pr_err("vmalloc for observer failed\n"); 706 pr_err("vmalloc for observer failed\n");
704 return -ENOMEM; 707 return -ENOMEM;
@@ -796,6 +799,7 @@ static ssize_t qib_diag_read(struct file *fp, char __user *data,
796 op = diag_get_observer(dd, *off); 799 op = diag_get_observer(dd, *off);
797 if (op) { 800 if (op) {
798 u32 offset = *off; 801 u32 offset = *off;
802
799 ret = op->hook(dd, op, offset, &data64, 0, use_32); 803 ret = op->hook(dd, op, offset, &data64, 0, use_32);
800 } 804 }
801 /* 805 /*
@@ -873,6 +877,7 @@ static ssize_t qib_diag_write(struct file *fp, const char __user *data,
873 if (count == 4 || count == 8) { 877 if (count == 4 || count == 8) {
874 u64 data64; 878 u64 data64;
875 u32 offset = *off; 879 u32 offset = *off;
880
876 ret = copy_from_user(&data64, data, count); 881 ret = copy_from_user(&data64, data, count);
877 if (ret) { 882 if (ret) {
878 ret = -EFAULT; 883 ret = -EFAULT;
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 5bee08f16d74..f58fdc3d25a2 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -86,7 +86,7 @@ const char *qib_get_unit_name(int unit)
86{ 86{
87 static char iname[16]; 87 static char iname[16];
88 88
89 snprintf(iname, sizeof iname, "infinipath%u", unit); 89 snprintf(iname, sizeof(iname), "infinipath%u", unit);
90 return iname; 90 return iname;
91} 91}
92 92
@@ -349,6 +349,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
349 qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK; 349 qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
350 if (qp_num != QIB_MULTICAST_QPN) { 350 if (qp_num != QIB_MULTICAST_QPN) {
351 int ruc_res; 351 int ruc_res;
352
352 qp = qib_lookup_qpn(ibp, qp_num); 353 qp = qib_lookup_qpn(ibp, qp_num);
353 if (!qp) 354 if (!qp)
354 goto drop; 355 goto drop;
@@ -461,6 +462,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
461 rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset; 462 rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
462 if (dd->flags & QIB_NODMA_RTAIL) { 463 if (dd->flags & QIB_NODMA_RTAIL) {
463 u32 seq = qib_hdrget_seq(rhf_addr); 464 u32 seq = qib_hdrget_seq(rhf_addr);
465
464 if (seq != rcd->seq_cnt) 466 if (seq != rcd->seq_cnt)
465 goto bail; 467 goto bail;
466 hdrqtail = 0; 468 hdrqtail = 0;
@@ -651,6 +653,7 @@ bail:
651int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc) 653int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
652{ 654{
653 struct qib_devdata *dd = ppd->dd; 655 struct qib_devdata *dd = ppd->dd;
656
654 ppd->lid = lid; 657 ppd->lid = lid;
655 ppd->lmc = lmc; 658 ppd->lmc = lmc;
656 659
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
index 4d5d71aaa2b4..311ee6c3dd5e 100644
--- a/drivers/infiniband/hw/qib/qib_eeprom.c
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -153,6 +153,7 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
153 153
154 if (t && dd0->nguid > 1 && t <= dd0->nguid) { 154 if (t && dd0->nguid > 1 && t <= dd0->nguid) {
155 u8 oguid; 155 u8 oguid;
156
156 dd->base_guid = dd0->base_guid; 157 dd->base_guid = dd0->base_guid;
157 bguid = (u8 *) &dd->base_guid; 158 bguid = (u8 *) &dd->base_guid;
158 159
@@ -251,206 +252,25 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
251 * This board has a Serial-prefix, which is stored 252 * This board has a Serial-prefix, which is stored
252 * elsewhere for backward-compatibility. 253 * elsewhere for backward-compatibility.
253 */ 254 */
254 memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix); 255 memcpy(snp, ifp->if_sprefix, sizeof(ifp->if_sprefix));
255 snp[sizeof ifp->if_sprefix] = '\0'; 256 snp[sizeof(ifp->if_sprefix)] = '\0';
256 len = strlen(snp); 257 len = strlen(snp);
257 snp += len; 258 snp += len;
258 len = (sizeof dd->serial) - len; 259 len = sizeof(dd->serial) - len;
259 if (len > sizeof ifp->if_serial) 260 if (len > sizeof(ifp->if_serial))
260 len = sizeof ifp->if_serial; 261 len = sizeof(ifp->if_serial);
261 memcpy(snp, ifp->if_serial, len); 262 memcpy(snp, ifp->if_serial, len);
262 } else 263 } else {
263 memcpy(dd->serial, ifp->if_serial, 264 memcpy(dd->serial, ifp->if_serial, sizeof(ifp->if_serial));
264 sizeof ifp->if_serial); 265 }
265 if (!strstr(ifp->if_comment, "Tested successfully")) 266 if (!strstr(ifp->if_comment, "Tested successfully"))
266 qib_dev_err(dd, 267 qib_dev_err(dd,
267 "Board SN %s did not pass functional test: %s\n", 268 "Board SN %s did not pass functional test: %s\n",
268 dd->serial, ifp->if_comment); 269 dd->serial, ifp->if_comment);
269 270
270 memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
271 /*
272 * Power-on (actually "active") hours are kept as little-endian value
273 * in EEPROM, but as seconds in a (possibly as small as 24-bit)
274 * atomic_t while running.
275 */
276 atomic_set(&dd->active_time, 0);
277 dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
278
279done: 271done:
280 vfree(buf); 272 vfree(buf);
281 273
282bail:; 274bail:;
283} 275}
284 276
285/**
286 * qib_update_eeprom_log - copy active-time and error counters to eeprom
287 * @dd: the qlogic_ib device
288 *
289 * Although the time is kept as seconds in the qib_devdata struct, it is
290 * rounded to hours for re-write, as we have only 16 bits in EEPROM.
291 * First-cut code reads whole (expected) struct qib_flash, modifies,
292 * re-writes. Future direction: read/write only what we need, assuming
293 * that the EEPROM had to have been "good enough" for driver init, and
294 * if not, we aren't making it worse.
295 *
296 */
297int qib_update_eeprom_log(struct qib_devdata *dd)
298{
299 void *buf;
300 struct qib_flash *ifp;
301 int len, hi_water;
302 uint32_t new_time, new_hrs;
303 u8 csum;
304 int ret, idx;
305 unsigned long flags;
306
307 /* first, check if we actually need to do anything. */
308 ret = 0;
309 for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
310 if (dd->eep_st_new_errs[idx]) {
311 ret = 1;
312 break;
313 }
314 }
315 new_time = atomic_read(&dd->active_time);
316
317 if (ret == 0 && new_time < 3600)
318 goto bail;
319
320 /*
321 * The quick-check above determined that there is something worthy
322 * of logging, so get current contents and do a more detailed idea.
323 * read full flash, not just currently used part, since it may have
324 * been written with a newer definition
325 */
326 len = sizeof(struct qib_flash);
327 buf = vmalloc(len);
328 ret = 1;
329 if (!buf) {
330 qib_dev_err(dd,
331 "Couldn't allocate memory to read %u bytes from eeprom for logging\n",
332 len);
333 goto bail;
334 }
335
336 /* Grab semaphore and read current EEPROM. If we get an
337 * error, let go, but if not, keep it until we finish write.
338 */
339 ret = mutex_lock_interruptible(&dd->eep_lock);
340 if (ret) {
341 qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
342 goto free_bail;
343 }
344 ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
345 if (ret) {
346 mutex_unlock(&dd->eep_lock);
347 qib_dev_err(dd, "Unable read EEPROM for logging\n");
348 goto free_bail;
349 }
350 ifp = (struct qib_flash *)buf;
351
352 csum = flash_csum(ifp, 0);
353 if (csum != ifp->if_csum) {
354 mutex_unlock(&dd->eep_lock);
355 qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
356 csum, ifp->if_csum);
357 ret = 1;
358 goto free_bail;
359 }
360 hi_water = 0;
361 spin_lock_irqsave(&dd->eep_st_lock, flags);
362 for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
363 int new_val = dd->eep_st_new_errs[idx];
364 if (new_val) {
365 /*
366 * If we have seen any errors, add to EEPROM values
367 * We need to saturate at 0xFF (255) and we also
368 * would need to adjust the checksum if we were
369 * trying to minimize EEPROM traffic
370 * Note that we add to actual current count in EEPROM,
371 * in case it was altered while we were running.
372 */
373 new_val += ifp->if_errcntp[idx];
374 if (new_val > 0xFF)
375 new_val = 0xFF;
376 if (ifp->if_errcntp[idx] != new_val) {
377 ifp->if_errcntp[idx] = new_val;
378 hi_water = offsetof(struct qib_flash,
379 if_errcntp) + idx;
380 }
381 /*
382 * update our shadow (used to minimize EEPROM
383 * traffic), to match what we are about to write.
384 */
385 dd->eep_st_errs[idx] = new_val;
386 dd->eep_st_new_errs[idx] = 0;
387 }
388 }
389 /*
390 * Now update active-time. We would like to round to the nearest hour
391 * but unless atomic_t are sure to be proper signed ints we cannot,
392 * because we need to account for what we "transfer" to EEPROM and
393 * if we log an hour at 31 minutes, then we would need to set
394 * active_time to -29 to accurately count the _next_ hour.
395 */
396 if (new_time >= 3600) {
397 new_hrs = new_time / 3600;
398 atomic_sub((new_hrs * 3600), &dd->active_time);
399 new_hrs += dd->eep_hrs;
400 if (new_hrs > 0xFFFF)
401 new_hrs = 0xFFFF;
402 dd->eep_hrs = new_hrs;
403 if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
404 ifp->if_powerhour[0] = new_hrs & 0xFF;
405 hi_water = offsetof(struct qib_flash, if_powerhour);
406 }
407 if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
408 ifp->if_powerhour[1] = new_hrs >> 8;
409 hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
410 }
411 }
412 /*
413 * There is a tiny possibility that we could somehow fail to write
414 * the EEPROM after updating our shadows, but problems from holding
415 * the spinlock too long are a much bigger issue.
416 */
417 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
418 if (hi_water) {
419 /* we made some change to the data, update cksum and write */
420 csum = flash_csum(ifp, 1);
421 ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
422 }
423 mutex_unlock(&dd->eep_lock);
424 if (ret)
425 qib_dev_err(dd, "Failed updating EEPROM\n");
426
427free_bail:
428 vfree(buf);
429bail:
430 return ret;
431}
432
433/**
434 * qib_inc_eeprom_err - increment one of the four error counters
435 * that are logged to EEPROM.
436 * @dd: the qlogic_ib device
437 * @eidx: 0..3, the counter to increment
438 * @incr: how much to add
439 *
440 * Each counter is 8-bits, and saturates at 255 (0xFF). They
441 * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
442 * is called, but it can only be called in a context that allows sleep.
443 * This function can be called even at interrupt level.
444 */
445void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
446{
447 uint new_val;
448 unsigned long flags;
449
450 spin_lock_irqsave(&dd->eep_st_lock, flags);
451 new_val = dd->eep_st_new_errs[eidx] + incr;
452 if (new_val > 255)
453 new_val = 255;
454 dd->eep_st_new_errs[eidx] = new_val;
455 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
456}
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index b15e34eeef68..41937c6f888a 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -351,9 +351,10 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
351 * unless perhaps the user has mpin'ed the pages 351 * unless perhaps the user has mpin'ed the pages
352 * themselves. 352 * themselves.
353 */ 353 */
354 qib_devinfo(dd->pcidev, 354 qib_devinfo(
355 "Failed to lock addr %p, %u pages: " 355 dd->pcidev,
356 "errno %d\n", (void *) vaddr, cnt, -ret); 356 "Failed to lock addr %p, %u pages: errno %d\n",
357 (void *) vaddr, cnt, -ret);
357 goto done; 358 goto done;
358 } 359 }
359 for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) { 360 for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
@@ -437,7 +438,7 @@ cleanup:
437 goto cleanup; 438 goto cleanup;
438 } 439 }
439 if (copy_to_user((void __user *) (unsigned long) ti->tidmap, 440 if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
440 tidmap, sizeof tidmap)) { 441 tidmap, sizeof(tidmap))) {
441 ret = -EFAULT; 442 ret = -EFAULT;
442 goto cleanup; 443 goto cleanup;
443 } 444 }
@@ -484,7 +485,7 @@ static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
484 } 485 }
485 486
486 if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap, 487 if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
487 sizeof tidmap)) { 488 sizeof(tidmap))) {
488 ret = -EFAULT; 489 ret = -EFAULT;
489 goto done; 490 goto done;
490 } 491 }
@@ -951,8 +952,8 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
951 /* rcvegrbufs are read-only on the slave */ 952 /* rcvegrbufs are read-only on the slave */
952 if (vma->vm_flags & VM_WRITE) { 953 if (vma->vm_flags & VM_WRITE) {
953 qib_devinfo(dd->pcidev, 954 qib_devinfo(dd->pcidev,
954 "Can't map eager buffers as " 955 "Can't map eager buffers as writable (flags=%lx)\n",
955 "writable (flags=%lx)\n", vma->vm_flags); 956 vma->vm_flags);
956 ret = -EPERM; 957 ret = -EPERM;
957 goto bail; 958 goto bail;
958 } 959 }
@@ -1185,6 +1186,7 @@ static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
1185 */ 1186 */
1186 if (weight >= qib_cpulist_count) { 1187 if (weight >= qib_cpulist_count) {
1187 int cpu; 1188 int cpu;
1189
1188 cpu = find_first_zero_bit(qib_cpulist, 1190 cpu = find_first_zero_bit(qib_cpulist,
1189 qib_cpulist_count); 1191 qib_cpulist_count);
1190 if (cpu == qib_cpulist_count) 1192 if (cpu == qib_cpulist_count)
@@ -1247,10 +1249,7 @@ static int init_subctxts(struct qib_devdata *dd,
1247 if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16, 1249 if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
1248 uinfo->spu_userversion & 0xffff)) { 1250 uinfo->spu_userversion & 0xffff)) {
1249 qib_devinfo(dd->pcidev, 1251 qib_devinfo(dd->pcidev,
1250 "Mismatched user version (%d.%d) and driver " 1252 "Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n",
1251 "version (%d.%d) while context sharing. Ensure "
1252 "that driver and library are from the same "
1253 "release.\n",
1254 (int) (uinfo->spu_userversion >> 16), 1253 (int) (uinfo->spu_userversion >> 16),
1255 (int) (uinfo->spu_userversion & 0xffff), 1254 (int) (uinfo->spu_userversion & 0xffff),
1256 QIB_USER_SWMAJOR, QIB_USER_SWMINOR); 1255 QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
@@ -1391,6 +1390,7 @@ static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
1391 } 1390 }
1392 if (!ppd) { 1391 if (!ppd) {
1393 u32 pidx = ctxt % dd->num_pports; 1392 u32 pidx = ctxt % dd->num_pports;
1393
1394 if (usable(dd->pport + pidx)) 1394 if (usable(dd->pport + pidx))
1395 ppd = dd->pport + pidx; 1395 ppd = dd->pport + pidx;
1396 else { 1396 else {
@@ -1438,10 +1438,12 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
1438 1438
1439 if (alg == QIB_PORT_ALG_ACROSS) { 1439 if (alg == QIB_PORT_ALG_ACROSS) {
1440 unsigned inuse = ~0U; 1440 unsigned inuse = ~0U;
1441
1441 /* find device (with ACTIVE ports) with fewest ctxts in use */ 1442 /* find device (with ACTIVE ports) with fewest ctxts in use */
1442 for (ndev = 0; ndev < devmax; ndev++) { 1443 for (ndev = 0; ndev < devmax; ndev++) {
1443 struct qib_devdata *dd = qib_lookup(ndev); 1444 struct qib_devdata *dd = qib_lookup(ndev);
1444 unsigned cused = 0, cfree = 0, pusable = 0; 1445 unsigned cused = 0, cfree = 0, pusable = 0;
1446
1445 if (!dd) 1447 if (!dd)
1446 continue; 1448 continue;
1447 if (port && port <= dd->num_pports && 1449 if (port && port <= dd->num_pports &&
@@ -1471,6 +1473,7 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
1471 } else { 1473 } else {
1472 for (ndev = 0; ndev < devmax; ndev++) { 1474 for (ndev = 0; ndev < devmax; ndev++) {
1473 struct qib_devdata *dd = qib_lookup(ndev); 1475 struct qib_devdata *dd = qib_lookup(ndev);
1476
1474 if (dd) { 1477 if (dd) {
1475 ret = choose_port_ctxt(fp, dd, port, uinfo); 1478 ret = choose_port_ctxt(fp, dd, port, uinfo);
1476 if (!ret) 1479 if (!ret)
@@ -1556,6 +1559,7 @@ static int find_hca(unsigned int cpu, int *unit)
1556 } 1559 }
1557 for (ndev = 0; ndev < devmax; ndev++) { 1560 for (ndev = 0; ndev < devmax; ndev++) {
1558 struct qib_devdata *dd = qib_lookup(ndev); 1561 struct qib_devdata *dd = qib_lookup(ndev);
1562
1559 if (dd) { 1563 if (dd) {
1560 if (pcibus_to_node(dd->pcidev->bus) < 0) { 1564 if (pcibus_to_node(dd->pcidev->bus) < 0) {
1561 ret = -EINVAL; 1565 ret = -EINVAL;
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 81854586c081..650897a8591e 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -106,7 +106,7 @@ static ssize_t driver_stats_read(struct file *file, char __user *buf,
106{ 106{
107 qib_stats.sps_ints = qib_sps_ints(); 107 qib_stats.sps_ints = qib_sps_ints();
108 return simple_read_from_buffer(buf, count, ppos, &qib_stats, 108 return simple_read_from_buffer(buf, count, ppos, &qib_stats,
109 sizeof qib_stats); 109 sizeof(qib_stats));
110} 110}
111 111
112/* 112/*
@@ -133,7 +133,7 @@ static ssize_t driver_names_read(struct file *file, char __user *buf,
133 size_t count, loff_t *ppos) 133 size_t count, loff_t *ppos)
134{ 134{
135 return simple_read_from_buffer(buf, count, ppos, qib_statnames, 135 return simple_read_from_buffer(buf, count, ppos, qib_statnames,
136 sizeof qib_statnames - 1); /* no null */ 136 sizeof(qib_statnames) - 1); /* no null */
137} 137}
138 138
139static const struct file_operations driver_ops[] = { 139static const struct file_operations driver_ops[] = {
@@ -379,7 +379,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
379 int ret, i; 379 int ret, i;
380 380
381 /* create the per-unit directory */ 381 /* create the per-unit directory */
382 snprintf(unit, sizeof unit, "%u", dd->unit); 382 snprintf(unit, sizeof(unit), "%u", dd->unit);
383 ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir, 383 ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
384 &simple_dir_operations, dd); 384 &simple_dir_operations, dd);
385 if (ret) { 385 if (ret) {
@@ -455,7 +455,7 @@ static int remove_file(struct dentry *parent, char *name)
455 } 455 }
456 456
457 spin_lock(&tmp->d_lock); 457 spin_lock(&tmp->d_lock);
458 if (!(d_unhashed(tmp) && tmp->d_inode)) { 458 if (!d_unhashed(tmp) && tmp->d_inode) {
459 __d_drop(tmp); 459 __d_drop(tmp);
460 spin_unlock(&tmp->d_lock); 460 spin_unlock(&tmp->d_lock);
461 simple_unlink(parent->d_inode, tmp); 461 simple_unlink(parent->d_inode, tmp);
@@ -482,7 +482,7 @@ static int remove_device_files(struct super_block *sb,
482 482
483 root = dget(sb->s_root); 483 root = dget(sb->s_root);
484 mutex_lock(&root->d_inode->i_mutex); 484 mutex_lock(&root->d_inode->i_mutex);
485 snprintf(unit, sizeof unit, "%u", dd->unit); 485 snprintf(unit, sizeof(unit), "%u", dd->unit);
486 dir = lookup_one_len(unit, root, strlen(unit)); 486 dir = lookup_one_len(unit, root, strlen(unit));
487 487
488 if (IS_ERR(dir)) { 488 if (IS_ERR(dir)) {
@@ -560,6 +560,7 @@ static struct dentry *qibfs_mount(struct file_system_type *fs_type, int flags,
560 const char *dev_name, void *data) 560 const char *dev_name, void *data)
561{ 561{
562 struct dentry *ret; 562 struct dentry *ret;
563
563 ret = mount_single(fs_type, flags, data, qibfs_fill_super); 564 ret = mount_single(fs_type, flags, data, qibfs_fill_super);
564 if (!IS_ERR(ret)) 565 if (!IS_ERR(ret))
565 qib_super = ret->d_sb; 566 qib_super = ret->d_sb;
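
Note: besides the sizeof(...) parenthesization, the one behavioral change in qib_fs.c is in remove_file(): !(d_unhashed(tmp) && tmp->d_inode) became !d_unhashed(tmp) && tmp->d_inode. By De Morgan the old test is !d_unhashed || !d_inode, so it also fired for an unhashed or negative (no-inode) dentry; the new test drops and unlinks only a still-hashed dentry that actually has an inode, which looks like the intended case. A standalone truth-table demo (plain ints standing in for dentry state, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            for (int unhashed = 0; unhashed <= 1; unhashed++)
                    for (int has_inode = 0; has_inode <= 1; has_inode++)
                            printf("unhashed=%d inode=%d old=%d new=%d\n",
                                   unhashed, has_inode,
                                   !(unhashed && has_inode),   /* old test */
                                   !unhashed && has_inode);    /* new test */
            return 0;
    }

The old expression is true in three of the four states; the new one only in the hashed-with-inode state.
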
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index d68266ac7619..0d2ba59af30a 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -333,6 +333,7 @@ static inline void qib_write_ureg(const struct qib_devdata *dd,
333 enum qib_ureg regno, u64 value, int ctxt) 333 enum qib_ureg regno, u64 value, int ctxt)
334{ 334{
335 u64 __iomem *ubase; 335 u64 __iomem *ubase;
336
336 if (dd->userbase) 337 if (dd->userbase)
337 ubase = (u64 __iomem *) 338 ubase = (u64 __iomem *)
338 ((char __iomem *) dd->userbase + 339 ((char __iomem *) dd->userbase +
@@ -834,14 +835,14 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
834 bits = (u32) ((hwerrs >> 835 bits = (u32) ((hwerrs >>
835 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) & 836 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
836 QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK); 837 QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
837 snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, 838 snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
838 "[PCIe Mem Parity Errs %x] ", bits); 839 "[PCIe Mem Parity Errs %x] ", bits);
839 strlcat(msg, bitsmsg, msgl); 840 strlcat(msg, bitsmsg, msgl);
840 } 841 }
841 842
842 if (hwerrs & _QIB_PLL_FAIL) { 843 if (hwerrs & _QIB_PLL_FAIL) {
843 isfatal = 1; 844 isfatal = 1;
844 snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, 845 snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
845 "[PLL failed (%llx), InfiniPath hardware unusable]", 846 "[PLL failed (%llx), InfiniPath hardware unusable]",
846 (unsigned long long) hwerrs & _QIB_PLL_FAIL); 847 (unsigned long long) hwerrs & _QIB_PLL_FAIL);
847 strlcat(msg, bitsmsg, msgl); 848 strlcat(msg, bitsmsg, msgl);
@@ -1014,7 +1015,7 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
1014 1015
1015 /* do these first, they are most important */ 1016 /* do these first, they are most important */
1016 if (errs & ERR_MASK(HardwareErr)) 1017 if (errs & ERR_MASK(HardwareErr))
1017 qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); 1018 qib_handle_6120_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
1018 else 1019 else
1019 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) 1020 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1020 if (errs & dd->eep_st_masks[log_idx].errs_to_log) 1021 if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1062,7 +1063,7 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
1062 */ 1063 */
1063 mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) | 1064 mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) |
1064 ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr); 1065 ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr);
1065 qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask); 1066 qib_decode_6120_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
1066 1067
1067 if (errs & E_SUM_PKTERRS) 1068 if (errs & E_SUM_PKTERRS)
1068 qib_stats.sps_rcverrs++; 1069 qib_stats.sps_rcverrs++;
@@ -1670,6 +1671,7 @@ static irqreturn_t qib_6120intr(int irq, void *data)
1670 } 1671 }
1671 if (crcs) { 1672 if (crcs) {
1672 u32 cntr = dd->cspec->lli_counter; 1673 u32 cntr = dd->cspec->lli_counter;
1674
1673 cntr += crcs; 1675 cntr += crcs;
1674 if (cntr) { 1676 if (cntr) {
1675 if (cntr > dd->cspec->lli_thresh) { 1677 if (cntr > dd->cspec->lli_thresh) {
@@ -1722,6 +1724,7 @@ static void qib_setup_6120_interrupt(struct qib_devdata *dd)
1722 "irq is 0, BIOS error? Interrupts won't work\n"); 1724 "irq is 0, BIOS error? Interrupts won't work\n");
1723 else { 1725 else {
1724 int ret; 1726 int ret;
1727
1725 ret = request_irq(dd->cspec->irq, qib_6120intr, 0, 1728 ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
1726 QIB_DRV_NAME, dd); 1729 QIB_DRV_NAME, dd);
1727 if (ret) 1730 if (ret)
@@ -2681,8 +2684,6 @@ static void qib_get_6120_faststats(unsigned long opaque)
2681 spin_lock_irqsave(&dd->eep_st_lock, flags); 2684 spin_lock_irqsave(&dd->eep_st_lock, flags);
2682 traffic_wds -= dd->traffic_wds; 2685 traffic_wds -= dd->traffic_wds;
2683 dd->traffic_wds += traffic_wds; 2686 dd->traffic_wds += traffic_wds;
2684 if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
2685 atomic_add(5, &dd->active_time); /* S/B #define */
2686 spin_unlock_irqrestore(&dd->eep_st_lock, flags); 2687 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
2687 2688
2688 qib_chk_6120_errormask(dd); 2689 qib_chk_6120_errormask(dd);
@@ -2929,6 +2930,7 @@ bail:
2929static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what) 2930static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
2930{ 2931{
2931 int ret = 0; 2932 int ret = 0;
2933
2932 if (!strncmp(what, "ibc", 3)) { 2934 if (!strncmp(what, "ibc", 3)) {
2933 ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback); 2935 ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
2934 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n", 2936 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
@@ -3170,6 +3172,7 @@ static void get_6120_chip_params(struct qib_devdata *dd)
3170static void set_6120_baseaddrs(struct qib_devdata *dd) 3172static void set_6120_baseaddrs(struct qib_devdata *dd)
3171{ 3173{
3172 u32 cregbase; 3174 u32 cregbase;
3175
3173 cregbase = qib_read_kreg32(dd, kr_counterregbase); 3176 cregbase = qib_read_kreg32(dd, kr_counterregbase);
3174 dd->cspec->cregbase = (u64 __iomem *) 3177 dd->cspec->cregbase = (u64 __iomem *)
3175 ((char __iomem *) dd->kregbase + cregbase); 3178 ((char __iomem *) dd->kregbase + cregbase);
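
Note: the bulk of the qib_iba6120.c changes are the same two checkpatch cleanups seen above, blank lines after declarations and sizeof x rewritten as sizeof(x). Both spellings are legal C when the operand is an expression; parentheses are only mandatory for a type name, so this is style, not behavior. A standalone demo:

    #include <stdio.h>

    int main(void)
    {
            char buf[16];

            printf("%zu %zu %zu\n",
                   sizeof buf,          /* legal: expression operand */
                   sizeof(buf),         /* preferred kernel style */
                   sizeof(char [16]));  /* parens required: type name */
            return 0;
    }
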
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 7dec89fdc124..22affda8af88 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -902,7 +902,8 @@ static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
902 errs &= QLOGIC_IB_E_SDMAERRS; 902 errs &= QLOGIC_IB_E_SDMAERRS;
903 903
904 msg = dd->cspec->sdmamsgbuf; 904 msg = dd->cspec->sdmamsgbuf;
905 qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf); 905 qib_decode_7220_sdma_errs(ppd, errs, msg,
906 sizeof(dd->cspec->sdmamsgbuf));
906 spin_lock_irqsave(&ppd->sdma_lock, flags); 907 spin_lock_irqsave(&ppd->sdma_lock, flags);
907 908
908 if (errs & ERR_MASK(SendBufMisuseErr)) { 909 if (errs & ERR_MASK(SendBufMisuseErr)) {
@@ -1043,6 +1044,7 @@ done:
1043static void reenable_7220_chase(unsigned long opaque) 1044static void reenable_7220_chase(unsigned long opaque)
1044{ 1045{
1045 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; 1046 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
1047
1046 ppd->cpspec->chase_timer.expires = 0; 1048 ppd->cpspec->chase_timer.expires = 0;
1047 qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN, 1049 qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1048 QLOGIC_IB_IBCC_LINKINITCMD_POLL); 1050 QLOGIC_IB_IBCC_LINKINITCMD_POLL);
@@ -1101,7 +1103,7 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
1101 1103
1102 /* do these first, they are most important */ 1104 /* do these first, they are most important */
1103 if (errs & ERR_MASK(HardwareErr)) 1105 if (errs & ERR_MASK(HardwareErr))
1104 qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); 1106 qib_7220_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
1105 else 1107 else
1106 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) 1108 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1107 if (errs & dd->eep_st_masks[log_idx].errs_to_log) 1109 if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1155,7 +1157,7 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
1155 ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | 1157 ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
1156 ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr); 1158 ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);
1157 1159
1158 qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask); 1160 qib_decode_7220_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
1159 1161
1160 if (errs & E_SUM_PKTERRS) 1162 if (errs & E_SUM_PKTERRS)
1161 qib_stats.sps_rcverrs++; 1163 qib_stats.sps_rcverrs++;
@@ -1380,7 +1382,7 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
1380 bits = (u32) ((hwerrs >> 1382 bits = (u32) ((hwerrs >>
1381 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) & 1383 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
1382 QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK); 1384 QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
1383 snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, 1385 snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
1384 "[PCIe Mem Parity Errs %x] ", bits); 1386 "[PCIe Mem Parity Errs %x] ", bits);
1385 strlcat(msg, bitsmsg, msgl); 1387 strlcat(msg, bitsmsg, msgl);
1386 } 1388 }
@@ -1390,7 +1392,7 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
1390 1392
1391 if (hwerrs & _QIB_PLL_FAIL) { 1393 if (hwerrs & _QIB_PLL_FAIL) {
1392 isfatal = 1; 1394 isfatal = 1;
1393 snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, 1395 snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
1394 "[PLL failed (%llx), InfiniPath hardware unusable]", 1396 "[PLL failed (%llx), InfiniPath hardware unusable]",
1395 (unsigned long long) hwerrs & _QIB_PLL_FAIL); 1397 (unsigned long long) hwerrs & _QIB_PLL_FAIL);
1396 strlcat(msg, bitsmsg, msgl); 1398 strlcat(msg, bitsmsg, msgl);
@@ -3297,8 +3299,6 @@ static void qib_get_7220_faststats(unsigned long opaque)
3297 spin_lock_irqsave(&dd->eep_st_lock, flags); 3299 spin_lock_irqsave(&dd->eep_st_lock, flags);
3298 traffic_wds -= dd->traffic_wds; 3300 traffic_wds -= dd->traffic_wds;
3299 dd->traffic_wds += traffic_wds; 3301 dd->traffic_wds += traffic_wds;
3300 if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
3301 atomic_add(5, &dd->active_time); /* S/B #define */
3302 spin_unlock_irqrestore(&dd->eep_st_lock, flags); 3302 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
3303done: 3303done:
3304 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); 3304 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
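
Note: the two-line deletion in qib_get_6120_faststats() above and qib_get_7220_faststats() here (repeated for the 7322 below) drops the traffic-threshold update of dd->active_time. Together with the qib_update_eeprom_log() call and logged_errors sysfs attribute removed further down, this reads as the retirement of the driver's EEPROM activity/error log. What survives is just the word-count bookkeeping, copied from the hunks above (a fragment, not compilable on its own):

    spin_lock_irqsave(&dd->eep_st_lock, flags);
    traffic_wds -= dd->traffic_wds;     /* delta since last sample */
    dd->traffic_wds += traffic_wds;
    spin_unlock_irqrestore(&dd->eep_st_lock, flags);
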
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index a7eb32517a04..ef97b71c8f7d 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -117,7 +117,7 @@ MODULE_PARM_DESC(chase, "Enable state chase handling");
117 117
118static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */ 118static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
119module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO); 119module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
120MODULE_PARM_DESC(long_attenuation, \ 120MODULE_PARM_DESC(long_attenuation,
121 "attenuation cutoff (dB) for long copper cable setup"); 121 "attenuation cutoff (dB) for long copper cable setup");
122 122
123static ushort qib_singleport; 123static ushort qib_singleport;
@@ -153,11 +153,12 @@ static struct kparam_string kp_txselect = {
153static int setup_txselect(const char *, struct kernel_param *); 153static int setup_txselect(const char *, struct kernel_param *);
154module_param_call(txselect, setup_txselect, param_get_string, 154module_param_call(txselect, setup_txselect, param_get_string,
155 &kp_txselect, S_IWUSR | S_IRUGO); 155 &kp_txselect, S_IWUSR | S_IRUGO);
156MODULE_PARM_DESC(txselect, \ 156MODULE_PARM_DESC(txselect,
157 "Tx serdes indices (for no QSFP or invalid QSFP data)"); 157 "Tx serdes indices (for no QSFP or invalid QSFP data)");
158 158
159#define BOARD_QME7342 5 159#define BOARD_QME7342 5
160#define BOARD_QMH7342 6 160#define BOARD_QMH7342 6
161#define BOARD_QMH7360 9
161#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ 162#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
162 BOARD_QMH7342) 163 BOARD_QMH7342)
163#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ 164#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
@@ -817,6 +818,7 @@ static inline void qib_write_ureg(const struct qib_devdata *dd,
817 enum qib_ureg regno, u64 value, int ctxt) 818 enum qib_ureg regno, u64 value, int ctxt)
818{ 819{
819 u64 __iomem *ubase; 820 u64 __iomem *ubase;
821
820 if (dd->userbase) 822 if (dd->userbase)
821 ubase = (u64 __iomem *) 823 ubase = (u64 __iomem *)
822 ((char __iomem *) dd->userbase + 824 ((char __iomem *) dd->userbase +
@@ -1677,7 +1679,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
1677 /* do these first, they are most important */ 1679 /* do these first, they are most important */
1678 if (errs & QIB_E_HARDWARE) { 1680 if (errs & QIB_E_HARDWARE) {
1679 *msg = '\0'; 1681 *msg = '\0';
1680 qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); 1682 qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
1681 } else 1683 } else
1682 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) 1684 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1683 if (errs & dd->eep_st_masks[log_idx].errs_to_log) 1685 if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1702,7 +1704,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
1702 mask = QIB_E_HARDWARE; 1704 mask = QIB_E_HARDWARE;
1703 *msg = '\0'; 1705 *msg = '\0';
1704 1706
1705 err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask, 1707 err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
1706 qib_7322error_msgs); 1708 qib_7322error_msgs);
1707 1709
1708 /* 1710 /*
@@ -1889,10 +1891,10 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1889 *msg = '\0'; 1891 *msg = '\0';
1890 1892
1891 if (errs & ~QIB_E_P_BITSEXTANT) { 1893 if (errs & ~QIB_E_P_BITSEXTANT) {
1892 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, 1894 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1893 errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs); 1895 errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1894 if (!*msg) 1896 if (!*msg)
1895 snprintf(msg, sizeof ppd->cpspec->epmsgbuf, 1897 snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
1896 "no others"); 1898 "no others");
1897 qib_dev_porterr(dd, ppd->port, 1899 qib_dev_porterr(dd, ppd->port,
1898 "error interrupt with unknown errors 0x%016Lx set (and %s)\n", 1900 "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
@@ -1906,7 +1908,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1906 /* determine cause, then write to clear */ 1908 /* determine cause, then write to clear */
1907 symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom); 1909 symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1908 qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0); 1910 qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1909 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom, 1911 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
1910 hdrchk_msgs); 1912 hdrchk_msgs);
1911 *msg = '\0'; 1913 *msg = '\0';
1912 /* senderrbuf cleared in SPKTERRS below */ 1914 /* senderrbuf cleared in SPKTERRS below */
@@ -1922,7 +1924,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1922 * isn't valid. We don't want to confuse people, so 1924 * isn't valid. We don't want to confuse people, so
1923 * we just don't print them, except at debug 1925 * we just don't print them, except at debug
1924 */ 1926 */
1925 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, 1927 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1926 (errs & QIB_E_P_LINK_PKTERRS), 1928 (errs & QIB_E_P_LINK_PKTERRS),
1927 qib_7322p_error_msgs); 1929 qib_7322p_error_msgs);
1928 *msg = '\0'; 1930 *msg = '\0';
@@ -1938,7 +1940,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1938 * valid. We don't want to confuse people, so we just 1940 * valid. We don't want to confuse people, so we just
1939 * don't print them, except at debug 1941 * don't print them, except at debug
1940 */ 1942 */
1941 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs, 1943 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
1942 qib_7322p_error_msgs); 1944 qib_7322p_error_msgs);
1943 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS; 1945 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1944 *msg = '\0'; 1946 *msg = '\0';
@@ -2031,6 +2033,7 @@ static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
2031 if (dd->cspec->num_msix_entries) { 2033 if (dd->cspec->num_msix_entries) {
2032 /* and same for MSIx */ 2034 /* and same for MSIx */
2033 u64 val = qib_read_kreg64(dd, kr_intgranted); 2035 u64 val = qib_read_kreg64(dd, kr_intgranted);
2036
2034 if (val) 2037 if (val)
2035 qib_write_kreg(dd, kr_intgranted, val); 2038 qib_write_kreg(dd, kr_intgranted, val);
2036 } 2039 }
@@ -2176,6 +2179,7 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2176 int err; 2179 int err;
2177 unsigned long flags; 2180 unsigned long flags;
2178 struct qib_pportdata *ppd = dd->pport; 2181 struct qib_pportdata *ppd = dd->pport;
2182
2179 for (; pidx < dd->num_pports; ++pidx, ppd++) { 2183 for (; pidx < dd->num_pports; ++pidx, ppd++) {
2180 err = 0; 2184 err = 0;
2181 if (pidx == 0 && (hwerrs & 2185 if (pidx == 0 && (hwerrs &
@@ -2801,9 +2805,11 @@ static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
2801 2805
2802 if (n->rcv) { 2806 if (n->rcv) {
2803 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; 2807 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2808
2804 qib_update_rhdrq_dca(rcd, cpu); 2809 qib_update_rhdrq_dca(rcd, cpu);
2805 } else { 2810 } else {
2806 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; 2811 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2812
2807 qib_update_sdma_dca(ppd, cpu); 2813 qib_update_sdma_dca(ppd, cpu);
2808 } 2814 }
2809} 2815}
@@ -2816,9 +2822,11 @@ static void qib_irq_notifier_release(struct kref *ref)
2816 2822
2817 if (n->rcv) { 2823 if (n->rcv) {
2818 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; 2824 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2825
2819 dd = rcd->dd; 2826 dd = rcd->dd;
2820 } else { 2827 } else {
2821 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; 2828 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2829
2822 dd = ppd->dd; 2830 dd = ppd->dd;
2823 } 2831 }
2824 qib_devinfo(dd->pcidev, 2832 qib_devinfo(dd->pcidev,
@@ -2994,6 +3002,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2994 struct qib_pportdata *ppd; 3002 struct qib_pportdata *ppd;
2995 struct qib_qsfp_data *qd; 3003 struct qib_qsfp_data *qd;
2996 u32 mask; 3004 u32 mask;
3005
2997 if (!dd->pport[pidx].link_speed_supported) 3006 if (!dd->pport[pidx].link_speed_supported)
2998 continue; 3007 continue;
2999 mask = QSFP_GPIO_MOD_PRS_N; 3008 mask = QSFP_GPIO_MOD_PRS_N;
@@ -3001,6 +3010,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
3001 mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx); 3010 mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
3002 if (gpiostatus & dd->cspec->gpio_mask & mask) { 3011 if (gpiostatus & dd->cspec->gpio_mask & mask) {
3003 u64 pins; 3012 u64 pins;
3013
3004 qd = &ppd->cpspec->qsfp_data; 3014 qd = &ppd->cpspec->qsfp_data;
3005 gpiostatus &= ~mask; 3015 gpiostatus &= ~mask;
3006 pins = qib_read_kreg64(dd, kr_extstatus); 3016 pins = qib_read_kreg64(dd, kr_extstatus);
@@ -3442,7 +3452,7 @@ try_intx:
3442 } 3452 }
3443 3453
3444 /* Try to get MSIx interrupts */ 3454 /* Try to get MSIx interrupts */
3445 memset(redirect, 0, sizeof redirect); 3455 memset(redirect, 0, sizeof(redirect));
3446 mask = ~0ULL; 3456 mask = ~0ULL;
3447 msixnum = 0; 3457 msixnum = 0;
3448 local_mask = cpumask_of_pcibus(dd->pcidev->bus); 3458 local_mask = cpumask_of_pcibus(dd->pcidev->bus);
@@ -3617,6 +3627,10 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
3617 n = "InfiniPath_QME7362"; 3627 n = "InfiniPath_QME7362";
3618 dd->flags |= QIB_HAS_QSFP; 3628 dd->flags |= QIB_HAS_QSFP;
3619 break; 3629 break;
3630 case BOARD_QMH7360:
3631 n = "Intel IB QDR 1P FLR-QSFP Adptr";
3632 dd->flags |= QIB_HAS_QSFP;
3633 break;
3620 case 15: 3634 case 15:
3621 n = "InfiniPath_QLE7342_TEST"; 3635 n = "InfiniPath_QLE7342_TEST";
3622 dd->flags |= QIB_HAS_QSFP; 3636 dd->flags |= QIB_HAS_QSFP;
@@ -3694,6 +3708,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
3694 */ 3708 */
3695 for (i = 0; i < msix_entries; i++) { 3709 for (i = 0; i < msix_entries; i++) {
3696 u64 vecaddr, vecdata; 3710 u64 vecaddr, vecdata;
3711
3697 vecaddr = qib_read_kreg64(dd, 2 * i + 3712 vecaddr = qib_read_kreg64(dd, 2 * i +
3698 (QIB_7322_MsixTable_OFFS / sizeof(u64))); 3713 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3699 vecdata = qib_read_kreg64(dd, 1 + 2 * i + 3714 vecdata = qib_read_kreg64(dd, 1 + 2 * i +
@@ -5178,8 +5193,6 @@ static void qib_get_7322_faststats(unsigned long opaque)
5178 spin_lock_irqsave(&ppd->dd->eep_st_lock, flags); 5193 spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
5179 traffic_wds -= ppd->dd->traffic_wds; 5194 traffic_wds -= ppd->dd->traffic_wds;
5180 ppd->dd->traffic_wds += traffic_wds; 5195 ppd->dd->traffic_wds += traffic_wds;
5181 if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
5182 atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
5183 spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags); 5196 spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
5184 if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active & 5197 if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
5185 QIB_IB_QDR) && 5198 QIB_IB_QDR) &&
@@ -5357,6 +5370,7 @@ static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5357static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed) 5370static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5358{ 5371{
5359 u64 newctrlb; 5372 u64 newctrlb;
5373
5360 newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK | 5374 newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5361 IBA7322_IBC_IBTA_1_2_MASK | 5375 IBA7322_IBC_IBTA_1_2_MASK |
5362 IBA7322_IBC_MAX_SPEED_MASK); 5376 IBA7322_IBC_MAX_SPEED_MASK);
@@ -5843,6 +5857,7 @@ static void get_7322_chip_params(struct qib_devdata *dd)
5843static void qib_7322_set_baseaddrs(struct qib_devdata *dd) 5857static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5844{ 5858{
5845 u32 cregbase; 5859 u32 cregbase;
5860
5846 cregbase = qib_read_kreg32(dd, kr_counterregbase); 5861 cregbase = qib_read_kreg32(dd, kr_counterregbase);
5847 5862
5848 dd->cspec->cregbase = (u64 __iomem *)(cregbase + 5863 dd->cspec->cregbase = (u64 __iomem *)(cregbase +
@@ -6183,6 +6198,7 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
6183 struct qib_devdata *dd; 6198 struct qib_devdata *dd;
6184 unsigned long val; 6199 unsigned long val;
6185 char *n; 6200 char *n;
6201
6186 if (strlen(str) >= MAX_ATTEN_LEN) { 6202 if (strlen(str) >= MAX_ATTEN_LEN) {
6187 pr_info("txselect_values string too long\n"); 6203 pr_info("txselect_values string too long\n");
6188 return -ENOSPC; 6204 return -ENOSPC;
@@ -6393,6 +6409,7 @@ static void write_7322_initregs(struct qib_devdata *dd)
6393 val = TIDFLOW_ERRBITS; /* these are W1C */ 6409 val = TIDFLOW_ERRBITS; /* these are W1C */
6394 for (i = 0; i < dd->cfgctxts; i++) { 6410 for (i = 0; i < dd->cfgctxts; i++) {
6395 int flow; 6411 int flow;
6412
6396 for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++) 6413 for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
6397 qib_write_ureg(dd, ur_rcvflowtable+flow, val, i); 6414 qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
6398 } 6415 }
@@ -6503,6 +6520,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
6503 6520
6504 for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) { 6521 for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6505 struct qib_chippport_specific *cp = ppd->cpspec; 6522 struct qib_chippport_specific *cp = ppd->cpspec;
6523
6506 ppd->link_speed_supported = features & PORT_SPD_CAP; 6524 ppd->link_speed_supported = features & PORT_SPD_CAP;
6507 features >>= PORT_SPD_CAP_SHIFT; 6525 features >>= PORT_SPD_CAP_SHIFT;
6508 if (!ppd->link_speed_supported) { 6526 if (!ppd->link_speed_supported) {
@@ -6581,8 +6599,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
6581 ppd->vls_supported = IB_VL_VL0_7; 6599 ppd->vls_supported = IB_VL_VL0_7;
6582 else { 6600 else {
6583 qib_devinfo(dd->pcidev, 6601 qib_devinfo(dd->pcidev,
6584 "Invalid num_vls %u for MTU %d " 6602 "Invalid num_vls %u for MTU %d , using 4 VLs\n",
6585 ", using 4 VLs\n",
6586 qib_num_cfg_vls, mtu); 6603 qib_num_cfg_vls, mtu);
6587 ppd->vls_supported = IB_VL_VL0_3; 6604 ppd->vls_supported = IB_VL_VL0_3;
6588 qib_num_cfg_vls = 4; 6605 qib_num_cfg_vls = 4;
@@ -7890,6 +7907,7 @@ static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7890static int serdes_7322_init(struct qib_pportdata *ppd) 7907static int serdes_7322_init(struct qib_pportdata *ppd)
7891{ 7908{
7892 int ret = 0; 7909 int ret = 0;
7910
7893 if (ppd->dd->cspec->r1) 7911 if (ppd->dd->cspec->r1)
7894 ret = serdes_7322_init_old(ppd); 7912 ret = serdes_7322_init_old(ppd);
7895 else 7913 else
@@ -8305,8 +8323,8 @@ static void force_h1(struct qib_pportdata *ppd)
8305 8323
8306static int qib_r_grab(struct qib_devdata *dd) 8324static int qib_r_grab(struct qib_devdata *dd)
8307{ 8325{
8308 u64 val; 8326 u64 val = SJA_EN;
8309 val = SJA_EN; 8327
8310 qib_write_kreg(dd, kr_r_access, val); 8328 qib_write_kreg(dd, kr_r_access, val);
8311 qib_read_kreg32(dd, kr_scratch); 8329 qib_read_kreg32(dd, kr_scratch);
8312 return 0; 8330 return 0;
@@ -8319,6 +8337,7 @@ static int qib_r_wait_for_rdy(struct qib_devdata *dd)
8319{ 8337{
8320 u64 val; 8338 u64 val;
8321 int timeout; 8339 int timeout;
8340
8322 for (timeout = 0; timeout < 100 ; ++timeout) { 8341 for (timeout = 0; timeout < 100 ; ++timeout) {
8323 val = qib_read_kreg32(dd, kr_r_access); 8342 val = qib_read_kreg32(dd, kr_r_access);
8324 if (val & R_RDY) 8343 if (val & R_RDY)
@@ -8346,6 +8365,7 @@ static int qib_r_shift(struct qib_devdata *dd, int bisten,
8346 } 8365 }
8347 if (inp) { 8366 if (inp) {
8348 int tdi = inp[pos >> 3] >> (pos & 7); 8367 int tdi = inp[pos >> 3] >> (pos & 7);
8368
8349 val |= ((tdi & 1) << R_TDI_LSB); 8369 val |= ((tdi & 1) << R_TDI_LSB);
8350 } 8370 }
8351 qib_write_kreg(dd, kr_r_access, val); 8371 qib_write_kreg(dd, kr_r_access, val);
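
Note: three things in the qib_iba7322.c section beyond the recurring style fixes. The stray line-continuation backslashes inside the two MODULE_PARM_DESC() invocations go away (backslash-newline is only needed in a macro definition, not in an invocation's argument list); qib_r_grab() now initializes val at its declaration; and BoardID 9 is added as BOARD_QMH7360, reported as "Intel IB QDR 1P FLR-QSFP Adptr" with the QIB_HAS_QSFP flag. A standalone sketch of the BoardID dispatch, keeping only entries visible in the hunks (the driver's full table has more cases):

    #include <stdio.h>

    #define BOARD_QME7342 5
    #define BOARD_QMH7342 6
    #define BOARD_QMH7360 9   /* new in this diff */

    static const char *boardname_sketch(unsigned int boardid)
    {
            switch (boardid) {
            case BOARD_QMH7360:
                    return "Intel IB QDR 1P FLR-QSFP Adptr";
            case 15:
                    return "InfiniPath_QLE7342_TEST";
            default:
                    return "unknown";
            }
    }

    int main(void)
    {
            printf("%s\n", boardname_sketch(BOARD_QMH7360));
            return 0;
    }
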
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 729da39c49ed..2ee36953e234 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -140,7 +140,7 @@ int qib_create_ctxts(struct qib_devdata *dd)
140 * Allocate full ctxtcnt array, rather than just cfgctxts, because 140 * Allocate full ctxtcnt array, rather than just cfgctxts, because
141 * cleanup iterates across all possible ctxts. 141 * cleanup iterates across all possible ctxts.
142 */ 142 */
143 dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL); 143 dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
144 if (!dd->rcd) { 144 if (!dd->rcd) {
145 qib_dev_err(dd, 145 qib_dev_err(dd,
146 "Unable to allocate ctxtdata array, failing\n"); 146 "Unable to allocate ctxtdata array, failing\n");
@@ -234,6 +234,7 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
234 u8 hw_pidx, u8 port) 234 u8 hw_pidx, u8 port)
235{ 235{
236 int size; 236 int size;
237
237 ppd->dd = dd; 238 ppd->dd = dd;
238 ppd->hw_pidx = hw_pidx; 239 ppd->hw_pidx = hw_pidx;
239 ppd->port = port; /* IB port number, not index */ 240 ppd->port = port; /* IB port number, not index */
@@ -613,6 +614,7 @@ static int qib_create_workqueues(struct qib_devdata *dd)
613 ppd = dd->pport + pidx; 614 ppd = dd->pport + pidx;
614 if (!ppd->qib_wq) { 615 if (!ppd->qib_wq) {
615 char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */ 616 char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */
617
616 snprintf(wq_name, sizeof(wq_name), "qib%d_%d", 618 snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
617 dd->unit, pidx); 619 dd->unit, pidx);
618 ppd->qib_wq = 620 ppd->qib_wq =
@@ -714,6 +716,7 @@ int qib_init(struct qib_devdata *dd, int reinit)
714 716
715 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 717 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
716 int mtu; 718 int mtu;
719
717 if (lastfail) 720 if (lastfail)
718 ret = lastfail; 721 ret = lastfail;
719 ppd = dd->pport + pidx; 722 ppd = dd->pport + pidx;
@@ -931,7 +934,6 @@ static void qib_shutdown_device(struct qib_devdata *dd)
931 qib_free_pportdata(ppd); 934 qib_free_pportdata(ppd);
932 } 935 }
933 936
934 qib_update_eeprom_log(dd);
935} 937}
936 938
937/** 939/**
@@ -1026,8 +1028,7 @@ static void qib_verify_pioperf(struct qib_devdata *dd)
1026 addr = vmalloc(cnt); 1028 addr = vmalloc(cnt);
1027 if (!addr) { 1029 if (!addr) {
1028 qib_devinfo(dd->pcidev, 1030 qib_devinfo(dd->pcidev,
1029 "Couldn't get memory for checking PIO perf," 1031 "Couldn't get memory for checking PIO perf, skipping\n");
1030 " skipping\n");
1031 goto done; 1032 goto done;
1032 } 1033 }
1033 1034
@@ -1163,6 +1164,7 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
1163 1164
1164 if (!qib_cpulist_count) { 1165 if (!qib_cpulist_count) {
1165 u32 count = num_online_cpus(); 1166 u32 count = num_online_cpus();
1167
1166 qib_cpulist = kzalloc(BITS_TO_LONGS(count) * 1168 qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
1167 sizeof(long), GFP_KERNEL); 1169 sizeof(long), GFP_KERNEL);
1168 if (qib_cpulist) 1170 if (qib_cpulist)
@@ -1179,7 +1181,7 @@ bail:
1179 if (!list_empty(&dd->list)) 1181 if (!list_empty(&dd->list))
1180 list_del_init(&dd->list); 1182 list_del_init(&dd->list);
1181 ib_dealloc_device(&dd->verbs_dev.ibdev); 1183 ib_dealloc_device(&dd->verbs_dev.ibdev);
1182 return ERR_PTR(ret);; 1184 return ERR_PTR(ret);
1183} 1185}
1184 1186
1185/* 1187/*
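
Note: in qib_create_ctxts() the open-coded kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, ...) becomes kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), ...): the allocation is still zeroed, but kcalloc() also returns NULL if the count-times-size multiplication would overflow, which the open-coded multiply cannot detect. The section also drops the qib_update_eeprom_log() call from qib_shutdown_device() (part of the EEPROM-log removal noted earlier) and fixes a stray double semicolon after ERR_PTR(ret). A hedged kernel-style sketch of the kcalloc() idiom (struct qib_ctxt_sketch is invented for the example):

    #include <linux/slab.h>

    struct qib_ctxt_sketch;

    static struct qib_ctxt_sketch **alloc_ctxt_table(unsigned int nctxts)
    {
            /* zeroed like kzalloc(); NULL if nctxts * size would wrap */
            return kcalloc(nctxts, sizeof(struct qib_ctxt_sketch *),
                           GFP_KERNEL);
    }
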
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index f4918f2165ec..086616d071b9 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -168,7 +168,6 @@ skip_ibchange:
168 ppd->lastibcstat = ibcs; 168 ppd->lastibcstat = ibcs;
169 if (ev) 169 if (ev)
170 signal_ib_event(ppd, ev); 170 signal_ib_event(ppd, ev);
171 return;
172} 171}
173 172
174void qib_clear_symerror_on_linkup(unsigned long opaque) 173void qib_clear_symerror_on_linkup(unsigned long opaque)
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 3b9afccaaade..ad843c786e72 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -122,10 +122,10 @@ void qib_free_lkey(struct qib_mregion *mr)
122 if (!mr->lkey_published) 122 if (!mr->lkey_published)
123 goto out; 123 goto out;
124 if (lkey == 0) 124 if (lkey == 0)
125 rcu_assign_pointer(dev->dma_mr, NULL); 125 RCU_INIT_POINTER(dev->dma_mr, NULL);
126 else { 126 else {
127 r = lkey >> (32 - ib_qib_lkey_table_size); 127 r = lkey >> (32 - ib_qib_lkey_table_size);
128 rcu_assign_pointer(rkt->table[r], NULL); 128 RCU_INIT_POINTER(rkt->table[r], NULL);
129 } 129 }
130 qib_put_mr(mr); 130 qib_put_mr(mr);
131 mr->lkey_published = 0; 131 mr->lkey_published = 0;
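
Note: qib_free_lkey() here, and remove_qp()/qib_free_all_qps() in qib_qp.c below, switch from rcu_assign_pointer(p, NULL) to RCU_INIT_POINTER(p, NULL). rcu_assign_pointer() carries release ordering so that a reader who observes the new pointer also observes the pointed-to object's initialization; storing NULL publishes no data, so that barrier is dead weight and RCU_INIT_POINTER() omits it. A hedged kernel-style sketch of when each applies (names invented):

    #include <linux/rcupdate.h>

    struct item {
            int data;
    };

    static struct item __rcu *slot;

    static void publish(struct item *it)
    {
            it->data = 42;
            rcu_assign_pointer(slot, it);   /* orders init before publication */
    }

    static void retract(void)
    {
            RCU_INIT_POINTER(slot, NULL);   /* NULL carries no data: no barrier */
    }
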
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 636be117b578..395f4046dba2 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -152,14 +152,14 @@ void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
152 data.trap_num = trap_num; 152 data.trap_num = trap_num;
153 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 153 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
154 data.toggle_count = 0; 154 data.toggle_count = 0;
155 memset(&data.details, 0, sizeof data.details); 155 memset(&data.details, 0, sizeof(data.details));
156 data.details.ntc_257_258.lid1 = lid1; 156 data.details.ntc_257_258.lid1 = lid1;
157 data.details.ntc_257_258.lid2 = lid2; 157 data.details.ntc_257_258.lid2 = lid2;
158 data.details.ntc_257_258.key = cpu_to_be32(key); 158 data.details.ntc_257_258.key = cpu_to_be32(key);
159 data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1); 159 data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
160 data.details.ntc_257_258.qp2 = cpu_to_be32(qp2); 160 data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);
161 161
162 qib_send_trap(ibp, &data, sizeof data); 162 qib_send_trap(ibp, &data, sizeof(data));
163} 163}
164 164
165/* 165/*
@@ -176,7 +176,7 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
176 data.trap_num = IB_NOTICE_TRAP_BAD_MKEY; 176 data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
177 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 177 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
178 data.toggle_count = 0; 178 data.toggle_count = 0;
179 memset(&data.details, 0, sizeof data.details); 179 memset(&data.details, 0, sizeof(data.details));
180 data.details.ntc_256.lid = data.issuer_lid; 180 data.details.ntc_256.lid = data.issuer_lid;
181 data.details.ntc_256.method = smp->method; 181 data.details.ntc_256.method = smp->method;
182 data.details.ntc_256.attr_id = smp->attr_id; 182 data.details.ntc_256.attr_id = smp->attr_id;
@@ -198,7 +198,7 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
198 hop_cnt); 198 hop_cnt);
199 } 199 }
200 200
201 qib_send_trap(ibp, &data, sizeof data); 201 qib_send_trap(ibp, &data, sizeof(data));
202} 202}
203 203
204/* 204/*
@@ -214,11 +214,11 @@ void qib_cap_mask_chg(struct qib_ibport *ibp)
214 data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; 214 data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
215 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 215 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
216 data.toggle_count = 0; 216 data.toggle_count = 0;
217 memset(&data.details, 0, sizeof data.details); 217 memset(&data.details, 0, sizeof(data.details));
218 data.details.ntc_144.lid = data.issuer_lid; 218 data.details.ntc_144.lid = data.issuer_lid;
219 data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags); 219 data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
220 220
221 qib_send_trap(ibp, &data, sizeof data); 221 qib_send_trap(ibp, &data, sizeof(data));
222} 222}
223 223
224/* 224/*
@@ -234,11 +234,11 @@ void qib_sys_guid_chg(struct qib_ibport *ibp)
234 data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG; 234 data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
235 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 235 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
236 data.toggle_count = 0; 236 data.toggle_count = 0;
237 memset(&data.details, 0, sizeof data.details); 237 memset(&data.details, 0, sizeof(data.details));
238 data.details.ntc_145.lid = data.issuer_lid; 238 data.details.ntc_145.lid = data.issuer_lid;
239 data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid; 239 data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;
240 240
241 qib_send_trap(ibp, &data, sizeof data); 241 qib_send_trap(ibp, &data, sizeof(data));
242} 242}
243 243
244/* 244/*
@@ -254,12 +254,12 @@ void qib_node_desc_chg(struct qib_ibport *ibp)
254 data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; 254 data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
255 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 255 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
256 data.toggle_count = 0; 256 data.toggle_count = 0;
257 memset(&data.details, 0, sizeof data.details); 257 memset(&data.details, 0, sizeof(data.details));
258 data.details.ntc_144.lid = data.issuer_lid; 258 data.details.ntc_144.lid = data.issuer_lid;
259 data.details.ntc_144.local_changes = 1; 259 data.details.ntc_144.local_changes = 1;
260 data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG; 260 data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;
261 261
262 qib_send_trap(ibp, &data, sizeof data); 262 qib_send_trap(ibp, &data, sizeof(data));
263} 263}
264 264
265static int subn_get_nodedescription(struct ib_smp *smp, 265static int subn_get_nodedescription(struct ib_smp *smp,
diff --git a/drivers/infiniband/hw/qib/qib_mmap.c b/drivers/infiniband/hw/qib/qib_mmap.c
index 8b73a11d571c..146cf29a2e1d 100644
--- a/drivers/infiniband/hw/qib/qib_mmap.c
+++ b/drivers/infiniband/hw/qib/qib_mmap.c
@@ -134,7 +134,7 @@ struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev,
134 void *obj) { 134 void *obj) {
135 struct qib_mmap_info *ip; 135 struct qib_mmap_info *ip;
136 136
137 ip = kmalloc(sizeof *ip, GFP_KERNEL); 137 ip = kmalloc(sizeof(*ip), GFP_KERNEL);
138 if (!ip) 138 if (!ip)
139 goto bail; 139 goto bail;
140 140
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
index a77fb4fb14e4..c4473db46699 100644
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -55,7 +55,7 @@ static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
55 55
56 m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; 56 m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
57 for (; i < m; i++) { 57 for (; i < m; i++) {
58 mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL); 58 mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
59 if (!mr->map[i]) 59 if (!mr->map[i])
60 goto bail; 60 goto bail;
61 } 61 }
@@ -104,7 +104,7 @@ struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
104 goto bail; 104 goto bail;
105 } 105 }
106 106
107 mr = kzalloc(sizeof *mr, GFP_KERNEL); 107 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
108 if (!mr) { 108 if (!mr) {
109 ret = ERR_PTR(-ENOMEM); 109 ret = ERR_PTR(-ENOMEM);
110 goto bail; 110 goto bail;
@@ -143,7 +143,7 @@ static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
143 143
144 /* Allocate struct plus pointers to first level page tables. */ 144 /* Allocate struct plus pointers to first level page tables. */
145 m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; 145 m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
146 mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL); 146 mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
147 if (!mr) 147 if (!mr)
148 goto bail; 148 goto bail;
149 149
@@ -347,7 +347,7 @@ qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
347 if (size > PAGE_SIZE) 347 if (size > PAGE_SIZE)
348 return ERR_PTR(-EINVAL); 348 return ERR_PTR(-EINVAL);
349 349
350 pl = kzalloc(sizeof *pl, GFP_KERNEL); 350 pl = kzalloc(sizeof(*pl), GFP_KERNEL);
351 if (!pl) 351 if (!pl)
352 return ERR_PTR(-ENOMEM); 352 return ERR_PTR(-ENOMEM);
353 353
@@ -386,7 +386,7 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
386 386
387 /* Allocate struct plus pointers to first level page tables. */ 387 /* Allocate struct plus pointers to first level page tables. */
388 m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ; 388 m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
389 fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL); 389 fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
390 if (!fmr) 390 if (!fmr)
391 goto bail; 391 goto bail;
392 392
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 61a0046efb76..4758a3801ae8 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -210,7 +210,7 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
210 /* We can't pass qib_msix_entry array to qib_msix_setup 210 /* We can't pass qib_msix_entry array to qib_msix_setup
211 * so use a dummy msix_entry array and copy the allocated 211 * so use a dummy msix_entry array and copy the allocated
212 * irq back to the qib_msix_entry array. */ 212 * irq back to the qib_msix_entry array. */
213 msix_entry = kmalloc(nvec * sizeof(*msix_entry), GFP_KERNEL); 213 msix_entry = kcalloc(nvec, sizeof(*msix_entry), GFP_KERNEL);
214 if (!msix_entry) 214 if (!msix_entry)
215 goto do_intx; 215 goto do_intx;
216 216
@@ -234,8 +234,10 @@ free_msix_entry:
234 kfree(msix_entry); 234 kfree(msix_entry);
235 235
236do_intx: 236do_intx:
237 qib_dev_err(dd, "pci_enable_msix_range %d vectors failed: %d, " 237 qib_dev_err(
238 "falling back to INTx\n", nvec, ret); 238 dd,
239 "pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n",
240 nvec, ret);
239 *msixcnt = 0; 241 *msixcnt = 0;
240 qib_enable_intx(dd->pcidev); 242 qib_enable_intx(dd->pcidev);
241} 243}
@@ -459,6 +461,7 @@ void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
459void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline) 461void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
460{ 462{
461 int r; 463 int r;
464
462 r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, 465 r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
463 dd->pcibar0); 466 dd->pcibar0);
464 if (r) 467 if (r)
@@ -696,6 +699,7 @@ static void
696qib_pci_resume(struct pci_dev *pdev) 699qib_pci_resume(struct pci_dev *pdev)
697{ 700{
698 struct qib_devdata *dd = pci_get_drvdata(pdev); 701 struct qib_devdata *dd = pci_get_drvdata(pdev);
702
699 qib_devinfo(pdev, "QIB resume function called\n"); 703 qib_devinfo(pdev, "QIB resume function called\n");
700 pci_cleanup_aer_uncorrect_error_status(pdev); 704 pci_cleanup_aer_uncorrect_error_status(pdev);
701 /* 705 /*
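
Note: qib_msix_setup() gets the same kcalloc() conversion as qib_init.c, and the split log string in the INTx fallback path is rejoined. checkpatch flags quoted strings broken across source lines because the concatenated message can then no longer be found by grepping the source for the text seen in dmesg. A userspace analogue (the argument values are made up; adjacent literals compile to identical bytes, only greppability differs):

    #include <stdio.h>

    int main(void)
    {
            int nvec = 8, ret = -38;    /* illustrative values only */

            fprintf(stderr,
                    "pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n",
                    nvec, ret);
            return 0;
    }
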
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 6ddc0264aad2..4fa88ba2963e 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -255,10 +255,10 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
255 255
256 if (rcu_dereference_protected(ibp->qp0, 256 if (rcu_dereference_protected(ibp->qp0,
257 lockdep_is_held(&dev->qpt_lock)) == qp) { 257 lockdep_is_held(&dev->qpt_lock)) == qp) {
258 rcu_assign_pointer(ibp->qp0, NULL); 258 RCU_INIT_POINTER(ibp->qp0, NULL);
259 } else if (rcu_dereference_protected(ibp->qp1, 259 } else if (rcu_dereference_protected(ibp->qp1,
260 lockdep_is_held(&dev->qpt_lock)) == qp) { 260 lockdep_is_held(&dev->qpt_lock)) == qp) {
261 rcu_assign_pointer(ibp->qp1, NULL); 261 RCU_INIT_POINTER(ibp->qp1, NULL);
262 } else { 262 } else {
263 struct qib_qp *q; 263 struct qib_qp *q;
264 struct qib_qp __rcu **qpp; 264 struct qib_qp __rcu **qpp;
@@ -269,7 +269,7 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
269 lockdep_is_held(&dev->qpt_lock))) != NULL; 269 lockdep_is_held(&dev->qpt_lock))) != NULL;
270 qpp = &q->next) 270 qpp = &q->next)
271 if (q == qp) { 271 if (q == qp) {
272 rcu_assign_pointer(*qpp, 272 RCU_INIT_POINTER(*qpp,
273 rcu_dereference_protected(qp->next, 273 rcu_dereference_protected(qp->next,
274 lockdep_is_held(&dev->qpt_lock))); 274 lockdep_is_held(&dev->qpt_lock)));
275 removed = 1; 275 removed = 1;
@@ -315,7 +315,7 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
315 for (n = 0; n < dev->qp_table_size; n++) { 315 for (n = 0; n < dev->qp_table_size; n++) {
316 qp = rcu_dereference_protected(dev->qp_table[n], 316 qp = rcu_dereference_protected(dev->qp_table[n],
317 lockdep_is_held(&dev->qpt_lock)); 317 lockdep_is_held(&dev->qpt_lock));
318 rcu_assign_pointer(dev->qp_table[n], NULL); 318 RCU_INIT_POINTER(dev->qp_table[n], NULL);
319 319
320 for (; qp; qp = rcu_dereference_protected(qp->next, 320 for (; qp; qp = rcu_dereference_protected(qp->next,
321 lockdep_is_held(&dev->qpt_lock))) 321 lockdep_is_held(&dev->qpt_lock)))
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index fa71b1e666c5..5e27f76805e2 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -81,7 +81,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
81 * Module could take up to 2 Msec to respond to MOD_SEL, and there 81 * Module could take up to 2 Msec to respond to MOD_SEL, and there
82 * is no way to tell if it is ready, so we must wait. 82 * is no way to tell if it is ready, so we must wait.
83 */ 83 */
84 msleep(2); 84 msleep(20);
85 85
86 /* Make sure TWSI bus is in sane state. */ 86 /* Make sure TWSI bus is in sane state. */
87 ret = qib_twsi_reset(dd); 87 ret = qib_twsi_reset(dd);
@@ -99,6 +99,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
99 while (cnt < len) { 99 while (cnt < len) {
100 unsigned in_page; 100 unsigned in_page;
101 int wlen = len - cnt; 101 int wlen = len - cnt;
102
102 in_page = addr % QSFP_PAGESIZE; 103 in_page = addr % QSFP_PAGESIZE;
103 if ((in_page + wlen) > QSFP_PAGESIZE) 104 if ((in_page + wlen) > QSFP_PAGESIZE)
104 wlen = QSFP_PAGESIZE - in_page; 105 wlen = QSFP_PAGESIZE - in_page;
@@ -139,7 +140,7 @@ deselect:
139 else if (pass) 140 else if (pass)
140 qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass); 141 qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);
141 142
142 msleep(2); 143 msleep(20);
143 144
144bail: 145bail:
145 mutex_unlock(&dd->eep_lock); 146 mutex_unlock(&dd->eep_lock);
@@ -189,7 +190,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
189 * Module could take up to 2 Msec to respond to MOD_SEL, 190 * Module could take up to 2 Msec to respond to MOD_SEL,
190 * and there is no way to tell if it is ready, so we must wait. 191 * and there is no way to tell if it is ready, so we must wait.
191 */ 192 */
192 msleep(2); 193 msleep(20);
193 194
194 /* Make sure TWSI bus is in sane state. */ 195 /* Make sure TWSI bus is in sane state. */
195 ret = qib_twsi_reset(dd); 196 ret = qib_twsi_reset(dd);
@@ -206,6 +207,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
206 while (cnt < len) { 207 while (cnt < len) {
207 unsigned in_page; 208 unsigned in_page;
208 int wlen = len - cnt; 209 int wlen = len - cnt;
210
209 in_page = addr % QSFP_PAGESIZE; 211 in_page = addr % QSFP_PAGESIZE;
210 if ((in_page + wlen) > QSFP_PAGESIZE) 212 if ((in_page + wlen) > QSFP_PAGESIZE)
211 wlen = QSFP_PAGESIZE - in_page; 213 wlen = QSFP_PAGESIZE - in_page;
@@ -234,7 +236,7 @@ deselect:
234 * going away, and there is no way to tell if it is ready. 236 * going away, and there is no way to tell if it is ready.
235 * so we must wait. 237 * so we must wait.
236 */ 238 */
237 msleep(2); 239 msleep(20);
238 240
239bail: 241bail:
240 mutex_unlock(&dd->eep_lock); 242 mutex_unlock(&dd->eep_lock);
@@ -296,6 +298,7 @@ int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
296 * set the page to zero, Even if it already appears to be zero. 298 * set the page to zero, Even if it already appears to be zero.
297 */ 299 */
298 u8 poke = 0; 300 u8 poke = 0;
301
299 ret = qib_qsfp_write(ppd, 127, &poke, 1); 302 ret = qib_qsfp_write(ppd, 127, &poke, 1);
300 udelay(50); 303 udelay(50);
301 if (ret != 1) { 304 if (ret != 1) {
@@ -480,7 +483,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
480 udelay(20); /* Generous RST dwell */ 483 udelay(20); /* Generous RST dwell */
481 484
482 dd->f_gpio_mod(dd, mask, mask, mask); 485 dd->f_gpio_mod(dd, mask, mask, mask);
483 return;
484} 486}
485 487
486void qib_qsfp_deinit(struct qib_qsfp_data *qd) 488void qib_qsfp_deinit(struct qib_qsfp_data *qd)
@@ -540,6 +542,7 @@ int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
540 542
541 while (bidx < QSFP_DEFAULT_HDR_CNT) { 543 while (bidx < QSFP_DEFAULT_HDR_CNT) {
542 int iidx; 544 int iidx;
545
543 ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK); 546 ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
544 if (ret < 0) 547 if (ret < 0)
545 goto bail; 548 goto bail;
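
Note: all four msleep(2) calls around QSFP MOD_SEL become msleep(20). msleep() rounds its argument up to jiffies and the timer may fire a tick late, so on an HZ=100 kernel msleep(2) already slept on the order of 20 ms; the new value states that delay explicitly instead of relying on rounding. (Documentation/timers/timers-howto suggests usleep_range() for waits this short; this reading of the motivation is inferred, the diff itself gives no rationale.) A standalone back-of-the-envelope check, with HZ assumed:

    #include <stdio.h>

    int main(void)
    {
            int hz = 100;                    /* assumed kernel config */
            int req_ms = 2;
            /* round up to a tick, plus up to one tick of timer slack */
            int ticks = (req_ms * hz + 999) / 1000 + 1;

            printf("msleep(%d) can take ~%d ms at HZ=%d\n",
                   req_ms, ticks * 1000 / hz, hz);
            return 0;
    }
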
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 2f2501890c4e..4544d6f88ad7 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1017,7 +1017,7 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
1017 /* Post a send completion queue entry if requested. */ 1017 /* Post a send completion queue entry if requested. */
1018 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || 1018 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
1019 (wqe->wr.send_flags & IB_SEND_SIGNALED)) { 1019 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
1020 memset(&wc, 0, sizeof wc); 1020 memset(&wc, 0, sizeof(wc));
1021 wc.wr_id = wqe->wr.wr_id; 1021 wc.wr_id = wqe->wr.wr_id;
1022 wc.status = IB_WC_SUCCESS; 1022 wc.status = IB_WC_SUCCESS;
1023 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; 1023 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
@@ -1073,7 +1073,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
1073 /* Post a send completion queue entry if requested. */ 1073 /* Post a send completion queue entry if requested. */
1074 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || 1074 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
1075 (wqe->wr.send_flags & IB_SEND_SIGNALED)) { 1075 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
1076 memset(&wc, 0, sizeof wc); 1076 memset(&wc, 0, sizeof(wc));
1077 wc.wr_id = wqe->wr.wr_id; 1077 wc.wr_id = wqe->wr.wr_id;
1078 wc.status = IB_WC_SUCCESS; 1078 wc.status = IB_WC_SUCCESS;
1079 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; 1079 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 4c07a8b34ffe..f42bd0f47577 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -247,8 +247,8 @@ static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
247 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 247 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
248 248
249 return ppd->guid; 249 return ppd->guid;
250 } else 250 }
251 return ibp->guids[index - 1]; 251 return ibp->guids[index - 1];
252} 252}
253 253
254static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id) 254static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
@@ -420,7 +420,7 @@ again:
420 goto serr; 420 goto serr;
421 } 421 }
422 422
423 memset(&wc, 0, sizeof wc); 423 memset(&wc, 0, sizeof(wc));
424 send_status = IB_WC_SUCCESS; 424 send_status = IB_WC_SUCCESS;
425 425
426 release = 1; 426 release = 1;
@@ -792,7 +792,7 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
792 status != IB_WC_SUCCESS) { 792 status != IB_WC_SUCCESS) {
793 struct ib_wc wc; 793 struct ib_wc wc;
794 794
795 memset(&wc, 0, sizeof wc); 795 memset(&wc, 0, sizeof(wc));
796 wc.wr_id = wqe->wr.wr_id; 796 wc.wr_id = wqe->wr.wr_id;
797 wc.status = status; 797 wc.status = status;
798 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; 798 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
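
Note: get_sguid() loses an else after a return, checkpatch's "else is not generally useful after a break or return", and the file picks up the same memset(&wc, 0, sizeof(wc)) parenthesization seen elsewhere. A minimal sketch of the else fix (function invented):

    long pick(int use_first, long a, long b)
    {
            if (use_first)
                    return a;
            return b;       /* was: } else return b; */
    }
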
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index 911205d3d5a0..c72775f27212 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -259,6 +259,7 @@ static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
259 * it again during startup. 259 * it again during startup.
260 */ 260 */
261 u64 val; 261 u64 val;
262
262 rst_val &= ~(1ULL); 263 rst_val &= ~(1ULL);
263 qib_write_kreg(dd, kr_hwerrmask, 264 qib_write_kreg(dd, kr_hwerrmask,
264 dd->cspec->hwerrmask & 265 dd->cspec->hwerrmask &
@@ -590,6 +591,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
590 * Both should be clear 591 * Both should be clear
591 */ 592 */
592 u64 newval = 0; 593 u64 newval = 0;
594
593 qib_write_kreg(dd, acc, newval); 595 qib_write_kreg(dd, acc, newval);
594 /* First read after write is not trustworthy */ 596 /* First read after write is not trustworthy */
595 pollval = qib_read_kreg32(dd, acc); 597 pollval = qib_read_kreg32(dd, acc);
@@ -601,6 +603,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
601 /* Need to claim */ 603 /* Need to claim */
602 u64 pollval; 604 u64 pollval;
603 u64 newval = EPB_ACC_REQ | oct_sel; 605 u64 newval = EPB_ACC_REQ | oct_sel;
606
604 qib_write_kreg(dd, acc, newval); 607 qib_write_kreg(dd, acc, newval);
605 /* First read after write is not trustworthy */ 608 /* First read after write is not trustworthy */
606 pollval = qib_read_kreg32(dd, acc); 609 pollval = qib_read_kreg32(dd, acc);
@@ -812,6 +815,7 @@ static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
812 if (!sofar) { 815 if (!sofar) {
813 /* Only set address at start of chunk */ 816 /* Only set address at start of chunk */
814 int addrbyte = (addr + sofar) >> 8; 817 int addrbyte = (addr + sofar) >> 8;
818
815 transval = csbit | EPB_MADDRH | addrbyte; 819 transval = csbit | EPB_MADDRH | addrbyte;
816 tries = epb_trans(dd, trans, transval, 820 tries = epb_trans(dd, trans, transval,
817 &transval); 821 &transval);
@@ -922,7 +926,7 @@ qib_sd7220_ib_vfy(struct qib_devdata *dd, const struct firmware *fw)
922 * IRQ not set up at this point in init, so we poll. 926 * IRQ not set up at this point in init, so we poll.
923 */ 927 */
924#define IB_SERDES_TRIM_DONE (1ULL << 11) 928#define IB_SERDES_TRIM_DONE (1ULL << 11)
925#define TRIM_TMO (30) 929#define TRIM_TMO (15)
926 930
927static int qib_sd_trimdone_poll(struct qib_devdata *dd) 931static int qib_sd_trimdone_poll(struct qib_devdata *dd)
928{ 932{
@@ -940,7 +944,7 @@ static int qib_sd_trimdone_poll(struct qib_devdata *dd)
940 ret = 1; 944 ret = 1;
941 break; 945 break;
942 } 946 }
943 msleep(10); 947 msleep(20);
944 } 948 }
945 if (trim_tmo >= TRIM_TMO) { 949 if (trim_tmo >= TRIM_TMO) {
946 qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo); 950 qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
@@ -1071,6 +1075,7 @@ static int qib_sd_setvals(struct qib_devdata *dd)
1071 dds_reg_map >>= 4; 1075 dds_reg_map >>= 4;
1072 for (midx = 0; midx < DDS_ROWS; ++midx) { 1076 for (midx = 0; midx < DDS_ROWS; ++midx) {
1073 u64 __iomem *daddr = taddr + ((midx << 4) + idx); 1077 u64 __iomem *daddr = taddr + ((midx << 4) + idx);
1078
1074 data = dds_init_vals[midx].reg_vals[idx]; 1079 data = dds_init_vals[midx].reg_vals[idx];
1075 writeq(data, daddr); 1080 writeq(data, daddr);
1076 mmiowb(); 1081 mmiowb();
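
Note: the TRIMDONE poll in qib_sd7220.c is retuned rather than changed. TRIM_TMO drops from 30 to 15 while the per-iteration sleep goes from msleep(10) to msleep(20), so the worst-case wait stays about 300 ms but each sleep is long enough for msleep() to honor accurately (the same jiffy-granularity point as the QSFP delays above). Checking the arithmetic stands alone:

    #include <assert.h>

    int main(void)
    {
            /* old budget == new budget, in milliseconds */
            assert(30 * 10 == 15 * 20);
            return 0;
    }
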
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 3c8e4e3caca6..81f56cdff2bc 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -586,8 +586,8 @@ static ssize_t show_serial(struct device *device,
586 container_of(device, struct qib_ibdev, ibdev.dev); 586 container_of(device, struct qib_ibdev, ibdev.dev);
587 struct qib_devdata *dd = dd_from_dev(dev); 587 struct qib_devdata *dd = dd_from_dev(dev);
588 588
589 buf[sizeof dd->serial] = '\0'; 589 buf[sizeof(dd->serial)] = '\0';
590 memcpy(buf, dd->serial, sizeof dd->serial); 590 memcpy(buf, dd->serial, sizeof(dd->serial));
591 strcat(buf, "\n"); 591 strcat(buf, "\n");
592 return strlen(buf); 592 return strlen(buf);
593} 593}
@@ -611,28 +611,6 @@ bail:
611 return ret < 0 ? ret : count; 611 return ret < 0 ? ret : count;
612} 612}
613 613
614static ssize_t show_logged_errs(struct device *device,
615 struct device_attribute *attr, char *buf)
616{
617 struct qib_ibdev *dev =
618 container_of(device, struct qib_ibdev, ibdev.dev);
619 struct qib_devdata *dd = dd_from_dev(dev);
620 int idx, count;
621
622 /* force consistency with actual EEPROM */
623 if (qib_update_eeprom_log(dd) != 0)
624 return -ENXIO;
625
626 count = 0;
627 for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
628 count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
629 dd->eep_st_errs[idx],
630 idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
631 }
632
633 return count;
634}
635
636/* 614/*
637 * Dump tempsense regs. in decimal, to ease shell-scripts. 615 * Dump tempsense regs. in decimal, to ease shell-scripts.
638 */ 616 */
@@ -679,7 +657,6 @@ static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
679static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL); 657static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
680static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL); 658static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
681static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); 659static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
682static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
683static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL); 660static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
684static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL); 661static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
685static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset); 662static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
@@ -693,7 +670,6 @@ static struct device_attribute *qib_attributes[] = {
693 &dev_attr_nfreectxts, 670 &dev_attr_nfreectxts,
694 &dev_attr_serial, 671 &dev_attr_serial,
695 &dev_attr_boardversion, 672 &dev_attr_boardversion,
696 &dev_attr_logged_errors,
697 &dev_attr_tempsense, 673 &dev_attr_tempsense,
698 &dev_attr_localbus_info, 674 &dev_attr_localbus_info,
699 &dev_attr_chip_reset, 675 &dev_attr_chip_reset,
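
The show_serial() hunk above switches to the parenthesized sizeof() form; the handler itself copies a fixed-size, possibly unterminated serial into the sysfs buffer. A sketch of the same kind of read-only attribute, hedged to a hypothetical demo_devdata and using scnprintf() to bound and terminate the output in one step:

        #include <linux/device.h>
        #include <linux/sysfs.h>
        #include <linux/types.h>

        struct demo_devdata {                   /* hypothetical driver data */
                u8 serial[16];                  /* may lack a terminating NUL */
        };

        static ssize_t serial_show(struct device *device,
                                   struct device_attribute *attr, char *buf)
        {
                struct demo_devdata *dd = dev_get_drvdata(device);

                /* "%.*s" bounds the copy, and scnprintf() NUL-terminates. */
                return scnprintf(buf, PAGE_SIZE, "%.*s\n",
                                 (int)sizeof(dd->serial), dd->serial);
        }
        static DEVICE_ATTR_RO(serial);
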
diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c
index 647f7beb1b0a..f5698664419b 100644
--- a/drivers/infiniband/hw/qib/qib_twsi.c
+++ b/drivers/infiniband/hw/qib/qib_twsi.c
@@ -105,6 +105,7 @@ static void scl_out(struct qib_devdata *dd, u8 bit)
105 udelay(2); 105 udelay(2);
106 else { 106 else {
107 int rise_usec; 107 int rise_usec;
108
108 for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) { 109 for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
109 if (mask & dd->f_gpio_mod(dd, 0, 0, 0)) 110 if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
110 break; 111 break;
@@ -326,6 +327,7 @@ int qib_twsi_reset(struct qib_devdata *dd)
326static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags) 327static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
327{ 328{
328 int ret = 1; 329 int ret = 1;
330
329 if (flags & QIB_TWSI_START) 331 if (flags & QIB_TWSI_START)
330 start_seq(dd); 332 start_seq(dd);
331 333
@@ -435,8 +437,7 @@ int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
435 int sub_len; 437 int sub_len;
436 const u8 *bp = buffer; 438 const u8 *bp = buffer;
437 int max_wait_time, i; 439 int max_wait_time, i;
438 int ret; 440 int ret = 1;
439 ret = 1;
440 441
441 while (len > 0) { 442 while (len > 0) {
442 if (dev == QIB_TWSI_NO_DEV) { 443 if (dev == QIB_TWSI_NO_DEV) {
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
index 31d3561400a4..eface3b3dacf 100644
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -180,6 +180,7 @@ void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
180 180
181 for (i = 0; i < cnt; i++) { 181 for (i = 0; i < cnt; i++) {
182 int which; 182 int which;
183
183 if (!test_bit(i, mask)) 184 if (!test_bit(i, mask))
184 continue; 185 continue;
185 /* 186 /*
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index aaf7039f8ed2..26243b722b5e 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -127,7 +127,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
127 * present on the wire. 127 * present on the wire.
128 */ 128 */
129 length = swqe->length; 129 length = swqe->length;
130 memset(&wc, 0, sizeof wc); 130 memset(&wc, 0, sizeof(wc));
131 wc.byte_len = length + sizeof(struct ib_grh); 131 wc.byte_len = length + sizeof(struct ib_grh);
132 132
133 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) { 133 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index d2806cae234c..3e0677c51276 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -50,7 +50,7 @@
50/* expected size of headers (for dma_pool) */ 50/* expected size of headers (for dma_pool) */
51#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64 51#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
52/* attempt to drain the queue for 5 secs */ 52/* attempt to drain the queue for 5 secs */
53#define QIB_USER_SDMA_DRAIN_TIMEOUT 500 53#define QIB_USER_SDMA_DRAIN_TIMEOUT 250
54 54
55/* 55/*
56 * track how many times a process opens this driver. 56 * track how many times a process opens this driver.
@@ -226,6 +226,7 @@ qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
226 sdma_rb_node->refcount++; 226 sdma_rb_node->refcount++;
227 } else { 227 } else {
228 int ret; 228 int ret;
229
229 sdma_rb_node = kmalloc(sizeof( 230 sdma_rb_node = kmalloc(sizeof(
230 struct qib_user_sdma_rb_node), GFP_KERNEL); 231 struct qib_user_sdma_rb_node), GFP_KERNEL);
231 if (!sdma_rb_node) 232 if (!sdma_rb_node)
@@ -936,6 +937,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
936 937
937 if (tiddma) { 938 if (tiddma) {
938 char *tidsm = (char *)pkt + pktsize; 939 char *tidsm = (char *)pkt + pktsize;
940
939 cfur = copy_from_user(tidsm, 941 cfur = copy_from_user(tidsm,
940 iov[idx].iov_base, tidsmsize); 942 iov[idx].iov_base, tidsmsize);
941 if (cfur) { 943 if (cfur) {
@@ -1142,7 +1144,7 @@ void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
1142 qib_user_sdma_hwqueue_clean(ppd); 1144 qib_user_sdma_hwqueue_clean(ppd);
1143 qib_user_sdma_queue_clean(ppd, pq); 1145 qib_user_sdma_queue_clean(ppd, pq);
1144 mutex_unlock(&pq->lock); 1146 mutex_unlock(&pq->lock);
1145 msleep(10); 1147 msleep(20);
1146 } 1148 }
1147 1149
1148 if (pq->num_pending || pq->num_sending) { 1150 if (pq->num_pending || pq->num_sending) {
@@ -1316,8 +1318,6 @@ retry:
1316 1318
1317 if (nfree && !list_empty(pktlist)) 1319 if (nfree && !list_empty(pktlist))
1318 goto retry; 1320 goto retry;
1319
1320 return;
1321} 1321}
1322 1322
1323/* pq->lock must be held, get packets on the wire... */ 1323/* pq->lock must be held, get packets on the wire... */
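
qib_user_sdma_queue_drain() above sleeps with pq->lock dropped between cleanup passes (250 passes x 20 ms preserves the documented 5-second budget). A rough sketch of that drain shape, with illustrative names and an assumed demo_queue_clean() helper; the unlocked num_pending read is deliberate, matching the original's opportunistic recheck:

        #include <linux/delay.h>
        #include <linux/mutex.h>

        #define DEMO_DRAIN_TRIES        250     /* 250 x 20 ms ~= the 5 s budget */

        struct demo_queue {                     /* hypothetical queue state */
                struct mutex lock;
                int num_pending;
        };

        static void demo_queue_clean(struct demo_queue *q);     /* assumed helper */

        static void demo_queue_drain(struct demo_queue *q)
        {
                int tries = DEMO_DRAIN_TRIES;

                while (q->num_pending && tries--) {
                        mutex_lock(&q->lock);
                        demo_queue_clean(q);
                        mutex_unlock(&q->lock);
                        msleep(20);     /* lock dropped so producers can finish */
                }
        }
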
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 9bcfbd842980..4a3599890ea5 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1342,6 +1342,7 @@ static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
1342done: 1342done:
1343 if (dd->flags & QIB_USE_SPCL_TRIG) { 1343 if (dd->flags & QIB_USE_SPCL_TRIG) {
1344 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023; 1344 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
1345
1345 qib_flush_wc(); 1346 qib_flush_wc();
1346 __raw_writel(0xaebecede, piobuf_orig + spcl_off); 1347 __raw_writel(0xaebecede, piobuf_orig + spcl_off);
1347 } 1348 }
@@ -1744,7 +1745,7 @@ static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
1744 * we allow allocations of more than we report for this value. 1745 * we allow allocations of more than we report for this value.
1745 */ 1746 */
1746 1747
1747 pd = kmalloc(sizeof *pd, GFP_KERNEL); 1748 pd = kmalloc(sizeof(*pd), GFP_KERNEL);
1748 if (!pd) { 1749 if (!pd) {
1749 ret = ERR_PTR(-ENOMEM); 1750 ret = ERR_PTR(-ENOMEM);
1750 goto bail; 1751 goto bail;
@@ -1829,7 +1830,7 @@ static struct ib_ah *qib_create_ah(struct ib_pd *pd,
1829 goto bail; 1830 goto bail;
1830 } 1831 }
1831 1832
1832 ah = kmalloc(sizeof *ah, GFP_ATOMIC); 1833 ah = kmalloc(sizeof(*ah), GFP_ATOMIC);
1833 if (!ah) { 1834 if (!ah) {
1834 ret = ERR_PTR(-ENOMEM); 1835 ret = ERR_PTR(-ENOMEM);
1835 goto bail; 1836 goto bail;
@@ -1862,7 +1863,7 @@ struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
1862 struct ib_ah *ah = ERR_PTR(-EINVAL); 1863 struct ib_ah *ah = ERR_PTR(-EINVAL);
1863 struct qib_qp *qp0; 1864 struct qib_qp *qp0;
1864 1865
1865 memset(&attr, 0, sizeof attr); 1866 memset(&attr, 0, sizeof(attr));
1866 attr.dlid = dlid; 1867 attr.dlid = dlid;
1867 attr.port_num = ppd_from_ibp(ibp)->port; 1868 attr.port_num = ppd_from_ibp(ibp)->port;
1868 rcu_read_lock(); 1869 rcu_read_lock();
@@ -1977,7 +1978,7 @@ static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
1977 struct qib_ucontext *context; 1978 struct qib_ucontext *context;
1978 struct ib_ucontext *ret; 1979 struct ib_ucontext *ret;
1979 1980
1980 context = kmalloc(sizeof *context, GFP_KERNEL); 1981 context = kmalloc(sizeof(*context), GFP_KERNEL);
1981 if (!context) { 1982 if (!context) {
1982 ret = ERR_PTR(-ENOMEM); 1983 ret = ERR_PTR(-ENOMEM);
1983 goto bail; 1984 goto bail;
@@ -2054,7 +2055,9 @@ int qib_register_ib_device(struct qib_devdata *dd)
2054 2055
2055 dev->qp_table_size = ib_qib_qp_table_size; 2056 dev->qp_table_size = ib_qib_qp_table_size;
2056 get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd)); 2057 get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
2057 dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table, 2058 dev->qp_table = kmalloc_array(
2059 dev->qp_table_size,
2060 sizeof(*dev->qp_table),
2058 GFP_KERNEL); 2061 GFP_KERNEL);
2059 if (!dev->qp_table) { 2062 if (!dev->qp_table) {
2060 ret = -ENOMEM; 2063 ret = -ENOMEM;
@@ -2122,7 +2125,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
2122 for (i = 0; i < ppd->sdma_descq_cnt; i++) { 2125 for (i = 0; i < ppd->sdma_descq_cnt; i++) {
2123 struct qib_verbs_txreq *tx; 2126 struct qib_verbs_txreq *tx;
2124 2127
2125 tx = kzalloc(sizeof *tx, GFP_KERNEL); 2128 tx = kzalloc(sizeof(*tx), GFP_KERNEL);
2126 if (!tx) { 2129 if (!tx) {
2127 ret = -ENOMEM; 2130 ret = -ENOMEM;
2128 goto err_tx; 2131 goto err_tx;
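
The qp_table hunk above replaces an open-coded `n * sizeof(...)` multiply with kmalloc_array(), which returns NULL on multiplication overflow instead of quietly allocating a short buffer. A minimal illustration (the entry type is hypothetical):

        #include <linux/slab.h>

        struct demo_entry;                      /* hypothetical table element */

        static struct demo_entry **demo_alloc_table(size_t n)
        {
                /*
                 * Unlike kmalloc(n * sizeof(...)), kmalloc_array() detects
                 * overflow in the multiplication and returns NULL instead
                 * of handing back a too-small buffer.
                 */
                return kmalloc_array(n, sizeof(struct demo_entry *), GFP_KERNEL);
        }

kcalloc() is the zero-initializing variant with the same overflow check.
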
diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
index dabb697b1c2a..f8ea069a3eaf 100644
--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
@@ -43,7 +43,7 @@ static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
43{ 43{
44 struct qib_mcast_qp *mqp; 44 struct qib_mcast_qp *mqp;
45 45
46 mqp = kmalloc(sizeof *mqp, GFP_KERNEL); 46 mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
47 if (!mqp) 47 if (!mqp)
48 goto bail; 48 goto bail;
49 49
@@ -75,7 +75,7 @@ static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
75{ 75{
76 struct qib_mcast *mcast; 76 struct qib_mcast *mcast;
77 77
78 mcast = kmalloc(sizeof *mcast, GFP_KERNEL); 78 mcast = kmalloc(sizeof(*mcast), GFP_KERNEL);
79 if (!mcast) 79 if (!mcast)
80 goto bail; 80 goto bail;
81 81
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
index 1d7281c5a02e..81b225f2300a 100644
--- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c
+++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
@@ -72,6 +72,7 @@ int qib_enable_wc(struct qib_devdata *dd)
72 if (dd->piobcnt2k && dd->piobcnt4k) { 72 if (dd->piobcnt2k && dd->piobcnt4k) {
73 /* 2 sizes for chip */ 73 /* 2 sizes for chip */
74 unsigned long pio2kbase, pio4kbase; 74 unsigned long pio2kbase, pio4kbase;
75
75 pio2kbase = dd->piobufbase & 0xffffffffUL; 76 pio2kbase = dd->piobufbase & 0xffffffffUL;
76 pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL; 77 pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
77 if (pio2kbase < pio4kbase) { 78 if (pio2kbase < pio4kbase) {
@@ -91,7 +92,7 @@ int qib_enable_wc(struct qib_devdata *dd)
91 } 92 }
92 93
93 for (bits = 0; !(piolen & (1ULL << bits)); bits++) 94 for (bits = 0; !(piolen & (1ULL << bits)); bits++)
94 /* do nothing */ ; 95 ; /* do nothing */
95 96
96 if (piolen != (1ULL << bits)) { 97 if (piolen != (1ULL << bits)) {
97 piolen >>= bits; 98 piolen >>= bits;
@@ -100,8 +101,8 @@ int qib_enable_wc(struct qib_devdata *dd)
100 piolen = 1ULL << (bits + 1); 101 piolen = 1ULL << (bits + 1);
101 } 102 }
102 if (pioaddr & (piolen - 1)) { 103 if (pioaddr & (piolen - 1)) {
103 u64 atmp; 104 u64 atmp = pioaddr & ~(piolen - 1);
104 atmp = pioaddr & ~(piolen - 1); 105
105 if (atmp < addr || (atmp + piolen) > (addr + len)) { 106 if (atmp < addr || (atmp + piolen) > (addr + len)) {
106 qib_dev_err(dd, 107 qib_dev_err(dd,
107 "No way to align address/size (%llx/%llx), no WC mtrr\n", 108 "No way to align address/size (%llx/%llx), no WC mtrr\n",
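
qib_enable_wc() must hand the MTRR code a power-of-two-sized, size-aligned window, which is what the bit-scanning loop above computes. A simplified, shrink-only sketch of the same constraint using fls64()/ALIGN() (the driver additionally rounds the size up when most of the region would be covered); assumes len > 0:

        #include <linux/bitops.h>
        #include <linux/errno.h>
        #include <linux/kernel.h>

        /*
         * Shrink (addr, len) to a power-of-two-sized, size-aligned window
         * inside the original range, as MTRRs require.
         */
        static int demo_wc_window(u64 addr, u64 len, u64 *base_out, u64 *len_out)
        {
                u64 size = 1ULL << (fls64(len) - 1);    /* largest pow2 <= len */
                u64 base = ALIGN(addr, size);           /* round start up */

                while (size && base + size > addr + len) {
                        size >>= 1;
                        base = ALIGN(addr, size);
                }
                if (!size)
                        return -EINVAL;

                *base_out = base;
                *len_out = size;
                return 0;
        }
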
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 5ce26817e7e1..b47aea1094b2 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -654,7 +654,9 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
654 enum dma_data_direction dma_dir); 654 enum dma_data_direction dma_dir);
655 655
656void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, 656void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
657 struct iser_data_buf *data); 657 struct iser_data_buf *data,
658 enum dma_data_direction dir);
659
658int iser_initialize_task_headers(struct iscsi_task *task, 660int iser_initialize_task_headers(struct iscsi_task *task,
659 struct iser_tx_desc *tx_desc); 661 struct iser_tx_desc *tx_desc);
660int iser_alloc_rx_descriptors(struct iser_conn *iser_conn, 662int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 3821633f1065..20e859a6f1a6 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -320,9 +320,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
320 struct ib_conn *ib_conn = &iser_conn->ib_conn; 320 struct ib_conn *ib_conn = &iser_conn->ib_conn;
321 struct iser_device *device = ib_conn->device; 321 struct iser_device *device = ib_conn->device;
322 322
323 if (!iser_conn->rx_descs)
324 goto free_login_buf;
325
326 if (device->iser_free_rdma_reg_res) 323 if (device->iser_free_rdma_reg_res)
327 device->iser_free_rdma_reg_res(ib_conn); 324 device->iser_free_rdma_reg_res(ib_conn);
328 325
@@ -334,7 +331,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
334 /* make sure we never redo any unmapping */ 331 /* make sure we never redo any unmapping */
335 iser_conn->rx_descs = NULL; 332 iser_conn->rx_descs = NULL;
336 333
337free_login_buf:
338 iser_free_login_buf(iser_conn); 334 iser_free_login_buf(iser_conn);
339} 335}
340 336
@@ -714,19 +710,23 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
714 device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN); 710 device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
715 if (is_rdma_data_aligned) 711 if (is_rdma_data_aligned)
716 iser_dma_unmap_task_data(iser_task, 712 iser_dma_unmap_task_data(iser_task,
717 &iser_task->data[ISER_DIR_IN]); 713 &iser_task->data[ISER_DIR_IN],
714 DMA_FROM_DEVICE);
718 if (prot_count && is_rdma_prot_aligned) 715 if (prot_count && is_rdma_prot_aligned)
719 iser_dma_unmap_task_data(iser_task, 716 iser_dma_unmap_task_data(iser_task,
720 &iser_task->prot[ISER_DIR_IN]); 717 &iser_task->prot[ISER_DIR_IN],
718 DMA_FROM_DEVICE);
721 } 719 }
722 720
723 if (iser_task->dir[ISER_DIR_OUT]) { 721 if (iser_task->dir[ISER_DIR_OUT]) {
724 device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT); 722 device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
725 if (is_rdma_data_aligned) 723 if (is_rdma_data_aligned)
726 iser_dma_unmap_task_data(iser_task, 724 iser_dma_unmap_task_data(iser_task,
727 &iser_task->data[ISER_DIR_OUT]); 725 &iser_task->data[ISER_DIR_OUT],
726 DMA_TO_DEVICE);
728 if (prot_count && is_rdma_prot_aligned) 727 if (prot_count && is_rdma_prot_aligned)
729 iser_dma_unmap_task_data(iser_task, 728 iser_dma_unmap_task_data(iser_task,
730 &iser_task->prot[ISER_DIR_OUT]); 729 &iser_task->prot[ISER_DIR_OUT],
730 DMA_TO_DEVICE);
731 } 731 }
732} 732}
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index abce9339333f..341040bf0984 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -332,12 +332,13 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
332} 332}
333 333
334void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, 334void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
335 struct iser_data_buf *data) 335 struct iser_data_buf *data,
336 enum dma_data_direction dir)
336{ 337{
337 struct ib_device *dev; 338 struct ib_device *dev;
338 339
339 dev = iser_task->iser_conn->ib_conn.device->ib_device; 340 dev = iser_task->iser_conn->ib_conn.device->ib_device;
340 ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE); 341 ib_dma_unmap_sg(dev, data->buf, data->size, dir);
341} 342}
342 343
343static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, 344static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
@@ -357,7 +358,9 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
357 iser_data_buf_dump(mem, ibdev); 358 iser_data_buf_dump(mem, ibdev);
358 359
359 /* unmap the command data before accessing it */ 360 /* unmap the command data before accessing it */
360 iser_dma_unmap_task_data(iser_task, mem); 361 iser_dma_unmap_task_data(iser_task, mem,
362 (cmd_dir == ISER_DIR_OUT) ?
363 DMA_TO_DEVICE : DMA_FROM_DEVICE);
361 364
362 /* allocate copy buf, if we are writing, copy the */ 365 /* allocate copy buf, if we are writing, copy the */
363 /* unaligned scatterlist, dma map the copy */ 366 /* unaligned scatterlist, dma map the copy */
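
These iser hunks thread the caller's enum dma_data_direction into the unmap path so it always matches the direction used at map time, instead of hardcoding DMA_FROM_DEVICE. A minimal sketch of the symmetric pair using the generic dma_map_sg()/dma_unmap_sg() API (the iser code itself goes through the ib_dma_* wrappers, and the buffer descriptor here is hypothetical):

        #include <linux/device.h>
        #include <linux/dma-mapping.h>
        #include <linux/errno.h>
        #include <linux/scatterlist.h>

        struct demo_data_buf {                  /* hypothetical buffer descriptor */
                struct scatterlist *sgl;
                int nents;
        };

        static int demo_map(struct device *dev, struct demo_data_buf *buf,
                            enum dma_data_direction dir)
        {
                return dma_map_sg(dev, buf->sgl, buf->nents, dir) ? 0 : -EIO;
        }

        /* The unmap direction must be the one the buffer was mapped with. */
        static void demo_unmap(struct device *dev, struct demo_data_buf *buf,
                               enum dma_data_direction dir)
        {
                dma_unmap_sg(dev, buf->sgl, buf->nents, dir);
        }
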
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 695a2704bd43..4065abe28829 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -600,16 +600,16 @@ void iser_release_work(struct work_struct *work)
600/** 600/**
601 * iser_free_ib_conn_res - release IB related resources 601 * iser_free_ib_conn_res - release IB related resources
602 * @iser_conn: iser connection struct 602 * @iser_conn: iser connection struct
603 * @destroy_device: indicator if we need to try to release 603 * @destroy: indicator if we need to try to release the
604 * the iser device (only iscsi shutdown and DEVICE_REMOVAL 604 * iser device and memory regions pool (only iscsi
605 * will use this. 605 * shutdown and DEVICE_REMOVAL will use this).
606 * 606 *
607 * This routine is called with the iser state mutex held 607 * This routine is called with the iser state mutex held
608 * so the cm_id removal is out of here. It is safe to 608 * so the cm_id removal is out of here. It is safe to
609 * be invoked multiple times. 609 * be invoked multiple times.
610 */ 610 */
611static void iser_free_ib_conn_res(struct iser_conn *iser_conn, 611static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
612 bool destroy_device) 612 bool destroy)
613{ 613{
614 struct ib_conn *ib_conn = &iser_conn->ib_conn; 614 struct ib_conn *ib_conn = &iser_conn->ib_conn;
615 struct iser_device *device = ib_conn->device; 615 struct iser_device *device = ib_conn->device;
@@ -617,17 +617,20 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
617 iser_info("freeing conn %p cma_id %p qp %p\n", 617 iser_info("freeing conn %p cma_id %p qp %p\n",
618 iser_conn, ib_conn->cma_id, ib_conn->qp); 618 iser_conn, ib_conn->cma_id, ib_conn->qp);
619 619
620 iser_free_rx_descriptors(iser_conn);
621
622 if (ib_conn->qp != NULL) { 620 if (ib_conn->qp != NULL) {
623 ib_conn->comp->active_qps--; 621 ib_conn->comp->active_qps--;
624 rdma_destroy_qp(ib_conn->cma_id); 622 rdma_destroy_qp(ib_conn->cma_id);
625 ib_conn->qp = NULL; 623 ib_conn->qp = NULL;
626 } 624 }
627 625
628 if (destroy_device && device != NULL) { 626 if (destroy) {
629 iser_device_try_release(device); 627 if (iser_conn->rx_descs)
630 ib_conn->device = NULL; 628 iser_free_rx_descriptors(iser_conn);
629
630 if (device != NULL) {
631 iser_device_try_release(device);
632 ib_conn->device = NULL;
633 }
631 } 634 }
632} 635}
633 636
@@ -643,9 +646,11 @@ void iser_conn_release(struct iser_conn *iser_conn)
643 mutex_unlock(&ig.connlist_mutex); 646 mutex_unlock(&ig.connlist_mutex);
644 647
645 mutex_lock(&iser_conn->state_mutex); 648 mutex_lock(&iser_conn->state_mutex);
                                                                              649 /* In case we end up here without ep_disconnect being invoked. */
646 if (iser_conn->state != ISER_CONN_DOWN) { 650 if (iser_conn->state != ISER_CONN_DOWN) {
647 iser_warn("iser conn %p state %d, expected state down.\n", 651 iser_warn("iser conn %p state %d, expected state down.\n",
648 iser_conn, iser_conn->state); 652 iser_conn, iser_conn->state);
653 iscsi_destroy_endpoint(iser_conn->ep);
649 iser_conn->state = ISER_CONN_DOWN; 654 iser_conn->state = ISER_CONN_DOWN;
650 } 655 }
651 /* 656 /*
@@ -840,7 +845,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
840} 845}
841 846
842static void iser_cleanup_handler(struct rdma_cm_id *cma_id, 847static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
843 bool destroy_device) 848 bool destroy)
844{ 849{
845 struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context; 850 struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
846 851
@@ -850,7 +855,7 @@ static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
850 * and flush errors. 855 * and flush errors.
851 */ 856 */
852 iser_disconnected_handler(cma_id); 857 iser_disconnected_handler(cma_id);
853 iser_free_ib_conn_res(iser_conn, destroy_device); 858 iser_free_ib_conn_res(iser_conn, destroy);
854 complete(&iser_conn->ib_completion); 859 complete(&iser_conn->ib_completion);
855}; 860};
856 861
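
iser_free_ib_conn_res() above becomes a flag-driven, idempotent teardown: the QP is always released, while the RX descriptors and the device reference go only when destroy is set, and every branch NULLs what it freed so repeat calls are harmless. A structural sketch with hypothetical helpers:

        #include <linux/types.h>

        struct demo_conn {                      /* illustrative resources */
                void *qp;
                void *descs;
                void *device;
        };

        static void demo_destroy_qp(struct demo_conn *c);       /* assumed helpers */
        static void demo_free_descs(struct demo_conn *c);
        static void demo_put_device(struct demo_conn *c);

        /* Safe to invoke repeatedly: every branch NULLs what it released. */
        static void demo_free_conn_res(struct demo_conn *c, bool destroy)
        {
                if (c->qp) {
                        demo_destroy_qp(c);
                        c->qp = NULL;
                }
                if (destroy) {
                        if (c->descs) {
                                demo_free_descs(c);
                                c->descs = NULL;
                        }
                        if (c->device) {
                                demo_put_device(c);
                                c->device = NULL;
                        }
                }
        }
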
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index dafb3c531f96..075b19cc78e8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -38,7 +38,7 @@
38#define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \ 38#define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
39 ISERT_MAX_CONN) 39 ISERT_MAX_CONN)
40 40
41int isert_debug_level = 0; 41static int isert_debug_level;
42module_param_named(debug_level, isert_debug_level, int, 0644); 42module_param_named(debug_level, isert_debug_level, int, 0644);
43MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)"); 43MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
44 44
@@ -949,7 +949,7 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
949 isert_err("ib_post_recv() failed with ret: %d\n", ret); 949 isert_err("ib_post_recv() failed with ret: %d\n", ret);
950 isert_conn->post_recv_buf_count -= count; 950 isert_conn->post_recv_buf_count -= count;
951 } else { 951 } else {
952 isert_dbg("isert_post_recv(): Posted %d RX buffers\n", count); 952 isert_dbg("Posted %d RX buffers\n", count);
953 isert_conn->conn_rx_desc_head = rx_head; 953 isert_conn->conn_rx_desc_head = rx_head;
954 } 954 }
955 return ret; 955 return ret;
@@ -1351,17 +1351,19 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
1351 struct iscsi_conn *conn = isert_conn->conn; 1351 struct iscsi_conn *conn = isert_conn->conn;
1352 u32 payload_length = ntoh24(hdr->dlength); 1352 u32 payload_length = ntoh24(hdr->dlength);
1353 int rc; 1353 int rc;
1354 unsigned char *text_in; 1354 unsigned char *text_in = NULL;
1355 1355
1356 rc = iscsit_setup_text_cmd(conn, cmd, hdr); 1356 rc = iscsit_setup_text_cmd(conn, cmd, hdr);
1357 if (rc < 0) 1357 if (rc < 0)
1358 return rc; 1358 return rc;
1359 1359
1360 text_in = kzalloc(payload_length, GFP_KERNEL); 1360 if (payload_length) {
1361 if (!text_in) { 1361 text_in = kzalloc(payload_length, GFP_KERNEL);
1362 isert_err("Unable to allocate text_in of payload_length: %u\n", 1362 if (!text_in) {
1363 payload_length); 1363 isert_err("Unable to allocate text_in of payload_length: %u\n",
1364 return -ENOMEM; 1364 payload_length);
1365 return -ENOMEM;
1366 }
1365 } 1367 }
1366 cmd->text_in_ptr = text_in; 1368 cmd->text_in_ptr = text_in;
1367 1369
@@ -1434,9 +1436,15 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1434 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); 1436 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
1435 break; 1437 break;
1436 case ISCSI_OP_TEXT: 1438 case ISCSI_OP_TEXT:
1437 cmd = isert_allocate_cmd(conn); 1439 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) {
1438 if (!cmd) 1440 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
1439 break; 1441 if (!cmd)
1442 break;
1443 } else {
1444 cmd = isert_allocate_cmd(conn);
1445 if (!cmd)
1446 break;
1447 }
1440 1448
1441 isert_cmd = iscsit_priv_cmd(cmd); 1449 isert_cmd = iscsit_priv_cmd(cmd);
1442 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd, 1450 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
@@ -1658,6 +1666,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
1658 struct isert_conn *isert_conn = isert_cmd->conn; 1666 struct isert_conn *isert_conn = isert_cmd->conn;
1659 struct iscsi_conn *conn = isert_conn->conn; 1667 struct iscsi_conn *conn = isert_conn->conn;
1660 struct isert_device *device = isert_conn->conn_device; 1668 struct isert_device *device = isert_conn->conn_device;
1669 struct iscsi_text_rsp *hdr;
1661 1670
1662 isert_dbg("Cmd %p\n", isert_cmd); 1671 isert_dbg("Cmd %p\n", isert_cmd);
1663 1672
@@ -1698,6 +1707,11 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
1698 case ISCSI_OP_REJECT: 1707 case ISCSI_OP_REJECT:
1699 case ISCSI_OP_NOOP_OUT: 1708 case ISCSI_OP_NOOP_OUT:
1700 case ISCSI_OP_TEXT: 1709 case ISCSI_OP_TEXT:
1710 hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
1711 /* If the continue bit is on, keep the command alive */
1712 if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
1713 break;
1714
1701 spin_lock_bh(&conn->cmd_lock); 1715 spin_lock_bh(&conn->cmd_lock);
1702 if (!list_empty(&cmd->i_conn_node)) 1716 if (!list_empty(&cmd->i_conn_node))
1703 list_del_init(&cmd->i_conn_node); 1717 list_del_init(&cmd->i_conn_node);
@@ -1709,8 +1723,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
1709 * associated cmd->se_cmd needs to be released. 1723 * associated cmd->se_cmd needs to be released.
1710 */ 1724 */
1711 if (cmd->se_cmd.se_tfo != NULL) { 1725 if (cmd->se_cmd.se_tfo != NULL) {
1712 isert_dbg("Calling transport_generic_free_cmd from" 1726 isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
1713 " isert_put_cmd for 0x%02x\n",
1714 cmd->iscsi_opcode); 1727 cmd->iscsi_opcode);
1715 transport_generic_free_cmd(&cmd->se_cmd, 0); 1728 transport_generic_free_cmd(&cmd->se_cmd, 0);
1716 break; 1729 break;
@@ -2275,7 +2288,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2275 } 2288 }
2276 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 2289 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2277 2290
2278 isert_dbg("conn %p Text Reject\n", isert_conn); 2291 isert_dbg("conn %p Text Response\n", isert_conn);
2279 2292
2280 return isert_post_response(isert_conn, isert_cmd); 2293 return isert_post_response(isert_conn, isert_cmd);
2281} 2294}
@@ -3136,7 +3149,7 @@ accept_wait:
3136 spin_lock_bh(&np->np_thread_lock); 3149 spin_lock_bh(&np->np_thread_lock);
3137 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { 3150 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
3138 spin_unlock_bh(&np->np_thread_lock); 3151 spin_unlock_bh(&np->np_thread_lock);
3139 isert_dbg("np_thread_state %d for isert_accept_np\n", 3152 isert_dbg("np_thread_state %d\n",
3140 np->np_thread_state); 3153 np->np_thread_state);
3141 /** 3154 /**
3142 * No point in stalling here when np_thread 3155 * No point in stalling here when np_thread
@@ -3320,7 +3333,8 @@ static int __init isert_init(void)
3320{ 3333{
3321 int ret; 3334 int ret;
3322 3335
3323 isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0); 3336 isert_comp_wq = alloc_workqueue("isert_comp_wq",
3337 WQ_UNBOUND | WQ_HIGHPRI, 0);
3324 if (!isert_comp_wq) { 3338 if (!isert_comp_wq) {
3325 isert_err("Unable to allocate isert_comp_wq\n"); 3339 isert_err("Unable to allocate isert_comp_wq\n");
3326 ret = -ENOMEM; 3340 ret = -ENOMEM;
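
The isert_init() hunk above moves the completion workqueue to WQ_UNBOUND | WQ_HIGHPRI, so completion work is no longer pinned to the submitting CPU and is served from a high-priority worker pool. A self-contained sketch of that allocation (module scaffolding is illustrative):

        #include <linux/module.h>
        #include <linux/workqueue.h>

        static struct workqueue_struct *demo_comp_wq;

        static int __init demo_init(void)
        {
                /*
                 * WQ_UNBOUND: work items may run on any CPU, not just the
                 * queueing one.  WQ_HIGHPRI: high-priority worker pool.
                 * A max_active of 0 selects the default limit.
                 */
                demo_comp_wq = alloc_workqueue("demo_comp_wq",
                                               WQ_UNBOUND | WQ_HIGHPRI, 0);
                return demo_comp_wq ? 0 : -ENOMEM;
        }

        static void __exit demo_exit(void)
        {
                destroy_workqueue(demo_comp_wq);
        }

        module_init(demo_init);
        module_exit(demo_exit);
        MODULE_LICENSE("GPL");
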
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index eb694ddad79f..6e0a477681e9 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3518,7 +3518,7 @@ static void srpt_close_session(struct se_session *se_sess)
3518 DECLARE_COMPLETION_ONSTACK(release_done); 3518 DECLARE_COMPLETION_ONSTACK(release_done);
3519 struct srpt_rdma_ch *ch; 3519 struct srpt_rdma_ch *ch;
3520 struct srpt_device *sdev; 3520 struct srpt_device *sdev;
3521 int res; 3521 unsigned long res;
3522 3522
3523 ch = se_sess->fabric_sess_ptr; 3523 ch = se_sess->fabric_sess_ptr;
3524 WARN_ON(ch->sess != se_sess); 3524 WARN_ON(ch->sess != se_sess);
@@ -3533,7 +3533,7 @@ static void srpt_close_session(struct se_session *se_sess)
3533 spin_unlock_irq(&sdev->spinlock); 3533 spin_unlock_irq(&sdev->spinlock);
3534 3534
3535 res = wait_for_completion_timeout(&release_done, 60 * HZ); 3535 res = wait_for_completion_timeout(&release_done, 60 * HZ);
3536 WARN_ON(res <= 0); 3536 WARN_ON(res == 0);
3537} 3537}
3538 3538
3539/** 3539/**
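
The srpt hunk above fixes a classic type bug: wait_for_completion_timeout() returns an unsigned long (0 on timeout, otherwise the jiffies remaining), so `res <= 0` can never catch anything a plain `res == 0` would not, and storing the result in an int invites sign confusion. A minimal correct wrapper:

        #include <linux/completion.h>
        #include <linux/errno.h>
        #include <linux/jiffies.h>

        static int demo_wait_released(struct completion *release_done)
        {
                unsigned long res;      /* unsigned: the API never returns < 0 */

                res = wait_for_completion_timeout(release_done, 60 * HZ);
                if (res == 0)           /* 0 means the 60 s timeout expired */
                        return -ETIMEDOUT;

                return 0;               /* nonzero res = jiffies that were left */
        }
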
diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c
index b78425765d3e..d09cefa37931 100644
--- a/drivers/input/joystick/adi.c
+++ b/drivers/input/joystick/adi.c
@@ -535,8 +535,7 @@ static int adi_connect(struct gameport *gameport, struct gameport_driver *drv)
535 } 535 }
536 } 536 }
537 fail2: for (i = 0; i < 2; i++) 537 fail2: for (i = 0; i < 2; i++)
538 if (port->adi[i].dev) 538 input_free_device(port->adi[i].dev);
539 input_free_device(port->adi[i].dev);
540 gameport_close(gameport); 539 gameport_close(gameport);
541 fail1: gameport_set_drvdata(gameport, NULL); 540 fail1: gameport_set_drvdata(gameport, NULL);
542 kfree(port); 541 kfree(port);
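
The adi_connect() cleanup above drops the NULL check because input_free_device(), like kfree(), is defined to be a no-op when passed NULL. That lets error paths free every slot unconditionally, e.g.:

        #include <linux/input.h>

        /* NULL entries are fine: input_free_device(NULL) does nothing. */
        static void demo_free_all(struct input_dev *devs[], int n)
        {
                int i;

                for (i = 0; i < n; i++)
                        input_free_device(devs[i]);
        }
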
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index a89488aa1aa4..fcef5d1365e2 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -345,13 +345,11 @@ static int pxa27x_keypad_build_keycode(struct pxa27x_keypad *keypad)
345{ 345{
346 const struct pxa27x_keypad_platform_data *pdata = keypad->pdata; 346 const struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
347 struct input_dev *input_dev = keypad->input_dev; 347 struct input_dev *input_dev = keypad->input_dev;
348 const struct matrix_keymap_data *keymap_data =
349 pdata ? pdata->matrix_keymap_data : NULL;
350 unsigned short keycode; 348 unsigned short keycode;
351 int i; 349 int i;
352 int error; 350 int error;
353 351
354 error = matrix_keypad_build_keymap(keymap_data, NULL, 352 error = matrix_keypad_build_keymap(pdata->matrix_keymap_data, NULL,
355 pdata->matrix_key_rows, 353 pdata->matrix_key_rows,
356 pdata->matrix_key_cols, 354 pdata->matrix_key_cols,
357 keypad->keycodes, input_dev); 355 keypad->keycodes, input_dev);
diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c
index 3f4351579372..a0fc18fdfc0c 100644
--- a/drivers/input/misc/bfin_rotary.c
+++ b/drivers/input/misc/bfin_rotary.c
@@ -7,29 +7,37 @@
7 7
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/interrupt.h> 9#include <linux/interrupt.h>
10#include <linux/io.h>
10#include <linux/irq.h> 11#include <linux/irq.h>
11#include <linux/pm.h> 12#include <linux/pm.h>
12#include <linux/platform_device.h> 13#include <linux/platform_device.h>
13#include <linux/input.h> 14#include <linux/input.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/platform_data/bfin_rotary.h>
15 17
16#include <asm/portmux.h> 18#include <asm/portmux.h>
17#include <asm/bfin_rotary.h>
18 19
19static const u16 per_cnt[] = { 20#define CNT_CONFIG_OFF 0 /* CNT Config Offset */
20 P_CNT_CUD, 21#define CNT_IMASK_OFF 4 /* CNT Interrupt Mask Offset */
21 P_CNT_CDG, 22#define CNT_STATUS_OFF 8 /* CNT Status Offset */
22 P_CNT_CZM, 23#define CNT_COMMAND_OFF 12 /* CNT Command Offset */
23 0 24#define CNT_DEBOUNCE_OFF 16 /* CNT Debounce Offset */
24}; 25#define CNT_COUNTER_OFF 20 /* CNT Counter Offset */
26#define CNT_MAX_OFF 24 /* CNT Maximum Count Offset */
27#define CNT_MIN_OFF 28 /* CNT Minimum Count Offset */
25 28
26struct bfin_rot { 29struct bfin_rot {
27 struct input_dev *input; 30 struct input_dev *input;
31 void __iomem *base;
28 int irq; 32 int irq;
29 unsigned int up_key; 33 unsigned int up_key;
30 unsigned int down_key; 34 unsigned int down_key;
31 unsigned int button_key; 35 unsigned int button_key;
32 unsigned int rel_code; 36 unsigned int rel_code;
37
38 unsigned short mode;
39 unsigned short debounce;
40
33 unsigned short cnt_config; 41 unsigned short cnt_config;
34 unsigned short cnt_imask; 42 unsigned short cnt_imask;
35 unsigned short cnt_debounce; 43 unsigned short cnt_debounce;
@@ -59,18 +67,17 @@ static void report_rotary_event(struct bfin_rot *rotary, int delta)
59 67
60static irqreturn_t bfin_rotary_isr(int irq, void *dev_id) 68static irqreturn_t bfin_rotary_isr(int irq, void *dev_id)
61{ 69{
62 struct platform_device *pdev = dev_id; 70 struct bfin_rot *rotary = dev_id;
63 struct bfin_rot *rotary = platform_get_drvdata(pdev);
64 int delta; 71 int delta;
65 72
66 switch (bfin_read_CNT_STATUS()) { 73 switch (readw(rotary->base + CNT_STATUS_OFF)) {
67 74
68 case ICII: 75 case ICII:
69 break; 76 break;
70 77
71 case UCII: 78 case UCII:
72 case DCII: 79 case DCII:
73 delta = bfin_read_CNT_COUNTER(); 80 delta = readl(rotary->base + CNT_COUNTER_OFF);
74 if (delta) 81 if (delta)
75 report_rotary_event(rotary, delta); 82 report_rotary_event(rotary, delta);
76 break; 83 break;
@@ -83,16 +90,52 @@ static irqreturn_t bfin_rotary_isr(int irq, void *dev_id)
83 break; 90 break;
84 } 91 }
85 92
86 bfin_write_CNT_COMMAND(W1LCNT_ZERO); /* Clear COUNTER */ 93 writew(W1LCNT_ZERO, rotary->base + CNT_COMMAND_OFF); /* Clear COUNTER */
87 bfin_write_CNT_STATUS(-1); /* Clear STATUS */ 94 writew(-1, rotary->base + CNT_STATUS_OFF); /* Clear STATUS */
88 95
89 return IRQ_HANDLED; 96 return IRQ_HANDLED;
90} 97}
91 98
99static int bfin_rotary_open(struct input_dev *input)
100{
101 struct bfin_rot *rotary = input_get_drvdata(input);
102 unsigned short val;
103
104 if (rotary->mode & ROT_DEBE)
105 writew(rotary->debounce & DPRESCALE,
106 rotary->base + CNT_DEBOUNCE_OFF);
107
108 writew(rotary->mode & ~CNTE, rotary->base + CNT_CONFIG_OFF);
109
110 val = UCIE | DCIE;
111 if (rotary->button_key)
112 val |= CZMIE;
113 writew(val, rotary->base + CNT_IMASK_OFF);
114
115 writew(rotary->mode | CNTE, rotary->base + CNT_CONFIG_OFF);
116
117 return 0;
118}
119
120static void bfin_rotary_close(struct input_dev *input)
121{
122 struct bfin_rot *rotary = input_get_drvdata(input);
123
124 writew(0, rotary->base + CNT_CONFIG_OFF);
125 writew(0, rotary->base + CNT_IMASK_OFF);
126}
127
128static void bfin_rotary_free_action(void *data)
129{
130 peripheral_free_list(data);
131}
132
92static int bfin_rotary_probe(struct platform_device *pdev) 133static int bfin_rotary_probe(struct platform_device *pdev)
93{ 134{
94 struct bfin_rotary_platform_data *pdata = dev_get_platdata(&pdev->dev); 135 struct device *dev = &pdev->dev;
136 const struct bfin_rotary_platform_data *pdata = dev_get_platdata(dev);
95 struct bfin_rot *rotary; 137 struct bfin_rot *rotary;
138 struct resource *res;
96 struct input_dev *input; 139 struct input_dev *input;
97 int error; 140 int error;
98 141
@@ -102,18 +145,37 @@ static int bfin_rotary_probe(struct platform_device *pdev)
102 return -EINVAL; 145 return -EINVAL;
103 } 146 }
104 147
105 error = peripheral_request_list(per_cnt, dev_name(&pdev->dev)); 148 if (pdata->pin_list) {
106 if (error) { 149 error = peripheral_request_list(pdata->pin_list,
107 dev_err(&pdev->dev, "requesting peripherals failed\n"); 150 dev_name(&pdev->dev));
108 return error; 151 if (error) {
152 dev_err(dev, "requesting peripherals failed: %d\n",
153 error);
154 return error;
155 }
156
157 error = devm_add_action(dev, bfin_rotary_free_action,
158 pdata->pin_list);
159 if (error) {
160 dev_err(dev, "setting cleanup action failed: %d\n",
161 error);
162 peripheral_free_list(pdata->pin_list);
163 return error;
164 }
109 } 165 }
110 166
111 rotary = kzalloc(sizeof(struct bfin_rot), GFP_KERNEL); 167 rotary = devm_kzalloc(dev, sizeof(struct bfin_rot), GFP_KERNEL);
112 input = input_allocate_device(); 168 if (!rotary)
113 if (!rotary || !input) { 169 return -ENOMEM;
114 error = -ENOMEM; 170
115 goto out1; 171 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
116 } 172 rotary->base = devm_ioremap_resource(dev, res);
173 if (IS_ERR(rotary->base))
174 return PTR_ERR(rotary->base);
175
176 input = devm_input_allocate_device(dev);
177 if (!input)
178 return -ENOMEM;
117 179
118 rotary->input = input; 180 rotary->input = input;
119 181
@@ -122,9 +184,8 @@ static int bfin_rotary_probe(struct platform_device *pdev)
122 rotary->button_key = pdata->rotary_button_key; 184 rotary->button_key = pdata->rotary_button_key;
123 rotary->rel_code = pdata->rotary_rel_code; 185 rotary->rel_code = pdata->rotary_rel_code;
124 186
125 error = rotary->irq = platform_get_irq(pdev, 0); 187 rotary->mode = pdata->mode;
126 if (error < 0) 188 rotary->debounce = pdata->debounce;
127 goto out1;
128 189
129 input->name = pdev->name; 190 input->name = pdev->name;
130 input->phys = "bfin-rotary/input0"; 191 input->phys = "bfin-rotary/input0";
@@ -137,6 +198,9 @@ static int bfin_rotary_probe(struct platform_device *pdev)
137 input->id.product = 0x0001; 198 input->id.product = 0x0001;
138 input->id.version = 0x0100; 199 input->id.version = 0x0100;
139 200
201 input->open = bfin_rotary_open;
202 input->close = bfin_rotary_close;
203
140 if (rotary->up_key) { 204 if (rotary->up_key) {
141 __set_bit(EV_KEY, input->evbit); 205 __set_bit(EV_KEY, input->evbit);
142 __set_bit(rotary->up_key, input->keybit); 206 __set_bit(rotary->up_key, input->keybit);
@@ -151,75 +215,43 @@ static int bfin_rotary_probe(struct platform_device *pdev)
151 __set_bit(rotary->button_key, input->keybit); 215 __set_bit(rotary->button_key, input->keybit);
152 } 216 }
153 217
154 error = request_irq(rotary->irq, bfin_rotary_isr, 218 /* Quiesce the device before requesting irq */
155 0, dev_name(&pdev->dev), pdev); 219 bfin_rotary_close(input);
220
221 rotary->irq = platform_get_irq(pdev, 0);
222 if (rotary->irq < 0) {
223 dev_err(dev, "No rotary IRQ specified\n");
224 return -ENOENT;
225 }
226
227 error = devm_request_irq(dev, rotary->irq, bfin_rotary_isr,
228 0, dev_name(dev), rotary);
156 if (error) { 229 if (error) {
157 dev_err(&pdev->dev, 230 dev_err(dev, "unable to claim irq %d; error %d\n",
158 "unable to claim irq %d; error %d\n",
159 rotary->irq, error); 231 rotary->irq, error);
160 goto out1; 232 return error;
161 } 233 }
162 234
163 error = input_register_device(input); 235 error = input_register_device(input);
164 if (error) { 236 if (error) {
165 dev_err(&pdev->dev, 237 dev_err(dev, "unable to register input device (%d)\n", error);
166 "unable to register input device (%d)\n", error); 238 return error;
167 goto out2;
168 } 239 }
169 240
170 if (pdata->rotary_button_key)
171 bfin_write_CNT_IMASK(CZMIE);
172
173 if (pdata->mode & ROT_DEBE)
174 bfin_write_CNT_DEBOUNCE(pdata->debounce & DPRESCALE);
175
176 if (pdata->mode)
177 bfin_write_CNT_CONFIG(bfin_read_CNT_CONFIG() |
178 (pdata->mode & ~CNTE));
179
180 bfin_write_CNT_IMASK(bfin_read_CNT_IMASK() | UCIE | DCIE);
181 bfin_write_CNT_CONFIG(bfin_read_CNT_CONFIG() | CNTE);
182
183 platform_set_drvdata(pdev, rotary); 241 platform_set_drvdata(pdev, rotary);
184 device_init_wakeup(&pdev->dev, 1); 242 device_init_wakeup(&pdev->dev, 1);
185 243
186 return 0; 244 return 0;
187
188out2:
189 free_irq(rotary->irq, pdev);
190out1:
191 input_free_device(input);
192 kfree(rotary);
193 peripheral_free_list(per_cnt);
194
195 return error;
196} 245}
197 246
198static int bfin_rotary_remove(struct platform_device *pdev) 247static int __maybe_unused bfin_rotary_suspend(struct device *dev)
199{
200 struct bfin_rot *rotary = platform_get_drvdata(pdev);
201
202 bfin_write_CNT_CONFIG(0);
203 bfin_write_CNT_IMASK(0);
204
205 free_irq(rotary->irq, pdev);
206 input_unregister_device(rotary->input);
207 peripheral_free_list(per_cnt);
208
209 kfree(rotary);
210
211 return 0;
212}
213
214#ifdef CONFIG_PM
215static int bfin_rotary_suspend(struct device *dev)
216{ 248{
217 struct platform_device *pdev = to_platform_device(dev); 249 struct platform_device *pdev = to_platform_device(dev);
218 struct bfin_rot *rotary = platform_get_drvdata(pdev); 250 struct bfin_rot *rotary = platform_get_drvdata(pdev);
219 251
220 rotary->cnt_config = bfin_read_CNT_CONFIG(); 252 rotary->cnt_config = readw(rotary->base + CNT_CONFIG_OFF);
221 rotary->cnt_imask = bfin_read_CNT_IMASK(); 253 rotary->cnt_imask = readw(rotary->base + CNT_IMASK_OFF);
222 rotary->cnt_debounce = bfin_read_CNT_DEBOUNCE(); 254 rotary->cnt_debounce = readw(rotary->base + CNT_DEBOUNCE_OFF);
223 255
224 if (device_may_wakeup(&pdev->dev)) 256 if (device_may_wakeup(&pdev->dev))
225 enable_irq_wake(rotary->irq); 257 enable_irq_wake(rotary->irq);
@@ -227,38 +259,32 @@ static int bfin_rotary_suspend(struct device *dev)
227 return 0; 259 return 0;
228} 260}
229 261
230static int bfin_rotary_resume(struct device *dev) 262static int __maybe_unused bfin_rotary_resume(struct device *dev)
231{ 263{
232 struct platform_device *pdev = to_platform_device(dev); 264 struct platform_device *pdev = to_platform_device(dev);
233 struct bfin_rot *rotary = platform_get_drvdata(pdev); 265 struct bfin_rot *rotary = platform_get_drvdata(pdev);
234 266
235 bfin_write_CNT_DEBOUNCE(rotary->cnt_debounce); 267 writew(rotary->cnt_debounce, rotary->base + CNT_DEBOUNCE_OFF);
236 bfin_write_CNT_IMASK(rotary->cnt_imask); 268 writew(rotary->cnt_imask, rotary->base + CNT_IMASK_OFF);
237 bfin_write_CNT_CONFIG(rotary->cnt_config & ~CNTE); 269 writew(rotary->cnt_config & ~CNTE, rotary->base + CNT_CONFIG_OFF);
238 270
239 if (device_may_wakeup(&pdev->dev)) 271 if (device_may_wakeup(&pdev->dev))
240 disable_irq_wake(rotary->irq); 272 disable_irq_wake(rotary->irq);
241 273
242 if (rotary->cnt_config & CNTE) 274 if (rotary->cnt_config & CNTE)
243 bfin_write_CNT_CONFIG(rotary->cnt_config); 275 writew(rotary->cnt_config, rotary->base + CNT_CONFIG_OFF);
244 276
245 return 0; 277 return 0;
246} 278}
247 279
248static const struct dev_pm_ops bfin_rotary_pm_ops = { 280static SIMPLE_DEV_PM_OPS(bfin_rotary_pm_ops,
249 .suspend = bfin_rotary_suspend, 281 bfin_rotary_suspend, bfin_rotary_resume);
250 .resume = bfin_rotary_resume,
251};
252#endif
253 282
254static struct platform_driver bfin_rotary_device_driver = { 283static struct platform_driver bfin_rotary_device_driver = {
255 .probe = bfin_rotary_probe, 284 .probe = bfin_rotary_probe,
256 .remove = bfin_rotary_remove,
257 .driver = { 285 .driver = {
258 .name = "bfin-rotary", 286 .name = "bfin-rotary",
259#ifdef CONFIG_PM
260 .pm = &bfin_rotary_pm_ops, 287 .pm = &bfin_rotary_pm_ops,
261#endif
262 }, 288 },
263}; 289};
264module_platform_driver(bfin_rotary_device_driver); 290module_platform_driver(bfin_rotary_device_driver);
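
The bfin_rotary conversion above is a textbook move to managed (devm_*) resources: the ioremap, input device, and IRQ are all released automatically on probe failure or driver unbind, which is why both the error-unwind labels and the remove() callback disappear. A condensed probe() skeleton in the same style (ISR body and device specifics elided; all names are illustrative):

        #include <linux/device.h>
        #include <linux/input.h>
        #include <linux/interrupt.h>
        #include <linux/io.h>
        #include <linux/platform_device.h>

        static irqreturn_t demo_isr(int irq, void *dev_id)
        {
                /* read hardware, report input events ... */
                return IRQ_HANDLED;
        }

        static int demo_probe(struct platform_device *pdev)
        {
                struct device *dev = &pdev->dev;
                struct input_dev *input;
                struct resource *res;
                void __iomem *base;
                int irq, error;

                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                base = devm_ioremap_resource(dev, res); /* unmapped automatically */
                if (IS_ERR(base))
                        return PTR_ERR(base);

                input = devm_input_allocate_device(dev); /* freed automatically */
                if (!input)
                        return -ENOMEM;
                input->name = pdev->name;

                irq = platform_get_irq(pdev, 0);
                if (irq < 0)
                        return irq;

                error = devm_request_irq(dev, irq, demo_isr, 0,
                                         dev_name(dev), input);
                if (error)
                        return error;

                /* devm-allocated input devices are unregistered automatically */
                return input_register_device(input);
        }
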
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index 79cc0f79896f..e8e010a85484 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -195,7 +195,7 @@ static int soc_button_probe(struct platform_device *pdev)
195 195
196static struct soc_button_info soc_button_PNP0C40[] = { 196static struct soc_button_info soc_button_PNP0C40[] = {
197 { "power", 0, EV_KEY, KEY_POWER, false, true }, 197 { "power", 0, EV_KEY, KEY_POWER, false, true },
198 { "home", 1, EV_KEY, KEY_HOME, false, true }, 198 { "home", 1, EV_KEY, KEY_LEFTMETA, false, true },
199 { "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false }, 199 { "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false },
200 { "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false }, 200 { "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false },
201 { "rotation_lock", 4, EV_SW, SW_ROTATE_LOCK, false, false }, 201 { "rotation_lock", 4, EV_SW, SW_ROTATE_LOCK, false, false },
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index f205b8be2ce4..d28726a0ef85 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -99,36 +99,58 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
99#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */ 99#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */
100#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with 100#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with
101 6-byte ALPS packet */ 101 6-byte ALPS packet */
102#define ALPS_IS_RUSHMORE 0x100 /* device is a rushmore */
103#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */ 102#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */
104 103
105static const struct alps_model_info alps_model_data[] = { 104static const struct alps_model_info alps_model_data[] = {
106 { { 0x32, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Satellite Pro M10 */ 105 { { 0x32, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } }, /* Toshiba Satellite Pro M10 */
107 { { 0x33, 0x02, 0x0a }, 0x00, ALPS_PROTO_V1, 0x88, 0xf8, 0 }, /* UMAX-530T */ 106 { { 0x33, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V1, 0x88, 0xf8, 0 } }, /* UMAX-530T */
108 { { 0x53, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, 107 { { 0x53, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
109 { { 0x53, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, 108 { { 0x53, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
110 { { 0x60, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, /* HP ze1115 */ 109 { { 0x60, 0x03, 0xc8 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } }, /* HP ze1115 */
111 { { 0x63, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, 110 { { 0x63, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
112 { { 0x63, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, 111 { { 0x63, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
113 { { 0x63, 0x02, 0x28 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Fujitsu Siemens S6010 */ 112 { { 0x63, 0x02, 0x28 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 } }, /* Fujitsu Siemens S6010 */
114 { { 0x63, 0x02, 0x3c }, 0x00, ALPS_PROTO_V2, 0x8f, 0x8f, ALPS_WHEEL }, /* Toshiba Satellite S2400-103 */ 113 { { 0x63, 0x02, 0x3c }, 0x00, { ALPS_PROTO_V2, 0x8f, 0x8f, ALPS_WHEEL } }, /* Toshiba Satellite S2400-103 */
115 { { 0x63, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xef, 0xef, ALPS_FW_BK_1 }, /* NEC Versa L320 */ 114 { { 0x63, 0x02, 0x50 }, 0x00, { ALPS_PROTO_V2, 0xef, 0xef, ALPS_FW_BK_1 } }, /* NEC Versa L320 */
116 { { 0x63, 0x02, 0x64 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, 115 { { 0x63, 0x02, 0x64 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
117 { { 0x63, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D800 */ 116 { { 0x63, 0x03, 0xc8 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } }, /* Dell Latitude D800 */
118 { { 0x73, 0x00, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_DUALPOINT }, /* ThinkPad R61 8918-5QG */ 117 { { 0x73, 0x00, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_DUALPOINT } }, /* ThinkPad R61 8918-5QG */
119 { { 0x73, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, 118 { { 0x73, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
120 { { 0x73, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Ahtec Laptop */ 119 { { 0x73, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 } }, /* Ahtec Laptop */
121 { { 0x20, 0x02, 0x0e }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */ 120
122 { { 0x22, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, 121 /*
123 { { 0x22, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */ 122 * XXX This entry is suspicious. First byte has zero lower nibble,
123 * which is what a normal mouse would report. Also, the value 0x0e
124 * isn't valid per PS/2 spec.
125 */
126 { { 0x20, 0x02, 0x0e }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } },
127
128 { { 0x22, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } },
129 { { 0x22, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT } }, /* Dell Latitude D600 */
124 /* Dell Latitude E5500, E6400, E6500, Precision M4400 */ 130 /* Dell Latitude E5500, E6400, E6500, Precision M4400 */
125 { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, 131 { { 0x62, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xcf, 0xcf,
126 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, 132 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED } },
127 { { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V6, 0xff, 0xff, ALPS_DUALPOINT }, /* Dell XT2 */ 133 { { 0x73, 0x00, 0x14 }, 0x00, { ALPS_PROTO_V6, 0xff, 0xff, ALPS_DUALPOINT } }, /* Dell XT2 */
128 { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ 134 { { 0x73, 0x02, 0x50 }, 0x00, { ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS } }, /* Dell Vostro 1400 */
129 { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, 135 { { 0x52, 0x01, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xff, 0xff,
130 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ 136 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED } }, /* Toshiba Tecra A11-11L */
131 { { 0x73, 0x02, 0x64 }, 0x8a, ALPS_PROTO_V4, 0x8f, 0x8f, 0 }, 137 { { 0x73, 0x02, 0x64 }, 0x8a, { ALPS_PROTO_V4, 0x8f, 0x8f, 0 } },
138};
139
140static const struct alps_protocol_info alps_v3_protocol_data = {
141 ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT
142};
143
144static const struct alps_protocol_info alps_v3_rushmore_data = {
145 ALPS_PROTO_V3_RUSHMORE, 0x8f, 0x8f, ALPS_DUALPOINT
146};
147
148static const struct alps_protocol_info alps_v5_protocol_data = {
149 ALPS_PROTO_V5, 0xc8, 0xd8, 0
150};
151
152static const struct alps_protocol_info alps_v7_protocol_data = {
153 ALPS_PROTO_V7, 0x48, 0x48, ALPS_DUALPOINT
132}; 154};
133 155
134static void alps_set_abs_params_st(struct alps_data *priv, 156static void alps_set_abs_params_st(struct alps_data *priv,
@@ -136,12 +158,6 @@ static void alps_set_abs_params_st(struct alps_data *priv,
136static void alps_set_abs_params_mt(struct alps_data *priv, 158static void alps_set_abs_params_mt(struct alps_data *priv,
137 struct input_dev *dev1); 159 struct input_dev *dev1);
138 160
139/*
140 * XXX - this entry is suspicious. First byte has zero lower nibble,
141 * which is what a normal mouse would report. Also, the value 0x0e
142 * isn't valid per PS/2 spec.
143 */
144
145/* Packet formats are described in Documentation/input/alps.txt */ 161/* Packet formats are described in Documentation/input/alps.txt */
146 162
147static bool alps_is_valid_first_byte(struct alps_data *priv, 163static bool alps_is_valid_first_byte(struct alps_data *priv,
@@ -150,8 +166,7 @@ static bool alps_is_valid_first_byte(struct alps_data *priv,
150 return (data & priv->mask0) == priv->byte0; 166 return (data & priv->mask0) == priv->byte0;
151} 167}
152 168
153static void alps_report_buttons(struct psmouse *psmouse, 169static void alps_report_buttons(struct input_dev *dev1, struct input_dev *dev2,
154 struct input_dev *dev1, struct input_dev *dev2,
155 int left, int right, int middle) 170 int left, int right, int middle)
156{ 171{
157 struct input_dev *dev; 172 struct input_dev *dev;
@@ -161,20 +176,21 @@ static void alps_report_buttons(struct psmouse *psmouse,
161 * other device (dev2) then this event should also be 176 * other device (dev2) then this event should also be
162 * sent through that device. 177 * sent through that device.
163 */ 178 */
164 dev = test_bit(BTN_LEFT, dev2->key) ? dev2 : dev1; 179 dev = (dev2 && test_bit(BTN_LEFT, dev2->key)) ? dev2 : dev1;
165 input_report_key(dev, BTN_LEFT, left); 180 input_report_key(dev, BTN_LEFT, left);
166 181
167 dev = test_bit(BTN_RIGHT, dev2->key) ? dev2 : dev1; 182 dev = (dev2 && test_bit(BTN_RIGHT, dev2->key)) ? dev2 : dev1;
168 input_report_key(dev, BTN_RIGHT, right); 183 input_report_key(dev, BTN_RIGHT, right);
169 184
170 dev = test_bit(BTN_MIDDLE, dev2->key) ? dev2 : dev1; 185 dev = (dev2 && test_bit(BTN_MIDDLE, dev2->key)) ? dev2 : dev1;
171 input_report_key(dev, BTN_MIDDLE, middle); 186 input_report_key(dev, BTN_MIDDLE, middle);
172 187
173 /* 188 /*
174 * Sync the _other_ device now, we'll do the first 189 * Sync the _other_ device now, we'll do the first
175 * device later once we report the rest of the events. 190 * device later once we report the rest of the events.
176 */ 191 */
177 input_sync(dev2); 192 if (dev2)
193 input_sync(dev2);
178} 194}
179 195
180static void alps_process_packet_v1_v2(struct psmouse *psmouse) 196static void alps_process_packet_v1_v2(struct psmouse *psmouse)
@@ -221,13 +237,13 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
221 input_report_rel(dev2, REL_X, (x > 383 ? (x - 768) : x)); 237 input_report_rel(dev2, REL_X, (x > 383 ? (x - 768) : x));
222 input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y)); 238 input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y));
223 239
224 alps_report_buttons(psmouse, dev2, dev, left, right, middle); 240 alps_report_buttons(dev2, dev, left, right, middle);
225 241
226 input_sync(dev2); 242 input_sync(dev2);
227 return; 243 return;
228 } 244 }
229 245
230 alps_report_buttons(psmouse, dev, dev2, left, right, middle); 246 alps_report_buttons(dev, dev2, left, right, middle);
231 247
232 /* Convert hardware tap to a reasonable Z value */ 248 /* Convert hardware tap to a reasonable Z value */
233 if (ges && !fin) 249 if (ges && !fin)
@@ -412,7 +428,7 @@ static int alps_process_bitmap(struct alps_data *priv,
412 (2 * (priv->y_bits - 1)); 428 (2 * (priv->y_bits - 1));
413 429
414 /* y-bitmap order is reversed, except on rushmore */ 430 /* y-bitmap order is reversed, except on rushmore */
415 if (!(priv->flags & ALPS_IS_RUSHMORE)) { 431 if (priv->proto_version != ALPS_PROTO_V3_RUSHMORE) {
416 fields->mt[0].y = priv->y_max - fields->mt[0].y; 432 fields->mt[0].y = priv->y_max - fields->mt[0].y;
417 fields->mt[1].y = priv->y_max - fields->mt[1].y; 433 fields->mt[1].y = priv->y_max - fields->mt[1].y;
418 } 434 }
@@ -648,7 +664,8 @@ static void alps_process_touchpad_packet_v3_v5(struct psmouse *psmouse)
648 */ 664 */
649 if (f->is_mp) { 665 if (f->is_mp) {
650 fingers = f->fingers; 666 fingers = f->fingers;
651 if (priv->proto_version == ALPS_PROTO_V3) { 667 if (priv->proto_version == ALPS_PROTO_V3 ||
668 priv->proto_version == ALPS_PROTO_V3_RUSHMORE) {
652 if (alps_process_bitmap(priv, f) == 0) 669 if (alps_process_bitmap(priv, f) == 0)
653 fingers = 0; /* Use st data */ 670 fingers = 0; /* Use st data */
654 671
@@ -892,34 +909,6 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
892 unsigned char *pkt, 909 unsigned char *pkt,
893 unsigned char pkt_id) 910 unsigned char pkt_id)
894{ 911{
895 /*
896 * packet-fmt b7 b6 b5 b4 b3 b2 b1 b0
897 * Byte0 TWO & MULTI L 1 R M 1 Y0-2 Y0-1 Y0-0
898 * Byte0 NEW L 1 X1-5 1 1 Y0-2 Y0-1 Y0-0
899 * Byte1 Y0-10 Y0-9 Y0-8 Y0-7 Y0-6 Y0-5 Y0-4 Y0-3
900 * Byte2 X0-11 1 X0-10 X0-9 X0-8 X0-7 X0-6 X0-5
901 * Byte3 X1-11 1 X0-4 X0-3 1 X0-2 X0-1 X0-0
902 * Byte4 TWO X1-10 TWO X1-9 X1-8 X1-7 X1-6 X1-5 X1-4
903 * Byte4 MULTI X1-10 TWO X1-9 X1-8 X1-7 X1-6 Y1-5 1
904 * Byte4 NEW X1-10 TWO X1-9 X1-8 X1-7 X1-6 0 0
905 * Byte5 TWO & NEW Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 Y1-5 Y1-4
906 * Byte5 MULTI Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 F-1 F-0
907 * L: Left button
908 * R / M: Non-clickpads: Right / Middle button
909 * Clickpads: When > 2 fingers are down, and some fingers
910 * are in the button area, then the 2 coordinates reported
911 * are for fingers outside the button area and these report
912 * extra fingers being present in the right / left button
913 * area. Note these fingers are not added to the F field!
914 * so if a TWO packet is received and R = 1 then there are
915 * 3 fingers down, etc.
916 * TWO: 1: Two touches present, byte 0/4/5 are in TWO fmt
917 * 0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt
918 * otherwise byte 0 bit 4 must be set and byte 0/4/5 are
919 * in NEW fmt
920 * F: Number of fingers - 3, 0 means 3 fingers, 1 means 4 ...
921 */
922
923 mt[0].x = ((pkt[2] & 0x80) << 4); 912 mt[0].x = ((pkt[2] & 0x80) << 4);
924 mt[0].x |= ((pkt[2] & 0x3F) << 5); 913 mt[0].x |= ((pkt[2] & 0x3F) << 5);
925 mt[0].x |= ((pkt[3] & 0x30) >> 1); 914 mt[0].x |= ((pkt[3] & 0x30) >> 1);
@@ -1044,17 +1033,6 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
1044 return; 1033 return;
1045 } 1034 }
1046 1035
1047 /*
1048 * b7 b6 b5 b4 b3 b2 b1 b0
1049 * Byte0 0 1 0 0 1 0 0 0
1050 * Byte1 1 1 * * 1 M R L
1051 * Byte2 X7 1 X5 X4 X3 X2 X1 X0
1052 * Byte3 Z6 1 Y6 X6 1 Y2 Y1 Y0
1053 * Byte4 Y7 0 Y5 Y4 Y3 1 1 0
1054 * Byte5 T&P 0 Z5 Z4 Z3 Z2 Z1 Z0
1055 * M / R / L: Middle / Right / Left button
1056 */
1057
1058 x = ((packet[2] & 0xbf)) | ((packet[3] & 0x10) << 2); 1036 x = ((packet[2] & 0xbf)) | ((packet[3] & 0x10) << 2);
1059 y = (packet[3] & 0x07) | (packet[4] & 0xb8) | 1037 y = (packet[3] & 0x07) | (packet[4] & 0xb8) |
1060 ((packet[3] & 0x20) << 1); 1038 ((packet[3] & 0x20) << 1);
@@ -1107,23 +1085,89 @@ static void alps_process_packet_v7(struct psmouse *psmouse)
1107 alps_process_touchpad_packet_v7(psmouse); 1085 alps_process_touchpad_packet_v7(psmouse);
1108} 1086}
1109 1087
1110static void alps_report_bare_ps2_packet(struct psmouse *psmouse, 1088static DEFINE_MUTEX(alps_mutex);
1089
1090static void alps_register_bare_ps2_mouse(struct work_struct *work)
1091{
1092 struct alps_data *priv =
1093 container_of(work, struct alps_data, dev3_register_work.work);
1094 struct psmouse *psmouse = priv->psmouse;
1095 struct input_dev *dev3;
1096 int error = 0;
1097
1098 mutex_lock(&alps_mutex);
1099
1100 if (priv->dev3)
1101 goto out;
1102
1103 dev3 = input_allocate_device();
1104 if (!dev3) {
1105 psmouse_err(psmouse, "failed to allocate secondary device\n");
1106 error = -ENOMEM;
1107 goto out;
1108 }
1109
1110 snprintf(priv->phys3, sizeof(priv->phys3), "%s/%s",
1111 psmouse->ps2dev.serio->phys,
1112 (priv->dev2 ? "input2" : "input1"));
1113 dev3->phys = priv->phys3;
1114
1115 /*
1116 * format of input device name is: "protocol vendor name"
1117 * see function psmouse_switch_protocol() in psmouse-base.c
1118 */
1119 dev3->name = "PS/2 ALPS Mouse";
1120
1121 dev3->id.bustype = BUS_I8042;
1122 dev3->id.vendor = 0x0002;
1123 dev3->id.product = PSMOUSE_PS2;
1124 dev3->id.version = 0x0000;
1125 dev3->dev.parent = &psmouse->ps2dev.serio->dev;
1126
1127 input_set_capability(dev3, EV_REL, REL_X);
1128 input_set_capability(dev3, EV_REL, REL_Y);
1129 input_set_capability(dev3, EV_KEY, BTN_LEFT);
1130 input_set_capability(dev3, EV_KEY, BTN_RIGHT);
1131 input_set_capability(dev3, EV_KEY, BTN_MIDDLE);
1132
1133 __set_bit(INPUT_PROP_POINTER, dev3->propbit);
1134
1135 error = input_register_device(dev3);
1136 if (error) {
1137 psmouse_err(psmouse,
1138 "failed to register secondary device: %d\n",
1139 error);
1140 input_free_device(dev3);
1141 goto out;
1142 }
1143
1144 priv->dev3 = dev3;
1145
1146out:
1147 /*
1148 * Save the error code so that we can detect that we
1149 * already tried to create the device.
1150 */
1151 if (error)
1152 priv->dev3 = ERR_PTR(error);
1153
1154 mutex_unlock(&alps_mutex);
1155}
1156
1157static void alps_report_bare_ps2_packet(struct input_dev *dev,
1111 unsigned char packet[], 1158 unsigned char packet[],
1112 bool report_buttons) 1159 bool report_buttons)
1113{ 1160{
1114 struct alps_data *priv = psmouse->private;
1115 struct input_dev *dev2 = priv->dev2;
1116
1117 if (report_buttons) 1161 if (report_buttons)
1118 alps_report_buttons(psmouse, dev2, psmouse->dev, 1162 alps_report_buttons(dev, NULL,
1119 packet[0] & 1, packet[0] & 2, packet[0] & 4); 1163 packet[0] & 1, packet[0] & 2, packet[0] & 4);
1120 1164
1121 input_report_rel(dev2, REL_X, 1165 input_report_rel(dev, REL_X,
1122 packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0); 1166 packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0);
1123 input_report_rel(dev2, REL_Y, 1167 input_report_rel(dev, REL_Y,
1124 packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0); 1168 packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0);
1125 1169
1126 input_sync(dev2); 1170 input_sync(dev);
1127} 1171}
1128 1172
1129static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse) 1173static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
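
Note on the hunk above: alps_register_bare_ps2_mouse() runs from a workqueue because input_allocate_device() and input_register_device() may sleep, while the ALPS byte handler is called from the serio interrupt path. The patch defers through psmouse_queue_work(); the standalone sketch below shows the same deferral pattern with the generic workqueue API (names here are illustrative, not from the patch).

        #include <linux/workqueue.h>

        /* Defer sleeping setup out of atomic context: heavy_setup() runs
         * later in process context, where input_register_device() is legal. */
        static void heavy_setup(struct work_struct *work)
        {
                /* may sleep, take mutexes, allocate with GFP_KERNEL */
        }

        static DECLARE_DELAYED_WORK(setup_work, heavy_setup);

        static void seen_first_packet(void)     /* e.g. called in IRQ context */
        {
                schedule_delayed_work(&setup_work, 0);  /* never sleeps */
        }
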
@@ -1188,8 +1232,8 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
1188 * de-synchronization. 1232 * de-synchronization.
1189 */ 1233 */
1190 1234
1191 alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3], 1235 alps_report_bare_ps2_packet(priv->dev2,
1192 false); 1236 &psmouse->packet[3], false);
1193 1237
1194 /* 1238 /*
1195 * Continue with the standard ALPS protocol handling, 1239 * Continue with the standard ALPS protocol handling,
@@ -1245,9 +1289,18 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
1245 * properly we only do this if the device is fully synchronized. 1289 * properly we only do this if the device is fully synchronized.
1246 */ 1290 */
1247 if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) { 1291 if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
1292
1293 /* Register the dev3 mouse the first time we receive a bare PS/2 packet */
1294 if (unlikely(!priv->dev3))
1295 psmouse_queue_work(psmouse,
1296 &priv->dev3_register_work, 0);
1297
1248 if (psmouse->pktcnt == 3) { 1298 if (psmouse->pktcnt == 3) {
1249 alps_report_bare_ps2_packet(psmouse, psmouse->packet, 1299 /* Once the dev3 mouse device is registered, report data */
1250 true); 1300 if (likely(!IS_ERR_OR_NULL(priv->dev3)))
1301 alps_report_bare_ps2_packet(priv->dev3,
1302 psmouse->packet,
1303 true);
1251 return PSMOUSE_FULL_PACKET; 1304 return PSMOUSE_FULL_PACKET;
1252 } 1305 }
1253 return PSMOUSE_GOOD_DATA; 1306 return PSMOUSE_GOOD_DATA;
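
In the hunk above, priv->dev3 acts as a tri-state pointer: NULL means registration has not been attempted yet, an ERR_PTR() value means a previous attempt failed (the saved error code prevents retries), and only a valid pointer is reported through. A minimal sketch of reading that convention (the helper name is hypothetical):

        #include <linux/err.h>
        #include <linux/input.h>

        /* Hypothetical helper spelling out the dev3 pointer convention:
         *   NULL           - not yet attempted; caller queues the work
         *   ERR_PTR(-E...) - attempt failed; error kept so we don't retry
         *   valid pointer  - registered; safe for input_report_*() */
        static struct input_dev *dev3_if_ready(struct input_dev *dev3)
        {
                return IS_ERR_OR_NULL(dev3) ? NULL : dev3;
        }
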
@@ -1275,7 +1328,7 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
1275 psmouse->pktcnt - 1, 1328 psmouse->pktcnt - 1,
1276 psmouse->packet[psmouse->pktcnt - 1]); 1329 psmouse->packet[psmouse->pktcnt - 1]);
1277 1330
1278 if (priv->proto_version == ALPS_PROTO_V3 && 1331 if (priv->proto_version == ALPS_PROTO_V3_RUSHMORE &&
1279 psmouse->pktcnt == psmouse->pktsize) { 1332 psmouse->pktcnt == psmouse->pktsize) {
1280 /* 1333 /*
1281 * Some Dell boxes, such as Latitude E6440 or E7440 1334 * Some Dell boxes, such as Latitude E6440 or E7440
@@ -1780,7 +1833,7 @@ static int alps_setup_trackstick_v3(struct psmouse *psmouse, int reg_base)
1780 * all. 1833 * all.
1781 */ 1834 */
1782 if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_SETSCALE21, param)) { 1835 if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_SETSCALE21, param)) {
1783 psmouse_warn(psmouse, "trackstick E7 report failed\n"); 1836 psmouse_warn(psmouse, "Failed to initialize trackstick (E7 report failed)\n");
1784 ret = -ENODEV; 1837 ret = -ENODEV;
1785 } else { 1838 } else {
1786 psmouse_dbg(psmouse, "trackstick E7 report: %3ph\n", param); 1839 psmouse_dbg(psmouse, "trackstick E7 report: %3ph\n", param);
@@ -1945,8 +1998,6 @@ static int alps_hw_init_rushmore_v3(struct psmouse *psmouse)
1945 ALPS_REG_BASE_RUSHMORE); 1998 ALPS_REG_BASE_RUSHMORE);
1946 if (reg_val == -EIO) 1999 if (reg_val == -EIO)
1947 goto error; 2000 goto error;
1948 if (reg_val == -ENODEV)
1949 priv->flags &= ~ALPS_DUALPOINT;
1950 } 2001 }
1951 2002
1952 if (alps_enter_command_mode(psmouse) || 2003 if (alps_enter_command_mode(psmouse) ||
@@ -2162,11 +2213,18 @@ error:
2162 return ret; 2213 return ret;
2163} 2214}
2164 2215
2165static void alps_set_defaults(struct alps_data *priv) 2216static int alps_set_protocol(struct psmouse *psmouse,
2217 struct alps_data *priv,
2218 const struct alps_protocol_info *protocol)
2166{ 2219{
2167 priv->byte0 = 0x8f; 2220 psmouse->private = priv;
2168 priv->mask0 = 0x8f; 2221
2169 priv->flags = ALPS_DUALPOINT; 2222 setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse);
2223
2224 priv->proto_version = protocol->version;
2225 priv->byte0 = protocol->byte0;
2226 priv->mask0 = protocol->mask0;
2227 priv->flags = protocol->flags;
2170 2228
2171 priv->x_max = 2000; 2229 priv->x_max = 2000;
2172 priv->y_max = 1400; 2230 priv->y_max = 1400;
@@ -2182,6 +2240,7 @@ static void alps_set_defaults(struct alps_data *priv)
2182 priv->x_max = 1023; 2240 priv->x_max = 1023;
2183 priv->y_max = 767; 2241 priv->y_max = 767;
2184 break; 2242 break;
2243
2185 case ALPS_PROTO_V3: 2244 case ALPS_PROTO_V3:
2186 priv->hw_init = alps_hw_init_v3; 2245 priv->hw_init = alps_hw_init_v3;
2187 priv->process_packet = alps_process_packet_v3; 2246 priv->process_packet = alps_process_packet_v3;
@@ -2190,6 +2249,23 @@ static void alps_set_defaults(struct alps_data *priv)
2190 priv->nibble_commands = alps_v3_nibble_commands; 2249 priv->nibble_commands = alps_v3_nibble_commands;
2191 priv->addr_command = PSMOUSE_CMD_RESET_WRAP; 2250 priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
2192 break; 2251 break;
2252
2253 case ALPS_PROTO_V3_RUSHMORE:
2254 priv->hw_init = alps_hw_init_rushmore_v3;
2255 priv->process_packet = alps_process_packet_v3;
2256 priv->set_abs_params = alps_set_abs_params_mt;
2257 priv->decode_fields = alps_decode_rushmore;
2258 priv->nibble_commands = alps_v3_nibble_commands;
2259 priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
2260 priv->x_bits = 16;
2261 priv->y_bits = 12;
2262
2263 if (alps_probe_trackstick_v3(psmouse,
2264 ALPS_REG_BASE_RUSHMORE) < 0)
2265 priv->flags &= ~ALPS_DUALPOINT;
2266
2267 break;
2268
2193 case ALPS_PROTO_V4: 2269 case ALPS_PROTO_V4:
2194 priv->hw_init = alps_hw_init_v4; 2270 priv->hw_init = alps_hw_init_v4;
2195 priv->process_packet = alps_process_packet_v4; 2271 priv->process_packet = alps_process_packet_v4;
@@ -2197,6 +2273,7 @@ static void alps_set_defaults(struct alps_data *priv)
2197 priv->nibble_commands = alps_v4_nibble_commands; 2273 priv->nibble_commands = alps_v4_nibble_commands;
2198 priv->addr_command = PSMOUSE_CMD_DISABLE; 2274 priv->addr_command = PSMOUSE_CMD_DISABLE;
2199 break; 2275 break;
2276
2200 case ALPS_PROTO_V5: 2277 case ALPS_PROTO_V5:
2201 priv->hw_init = alps_hw_init_dolphin_v1; 2278 priv->hw_init = alps_hw_init_dolphin_v1;
2202 priv->process_packet = alps_process_touchpad_packet_v3_v5; 2279 priv->process_packet = alps_process_touchpad_packet_v3_v5;
@@ -2204,14 +2281,12 @@ static void alps_set_defaults(struct alps_data *priv)
2204 priv->set_abs_params = alps_set_abs_params_mt; 2281 priv->set_abs_params = alps_set_abs_params_mt;
2205 priv->nibble_commands = alps_v3_nibble_commands; 2282 priv->nibble_commands = alps_v3_nibble_commands;
2206 priv->addr_command = PSMOUSE_CMD_RESET_WRAP; 2283 priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
2207 priv->byte0 = 0xc8;
2208 priv->mask0 = 0xd8;
2209 priv->flags = 0;
2210 priv->x_max = 1360; 2284 priv->x_max = 1360;
2211 priv->y_max = 660; 2285 priv->y_max = 660;
2212 priv->x_bits = 23; 2286 priv->x_bits = 23;
2213 priv->y_bits = 12; 2287 priv->y_bits = 12;
2214 break; 2288 break;
2289
2215 case ALPS_PROTO_V6: 2290 case ALPS_PROTO_V6:
2216 priv->hw_init = alps_hw_init_v6; 2291 priv->hw_init = alps_hw_init_v6;
2217 priv->process_packet = alps_process_packet_v6; 2292 priv->process_packet = alps_process_packet_v6;
@@ -2220,6 +2295,7 @@ static void alps_set_defaults(struct alps_data *priv)
2220 priv->x_max = 2047; 2295 priv->x_max = 2047;
2221 priv->y_max = 1535; 2296 priv->y_max = 1535;
2222 break; 2297 break;
2298
2223 case ALPS_PROTO_V7: 2299 case ALPS_PROTO_V7:
2224 priv->hw_init = alps_hw_init_v7; 2300 priv->hw_init = alps_hw_init_v7;
2225 priv->process_packet = alps_process_packet_v7; 2301 priv->process_packet = alps_process_packet_v7;
@@ -2227,19 +2303,21 @@ static void alps_set_defaults(struct alps_data *priv)
2227 priv->set_abs_params = alps_set_abs_params_mt; 2303 priv->set_abs_params = alps_set_abs_params_mt;
2228 priv->nibble_commands = alps_v3_nibble_commands; 2304 priv->nibble_commands = alps_v3_nibble_commands;
2229 priv->addr_command = PSMOUSE_CMD_RESET_WRAP; 2305 priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
2230 priv->x_max = 0xfff; 2306
2231 priv->y_max = 0x7ff; 2307 if (alps_dolphin_get_device_area(psmouse, priv))
2232 priv->byte0 = 0x48; 2308 return -EIO;
2233 priv->mask0 = 0x48;
2234 2309
2235 if (priv->fw_ver[1] != 0xba) 2310 if (priv->fw_ver[1] != 0xba)
2236 priv->flags |= ALPS_BUTTONPAD; 2311 priv->flags |= ALPS_BUTTONPAD;
2312
2237 break; 2313 break;
2238 } 2314 }
2315
2316 return 0;
2239} 2317}
2240 2318
2241static int alps_match_table(struct psmouse *psmouse, struct alps_data *priv, 2319static const struct alps_protocol_info *alps_match_table(unsigned char *e7,
2242 unsigned char *e7, unsigned char *ec) 2320 unsigned char *ec)
2243{ 2321{
2244 const struct alps_model_info *model; 2322 const struct alps_model_info *model;
2245 int i; 2323 int i;
@@ -2251,23 +2329,18 @@ static int alps_match_table(struct psmouse *psmouse, struct alps_data *priv,
2251 (!model->command_mode_resp || 2329 (!model->command_mode_resp ||
2252 model->command_mode_resp == ec[2])) { 2330 model->command_mode_resp == ec[2])) {
2253 2331
2254 priv->proto_version = model->proto_version; 2332 return &model->protocol_info;
2255 alps_set_defaults(priv);
2256
2257 priv->flags = model->flags;
2258 priv->byte0 = model->byte0;
2259 priv->mask0 = model->mask0;
2260
2261 return 0;
2262 } 2333 }
2263 } 2334 }
2264 2335
2265 return -EINVAL; 2336 return NULL;
2266} 2337}
2267 2338
2268static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) 2339static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
2269{ 2340{
2341 const struct alps_protocol_info *protocol;
2270 unsigned char e6[4], e7[4], ec[4]; 2342 unsigned char e6[4], e7[4], ec[4];
2343 int error;
2271 2344
2272 /* 2345 /*
2273 * First try "E6 report". 2346 * First try "E6 report".
@@ -2293,54 +2366,35 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
2293 alps_exit_command_mode(psmouse)) 2366 alps_exit_command_mode(psmouse))
2294 return -EIO; 2367 return -EIO;
2295 2368
2296 /* Save the Firmware version */ 2369 protocol = alps_match_table(e7, ec);
2297 memcpy(priv->fw_ver, ec, 3); 2370 if (!protocol) {
2298 2371 if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 &&
2299 if (alps_match_table(psmouse, priv, e7, ec) == 0) { 2372 ec[0] == 0x73 && (ec[1] == 0x01 || ec[1] == 0x02)) {
2300 return 0; 2373 protocol = &alps_v5_protocol_data;
2301 } else if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 && 2374 } else if (ec[0] == 0x88 &&
2302 ec[0] == 0x73 && (ec[1] == 0x01 || ec[1] == 0x02)) { 2375 ((ec[1] & 0xf0) == 0xb0 || (ec[1] & 0xf0) == 0xc0)) {
2303 priv->proto_version = ALPS_PROTO_V5; 2376 protocol = &alps_v7_protocol_data;
2304 alps_set_defaults(priv); 2377 } else if (ec[0] == 0x88 && ec[1] == 0x08) {
2305 if (alps_dolphin_get_device_area(psmouse, priv)) 2378 protocol = &alps_v3_rushmore_data;
2306 return -EIO; 2379 } else if (ec[0] == 0x88 && ec[1] == 0x07 &&
2307 else 2380 ec[2] >= 0x90 && ec[2] <= 0x9d) {
2308 return 0; 2381 protocol = &alps_v3_protocol_data;
2309 } else if (ec[0] == 0x88 && 2382 } else {
2310 ((ec[1] & 0xf0) == 0xb0 || (ec[1] & 0xf0) == 0xc0)) { 2383 psmouse_dbg(psmouse,
2311 priv->proto_version = ALPS_PROTO_V7; 2384 "Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec);
2312 alps_set_defaults(priv); 2385 return -EINVAL;
2313 2386 }
2314 return 0;
2315 } else if (ec[0] == 0x88 && ec[1] == 0x08) {
2316 priv->proto_version = ALPS_PROTO_V3;
2317 alps_set_defaults(priv);
2318
2319 priv->hw_init = alps_hw_init_rushmore_v3;
2320 priv->decode_fields = alps_decode_rushmore;
2321 priv->x_bits = 16;
2322 priv->y_bits = 12;
2323 priv->flags |= ALPS_IS_RUSHMORE;
2324
2325 /* hack to make addr_command, nibble_command available */
2326 psmouse->private = priv;
2327
2328 if (alps_probe_trackstick_v3(psmouse, ALPS_REG_BASE_RUSHMORE))
2329 priv->flags &= ~ALPS_DUALPOINT;
2330
2331 return 0;
2332 } else if (ec[0] == 0x88 && ec[1] == 0x07 &&
2333 ec[2] >= 0x90 && ec[2] <= 0x9d) {
2334 priv->proto_version = ALPS_PROTO_V3;
2335 alps_set_defaults(priv);
2336
2337 return 0;
2338 } 2387 }
2339 2388
2340 psmouse_dbg(psmouse, 2389 if (priv) {
2341 "Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec); 2390 /* Save the Firmware version */
2391 memcpy(priv->fw_ver, ec, 3);
2392 error = alps_set_protocol(psmouse, priv, protocol);
2393 if (error)
2394 return error;
2395 }
2342 2396
2343 return -EINVAL; 2397 return 0;
2344} 2398}
2345 2399
2346static int alps_reconnect(struct psmouse *psmouse) 2400static int alps_reconnect(struct psmouse *psmouse)
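
The identification path is now table-first: alps_match_table() returns a constant struct alps_protocol_info, the hard-coded E7/EC signature checks act only as fallbacks, and alps_set_protocol() applies the result. Passing a NULL priv turns alps_identify() into a pure probe. A condensed sketch of that flow (e7/ec acquisition elided; guess_from_ec() is an illustrative stand-in for the fallback chain):

        static int identify_flow_sketch(struct psmouse *psmouse,
                                        struct alps_data *priv,
                                        unsigned char *e7, unsigned char *ec)
        {
                const struct alps_protocol_info *protocol;

                protocol = alps_match_table(e7, ec);      /* table lookup first */
                if (!protocol)
                        protocol = guess_from_ec(e7, ec); /* fallbacks (illustrative name) */
                if (!protocol)
                        return -EINVAL;                   /* likely not an ALPS device */

                if (priv) {                               /* NULL priv: probe only */
                        memcpy(priv->fw_ver, ec, 3);      /* save firmware version */
                        return alps_set_protocol(psmouse, priv, protocol);
                }
                return 0;
        }
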
@@ -2361,7 +2415,10 @@ static void alps_disconnect(struct psmouse *psmouse)
2361 2415
2362 psmouse_reset(psmouse); 2416 psmouse_reset(psmouse);
2363 del_timer_sync(&priv->timer); 2417 del_timer_sync(&priv->timer);
2364 input_unregister_device(priv->dev2); 2418 if (priv->dev2)
2419 input_unregister_device(priv->dev2);
2420 if (!IS_ERR_OR_NULL(priv->dev3))
2421 input_unregister_device(priv->dev3);
2365 kfree(priv); 2422 kfree(priv);
2366} 2423}
2367 2424
@@ -2394,25 +2451,12 @@ static void alps_set_abs_params_mt(struct alps_data *priv,
2394 2451
2395int alps_init(struct psmouse *psmouse) 2452int alps_init(struct psmouse *psmouse)
2396{ 2453{
2397 struct alps_data *priv; 2454 struct alps_data *priv = psmouse->private;
2398 struct input_dev *dev1 = psmouse->dev, *dev2; 2455 struct input_dev *dev1 = psmouse->dev;
2399 2456 int error;
2400 priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL);
2401 dev2 = input_allocate_device();
2402 if (!priv || !dev2)
2403 goto init_fail;
2404
2405 priv->dev2 = dev2;
2406 setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse);
2407
2408 psmouse->private = priv;
2409
2410 psmouse_reset(psmouse);
2411
2412 if (alps_identify(psmouse, priv) < 0)
2413 goto init_fail;
2414 2457
2415 if (priv->hw_init(psmouse)) 2458 error = priv->hw_init(psmouse);
2459 if (error)
2416 goto init_fail; 2460 goto init_fail;
2417 2461
2418 /* 2462 /*
@@ -2462,36 +2506,57 @@ int alps_init(struct psmouse *psmouse)
2462 } 2506 }
2463 2507
2464 if (priv->flags & ALPS_DUALPOINT) { 2508 if (priv->flags & ALPS_DUALPOINT) {
2509 struct input_dev *dev2;
2510
2511 dev2 = input_allocate_device();
2512 if (!dev2) {
2513 psmouse_err(psmouse,
2514 "failed to allocate trackstick device\n");
2515 error = -ENOMEM;
2516 goto init_fail;
2517 }
2518
2519 snprintf(priv->phys2, sizeof(priv->phys2), "%s/input1",
2520 psmouse->ps2dev.serio->phys);
2521 dev2->phys = priv->phys2;
2522
2465 /* 2523 /*
2466 * format of input device name is: "protocol vendor name" 2524 * format of input device name is: "protocol vendor name"
2467 * see function psmouse_switch_protocol() in psmouse-base.c 2525 * see function psmouse_switch_protocol() in psmouse-base.c
2468 */ 2526 */
2469 dev2->name = "AlpsPS/2 ALPS DualPoint Stick"; 2527 dev2->name = "AlpsPS/2 ALPS DualPoint Stick";
2528
2529 dev2->id.bustype = BUS_I8042;
2530 dev2->id.vendor = 0x0002;
2470 dev2->id.product = PSMOUSE_ALPS; 2531 dev2->id.product = PSMOUSE_ALPS;
2471 dev2->id.version = priv->proto_version; 2532 dev2->id.version = priv->proto_version;
2472 } else { 2533 dev2->dev.parent = &psmouse->ps2dev.serio->dev;
2473 dev2->name = "PS/2 ALPS Mouse";
2474 dev2->id.product = PSMOUSE_PS2;
2475 dev2->id.version = 0x0000;
2476 }
2477 2534
2478 snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys); 2535 input_set_capability(dev2, EV_REL, REL_X);
2479 dev2->phys = priv->phys; 2536 input_set_capability(dev2, EV_REL, REL_Y);
2480 dev2->id.bustype = BUS_I8042; 2537 input_set_capability(dev2, EV_KEY, BTN_LEFT);
2481 dev2->id.vendor = 0x0002; 2538 input_set_capability(dev2, EV_KEY, BTN_RIGHT);
2482 dev2->dev.parent = &psmouse->ps2dev.serio->dev; 2539 input_set_capability(dev2, EV_KEY, BTN_MIDDLE);
2483 2540
2484 dev2->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); 2541 __set_bit(INPUT_PROP_POINTER, dev2->propbit);
2485 dev2->relbit[BIT_WORD(REL_X)] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
2486 dev2->keybit[BIT_WORD(BTN_LEFT)] =
2487 BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT);
2488
2489 __set_bit(INPUT_PROP_POINTER, dev2->propbit);
2490 if (priv->flags & ALPS_DUALPOINT)
2491 __set_bit(INPUT_PROP_POINTING_STICK, dev2->propbit); 2542 __set_bit(INPUT_PROP_POINTING_STICK, dev2->propbit);
2492 2543
2493 if (input_register_device(priv->dev2)) 2544 error = input_register_device(dev2);
2494 goto init_fail; 2545 if (error) {
2546 psmouse_err(psmouse,
2547 "failed to register trackstick device: %d\n",
2548 error);
2549 input_free_device(dev2);
2550 goto init_fail;
2551 }
2552
2553 priv->dev2 = dev2;
2554 }
2555
2556 priv->psmouse = psmouse;
2557
2558 INIT_DELAYED_WORK(&priv->dev3_register_work,
2559 alps_register_bare_ps2_mouse);
2495 2560
2496 psmouse->protocol_handler = alps_process_byte; 2561 psmouse->protocol_handler = alps_process_byte;
2497 psmouse->poll = alps_poll; 2562 psmouse->poll = alps_poll;
@@ -2509,25 +2574,56 @@ int alps_init(struct psmouse *psmouse)
2509 2574
2510init_fail: 2575init_fail:
2511 psmouse_reset(psmouse); 2576 psmouse_reset(psmouse);
2512 input_free_device(dev2); 2577 /*
2513 kfree(priv); 2578 * Even though we did not allocate psmouse->private ourselves,
2579 * we free it here.
2580 */
2581 kfree(psmouse->private);
2514 psmouse->private = NULL; 2582 psmouse->private = NULL;
2515 return -1; 2583 return error;
2516} 2584}
2517 2585
2518int alps_detect(struct psmouse *psmouse, bool set_properties) 2586int alps_detect(struct psmouse *psmouse, bool set_properties)
2519{ 2587{
2520 struct alps_data dummy; 2588 struct alps_data *priv;
2589 int error;
2521 2590
2522 if (alps_identify(psmouse, &dummy) < 0) 2591 error = alps_identify(psmouse, NULL);
2523 return -1; 2592 if (error)
2593 return error;
2594
2595 /*
2596 * Reset the device to make sure it is fully operational:
2597 * on some laptops, like certain Dell Latitudes, we may
2598 * fail to properly detect presence of trackstick if device
2599 * has not been reset.
2600 */
2601 psmouse_reset(psmouse);
2602
2603 priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL);
2604 if (!priv)
2605 return -ENOMEM;
2606
2607 error = alps_identify(psmouse, priv);
2608 if (error)
2609 return error;
2524 2610
2525 if (set_properties) { 2611 if (set_properties) {
2526 psmouse->vendor = "ALPS"; 2612 psmouse->vendor = "ALPS";
2527 psmouse->name = dummy.flags & ALPS_DUALPOINT ? 2613 psmouse->name = priv->flags & ALPS_DUALPOINT ?
2528 "DualPoint TouchPad" : "GlidePoint"; 2614 "DualPoint TouchPad" : "GlidePoint";
2529 psmouse->model = dummy.proto_version << 8; 2615 psmouse->model = priv->proto_version;
2616 } else {
2617 /*
2618 * Destroy alps_data structure we allocated earlier since
2619 * this was just a "trial run". Otherwise we'll keep it
2620 * to be used by alps_init() which has to be called if
2621 * we succeed and set_properties is true.
2622 */
2623 kfree(priv);
2624 psmouse->private = NULL;
2530 } 2625 }
2626
2531 return 0; 2627 return 0;
2532} 2628}
2533 2629
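
With this split, alps_detect() owns allocation and identification while alps_init() only finishes what detect left in psmouse->private. One rough edge, hedged: the early return after the second alps_identify() call appears to leave priv unfreed on failure; later kernels add a kfree() on that path. A simplified view of the new lifecycle:

        /* Lifecycle sketch after the detect/init split (simplified):
         *
         *   alps_detect(psmouse, true)
         *       alps_identify(psmouse, NULL)  - cheap probe, no allocation
         *       psmouse_reset(psmouse)        - some Dell Latitudes need this
         *                                       before trackstick detection
         *       priv = kzalloc(...)
         *       alps_identify(psmouse, priv)  - fills priv via alps_set_protocol(),
         *                                       which sets psmouse->private
         *
         *   alps_init(psmouse)
         *       priv = psmouse->private       - reuses detect's result
         *       priv->hw_init(psmouse)        - then registers input devices
         */
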
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index 66240b47819a..02513c0502fc 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -14,13 +14,14 @@
14 14
15#include <linux/input/mt.h> 15#include <linux/input/mt.h>
16 16
17#define ALPS_PROTO_V1 1 17#define ALPS_PROTO_V1 0x100
18#define ALPS_PROTO_V2 2 18#define ALPS_PROTO_V2 0x200
19#define ALPS_PROTO_V3 3 19#define ALPS_PROTO_V3 0x300
20#define ALPS_PROTO_V4 4 20#define ALPS_PROTO_V3_RUSHMORE 0x310
21#define ALPS_PROTO_V5 5 21#define ALPS_PROTO_V4 0x400
22#define ALPS_PROTO_V6 6 22#define ALPS_PROTO_V5 0x500
23#define ALPS_PROTO_V7 7 /* t3btl t4s */ 23#define ALPS_PROTO_V6 0x600
24#define ALPS_PROTO_V7 0x700 /* t3btl t4s */
24 25
25#define MAX_TOUCHES 2 26#define MAX_TOUCHES 2
26 27
@@ -46,29 +47,37 @@ enum V7_PACKET_ID {
46}; 47};
47 48
48/** 49/**
50 * struct alps_protocol_info - information about protocol used by a device
51 * @version: Indicates V1/V2/V3/...
52 * @byte0: Helps figure out whether a position report packet matches the
53 * known format for this model. The first byte of the report, ANDed with
54 * mask0, should match byte0.
55 * @mask0: The mask used to check the first byte of the report.
56 * @flags: Additional device capabilities (passthrough port, trackstick, etc.).
57 */
58struct alps_protocol_info {
59 u16 version;
60 u8 byte0, mask0;
61 unsigned int flags;
62};
63
64/**
49 * struct alps_model_info - touchpad ID table 65 * struct alps_model_info - touchpad ID table
50 * @signature: E7 response string to match. 66 * @signature: E7 response string to match.
51 * @command_mode_resp: For V3/V4 touchpads, the final byte of the EC response 67 * @command_mode_resp: For V3/V4 touchpads, the final byte of the EC response
52 * (aka command mode response) identifies the firmware minor version. This 68 * (aka command mode response) identifies the firmware minor version. This
53 * can be used to distinguish different hardware models which are not 69 * can be used to distinguish different hardware models which are not
54 * uniquely identifiable through their E7 responses. 70 * uniquely identifiable through their E7 responses.
 55 * @proto_version: Indicates V1/V2/V3/... 71 * @protocol_info: information about the protocol used by the device.
56 * @byte0: Helps figure out whether a position report packet matches the
57 * known format for this model. The first byte of the report, ANDed with
58 * mask0, should match byte0.
59 * @mask0: The mask used to check the first byte of the report.
60 * @flags: Additional device capabilities (passthrough port, trackstick, etc.).
61 * 72 *
62 * Many (but not all) ALPS touchpads can be identified by looking at the 73 * Many (but not all) ALPS touchpads can be identified by looking at the
63 * values returned in the "E7 report" and/or the "EC report." This table 74 * values returned in the "E7 report" and/or the "EC report." This table
64 * lists a number of such touchpads. 75 * lists a number of such touchpads.
65 */ 76 */
66struct alps_model_info { 77struct alps_model_info {
67 unsigned char signature[3]; 78 u8 signature[3];
68 unsigned char command_mode_resp; 79 u8 command_mode_resp;
69 unsigned char proto_version; 80 struct alps_protocol_info protocol_info;
70 unsigned char byte0, mask0;
71 int flags;
72}; 81};
73 82
74/** 83/**
@@ -132,8 +141,12 @@ struct alps_fields {
132 141
133/** 142/**
134 * struct alps_data - private data structure for the ALPS driver 143 * struct alps_data - private data structure for the ALPS driver
135 * @dev2: "Relative" device used to report trackstick or mouse activity. 144 * @psmouse: Pointer to parent psmouse device
136 * @phys: Physical path for the relative device. 145 * @dev2: Trackstick device (can be NULL).
146 * @dev3: Generic PS/2 mouse (can be NULL; registration is delayed).
147 * @phys2: Physical path for the trackstick device.
148 * @phys3: Physical path for the generic PS/2 mouse.
149 * @dev3_register_work: Delayed work for registering PS/2 mouse.
137 * @nibble_commands: Command mapping used for touchpad register accesses. 150 * @nibble_commands: Command mapping used for touchpad register accesses.
138 * @addr_command: Command used to tell the touchpad that a register address 151 * @addr_command: Command used to tell the touchpad that a register address
139 * follows. 152 * follows.
@@ -160,15 +173,19 @@ struct alps_fields {
160 * @timer: Timer for flushing out the final report packet in the stream. 173 * @timer: Timer for flushing out the final report packet in the stream.
161 */ 174 */
162struct alps_data { 175struct alps_data {
176 struct psmouse *psmouse;
163 struct input_dev *dev2; 177 struct input_dev *dev2;
164 char phys[32]; 178 struct input_dev *dev3;
179 char phys2[32];
180 char phys3[32];
181 struct delayed_work dev3_register_work;
165 182
166 /* these are autodetected when the device is identified */ 183 /* these are autodetected when the device is identified */
167 const struct alps_nibble_commands *nibble_commands; 184 const struct alps_nibble_commands *nibble_commands;
168 int addr_command; 185 int addr_command;
169 unsigned char proto_version; 186 u16 proto_version;
170 unsigned char byte0, mask0; 187 u8 byte0, mask0;
171 unsigned char fw_ver[3]; 188 u8 fw_ver[3];
172 int flags; 189 int flags;
173 int x_max; 190 int x_max;
174 int y_max; 191 int y_max;
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
index 9118a1861a45..28dcfc822bf6 100644
--- a/drivers/input/mouse/cypress_ps2.c
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -710,8 +710,3 @@ err_exit:
710 710
711 return -1; 711 return -1;
712} 712}
713
714bool cypress_supported(void)
715{
716 return true;
717}
diff --git a/drivers/input/mouse/cypress_ps2.h b/drivers/input/mouse/cypress_ps2.h
index 4720f21d2d70..81f68aaed7c8 100644
--- a/drivers/input/mouse/cypress_ps2.h
+++ b/drivers/input/mouse/cypress_ps2.h
@@ -172,7 +172,6 @@ struct cytp_data {
172#ifdef CONFIG_MOUSE_PS2_CYPRESS 172#ifdef CONFIG_MOUSE_PS2_CYPRESS
173int cypress_detect(struct psmouse *psmouse, bool set_properties); 173int cypress_detect(struct psmouse *psmouse, bool set_properties);
174int cypress_init(struct psmouse *psmouse); 174int cypress_init(struct psmouse *psmouse);
175bool cypress_supported(void);
176#else 175#else
177inline int cypress_detect(struct psmouse *psmouse, bool set_properties) 176inline int cypress_detect(struct psmouse *psmouse, bool set_properties)
178{ 177{
@@ -182,10 +181,6 @@ inline int cypress_init(struct psmouse *psmouse)
182{ 181{
183 return -ENOSYS; 182 return -ENOSYS;
184} 183}
185inline bool cypress_supported(void)
186{
187 return 0;
188}
189#endif /* CONFIG_MOUSE_PS2_CYPRESS */ 184#endif /* CONFIG_MOUSE_PS2_CYPRESS */
190 185
191#endif /* _CYPRESS_PS2_H */ 186#endif /* _CYPRESS_PS2_H */
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c
index fca38ba63bbe..757f78a94aec 100644
--- a/drivers/input/mouse/focaltech.c
+++ b/drivers/input/mouse/focaltech.c
@@ -424,11 +424,6 @@ fail:
424 return error; 424 return error;
425} 425}
426 426
427bool focaltech_supported(void)
428{
429 return true;
430}
431
432#else /* CONFIG_MOUSE_PS2_FOCALTECH */ 427#else /* CONFIG_MOUSE_PS2_FOCALTECH */
433 428
434int focaltech_init(struct psmouse *psmouse) 429int focaltech_init(struct psmouse *psmouse)
@@ -438,9 +433,4 @@ int focaltech_init(struct psmouse *psmouse)
438 return 0; 433 return 0;
439} 434}
440 435
441bool focaltech_supported(void)
442{
443 return false;
444}
445
446#endif /* CONFIG_MOUSE_PS2_FOCALTECH */ 436#endif /* CONFIG_MOUSE_PS2_FOCALTECH */
diff --git a/drivers/input/mouse/focaltech.h b/drivers/input/mouse/focaltech.h
index 71870a9b548a..ca61ebff373e 100644
--- a/drivers/input/mouse/focaltech.h
+++ b/drivers/input/mouse/focaltech.h
@@ -19,6 +19,5 @@
19 19
20int focaltech_detect(struct psmouse *psmouse, bool set_properties); 20int focaltech_detect(struct psmouse *psmouse, bool set_properties);
21int focaltech_init(struct psmouse *psmouse); 21int focaltech_init(struct psmouse *psmouse);
22bool focaltech_supported(void);
23 22
24#endif 23#endif
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 68469feda470..4ccd01d7a48d 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -727,7 +727,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
727 if (psmouse_do_detect(focaltech_detect, psmouse, set_properties) == 0) { 727 if (psmouse_do_detect(focaltech_detect, psmouse, set_properties) == 0) {
728 if (max_proto > PSMOUSE_IMEX) { 728 if (max_proto > PSMOUSE_IMEX) {
729 if (!set_properties || focaltech_init(psmouse) == 0) { 729 if (!set_properties || focaltech_init(psmouse) == 0) {
730 if (focaltech_supported()) 730 if (IS_ENABLED(CONFIG_MOUSE_PS2_FOCALTECH))
731 return PSMOUSE_FOCALTECH; 731 return PSMOUSE_FOCALTECH;
732 /* 732 /*
733 * Note that we need to also restrict 733 * Note that we need to also restrict
@@ -776,7 +776,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
776 * Try activating protocol, but check if support is enabled first, since 776 * Try activating protocol, but check if support is enabled first, since
777 * we try detecting Synaptics even when protocol is disabled. 777 * we try detecting Synaptics even when protocol is disabled.
778 */ 778 */
779 if (synaptics_supported() && 779 if (IS_ENABLED(CONFIG_MOUSE_PS2_SYNAPTICS) &&
780 (!set_properties || synaptics_init(psmouse) == 0)) { 780 (!set_properties || synaptics_init(psmouse) == 0)) {
781 return PSMOUSE_SYNAPTICS; 781 return PSMOUSE_SYNAPTICS;
782 } 782 }
@@ -801,7 +801,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
801 */ 801 */
802 if (max_proto > PSMOUSE_IMEX && 802 if (max_proto > PSMOUSE_IMEX &&
803 cypress_detect(psmouse, set_properties) == 0) { 803 cypress_detect(psmouse, set_properties) == 0) {
804 if (cypress_supported()) { 804 if (IS_ENABLED(CONFIG_MOUSE_PS2_CYPRESS)) {
805 if (cypress_init(psmouse) == 0) 805 if (cypress_init(psmouse) == 0)
806 return PSMOUSE_CYPRESS; 806 return PSMOUSE_CYPRESS;
807 807
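
IS_ENABLED(CONFIG_FOO), from include/linux/kconfig.h, evaluates to 1 at compile time when the option is built in or modular, which is why the per-driver *_supported() stubs can go: the untaken branch is discarded by the compiler yet still type-checked. A minimal illustration (the function name is made up for this note):

        #include <linux/kconfig.h>

        /* IS_ENABLED() folds to a constant 0 or 1 at compile time, so the
         * dead branch is eliminated while remaining visible to the compiler. */
        static int synaptics_possible(void)
        {
                return IS_ENABLED(CONFIG_MOUSE_PS2_SYNAPTICS);
        }
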
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 7e705ee90b86..f2cceb6493a0 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1454,11 +1454,6 @@ int synaptics_init_relative(struct psmouse *psmouse)
1454 return __synaptics_init(psmouse, false); 1454 return __synaptics_init(psmouse, false);
1455} 1455}
1456 1456
1457bool synaptics_supported(void)
1458{
1459 return true;
1460}
1461
1462#else /* CONFIG_MOUSE_PS2_SYNAPTICS */ 1457#else /* CONFIG_MOUSE_PS2_SYNAPTICS */
1463 1458
1464void __init synaptics_module_init(void) 1459void __init synaptics_module_init(void)
@@ -1470,9 +1465,4 @@ int synaptics_init(struct psmouse *psmouse)
1470 return -ENOSYS; 1465 return -ENOSYS;
1471} 1466}
1472 1467
1473bool synaptics_supported(void)
1474{
1475 return false;
1476}
1477
1478#endif /* CONFIG_MOUSE_PS2_SYNAPTICS */ 1468#endif /* CONFIG_MOUSE_PS2_SYNAPTICS */
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 6faf9bb7c117..aedc3299b14e 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -175,6 +175,5 @@ int synaptics_detect(struct psmouse *psmouse, bool set_properties);
175int synaptics_init(struct psmouse *psmouse); 175int synaptics_init(struct psmouse *psmouse);
176int synaptics_init_relative(struct psmouse *psmouse); 176int synaptics_init_relative(struct psmouse *psmouse);
177void synaptics_reset(struct psmouse *psmouse); 177void synaptics_reset(struct psmouse *psmouse);
178bool synaptics_supported(void);
179 178
180#endif /* _SYNAPTICS_H */ 179#endif /* _SYNAPTICS_H */
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 1daa7ca04577..9acdc080e7ec 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -192,14 +192,6 @@ static bool gic_local_irq_is_routable(int intr)
192 } 192 }
193} 193}
194 194
195unsigned int gic_get_timer_pending(void)
196{
197 unsigned int vpe_pending;
198
199 vpe_pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
200 return vpe_pending & GIC_VPE_PEND_TIMER_MSK;
201}
202
203static void gic_bind_eic_interrupt(int irq, int set) 195static void gic_bind_eic_interrupt(int irq, int set)
204{ 196{
205 /* Convert irq vector # to hw int # */ 197 /* Convert irq vector # to hw int # */
diff --git a/drivers/isdn/hardware/mISDN/Kconfig b/drivers/isdn/hardware/mISDN/Kconfig
index b8611e3e5e74..09df54fc1fef 100644
--- a/drivers/isdn/hardware/mISDN/Kconfig
+++ b/drivers/isdn/hardware/mISDN/Kconfig
@@ -24,7 +24,7 @@ config MISDN_HFCMULTI
24 * HFC-E1 (E1 interface for 2Mbit ISDN) 24 * HFC-E1 (E1 interface for 2Mbit ISDN)
25 25
26config MISDN_HFCMULTI_8xx 26config MISDN_HFCMULTI_8xx
27 boolean "Support for XHFC embedded board in HFC multiport driver" 27 bool "Support for XHFC embedded board in HFC multiport driver"
28 depends on MISDN 28 depends on MISDN
29 depends on MISDN_HFCMULTI 29 depends on MISDN_HFCMULTI
30 depends on 8xx 30 depends on 8xx
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 3c92780bda09..ff48da61c94c 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1755,7 +1755,7 @@ init_card(struct hfc_pci *hc)
1755 enable_hwirq(hc); 1755 enable_hwirq(hc);
1756 spin_unlock_irqrestore(&hc->lock, flags); 1756 spin_unlock_irqrestore(&hc->lock, flags);
1757 /* Timeout 80ms */ 1757 /* Timeout 80ms */
1758 current->state = TASK_UNINTERRUPTIBLE; 1758 set_current_state(TASK_UNINTERRUPTIBLE);
1759 schedule_timeout((80 * HZ) / 1000); 1759 schedule_timeout((80 * HZ) / 1000);
1760 printk(KERN_INFO "HFC PCI: IRQ %d count %d\n", 1760 printk(KERN_INFO "HFC PCI: IRQ %d count %d\n",
1761 hc->irq, hc->irqcnt); 1761 hc->irq, hc->irqcnt);
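
Assigning current->state directly is racy: set_current_state(), the form this hunk switches to, issues a memory barrier so the state write cannot be reordered against a subsequent condition check or wakeup. The canonical timed-sleep pattern, for reference:

        #include <linux/sched.h>

        /* Barrier-protected state change followed by a timed sleep. */
        static void wait_about_80ms(void)
        {
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout((80 * HZ) / 1000);
        }

For a plain delay like this one, msleep(80) from <linux/delay.h> would express the same thing even more directly.
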
diff --git a/drivers/lguest/Makefile b/drivers/lguest/Makefile
index c4197503900e..16f52ee73994 100644
--- a/drivers/lguest/Makefile
+++ b/drivers/lguest/Makefile
@@ -1,6 +1,3 @@
1# Guest requires the device configuration and probing code.
2obj-$(CONFIG_LGUEST_GUEST) += lguest_device.o
3
4# Host requires the other files, which can be a module. 1# Host requires the other files, which can be a module.
5obj-$(CONFIG_LGUEST) += lg.o 2obj-$(CONFIG_LGUEST) += lg.o
6lg-y = core.o hypercalls.o page_tables.o interrupts_and_traps.o \ 3lg-y = core.o hypercalls.o page_tables.o interrupts_and_traps.o \
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 6590558d1d31..7dc93aa004c8 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -208,6 +208,14 @@ void __lgwrite(struct lg_cpu *cpu, unsigned long addr, const void *b,
208 */ 208 */
209int run_guest(struct lg_cpu *cpu, unsigned long __user *user) 209int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
210{ 210{
211 /* If the launcher asked for a register with LHREQ_GETREG */
212 if (cpu->reg_read) {
213 if (put_user(*cpu->reg_read, user))
214 return -EFAULT;
215 cpu->reg_read = NULL;
216 return sizeof(*cpu->reg_read);
217 }
218
211 /* We stop running once the Guest is dead. */ 219 /* We stop running once the Guest is dead. */
212 while (!cpu->lg->dead) { 220 while (!cpu->lg->dead) {
213 unsigned int irq; 221 unsigned int irq;
@@ -217,21 +225,12 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
217 if (cpu->hcall) 225 if (cpu->hcall)
218 do_hypercalls(cpu); 226 do_hypercalls(cpu);
219 227
220 /* 228 /* Do we have to tell the Launcher about a trap? */
221 * It's possible the Guest did a NOTIFY hypercall to the 229 if (cpu->pending.trap) {
222 * Launcher. 230 if (copy_to_user(user, &cpu->pending,
223 */ 231 sizeof(cpu->pending)))
224 if (cpu->pending_notify) { 232 return -EFAULT;
225 /* 233 return sizeof(cpu->pending);
226 * Does it just needs to write to a registered
227 * eventfd (ie. the appropriate virtqueue thread)?
228 */
229 if (!send_notify_to_eventfd(cpu)) {
230 /* OK, we tell the main Launcher. */
231 if (put_user(cpu->pending_notify, user))
232 return -EFAULT;
233 return sizeof(cpu->pending_notify);
234 }
235 } 234 }
236 235
237 /* 236 /*
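
run_guest() now has two userspace-visible replies: a register value pending from LHREQ_GETREG, answered with put_user(), and a generalized struct lguest_pending copied out for traps, replacing the old pending_notify pfn plus the in-kernel eventfd map. Presumably the Launcher tells them apart by read() size, roughly as below (user-space sketch; the helper names are illustrative, not from the patch, and struct lguest_pending comes from the lguest launcher headers):

        #include <unistd.h>

        /* Hypothetical Launcher-side dispatch on the /dev/lguest fd. */
        static void service_guest(int lguest_fd)
        {
                char buf[256];
                ssize_t n = read(lguest_fd, buf, sizeof(buf));

                if (n == sizeof(unsigned long))
                        got_register_value(buf);        /* LHREQ_GETREG reply */
                else if (n == sizeof(struct lguest_pending))
                        got_pending_trap(buf);          /* trap/notify event */
        }
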
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index 83511eb0923d..1219af493c0f 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -117,9 +117,6 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
117 /* Similarly, this sets the halted flag for run_guest(). */ 117 /* Similarly, this sets the halted flag for run_guest(). */
118 cpu->halted = 1; 118 cpu->halted = 1;
119 break; 119 break;
120 case LHCALL_NOTIFY:
121 cpu->pending_notify = args->arg1;
122 break;
123 default: 120 default:
124 /* It should be an architecture-specific hypercall. */ 121 /* It should be an architecture-specific hypercall. */
125 if (lguest_arch_do_hcall(cpu, args)) 122 if (lguest_arch_do_hcall(cpu, args))
@@ -189,7 +186,7 @@ static void do_async_hcalls(struct lg_cpu *cpu)
189 * Stop doing hypercalls if they want to notify the Launcher: 186 * Stop doing hypercalls if they want to notify the Launcher:
190 * it needs to service this first. 187 * it needs to service this first.
191 */ 188 */
192 if (cpu->pending_notify) 189 if (cpu->pending.trap)
193 break; 190 break;
194 } 191 }
195} 192}
@@ -280,7 +277,7 @@ void do_hypercalls(struct lg_cpu *cpu)
280 * NOTIFY to the Launcher, we want to return now. Otherwise we do 277 * NOTIFY to the Launcher, we want to return now. Otherwise we do
281 * the hypercall. 278 * the hypercall.
282 */ 279 */
283 if (!cpu->pending_notify) { 280 if (!cpu->pending.trap) {
284 do_hcall(cpu, cpu->hcall); 281 do_hcall(cpu, cpu->hcall);
285 /* 282 /*
286 * Tricky point: we reset the hcall pointer to mark the 283 * Tricky point: we reset the hcall pointer to mark the
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 2eef40be4c04..307e8b39e7d1 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -50,7 +50,10 @@ struct lg_cpu {
50 /* Bitmap of what has changed: see CHANGED_* above. */ 50 /* Bitmap of what has changed: see CHANGED_* above. */
51 int changed; 51 int changed;
52 52
53 unsigned long pending_notify; /* pfn from LHCALL_NOTIFY */ 53 /* Pending operation. */
54 struct lguest_pending pending;
55
56 unsigned long *reg_read; /* register from LHREQ_GETREG */
54 57
55 /* At end of a page shared mapped over lguest_pages in guest. */ 58 /* At end of a page shared mapped over lguest_pages in guest. */
56 unsigned long regs_page; 59 unsigned long regs_page;
@@ -78,24 +81,18 @@ struct lg_cpu {
78 struct lg_cpu_arch arch; 81 struct lg_cpu_arch arch;
79}; 82};
80 83
81struct lg_eventfd {
82 unsigned long addr;
83 struct eventfd_ctx *event;
84};
85
86struct lg_eventfd_map {
87 unsigned int num;
88 struct lg_eventfd map[];
89};
90
91/* The private info the thread maintains about the guest. */ 84/* The private info the thread maintains about the guest. */
92struct lguest { 85struct lguest {
93 struct lguest_data __user *lguest_data; 86 struct lguest_data __user *lguest_data;
94 struct lg_cpu cpus[NR_CPUS]; 87 struct lg_cpu cpus[NR_CPUS];
95 unsigned int nr_cpus; 88 unsigned int nr_cpus;
96 89
90 /* Valid guest memory pages must be < this. */
97 u32 pfn_limit; 91 u32 pfn_limit;
98 92
93 /* Device memory is >= pfn_limit and < device_limit. */
94 u32 device_limit;
95
99 /* 96 /*
100 * This provides the offset to the base of guest-physical memory in the 97 * This provides the offset to the base of guest-physical memory in the
101 * Launcher. 98 * Launcher.
@@ -110,8 +107,6 @@ struct lguest {
110 unsigned int stack_pages; 107 unsigned int stack_pages;
111 u32 tsc_khz; 108 u32 tsc_khz;
112 109
113 struct lg_eventfd_map *eventfds;
114
115 /* Dead? */ 110 /* Dead? */
116 const char *dead; 111 const char *dead;
117}; 112};
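
The two limits in the struct above partition guest-physical memory: pages below pfn_limit are ordinary RAM, and pages from pfn_limit up to device_limit belong to devices serviced by the Launcher. A sketch of the check those comments imply (the helper name is illustrative):

        #include <linux/types.h>

        /* Guest-physical layout per the comments above:
         *   [0, pfn_limit)             ordinary guest RAM
         *   [pfn_limit, device_limit)  device pages, handled by the Launcher */
        static bool is_device_pfn(const struct lguest *lg, u32 pfn)
        {
                return pfn >= lg->pfn_limit && pfn < lg->device_limit;
        }
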
@@ -197,8 +192,10 @@ void guest_pagetable_flush_user(struct lg_cpu *cpu);
197void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir, 192void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir,
198 unsigned long vaddr, pte_t val); 193 unsigned long vaddr, pte_t val);
199void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages); 194void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages);
200bool demand_page(struct lg_cpu *cpu, unsigned long cr2, int errcode); 195bool demand_page(struct lg_cpu *cpu, unsigned long cr2, int errcode,
196 unsigned long *iomem);
201void pin_page(struct lg_cpu *cpu, unsigned long vaddr); 197void pin_page(struct lg_cpu *cpu, unsigned long vaddr);
198bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr);
202unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr); 199unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr);
203void page_table_guest_data_init(struct lg_cpu *cpu); 200void page_table_guest_data_init(struct lg_cpu *cpu);
204 201
@@ -210,6 +207,7 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu);
210int lguest_arch_init_hypercalls(struct lg_cpu *cpu); 207int lguest_arch_init_hypercalls(struct lg_cpu *cpu);
211int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args); 208int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args);
212void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start); 209void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start);
210unsigned long *lguest_arch_regptr(struct lg_cpu *cpu, size_t reg_off, bool any);
213 211
214/* <arch>/switcher.S: */ 212/* <arch>/switcher.S: */
215extern char start_switcher_text[], end_switcher_text[], switch_to_guest[]; 213extern char start_switcher_text[], end_switcher_text[], switch_to_guest[];
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
deleted file mode 100644
index 89088d6538fd..000000000000
--- a/drivers/lguest/lguest_device.c
+++ /dev/null
@@ -1,540 +0,0 @@
1/*P:050
2 * Lguest guests use a very simple method to describe devices. It's a
3 * series of device descriptors contained just above the top of normal Guest
4 * memory.
5 *
6 * We use the standard "virtio" device infrastructure, which provides us with a
7 * console, a network and a block driver. Each one expects some configuration
8 * information and a "virtqueue" or two to send and receive data.
9:*/
10#include <linux/init.h>
11#include <linux/bootmem.h>
12#include <linux/lguest_launcher.h>
13#include <linux/virtio.h>
14#include <linux/virtio_config.h>
15#include <linux/interrupt.h>
16#include <linux/virtio_ring.h>
17#include <linux/err.h>
18#include <linux/export.h>
19#include <linux/slab.h>
20#include <asm/io.h>
21#include <asm/paravirt.h>
22#include <asm/lguest_hcall.h>
23
24/* The pointer to our (page) of device descriptions. */
25static void *lguest_devices;
26
27/*
28 * For Guests, device memory can be used as normal memory, so we cast away the
29 * __iomem to quieten sparse.
30 */
31static inline void *lguest_map(unsigned long phys_addr, unsigned long pages)
32{
33 return (__force void *)ioremap_cache(phys_addr, PAGE_SIZE*pages);
34}
35
36static inline void lguest_unmap(void *addr)
37{
38 iounmap((__force void __iomem *)addr);
39}
40
41/*D:100
42 * Each lguest device is just a virtio device plus a pointer to its entry
43 * in the lguest_devices page.
44 */
45struct lguest_device {
46 struct virtio_device vdev;
47
48 /* The entry in the lguest_devices page for this device. */
49 struct lguest_device_desc *desc;
50};
51
52/*
53 * Since the virtio infrastructure hands us a pointer to the virtio_device all
54 * the time, it helps to have a curt macro to get a pointer to the struct
55 * lguest_device it's enclosed in.
56 */
57#define to_lgdev(vd) container_of(vd, struct lguest_device, vdev)
58
59/*D:130
60 * Device configurations
61 *
62 * The configuration information for a device consists of one or more
63 * virtqueues, a feature bitmap, and some configuration bytes. The
64 * configuration bytes don't really matter to us: the Launcher sets them up, and
65 * the driver will look at them during setup.
66 *
67 * A convenient routine to return the device's virtqueue config array:
68 * immediately after the descriptor.
69 */
70static struct lguest_vqconfig *lg_vq(const struct lguest_device_desc *desc)
71{
72 return (void *)(desc + 1);
73}
74
75/* The features come immediately after the virtqueues. */
76static u8 *lg_features(const struct lguest_device_desc *desc)
77{
78 return (void *)(lg_vq(desc) + desc->num_vq);
79}
80
81/* The config space comes after the two feature bitmasks. */
82static u8 *lg_config(const struct lguest_device_desc *desc)
83{
84 return lg_features(desc) + desc->feature_len * 2;
85}
86
87/* The total size of the config page used by this device (incl. desc) */
88static unsigned desc_size(const struct lguest_device_desc *desc)
89{
90 return sizeof(*desc)
91 + desc->num_vq * sizeof(struct lguest_vqconfig)
92 + desc->feature_len * 2
93 + desc->config_len;
94}
95
96/* This gets the device's feature bits. */
97static u64 lg_get_features(struct virtio_device *vdev)
98{
99 unsigned int i;
100 u32 features = 0;
101 struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
102 u8 *in_features = lg_features(desc);
103
104 /* We do this the slow but generic way. */
105 for (i = 0; i < min(desc->feature_len * 8, 32); i++)
106 if (in_features[i / 8] & (1 << (i % 8)))
107 features |= (1 << i);
108
109 return features;
110}
111
112/*
113 * To notify on reset or feature finalization, we (ab)use the NOTIFY
114 * hypercall, with the descriptor address of the device.
115 */
116static void status_notify(struct virtio_device *vdev)
117{
118 unsigned long offset = (void *)to_lgdev(vdev)->desc - lguest_devices;
119
120 hcall(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset, 0, 0, 0);
121}
122
123/*
124 * The virtio core takes the features the Host offers, and copies the ones
125 * supported by the driver into the vdev->features array. Once that's all
126 * sorted out, this routine is called so we can tell the Host which features we
127 * understand and accept.
128 */
129static int lg_finalize_features(struct virtio_device *vdev)
130{
131 unsigned int i, bits;
132 struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
133 /* Second half of bitmap is features we accept. */
134 u8 *out_features = lg_features(desc) + desc->feature_len;
135
136 /* Give virtio_ring a chance to accept features. */
137 vring_transport_features(vdev);
138
139 /* Make sure we don't have any features > 32 bits! */
140 BUG_ON((u32)vdev->features != vdev->features);
141
142 /*
143 * Since lguest is currently x86-only, we're little-endian. That
144 * means we could just memcpy. But it's not time critical, and in
145 * case someone copies this code, we do it the slow, obvious way.
146 */
147 memset(out_features, 0, desc->feature_len);
148 bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8;
149 for (i = 0; i < bits; i++) {
150 if (__virtio_test_bit(vdev, i))
151 out_features[i / 8] |= (1 << (i % 8));
152 }
153
154 /* Tell Host we've finished with this device's feature negotiation */
155 status_notify(vdev);
156
157 return 0;
158}
159
160/* Once they've found a field, getting a copy of it is easy. */
161static void lg_get(struct virtio_device *vdev, unsigned int offset,
162 void *buf, unsigned len)
163{
164 struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
165
166 /* Check they didn't ask for more than the length of the config! */
167 BUG_ON(offset + len > desc->config_len);
168 memcpy(buf, lg_config(desc) + offset, len);
169}
170
171/* Setting the contents is also trivial. */
172static void lg_set(struct virtio_device *vdev, unsigned int offset,
173 const void *buf, unsigned len)
174{
175 struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
176
177 /* Check they didn't ask for more than the length of the config! */
178 BUG_ON(offset + len > desc->config_len);
179 memcpy(lg_config(desc) + offset, buf, len);
180}
181
182/*
183 * The operations to get and set the status word just access the status field
184 * of the device descriptor.
185 */
186static u8 lg_get_status(struct virtio_device *vdev)
187{
188 return to_lgdev(vdev)->desc->status;
189}
190
191static void lg_set_status(struct virtio_device *vdev, u8 status)
192{
193 BUG_ON(!status);
194 to_lgdev(vdev)->desc->status = status;
195
196 /* Tell Host immediately if we failed. */
197 if (status & VIRTIO_CONFIG_S_FAILED)
198 status_notify(vdev);
199}
200
201static void lg_reset(struct virtio_device *vdev)
202{
203 /* 0 status means "reset" */
204 to_lgdev(vdev)->desc->status = 0;
205 status_notify(vdev);
206}
207
208/*
209 * Virtqueues
210 *
211 * The other piece of infrastructure virtio needs is a "virtqueue": a way of
212 * the Guest device registering buffers for the other side to read from or
213 * write into (ie. send and receive buffers). Each device can have multiple
214 * virtqueues: for example the console driver uses one queue for sending and
215 * another for receiving.
216 *
217 * Fortunately for us, a very fast shared-memory-plus-descriptors virtqueue
218 * already exists in virtio_ring.c. We just need to connect it up.
219 *
220 * We start with the information we need to keep about each virtqueue.
221 */
222
223/*D:140 This is the information we remember about each virtqueue. */
224struct lguest_vq_info {
225 /* A copy of the information contained in the device config. */
226 struct lguest_vqconfig config;
227
228 /* The address where we mapped the virtio ring, so we can unmap it. */
229 void *pages;
230};
231
232/*
233 * When the virtio_ring code wants to prod the Host, it calls us here and we
234 * make a hypercall. We hand the physical address of the virtqueue so the Host
235 * knows which virtqueue we're talking about.
236 */
237static bool lg_notify(struct virtqueue *vq)
238{
239 /*
240 * We store our virtqueue information in the "priv" pointer of the
241 * virtqueue structure.
242 */
243 struct lguest_vq_info *lvq = vq->priv;
244
245 hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0, 0);
246 return true;
247}
248
249/* An extern declaration inside a C file is bad form. Don't do it. */
250extern int lguest_setup_irq(unsigned int irq);
251
252/*
253 * This routine finds the Nth virtqueue described in the configuration of
254 * this device and sets it up.
255 *
256 * This is kind of an ugly duckling. It'd be nicer to have a standard
257 * representation of a virtqueue in the configuration space, but it seems that
258 * everyone wants to do it differently. The KVM coders want the Guest to
259 * allocate its own pages and tell the Host where they are, but for lguest it's
260 * simpler for the Host to simply tell us where the pages are.
261 */
262static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
263 unsigned index,
264 void (*callback)(struct virtqueue *vq),
265 const char *name)
266{
267 struct lguest_device *ldev = to_lgdev(vdev);
268 struct lguest_vq_info *lvq;
269 struct virtqueue *vq;
270 int err;
271
272 if (!name)
273 return NULL;
274
275 /* We must have this many virtqueues. */
276 if (index >= ldev->desc->num_vq)
277 return ERR_PTR(-ENOENT);
278
279 lvq = kmalloc(sizeof(*lvq), GFP_KERNEL);
280 if (!lvq)
281 return ERR_PTR(-ENOMEM);
282
283 /*
284 * Make a copy of the "struct lguest_vqconfig" entry, which sits after
285 * the descriptor. We need a copy because the config space might not
286 * be aligned correctly.
287 */
288 memcpy(&lvq->config, lg_vq(ldev->desc)+index, sizeof(lvq->config));
289
290 printk("Mapping virtqueue %i addr %lx\n", index,
291 (unsigned long)lvq->config.pfn << PAGE_SHIFT);
292 /* Figure out how many pages the ring will take, and map that memory */
293 lvq->pages = lguest_map((unsigned long)lvq->config.pfn << PAGE_SHIFT,
294 DIV_ROUND_UP(vring_size(lvq->config.num,
295 LGUEST_VRING_ALIGN),
296 PAGE_SIZE));
297 if (!lvq->pages) {
298 err = -ENOMEM;
299 goto free_lvq;
300 }
301
302 /*
303 * OK, tell virtio_ring.c to set up a virtqueue now we know its size
304 * and we've got a pointer to its pages. Note that we set weak_barriers
 305 * to 'true': the host is just a(nother) SMP CPU, so we only need inter-cpu
306 * barriers.
307 */
308 vq = vring_new_virtqueue(index, lvq->config.num, LGUEST_VRING_ALIGN, vdev,
309 true, lvq->pages, lg_notify, callback, name);
310 if (!vq) {
311 err = -ENOMEM;
312 goto unmap;
313 }
314
315 /* Make sure the interrupt is allocated. */
316 err = lguest_setup_irq(lvq->config.irq);
317 if (err)
318 goto destroy_vring;
319
320 /*
321 * Tell the interrupt for this virtqueue to go to the virtio_ring
322 * interrupt handler.
323 *
324 * FIXME: We used to have a flag for the Host to tell us we could use
325 * the interrupt as a source of randomness: it'd be nice to have that
326 * back.
327 */
328 err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED,
329 dev_name(&vdev->dev), vq);
330 if (err)
331 goto free_desc;
332
333 /*
334 * Last of all we hook up our 'struct lguest_vq_info" to the
335 * virtqueue's priv pointer.
336 */
337 vq->priv = lvq;
338 return vq;
339
340free_desc:
341 irq_free_desc(lvq->config.irq);
342destroy_vring:
343 vring_del_virtqueue(vq);
344unmap:
345 lguest_unmap(lvq->pages);
346free_lvq:
347 kfree(lvq);
348 return ERR_PTR(err);
349}
350/*:*/
351
352/* Cleaning up a virtqueue is easy */
353static void lg_del_vq(struct virtqueue *vq)
354{
355 struct lguest_vq_info *lvq = vq->priv;
356
357 /* Release the interrupt */
358 free_irq(lvq->config.irq, vq);
359 /* Tell virtio_ring.c to free the virtqueue. */
360 vring_del_virtqueue(vq);
361 /* Unmap the pages containing the ring. */
362 lguest_unmap(lvq->pages);
363 /* Free our own queue information. */
364 kfree(lvq);
365}
366
367static void lg_del_vqs(struct virtio_device *vdev)
368{
369 struct virtqueue *vq, *n;
370
371 list_for_each_entry_safe(vq, n, &vdev->vqs, list)
372 lg_del_vq(vq);
373}
374
375static int lg_find_vqs(struct virtio_device *vdev, unsigned nvqs,
376 struct virtqueue *vqs[],
377 vq_callback_t *callbacks[],
378 const char *names[])
379{
380 struct lguest_device *ldev = to_lgdev(vdev);
381 int i;
382
383 /* We must have this many virtqueues. */
384 if (nvqs > ldev->desc->num_vq)
385 return -ENOENT;
386
387 for (i = 0; i < nvqs; ++i) {
388 vqs[i] = lg_find_vq(vdev, i, callbacks[i], names[i]);
389 if (IS_ERR(vqs[i]))
390 goto error;
391 }
392 return 0;
393
394error:
395 lg_del_vqs(vdev);
396 return PTR_ERR(vqs[i]);
397}
398
399static const char *lg_bus_name(struct virtio_device *vdev)
400{
401 return "";
402}
403
404/* The ops structure which hooks everything together. */
405static const struct virtio_config_ops lguest_config_ops = {
406 .get_features = lg_get_features,
407 .finalize_features = lg_finalize_features,
408 .get = lg_get,
409 .set = lg_set,
410 .get_status = lg_get_status,
411 .set_status = lg_set_status,
412 .reset = lg_reset,
413 .find_vqs = lg_find_vqs,
414 .del_vqs = lg_del_vqs,
415 .bus_name = lg_bus_name,
416};
417
418/*
419 * The root device for the lguest virtio devices. This makes them appear as
420 * /sys/devices/lguest/0,1,2 not /sys/devices/0,1,2.
421 */
422static struct device *lguest_root;
423
424/*D:120
425 * This is the core of the lguest bus: actually adding a new device.
426 * It's a separate function because it's neater that way, and because an
427 * earlier version of the code supported hotplug and unplug. They were removed
428 * early on because they were never used.
429 *
430 * As Andrew Tridgell says, "Untested code is buggy code".
431 *
432 * It's worth reading this carefully: we start with a pointer to the new device
433 * descriptor in the "lguest_devices" page, and the offset into the device
434 * descriptor page so we can uniquely identify it if things go badly wrong.
435 */
436static void add_lguest_device(struct lguest_device_desc *d,
437 unsigned int offset)
438{
439 struct lguest_device *ldev;
440
441 /* Start with zeroed memory; Linux's device layer counts on it. */
442 ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
443 if (!ldev) {
444 printk(KERN_EMERG "Cannot allocate lguest dev %u type %u\n",
445 offset, d->type);
446 return;
447 }
448
449 /* This device's parent is the lguest/ dir. */
450 ldev->vdev.dev.parent = lguest_root;
451 /*
452 * The device type comes straight from the descriptor. There's also a
453 * device vendor field in the virtio_device struct, which we leave as
454 * 0.
455 */
456 ldev->vdev.id.device = d->type;
457 /*
458 * We have a simple set of routines for querying the device's
459 * configuration information and setting its status.
460 */
461 ldev->vdev.config = &lguest_config_ops;
462 /* And we remember the device's descriptor for lguest_config_ops. */
463 ldev->desc = d;
464
465 /*
466 * register_virtio_device() sets up the generic fields for the struct
467 * virtio_device and calls device_register(). This makes the bus
468 * infrastructure look for a matching driver.
469 */
470 if (register_virtio_device(&ldev->vdev) != 0) {
471 printk(KERN_ERR "Failed to register lguest dev %u type %u\n",
472 offset, d->type);
473 kfree(ldev);
474 }
475}
476
477/*D:110
478 * scan_devices() simply iterates through the device page. The type 0 is
479 * reserved to mean "end of devices".
480 */
481static void scan_devices(void)
482{
483 unsigned int i;
484 struct lguest_device_desc *d;
485
486 /* We start at the page beginning, and skip over each entry. */
487 for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
488 d = lguest_devices + i;
489
490 /* Once we hit a zero, stop. */
491 if (d->type == 0)
492 break;
493
494 printk("Device at %i has size %u\n", i, desc_size(d));
495 add_lguest_device(d, i);
496 }
497}
498
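
desc_size() is defined earlier in this file; for readers following this hunk
in isolation, a sketch of what it computes (reconstructed from the layout the
loop assumes, so treat it as illustrative rather than authoritative):

static unsigned desc_size(const struct lguest_device_desc *desc)
{
	/* The descriptor is followed by its virtqueue configs, the two
	 * feature bitmaps, then the config space. */
	return sizeof(*desc)
		+ desc->num_vq * sizeof(struct lguest_vqconfig)
		+ desc->feature_len * 2
		+ desc->config_len;
}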
499/*D:105
500 * Fairly early in boot, lguest_devices_init() is called to set up the
501 * lguest device infrastructure. We verify that we are a Guest by looking
502 * at pv_info.name: there are other ways of checking, but this seems the most
503 * obvious to me.
504 *
505 * So we can access the "struct lguest_device_desc"s easily, we map that memory
506 * and store the pointer in the global "lguest_devices". Then we register a
507 * root device from which all our devices will hang (this seems to be the
508 * correct sysfs incantation).
509 *
510 * Finally we call scan_devices() which adds all the devices found in the
511 * lguest_devices page.
512 */
513static int __init lguest_devices_init(void)
514{
515 if (strcmp(pv_info.name, "lguest") != 0)
516 return 0;
517
518 lguest_root = root_device_register("lguest");
519 if (IS_ERR(lguest_root))
520 panic("Could not register lguest root");
521
522 /* Devices are in a single page above the top of "normal" memory */
523 lguest_devices = lguest_map(max_pfn<<PAGE_SHIFT, 1);
524
525 scan_devices();
526 return 0;
527}
528/* We do this after core stuff, but before the drivers. */
529postcore_initcall(lguest_devices_init);
530
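
For concreteness: a Guest given 64MB of memory has max_pfn 0x4000, so
max_pfn<<PAGE_SHIFT is 0x4000000 and the descriptor page lives at
guest-physical 64MB, one page just past the top of ordinary RAM.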
531/*D:150
532 * At this point in the journey we used to now wade through the lguest
533 * devices themselves: net, block and console. Since they're all now virtio
534 * devices rather than lguest-specific, I've decided to ignore them. Mostly,
535 * they're kind of boring. But this does mean you'll never experience the
536 * thrill of reading the forbidden love scene buried deep in the block driver.
537 *
538 * "make Launcher" beckons, where we answer questions like "Where do Guests
539 * come from?", and "What do you do when someone asks for optimization?".
540 */
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index 4263f4cc8c55..c4c6113eb9a6 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -2,175 +2,62 @@
2 * launcher controls and communicates with the Guest. For example, 2 * launcher controls and communicates with the Guest. For example,
3 * the first write will tell us the Guest's memory layout and entry 3 * the first write will tell us the Guest's memory layout and entry
4 * point. A read will run the Guest until something happens, such as 4 * point. A read will run the Guest until something happens, such as
5 * a signal or the Guest doing a NOTIFY out to the Launcher. There is 5 * a signal or the Guest accessing a device.
6 * also a way for the Launcher to attach eventfds to particular NOTIFY
7 * values instead of returning from the read() call.
8:*/ 6:*/
9#include <linux/uaccess.h> 7#include <linux/uaccess.h>
10#include <linux/miscdevice.h> 8#include <linux/miscdevice.h>
11#include <linux/fs.h> 9#include <linux/fs.h>
12#include <linux/sched.h> 10#include <linux/sched.h>
13#include <linux/eventfd.h>
14#include <linux/file.h> 11#include <linux/file.h>
15#include <linux/slab.h> 12#include <linux/slab.h>
16#include <linux/export.h> 13#include <linux/export.h>
17#include "lg.h" 14#include "lg.h"
18 15
19/*L:056 16/*L:052
20 * Before we move on, let's jump ahead and look at what the kernel does when 17 The Launcher can get the registers, and also set some of them.
21 * it needs to look up the eventfds. That will complete our picture of how we 18*/
22 * use RCU. 19static int getreg_setup(struct lg_cpu *cpu, const unsigned long __user *input)
23 *
24 * The notification value is in cpu->pending_notify: we return true if it went
25 * to an eventfd.
26 */
27bool send_notify_to_eventfd(struct lg_cpu *cpu)
28{
29 unsigned int i;
30 struct lg_eventfd_map *map;
31
32 /*
33 * This "rcu_read_lock()" helps track when someone is still looking at
34 * the (RCU-using) eventfds array. It's not actually a lock at all;
35 * indeed it's a noop in many configurations. (You didn't expect me to
36 * explain all the RCU secrets here, did you?)
37 */
38 rcu_read_lock();
39 /*
40 * rcu_dereference is the counter-side of rcu_assign_pointer(); it
41 * makes sure we don't access the memory pointed to by
42 * cpu->lg->eventfds before cpu->lg->eventfds is set. Sounds crazy,
43 * but Alpha allows this! Paul McKenney points out that a really
44 * aggressive compiler could have the same effect:
45 * http://lists.ozlabs.org/pipermail/lguest/2009-July/001560.html
46 *
47 * So play safe, use rcu_dereference to get the rcu-protected pointer:
48 */
49 map = rcu_dereference(cpu->lg->eventfds);
50 /*
51 * Simple array search: even if they add an eventfd while we do this,
52 * we'll continue to use the old array and just won't see the new one.
53 */
54 for (i = 0; i < map->num; i++) {
55 if (map->map[i].addr == cpu->pending_notify) {
56 eventfd_signal(map->map[i].event, 1);
57 cpu->pending_notify = 0;
58 break;
59 }
60 }
61 /* We're done with the rcu-protected variable cpu->lg->eventfds. */
62 rcu_read_unlock();
63
64 /* If we cleared the notification, it's because we found a match. */
65 return cpu->pending_notify == 0;
66}
67
68/*L:055
69 * One of the more tricksy tricks in the Linux Kernel is a technique called
70 * Read Copy Update. Since one point of lguest is to teach lguest journeyers
71 * about kernel coding, I use it here. (In case you're curious, other purposes
72 * include learning about virtualization and instilling a deep appreciation for
73 * simplicity and puppies).
74 *
75 * We keep a simple array which maps LHCALL_NOTIFY values to eventfds, but we
76 * add new eventfds without ever blocking readers from accessing the array.
77 * The current Launcher only does this during boot, so that never happens. But
78 * Read Copy Update is cool, and adding a lock risks damaging even more puppies
79 * than this code does.
80 *
81 * We allocate a brand new one-larger array, copy the old one and add our new
82 * element. Then we make the lg eventfd pointer point to the new array.
83 * That's the easy part: now we need to free the old one, but we need to make
84 * sure no slow CPU somewhere is still looking at it. That's what
85 * synchronize_rcu does for us: waits until every CPU has indicated that it has
86 * moved on to know it's no longer using the old one.
87 *
88 * If that's unclear, see http://en.wikipedia.org/wiki/Read-copy-update.
89 */
90static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
91{ 20{
92 struct lg_eventfd_map *new, *old = lg->eventfds; 21 unsigned long which;
93
94 /*
95 * We don't allow notifications on value 0 anyway (pending_notify of
96 * 0 means "nothing pending").
97 */
98 if (!addr)
99 return -EINVAL;
100
101 /*
102 * Replace the old array with the new one, carefully: others can
103 * be accessing it at the same time.
104 */
105 new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1),
106 GFP_KERNEL);
107 if (!new)
108 return -ENOMEM;
109 22
110 /* First make identical copy. */ 23 /* We re-use the ptrace structure to specify which register to read. */
111 memcpy(new->map, old->map, sizeof(old->map[0]) * old->num); 24 if (get_user(which, input) != 0)
112 new->num = old->num; 25 return -EFAULT;
113
114 /* Now append new entry. */
115 new->map[new->num].addr = addr;
116 new->map[new->num].event = eventfd_ctx_fdget(fd);
117 if (IS_ERR(new->map[new->num].event)) {
118 int err = PTR_ERR(new->map[new->num].event);
119 kfree(new);
120 return err;
121 }
122 new->num++;
123 26
124 /* 27 /*
125 * Now put new one in place: rcu_assign_pointer() is a fancy way of 28 * We set up the cpu register pointer, and their next read will
126 * doing "lg->eventfds = new", but it uses memory barriers to make 29 * actually get the value (instead of running the guest).
127 * absolutely sure that the contents of "new" written above is nailed
128 * down before we actually do the assignment.
129 * 30 *
130 * We have to think about these kinds of things when we're operating on 31 * The last argument 'true' says we can access any register.
131 * live data without locks.
132 */ 32 */
133 rcu_assign_pointer(lg->eventfds, new); 33 cpu->reg_read = lguest_arch_regptr(cpu, which, true);
34 if (!cpu->reg_read)
35 return -ENOENT;
134 36
135 /* 37 /* And because this is a write() call, we return the length used. */
136 * We're not in a big hurry. Wait until no one's looking at old 38 return sizeof(unsigned long) * 2;
137 * version, then free it.
138 */
139 synchronize_rcu();
140 kfree(old);
141
142 return 0;
143} 39}
144 40
145/*L:052 41static int setreg(struct lg_cpu *cpu, const unsigned long __user *input)
146 * Receiving notifications from the Guest is usually done by attaching a
147 * particular LHCALL_NOTIFY value to an event filedescriptor. The eventfd will
148 * become readable when the Guest does an LHCALL_NOTIFY with that value.
149 *
150 * This is really convenient for processing each virtqueue in a separate
151 * thread.
152 */
153static int attach_eventfd(struct lguest *lg, const unsigned long __user *input)
154{ 42{
155 unsigned long addr, fd; 43 unsigned long which, value, *reg;
156 int err;
157 44
158 if (get_user(addr, input) != 0) 45 /* We re-use the ptrace structure to specify which register to set. */
46 if (get_user(which, input) != 0)
159 return -EFAULT; 47 return -EFAULT;
160 input++; 48 input++;
161 if (get_user(fd, input) != 0) 49 if (get_user(value, input) != 0)
162 return -EFAULT; 50 return -EFAULT;
163 51
164 /* 52 /* The last argument 'false' means we can't access all registers. */
165 * Just make sure two callers don't add eventfds at once. We really 53 reg = lguest_arch_regptr(cpu, which, false);
166 * only need to lock against callers adding to the same Guest, so using 54 if (!reg)
167 * the Big Lguest Lock is overkill. But this is setup, not a fast path. 55 return -ENOENT;
168 */
169 mutex_lock(&lguest_lock);
170 err = add_eventfd(lg, addr, fd);
171 mutex_unlock(&lguest_lock);
172 56
173 return err; 57 *reg = value;
58
59 /* And because this is a write() call, we return the length used. */
60 return sizeof(unsigned long) * 3;
174} 61}
175 62
176/*L:050 63/*L:050
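
Seen from the Launcher's side of /dev/lguest, both requests are plain
write()s whose first word names the request; a hypothetical sketch (assuming
the LHREQ_GETREG/LHREQ_SETREG values from the matching lguest.h change, which
is not in this hunk; error handling elided):

/* Hypothetical Launcher code, not part of this patch. Register offsets
 * re-use the ptrace pt_regs layout, e.g. offsetof(struct pt_regs, ax). */
static unsigned long example_getreg(int lguest_fd, unsigned long reg_off)
{
	unsigned long args[2] = { LHREQ_GETREG, reg_off };
	unsigned long val;

	write(lguest_fd, args, sizeof(args));	/* arms cpu->reg_read */
	read(lguest_fd, &val, sizeof(val));	/* this read returns the value */
	return val;
}

static void example_setreg(int lguest_fd, unsigned long reg_off,
			   unsigned long val)
{
	unsigned long args[3] = { LHREQ_SETREG, reg_off, val };

	write(lguest_fd, args, sizeof(args));
}

LHREQ_TRAP (added below) follows the same shape: a two-word write of
{ LHREQ_TRAP, trapnum } asks the Host to deliver a trap the Launcher
could not or chose not to emulate.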
@@ -194,6 +81,23 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
194 return 0; 81 return 0;
195} 82}
196 83
84/*L:053
85 * Deliver a trap: this is used by the Launcher if it can't emulate
86 * an instruction.
87 */
88static int trap(struct lg_cpu *cpu, const unsigned long __user *input)
89{
90 unsigned long trapnum;
91
92 if (get_user(trapnum, input) != 0)
93 return -EFAULT;
94
95 if (!deliver_trap(cpu, trapnum))
96 return -EINVAL;
97
98 return 0;
99}
100
197/*L:040 101/*L:040
198 * Once our Guest is initialized, the Launcher makes it run by reading 102 * Once our Guest is initialized, the Launcher makes it run by reading
199 * from /dev/lguest. 103 * from /dev/lguest.
@@ -237,8 +141,8 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
237 * If we returned from read() last time because the Guest sent I/O, 141 * If we returned from read() last time because the Guest sent I/O,
238 * clear the flag. 142 * clear the flag.
239 */ 143 */
240 if (cpu->pending_notify) 144 if (cpu->pending.trap)
241 cpu->pending_notify = 0; 145 cpu->pending.trap = 0;
242 146
243 /* Run the Guest until something interesting happens. */ 147 /* Run the Guest until something interesting happens. */
244 return run_guest(cpu, (unsigned long __user *)user); 148 return run_guest(cpu, (unsigned long __user *)user);
@@ -319,7 +223,7 @@ static int initialize(struct file *file, const unsigned long __user *input)
319 /* "struct lguest" contains all we (the Host) know about a Guest. */ 223 /* "struct lguest" contains all we (the Host) know about a Guest. */
320 struct lguest *lg; 224 struct lguest *lg;
321 int err; 225 int err;
322 unsigned long args[3]; 226 unsigned long args[4];
323 227
324 /* 228 /*
325 * We grab the Big Lguest lock, which protects against multiple 229 * We grab the Big Lguest lock, which protects against multiple
@@ -343,21 +247,15 @@ static int initialize(struct file *file, const unsigned long __user *input)
343 goto unlock; 247 goto unlock;
344 } 248 }
345 249
346 lg->eventfds = kmalloc(sizeof(*lg->eventfds), GFP_KERNEL);
347 if (!lg->eventfds) {
348 err = -ENOMEM;
349 goto free_lg;
350 }
351 lg->eventfds->num = 0;
352
353 /* Populate the easy fields of our "struct lguest" */ 250 /* Populate the easy fields of our "struct lguest" */
354 lg->mem_base = (void __user *)args[0]; 251 lg->mem_base = (void __user *)args[0];
355 lg->pfn_limit = args[1]; 252 lg->pfn_limit = args[1];
253 lg->device_limit = args[3];
356 254
357 /* This is the first cpu (cpu 0) and it will start booting at args[2] */ 255 /* This is the first cpu (cpu 0) and it will start booting at args[2] */
358 err = lg_cpu_start(&lg->cpus[0], 0, args[2]); 256 err = lg_cpu_start(&lg->cpus[0], 0, args[2]);
359 if (err) 257 if (err)
360 goto free_eventfds; 258 goto free_lg;
361 259
362 /* 260 /*
363 * Initialize the Guest's shadow page tables. This allocates 261 * Initialize the Guest's shadow page tables. This allocates
@@ -378,8 +276,6 @@ static int initialize(struct file *file, const unsigned long __user *input)
378free_regs: 276free_regs:
379 /* FIXME: This should be in free_vcpu */ 277 /* FIXME: This should be in free_vcpu */
380 free_page(lg->cpus[0].regs_page); 278 free_page(lg->cpus[0].regs_page);
381free_eventfds:
382 kfree(lg->eventfds);
383free_lg: 279free_lg:
384 kfree(lg); 280 kfree(lg);
385unlock: 281unlock:
@@ -432,8 +328,12 @@ static ssize_t write(struct file *file, const char __user *in,
432 return initialize(file, input); 328 return initialize(file, input);
433 case LHREQ_IRQ: 329 case LHREQ_IRQ:
434 return user_send_irq(cpu, input); 330 return user_send_irq(cpu, input);
435 case LHREQ_EVENTFD: 331 case LHREQ_GETREG:
436 return attach_eventfd(lg, input); 332 return getreg_setup(cpu, input);
333 case LHREQ_SETREG:
334 return setreg(cpu, input);
335 case LHREQ_TRAP:
336 return trap(cpu, input);
437 default: 337 default:
438 return -EINVAL; 338 return -EINVAL;
439 } 339 }
@@ -478,11 +378,6 @@ static int close(struct inode *inode, struct file *file)
478 mmput(lg->cpus[i].mm); 378 mmput(lg->cpus[i].mm);
479 } 379 }
480 380
481 /* Release any eventfds they registered. */
482 for (i = 0; i < lg->eventfds->num; i++)
483 eventfd_ctx_put(lg->eventfds->map[i].event);
484 kfree(lg->eventfds);
485
486 /* 381 /*
487 * If lg->dead doesn't contain an error code it will be NULL or a 382 * If lg->dead doesn't contain an error code it will be NULL or a
488 * kmalloc()ed string, either of which is ok to hand to kfree(). 383 * kmalloc()ed string, either of which is ok to hand to kfree().
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index e8b55c3a6170..e3abebc912c0 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -250,6 +250,16 @@ static void release_pte(pte_t pte)
250} 250}
251/*:*/ 251/*:*/
252 252
253static bool gpte_in_iomem(struct lg_cpu *cpu, pte_t gpte)
254{
255 /* We don't handle large pages. */
256 if (pte_flags(gpte) & _PAGE_PSE)
257 return false;
258
259 return (pte_pfn(gpte) >= cpu->lg->pfn_limit
260 && pte_pfn(gpte) < cpu->lg->device_limit);
261}
262
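
A worked example with illustrative numbers: give the Guest 128MB of RAM
(pfn_limit = 0x8000) and a device window ending at device_limit = 0x8100.
A guest PTE pointing at pfn 0x8050 then lands in [pfn_limit, device_limit),
so gpte_in_iomem() reports it as I/O memory and demand_page() below hands
the access to the Launcher instead of mapping it.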
253static bool check_gpte(struct lg_cpu *cpu, pte_t gpte) 263static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)
254{ 264{
255 if ((pte_flags(gpte) & _PAGE_PSE) || 265 if ((pte_flags(gpte) & _PAGE_PSE) ||
@@ -374,8 +384,14 @@ static pte_t *find_spte(struct lg_cpu *cpu, unsigned long vaddr, bool allocate,
374 * 384 *
375 * If we fixed up the fault (ie. we mapped the address), this routine returns 385 * If we fixed up the fault (ie. we mapped the address), this routine returns
376 * true. Otherwise, it was a real fault and we need to tell the Guest. 386 * true. Otherwise, it was a real fault and we need to tell the Guest.
387 *
388 * There's a corner case: they're trying to access memory between
389 * pfn_limit and device_limit, which is I/O memory. In this case, we
390 * return false and set @iomem to the physical address, so the
391 * Launcher can handle the instruction manually.
377 */ 392 */
378bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) 393bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode,
394 unsigned long *iomem)
379{ 395{
380 unsigned long gpte_ptr; 396 unsigned long gpte_ptr;
381 pte_t gpte; 397 pte_t gpte;
@@ -383,6 +399,8 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
383 pmd_t gpmd; 399 pmd_t gpmd;
384 pgd_t gpgd; 400 pgd_t gpgd;
385 401
402 *iomem = 0;
403
386 /* We never demand page the Switcher, so trying is a mistake. */ 404 /* We never demand page the Switcher, so trying is a mistake. */
387 if (vaddr >= switcher_addr) 405 if (vaddr >= switcher_addr)
388 return false; 406 return false;
@@ -459,6 +477,12 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
459 if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER)) 477 if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
460 return false; 478 return false;
461 479
480 /* If they're accessing io memory, we expect a fault. */
481 if (gpte_in_iomem(cpu, gpte)) {
482 *iomem = (pte_pfn(gpte) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
483 return false;
484 }
485
462 /* 486 /*
463 * Check that the Guest PTE flags are OK, and the page number is below 487 * Check that the Guest PTE flags are OK, and the page number is below
464 * the pfn_limit (ie. not mapping the Launcher binary). 488 * the pfn_limit (ie. not mapping the Launcher binary).
@@ -553,7 +577,9 @@ static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
553 */ 577 */
554void pin_page(struct lg_cpu *cpu, unsigned long vaddr) 578void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
555{ 579{
556 if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2)) 580 unsigned long iomem;
581
582 if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2, &iomem))
557 kill_guest(cpu, "bad stack page %#lx", vaddr); 583 kill_guest(cpu, "bad stack page %#lx", vaddr);
558} 584}
559/*:*/ 585/*:*/
@@ -647,7 +673,7 @@ void guest_pagetable_flush_user(struct lg_cpu *cpu)
647/*:*/ 673/*:*/
648 674
649/* We walk down the guest page tables to get a guest-physical address */ 675/* We walk down the guest page tables to get a guest-physical address */
650unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr) 676bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr)
651{ 677{
652 pgd_t gpgd; 678 pgd_t gpgd;
653 pte_t gpte; 679 pte_t gpte;
@@ -656,31 +682,47 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
656#endif 682#endif
657 683
658 /* Still not set up? Just map 1:1. */ 684 /* Still not set up? Just map 1:1. */
659 if (unlikely(cpu->linear_pages)) 685 if (unlikely(cpu->linear_pages)) {
660 return vaddr; 686 *paddr = vaddr;
687 return true;
688 }
661 689
662 /* First step: get the top-level Guest page table entry. */ 690 /* First step: get the top-level Guest page table entry. */
663 gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t); 691 gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
664 /* Toplevel not present? We can't map it in. */ 692 /* Toplevel not present? We can't map it in. */
665 if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) { 693 if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
666 kill_guest(cpu, "Bad address %#lx", vaddr); 694 goto fail;
667 return -1UL;
668 }
669 695
670#ifdef CONFIG_X86_PAE 696#ifdef CONFIG_X86_PAE
671 gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); 697 gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
672 if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) { 698 if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
673 kill_guest(cpu, "Bad address %#lx", vaddr); 699 goto fail;
674 return -1UL;
675 }
676 gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t); 700 gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
677#else 701#else
678 gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t); 702 gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
679#endif 703#endif
680 if (!(pte_flags(gpte) & _PAGE_PRESENT)) 704 if (!(pte_flags(gpte) & _PAGE_PRESENT))
681 kill_guest(cpu, "Bad address %#lx", vaddr); 705 goto fail;
706
707 *paddr = pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
708 return true;
709
710fail:
711 *paddr = -1UL;
712 return false;
713}
682 714
683 return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK); 715/*
716 * This is the version we normally use: kills the Guest if it uses a
717 * bad address
718 */
719unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
720{
721 unsigned long paddr;
722
723 if (!__guest_pa(cpu, vaddr, &paddr))
724 kill_guest(cpu, "Bad address %#lx", vaddr);
725 return paddr;
684} 726}
685 727
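
As a quick check of that final OR: translating vaddr 0xc0001234 through a
PTE whose pfn is 0x1b3 (illustrative numbers) gives
paddr = 0x1b3 * 4096 | 0x234 = 0x1b3234.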
686/* 728/*
@@ -912,7 +954,8 @@ static void __guest_set_pte(struct lg_cpu *cpu, int idx,
912 * now. This shaves 10% off a copy-on-write 954 * now. This shaves 10% off a copy-on-write
913 * micro-benchmark. 955 * micro-benchmark.
914 */ 956 */
915 if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) { 957 if ((pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED))
958 && !gpte_in_iomem(cpu, gpte)) {
916 if (!check_gpte(cpu, gpte)) 959 if (!check_gpte(cpu, gpte))
917 return; 960 return;
918 set_pte(spte, 961 set_pte(spte,
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 6adfd7ba4c97..30f2aef69d78 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -182,6 +182,52 @@ static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages)
182} 182}
183/*:*/ 183/*:*/
184 184
185unsigned long *lguest_arch_regptr(struct lg_cpu *cpu, size_t reg_off, bool any)
186{
187 switch (reg_off) {
188 case offsetof(struct pt_regs, bx):
189 return &cpu->regs->ebx;
190 case offsetof(struct pt_regs, cx):
191 return &cpu->regs->ecx;
192 case offsetof(struct pt_regs, dx):
193 return &cpu->regs->edx;
194 case offsetof(struct pt_regs, si):
195 return &cpu->regs->esi;
196 case offsetof(struct pt_regs, di):
197 return &cpu->regs->edi;
198 case offsetof(struct pt_regs, bp):
199 return &cpu->regs->ebp;
200 case offsetof(struct pt_regs, ax):
201 return &cpu->regs->eax;
202 case offsetof(struct pt_regs, ip):
203 return &cpu->regs->eip;
204 case offsetof(struct pt_regs, sp):
205 return &cpu->regs->esp;
206 }
207
208 /* Launcher can read these, but we don't allow setting them. */
209 if (any) {
210 switch (reg_off) {
211 case offsetof(struct pt_regs, ds):
212 return &cpu->regs->ds;
213 case offsetof(struct pt_regs, es):
214 return &cpu->regs->es;
215 case offsetof(struct pt_regs, fs):
216 return &cpu->regs->fs;
217 case offsetof(struct pt_regs, gs):
218 return &cpu->regs->gs;
219 case offsetof(struct pt_regs, cs):
220 return &cpu->regs->cs;
221 case offsetof(struct pt_regs, flags):
222 return &cpu->regs->eflags;
223 case offsetof(struct pt_regs, ss):
224 return &cpu->regs->ss;
225 }
226 }
227
228 return NULL;
229}
230
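
So a reg_off of offsetof(struct pt_regs, ip), for example, hands back
&cpu->regs->eip to both getreg_setup() and setreg(), while the segment
registers and eflags are only reachable with 'any' set, i.e. through
LHREQ_GETREG.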
185/*M:002 231/*M:002
186 * There are hooks in the scheduler which we can register to tell when we 232 * There are hooks in the scheduler which we can register to tell when we
187 * get kicked off the CPU (preempt_notifier_register()). This would allow us 233 * get kicked off the CPU (preempt_notifier_register()). This would allow us
@@ -269,110 +315,73 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
269 * usually attached to a PC. 315 * usually attached to a PC.
270 * 316 *
271 * When the Guest uses one of these instructions, we get a trap (General 317 * When the Guest uses one of these instructions, we get a trap (General
272 * Protection Fault) and come here. We see if it's one of those troublesome 318 * Protection Fault) and come here. We queue this to be sent out to the
273 * instructions and skip over it. We return true if we did. 319 * Launcher to handle.
274 */ 320 */
275static int emulate_insn(struct lg_cpu *cpu)
276{
277 u8 insn;
278 unsigned int insnlen = 0, in = 0, small_operand = 0;
279 /*
280 * The eip contains the *virtual* address of the Guest's instruction:
281 * walk the Guest's page tables to find the "physical" address.
282 */
283 unsigned long physaddr = guest_pa(cpu, cpu->regs->eip);
284
285 /*
286 * This must be the Guest kernel trying to do something, not userspace!
287 * The bottom two bits of the CS segment register are the privilege
288 * level.
289 */
290 if ((cpu->regs->cs & 3) != GUEST_PL)
291 return 0;
292
293 /* Decoding x86 instructions is icky. */
294 insn = lgread(cpu, physaddr, u8);
295 321
296 /* 322/*
297 * Around 2.6.33, the kernel started using an emulation for the 323 * The eip contains the *virtual* address of the Guest's instruction:
298 * cmpxchg8b instruction in early boot on many configurations. This 324 * we copy the instruction here so the Launcher doesn't have to walk
299 * code isn't paravirtualized, and it tries to disable interrupts. 325 * the page tables to decode it. We handle the case (eg. in a kernel
300 * Ignore it, which will Mostly Work. 326 * module) where the instruction is over two pages, and the pages are
301 */ 327 * virtually but not physically contiguous.
302 if (insn == 0xfa) { 328 *
303 /* "cli", or Clear Interrupt Enable instruction. Skip it. */ 329 * The longest possible x86 instruction is 15 bytes, but we don't handle
304 cpu->regs->eip++; 330 * anything that strange.
305 return 1; 331 */
332static void copy_from_guest(struct lg_cpu *cpu,
333 void *dst, unsigned long vaddr, size_t len)
334{
335 size_t to_page_end = PAGE_SIZE - (vaddr % PAGE_SIZE);
336 unsigned long paddr;
337
338 BUG_ON(len > PAGE_SIZE);
339
340 /* If it goes over a page, copy in two parts. */
341 if (len > to_page_end) {
342 /* But make sure the next page is mapped! */
343 if (__guest_pa(cpu, vaddr + to_page_end, &paddr))
344 copy_from_guest(cpu, dst + to_page_end,
345 vaddr + to_page_end,
346 len - to_page_end);
347 else
348 /* Otherwise fill with zeroes. */
349 memset(dst + to_page_end, 0, len - to_page_end);
350 len = to_page_end;
306 } 351 }
307 352
308 /* 353 /* This will kill the guest if it isn't mapped, but that
309 * 0x66 is an "operand prefix". It means a 16, not 32 bit in/out. 354 * shouldn't happen. */
310 */ 355 __lgread(cpu, dst, guest_pa(cpu, vaddr), len);
311 if (insn == 0x66) { 356}
312 small_operand = 1;
313 /* The instruction is 1 byte so far, read the next byte. */
314 insnlen = 1;
315 insn = lgread(cpu, physaddr + insnlen, u8);
316 }
317 357
318 /*
319 * We can ignore the lower bit for the moment and decode the 4 opcodes
320 * we need to emulate.
321 */
322 switch (insn & 0xFE) {
323 case 0xE4: /* in <next byte>,%al */
324 insnlen += 2;
325 in = 1;
326 break;
327 case 0xEC: /* in (%dx),%al */
328 insnlen += 1;
329 in = 1;
330 break;
331 case 0xE6: /* out %al,<next byte> */
332 insnlen += 2;
333 break;
334 case 0xEE: /* out %al,(%dx) */
335 insnlen += 1;
336 break;
337 default:
338 /* OK, we don't know what this is, can't emulate. */
339 return 0;
340 }
341 358
342 /* 359static void setup_emulate_insn(struct lg_cpu *cpu)
343 * If it was an "IN" instruction, they expect the result to be read 360{
344 * into %eax, so we change %eax. We always return all-ones, which 361 cpu->pending.trap = 13;
345 * traditionally means "there's nothing there". 362 copy_from_guest(cpu, cpu->pending.insn, cpu->regs->eip,
346 */ 363 sizeof(cpu->pending.insn));
347 if (in) { 364}
348 /* Lower bit tells means it's a 32/16 bit access */ 365
349 if (insn & 0x1) { 366static void setup_iomem_insn(struct lg_cpu *cpu, unsigned long iomem_addr)
350 if (small_operand) 367{
351 cpu->regs->eax |= 0xFFFF; 368 cpu->pending.trap = 14;
352 else 369 cpu->pending.addr = iomem_addr;
353 cpu->regs->eax = 0xFFFFFFFF; 370 copy_from_guest(cpu, cpu->pending.insn, cpu->regs->eip,
354 } else 371 sizeof(cpu->pending.insn));
355 cpu->regs->eax |= 0xFF;
356 }
357 /* Finally, we've "done" the instruction, so move past it. */
358 cpu->regs->eip += insnlen;
359 /* Success! */
360 return 1;
361} 372}
362 373
363/*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */ 374/*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */
364void lguest_arch_handle_trap(struct lg_cpu *cpu) 375void lguest_arch_handle_trap(struct lg_cpu *cpu)
365{ 376{
377 unsigned long iomem_addr;
378
366 switch (cpu->regs->trapnum) { 379 switch (cpu->regs->trapnum) {
367 case 13: /* We've intercepted a General Protection Fault. */ 380 case 13: /* We've intercepted a General Protection Fault. */
368 /* 381 /* Hand to Launcher to emulate those pesky IN and OUT insns */
369 * Check if this was one of those annoying IN or OUT
370 * instructions which we need to emulate. If so, we just go
371 * back into the Guest after we've done it.
372 */
373 if (cpu->regs->errcode == 0) { 382 if (cpu->regs->errcode == 0) {
374 if (emulate_insn(cpu)) 383 setup_emulate_insn(cpu);
375 return; 384 return;
376 } 385 }
377 break; 386 break;
378 case 14: /* We've intercepted a Page Fault. */ 387 case 14: /* We've intercepted a Page Fault. */
@@ -387,9 +396,16 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
387 * whether kernel or userspace code. 396 * whether kernel or userspace code.
388 */ 397 */
389 if (demand_page(cpu, cpu->arch.last_pagefault, 398 if (demand_page(cpu, cpu->arch.last_pagefault,
390 cpu->regs->errcode)) 399 cpu->regs->errcode, &iomem_addr))
391 return; 400 return;
392 401
402 /* Was this an access to memory mapped IO? */
403 if (iomem_addr) {
404 /* Tell Launcher, let it handle it. */
405 setup_iomem_insn(cpu, iomem_addr);
406 return;
407 }
408
393 /* 409 /*
394 * OK, it's really not there (or not OK): the Guest needs to 410 * OK, it's really not there (or not OK): the Guest needs to
395 * know. We write out the cr2 value so it knows where the 411 * know. We write out the cr2 value so it knows where the
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index c39644478aa4..63e05e32b462 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -178,7 +178,7 @@ config MD_FAULTY
178source "drivers/md/bcache/Kconfig" 178source "drivers/md/bcache/Kconfig"
179 179
180config BLK_DEV_DM_BUILTIN 180config BLK_DEV_DM_BUILTIN
181 boolean 181 bool
182 182
183config BLK_DEV_DM 183config BLK_DEV_DM
184 tristate "Device mapper support" 184 tristate "Device mapper support"
@@ -197,7 +197,7 @@ config BLK_DEV_DM
197 If unsure, say N. 197 If unsure, say N.
198 198
199config DM_DEBUG 199config DM_DEBUG
200 boolean "Device mapper debugging support" 200 bool "Device mapper debugging support"
201 depends on BLK_DEV_DM 201 depends on BLK_DEV_DM
202 ---help--- 202 ---help---
203 Enable this for messages that may help debug device-mapper problems. 203 Enable this for messages that may help debug device-mapper problems.
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 08981be7baa1..713a96237a80 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -18,9 +18,11 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/crypto.h> 19#include <linux/crypto.h>
20#include <linux/workqueue.h> 20#include <linux/workqueue.h>
21#include <linux/kthread.h>
21#include <linux/backing-dev.h> 22#include <linux/backing-dev.h>
22#include <linux/atomic.h> 23#include <linux/atomic.h>
23#include <linux/scatterlist.h> 24#include <linux/scatterlist.h>
25#include <linux/rbtree.h>
24#include <asm/page.h> 26#include <asm/page.h>
25#include <asm/unaligned.h> 27#include <asm/unaligned.h>
26#include <crypto/hash.h> 28#include <crypto/hash.h>
@@ -58,7 +60,8 @@ struct dm_crypt_io {
58 atomic_t io_pending; 60 atomic_t io_pending;
59 int error; 61 int error;
60 sector_t sector; 62 sector_t sector;
61 struct dm_crypt_io *base_io; 63
64 struct rb_node rb_node;
62} CRYPTO_MINALIGN_ATTR; 65} CRYPTO_MINALIGN_ATTR;
63 66
64struct dm_crypt_request { 67struct dm_crypt_request {
@@ -108,7 +111,8 @@ struct iv_tcw_private {
108 * Crypt: maps a linear range of a block device 111 * Crypt: maps a linear range of a block device
109 * and encrypts / decrypts at the same time. 112 * and encrypts / decrypts at the same time.
110 */ 113 */
111enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; 114enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
115 DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
112 116
113/* 117/*
114 * The fields in here must be read only after initialization. 118 * The fields in here must be read only after initialization.
@@ -121,14 +125,18 @@ struct crypt_config {
121 * pool for per bio private data, crypto requests and 125 * pool for per bio private data, crypto requests and
122 * encryption requests/buffer pages 126 * encryption requests/buffer pages
123 */ 127 */
124 mempool_t *io_pool;
125 mempool_t *req_pool; 128 mempool_t *req_pool;
126 mempool_t *page_pool; 129 mempool_t *page_pool;
127 struct bio_set *bs; 130 struct bio_set *bs;
131 struct mutex bio_alloc_lock;
128 132
129 struct workqueue_struct *io_queue; 133 struct workqueue_struct *io_queue;
130 struct workqueue_struct *crypt_queue; 134 struct workqueue_struct *crypt_queue;
131 135
136 struct task_struct *write_thread;
137 wait_queue_head_t write_thread_wait;
138 struct rb_root write_tree;
139
132 char *cipher; 140 char *cipher;
133 char *cipher_string; 141 char *cipher_string;
134 142
@@ -172,9 +180,6 @@ struct crypt_config {
172}; 180};
173 181
174#define MIN_IOS 16 182#define MIN_IOS 16
175#define MIN_POOL_PAGES 32
176
177static struct kmem_cache *_crypt_io_pool;
178 183
179static void clone_init(struct dm_crypt_io *, struct bio *); 184static void clone_init(struct dm_crypt_io *, struct bio *);
180static void kcryptd_queue_crypt(struct dm_crypt_io *io); 185static void kcryptd_queue_crypt(struct dm_crypt_io *io);
@@ -946,57 +951,70 @@ static int crypt_convert(struct crypt_config *cc,
946 return 0; 951 return 0;
947} 952}
948 953
954static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
955
949/* 956/*
950 * Generate a new unfragmented bio with the given size 957 * Generate a new unfragmented bio with the given size
951 * This should never violate the device limitations 958 * This should never violate the device limitations
952 * May return a smaller bio when running out of pages, indicated by 959 *
953 * *out_of_pages set to 1. 960 * This function may be called concurrently. If we allocate from the mempool
961 * concurrently, there is a possibility of deadlock. For example, if we have
962 * a mempool of 256 pages, and two processes each wanting 256 pages allocate
963 * from it concurrently, they may deadlock in a situation where both
964 * have allocated 128 pages and the mempool is exhausted.
965 *
966 * In order to avoid this scenario we allocate the pages under a mutex.
967 *
968 * In order to not degrade performance with excessive locking, we try
969 * non-blocking allocations without a mutex first but on failure we fallback
970 * to blocking allocations with a mutex.
954 */ 971 */
955static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size, 972static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
956 unsigned *out_of_pages)
957{ 973{
958 struct crypt_config *cc = io->cc; 974 struct crypt_config *cc = io->cc;
959 struct bio *clone; 975 struct bio *clone;
960 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 976 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
961 gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; 977 gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
962 unsigned i, len; 978 unsigned i, len, remaining_size;
963 struct page *page; 979 struct page *page;
980 struct bio_vec *bvec;
981
982retry:
983 if (unlikely(gfp_mask & __GFP_WAIT))
984 mutex_lock(&cc->bio_alloc_lock);
964 985
965 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs); 986 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
966 if (!clone) 987 if (!clone)
967 return NULL; 988 goto return_clone;
968 989
969 clone_init(io, clone); 990 clone_init(io, clone);
970 *out_of_pages = 0; 991
992 remaining_size = size;
971 993
972 for (i = 0; i < nr_iovecs; i++) { 994 for (i = 0; i < nr_iovecs; i++) {
973 page = mempool_alloc(cc->page_pool, gfp_mask); 995 page = mempool_alloc(cc->page_pool, gfp_mask);
974 if (!page) { 996 if (!page) {
975 *out_of_pages = 1; 997 crypt_free_buffer_pages(cc, clone);
976 break; 998 bio_put(clone);
999 gfp_mask |= __GFP_WAIT;
1000 goto retry;
977 } 1001 }
978 1002
979 /* 1003 len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
980 * If additional pages cannot be allocated without waiting,
981 * return a partially-allocated bio. The caller will then try
982 * to allocate more bios while submitting this partial bio.
983 */
984 gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
985 1004
986 len = (size > PAGE_SIZE) ? PAGE_SIZE : size; 1005 bvec = &clone->bi_io_vec[clone->bi_vcnt++];
1006 bvec->bv_page = page;
1007 bvec->bv_len = len;
1008 bvec->bv_offset = 0;
987 1009
988 if (!bio_add_page(clone, page, len, 0)) { 1010 clone->bi_iter.bi_size += len;
989 mempool_free(page, cc->page_pool);
990 break;
991 }
992 1011
993 size -= len; 1012 remaining_size -= len;
994 } 1013 }
995 1014
996 if (!clone->bi_iter.bi_size) { 1015return_clone:
997 bio_put(clone); 1016 if (unlikely(gfp_mask & __GFP_WAIT))
998 return NULL; 1017 mutex_unlock(&cc->bio_alloc_lock);
999 }
1000 1018
1001 return clone; 1019 return clone;
1002} 1020}
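
The retry discipline above, pulled out as a free-standing sketch
(illustrative only; it uses this kernel's __GFP_WAIT flag, and unlike the
real crypt_alloc_buffer() it allocates bare pages rather than building a bio):

static int alloc_pages_deadlock_safe(mempool_t *pool, struct page **pages,
				     unsigned int n, struct mutex *lock)
{
	gfp_t gfp = GFP_NOWAIT | __GFP_HIGHMEM;
	unsigned int i;

retry:
	/* Only serialize callers once we are actually allowed to block. */
	if (unlikely(gfp & __GFP_WAIT))
		mutex_lock(lock);

	for (i = 0; i < n; i++) {
		pages[i] = mempool_alloc(pool, gfp);
		if (!pages[i]) {
			/* Give everything back, then retry exclusively;
			 * a blocking mempool_alloc() cannot fail. */
			while (i--)
				mempool_free(pages[i], pool);
			gfp |= __GFP_WAIT;
			goto retry;
		}
	}

	if (unlikely(gfp & __GFP_WAIT))
		mutex_unlock(lock);
	return 0;
}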
@@ -1020,7 +1038,6 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
1020 io->base_bio = bio; 1038 io->base_bio = bio;
1021 io->sector = sector; 1039 io->sector = sector;
1022 io->error = 0; 1040 io->error = 0;
1023 io->base_io = NULL;
1024 io->ctx.req = NULL; 1041 io->ctx.req = NULL;
1025 atomic_set(&io->io_pending, 0); 1042 atomic_set(&io->io_pending, 0);
1026} 1043}
@@ -1033,13 +1050,11 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
1033/* 1050/*
1034 * One of the bios was finished. Check for completion of 1051 * One of the bios was finished. Check for completion of
1035 * the whole request and correctly clean up the buffer. 1052 * the whole request and correctly clean up the buffer.
1036 * If base_io is set, wait for the last fragment to complete.
1037 */ 1053 */
1038static void crypt_dec_pending(struct dm_crypt_io *io) 1054static void crypt_dec_pending(struct dm_crypt_io *io)
1039{ 1055{
1040 struct crypt_config *cc = io->cc; 1056 struct crypt_config *cc = io->cc;
1041 struct bio *base_bio = io->base_bio; 1057 struct bio *base_bio = io->base_bio;
1042 struct dm_crypt_io *base_io = io->base_io;
1043 int error = io->error; 1058 int error = io->error;
1044 1059
1045 if (!atomic_dec_and_test(&io->io_pending)) 1060 if (!atomic_dec_and_test(&io->io_pending))
@@ -1047,16 +1062,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
1047 1062
1048 if (io->ctx.req) 1063 if (io->ctx.req)
1049 crypt_free_req(cc, io->ctx.req, base_bio); 1064 crypt_free_req(cc, io->ctx.req, base_bio);
1050 if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size)) 1065
1051 mempool_free(io, cc->io_pool); 1066 bio_endio(base_bio, error);
1052
1053 if (likely(!base_io))
1054 bio_endio(base_bio, error);
1055 else {
1056 if (error && !base_io->error)
1057 base_io->error = error;
1058 crypt_dec_pending(base_io);
1059 }
1060} 1067}
1061 1068
1062/* 1069/*
@@ -1138,37 +1145,97 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
1138 return 0; 1145 return 0;
1139} 1146}
1140 1147
1148static void kcryptd_io_read_work(struct work_struct *work)
1149{
1150 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1151
1152 crypt_inc_pending(io);
1153 if (kcryptd_io_read(io, GFP_NOIO))
1154 io->error = -ENOMEM;
1155 crypt_dec_pending(io);
1156}
1157
1158static void kcryptd_queue_read(struct dm_crypt_io *io)
1159{
1160 struct crypt_config *cc = io->cc;
1161
1162 INIT_WORK(&io->work, kcryptd_io_read_work);
1163 queue_work(cc->io_queue, &io->work);
1164}
1165
1141static void kcryptd_io_write(struct dm_crypt_io *io) 1166static void kcryptd_io_write(struct dm_crypt_io *io)
1142{ 1167{
1143 struct bio *clone = io->ctx.bio_out; 1168 struct bio *clone = io->ctx.bio_out;
1169
1144 generic_make_request(clone); 1170 generic_make_request(clone);
1145} 1171}
1146 1172
1147static void kcryptd_io(struct work_struct *work) 1173#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
1174
1175static int dmcrypt_write(void *data)
1148{ 1176{
1149 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); 1177 struct crypt_config *cc = data;
1178 struct dm_crypt_io *io;
1150 1179
1151 if (bio_data_dir(io->base_bio) == READ) { 1180 while (1) {
1152 crypt_inc_pending(io); 1181 struct rb_root write_tree;
1153 if (kcryptd_io_read(io, GFP_NOIO)) 1182 struct blk_plug plug;
1154 io->error = -ENOMEM;
1155 crypt_dec_pending(io);
1156 } else
1157 kcryptd_io_write(io);
1158}
1159 1183
1160static void kcryptd_queue_io(struct dm_crypt_io *io) 1184 DECLARE_WAITQUEUE(wait, current);
1161{
1162 struct crypt_config *cc = io->cc;
1163 1185
1164 INIT_WORK(&io->work, kcryptd_io); 1186 spin_lock_irq(&cc->write_thread_wait.lock);
1165 queue_work(cc->io_queue, &io->work); 1187continue_locked:
1188
1189 if (!RB_EMPTY_ROOT(&cc->write_tree))
1190 goto pop_from_list;
1191
1192 __set_current_state(TASK_INTERRUPTIBLE);
1193 __add_wait_queue(&cc->write_thread_wait, &wait);
1194
1195 spin_unlock_irq(&cc->write_thread_wait.lock);
1196
1197 if (unlikely(kthread_should_stop())) {
1198 set_task_state(current, TASK_RUNNING);
1199 remove_wait_queue(&cc->write_thread_wait, &wait);
1200 break;
1201 }
1202
1203 schedule();
1204
1205 set_task_state(current, TASK_RUNNING);
1206 spin_lock_irq(&cc->write_thread_wait.lock);
1207 __remove_wait_queue(&cc->write_thread_wait, &wait);
1208 goto continue_locked;
1209
1210pop_from_list:
1211 write_tree = cc->write_tree;
1212 cc->write_tree = RB_ROOT;
1213 spin_unlock_irq(&cc->write_thread_wait.lock);
1214
1215 BUG_ON(rb_parent(write_tree.rb_node));
1216
1217 /*
1218 * Note: we cannot walk the tree here with rb_next because
1219 * the structures may be freed when kcryptd_io_write is called.
1220 */
1221 blk_start_plug(&plug);
1222 do {
1223 io = crypt_io_from_node(rb_first(&write_tree));
1224 rb_erase(&io->rb_node, &write_tree);
1225 kcryptd_io_write(io);
1226 } while (!RB_EMPTY_ROOT(&write_tree));
1227 blk_finish_plug(&plug);
1228 }
1229 return 0;
1166} 1230}
1167 1231
1168static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) 1232static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
1169{ 1233{
1170 struct bio *clone = io->ctx.bio_out; 1234 struct bio *clone = io->ctx.bio_out;
1171 struct crypt_config *cc = io->cc; 1235 struct crypt_config *cc = io->cc;
1236 unsigned long flags;
1237 sector_t sector;
1238 struct rb_node **rbp, *parent;
1172 1239
1173 if (unlikely(io->error < 0)) { 1240 if (unlikely(io->error < 0)) {
1174 crypt_free_buffer_pages(cc, clone); 1241 crypt_free_buffer_pages(cc, clone);
@@ -1182,20 +1249,34 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
1182 1249
1183 clone->bi_iter.bi_sector = cc->start + io->sector; 1250 clone->bi_iter.bi_sector = cc->start + io->sector;
1184 1251
1185 if (async) 1252 if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
1186 kcryptd_queue_io(io);
1187 else
1188 generic_make_request(clone); 1253 generic_make_request(clone);
1254 return;
1255 }
1256
1257 spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
1258 rbp = &cc->write_tree.rb_node;
1259 parent = NULL;
1260 sector = io->sector;
1261 while (*rbp) {
1262 parent = *rbp;
1263 if (sector < crypt_io_from_node(parent)->sector)
1264 rbp = &(*rbp)->rb_left;
1265 else
1266 rbp = &(*rbp)->rb_right;
1267 }
1268 rb_link_node(&io->rb_node, parent, rbp);
1269 rb_insert_color(&io->rb_node, &cc->write_tree);
1270
1271 wake_up_locked(&cc->write_thread_wait);
1272 spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
1189} 1273}
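
The effect, with illustrative sector numbers: if encryption finishes for
sectors 8, 0 and 16 in that order, the tree hands dmcrypt_write() the bios
as 0, 8, 16, so the device sees writes in sector order even though kcryptd
completed them out of order.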
1190 1274
1191static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) 1275static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
1192{ 1276{
1193 struct crypt_config *cc = io->cc; 1277 struct crypt_config *cc = io->cc;
1194 struct bio *clone; 1278 struct bio *clone;
1195 struct dm_crypt_io *new_io;
1196 int crypt_finished; 1279 int crypt_finished;
1197 unsigned out_of_pages = 0;
1198 unsigned remaining = io->base_bio->bi_iter.bi_size;
1199 sector_t sector = io->sector; 1280 sector_t sector = io->sector;
1200 int r; 1281 int r;
1201 1282
@@ -1205,80 +1286,30 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
1205 crypt_inc_pending(io); 1286 crypt_inc_pending(io);
1206 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector); 1287 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
1207 1288
1208 /* 1289 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
1209 * The allocated buffers can be smaller than the whole bio, 1290 if (unlikely(!clone)) {
1210 * so repeat the whole process until all the data can be handled. 1291 io->error = -EIO;
1211 */ 1292 goto dec;
1212 while (remaining) { 1293 }
1213 clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
1214 if (unlikely(!clone)) {
1215 io->error = -ENOMEM;
1216 break;
1217 }
1218
1219 io->ctx.bio_out = clone;
1220 io->ctx.iter_out = clone->bi_iter;
1221
1222 remaining -= clone->bi_iter.bi_size;
1223 sector += bio_sectors(clone);
1224
1225 crypt_inc_pending(io);
1226
1227 r = crypt_convert(cc, &io->ctx);
1228 if (r < 0)
1229 io->error = -EIO;
1230
1231 crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
1232
1233 /* Encryption was already finished, submit io now */
1234 if (crypt_finished) {
1235 kcryptd_crypt_write_io_submit(io, 0);
1236
1237 /*
1238 * If there was an error, do not try next fragments.
1239 * For async, error is processed in async handler.
1240 */
1241 if (unlikely(r < 0))
1242 break;
1243 1294
1244 io->sector = sector; 1295 io->ctx.bio_out = clone;
1245 } 1296 io->ctx.iter_out = clone->bi_iter;
1246 1297
1247 /* 1298 sector += bio_sectors(clone);
1248 * Out of memory -> run queues
1249 * But don't wait if split was due to the io size restriction
1250 */
1251 if (unlikely(out_of_pages))
1252 congestion_wait(BLK_RW_ASYNC, HZ/100);
1253 1299
1254 /* 1300 crypt_inc_pending(io);
1255 * With async crypto it is unsafe to share the crypto context 1301 r = crypt_convert(cc, &io->ctx);
1256 * between fragments, so switch to a new dm_crypt_io structure. 1302 if (r)
1257 */ 1303 io->error = -EIO;
1258 if (unlikely(!crypt_finished && remaining)) { 1304 crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
1259 new_io = mempool_alloc(cc->io_pool, GFP_NOIO);
1260 crypt_io_init(new_io, io->cc, io->base_bio, sector);
1261 crypt_inc_pending(new_io);
1262 crypt_convert_init(cc, &new_io->ctx, NULL,
1263 io->base_bio, sector);
1264 new_io->ctx.iter_in = io->ctx.iter_in;
1265
1266 /*
1267 * Fragments after the first use the base_io
1268 * pending count.
1269 */
1270 if (!io->base_io)
1271 new_io->base_io = io;
1272 else {
1273 new_io->base_io = io->base_io;
1274 crypt_inc_pending(io->base_io);
1275 crypt_dec_pending(io);
1276 }
1277 1305
1278 io = new_io; 1306 /* Encryption was already finished, submit io now */
1279 } 1307 if (crypt_finished) {
1308 kcryptd_crypt_write_io_submit(io, 0);
1309 io->sector = sector;
1280 } 1310 }
1281 1311
1312dec:
1282 crypt_dec_pending(io); 1313 crypt_dec_pending(io);
1283} 1314}
1284 1315
@@ -1481,6 +1512,9 @@ static void crypt_dtr(struct dm_target *ti)
1481 if (!cc) 1512 if (!cc)
1482 return; 1513 return;
1483 1514
1515 if (cc->write_thread)
1516 kthread_stop(cc->write_thread);
1517
1484 if (cc->io_queue) 1518 if (cc->io_queue)
1485 destroy_workqueue(cc->io_queue); 1519 destroy_workqueue(cc->io_queue);
1486 if (cc->crypt_queue) 1520 if (cc->crypt_queue)
@@ -1495,8 +1529,6 @@ static void crypt_dtr(struct dm_target *ti)
1495 mempool_destroy(cc->page_pool); 1529 mempool_destroy(cc->page_pool);
1496 if (cc->req_pool) 1530 if (cc->req_pool)
1497 mempool_destroy(cc->req_pool); 1531 mempool_destroy(cc->req_pool);
1498 if (cc->io_pool)
1499 mempool_destroy(cc->io_pool);
1500 1532
1501 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) 1533 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
1502 cc->iv_gen_ops->dtr(cc); 1534 cc->iv_gen_ops->dtr(cc);
@@ -1688,7 +1720,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1688 char dummy; 1720 char dummy;
1689 1721
1690 static struct dm_arg _args[] = { 1722 static struct dm_arg _args[] = {
1691 {0, 1, "Invalid number of feature args"}, 1723 {0, 3, "Invalid number of feature args"},
1692 }; 1724 };
1693 1725
1694 if (argc < 5) { 1726 if (argc < 5) {
@@ -1710,13 +1742,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1710 if (ret < 0) 1742 if (ret < 0)
1711 goto bad; 1743 goto bad;
1712 1744
1713 ret = -ENOMEM;
1714 cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
1715 if (!cc->io_pool) {
1716 ti->error = "Cannot allocate crypt io mempool";
1717 goto bad;
1718 }
1719
1720 cc->dmreq_start = sizeof(struct ablkcipher_request); 1745 cc->dmreq_start = sizeof(struct ablkcipher_request);
1721 cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc)); 1746 cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
1722 cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request)); 1747 cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
@@ -1734,6 +1759,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1734 iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc)); 1759 iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
1735 } 1760 }
1736 1761
1762 ret = -ENOMEM;
1737 cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + 1763 cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
1738 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size); 1764 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
1739 if (!cc->req_pool) { 1765 if (!cc->req_pool) {
@@ -1746,7 +1772,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1746 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size, 1772 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
1747 ARCH_KMALLOC_MINALIGN); 1773 ARCH_KMALLOC_MINALIGN);
1748 1774
1749 cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); 1775 cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
1750 if (!cc->page_pool) { 1776 if (!cc->page_pool) {
1751 ti->error = "Cannot allocate page mempool"; 1777 ti->error = "Cannot allocate page mempool";
1752 goto bad; 1778 goto bad;
@@ -1758,6 +1784,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1758 goto bad; 1784 goto bad;
1759 } 1785 }
1760 1786
1787 mutex_init(&cc->bio_alloc_lock);
1788
1761 ret = -EINVAL; 1789 ret = -EINVAL;
1762 if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) { 1790 if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
1763 ti->error = "Invalid iv_offset sector"; 1791 ti->error = "Invalid iv_offset sector";
@@ -1788,15 +1816,26 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1788 if (ret) 1816 if (ret)
1789 goto bad; 1817 goto bad;
1790 1818
1791 opt_string = dm_shift_arg(&as); 1819 while (opt_params--) {
1820 opt_string = dm_shift_arg(&as);
1821 if (!opt_string) {
1822 ti->error = "Not enough feature arguments";
1823 goto bad;
1824 }
1792 1825
1793 if (opt_params == 1 && opt_string && 1826 if (!strcasecmp(opt_string, "allow_discards"))
1794 !strcasecmp(opt_string, "allow_discards")) 1827 ti->num_discard_bios = 1;
1795 ti->num_discard_bios = 1; 1828
1796 else if (opt_params) { 1829 else if (!strcasecmp(opt_string, "same_cpu_crypt"))
1797 ret = -EINVAL; 1830 set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
1798 ti->error = "Invalid feature arguments"; 1831
1799 goto bad; 1832 else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
1833 set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
1834
1835 else {
1836 ti->error = "Invalid feature arguments";
1837 goto bad;
1838 }
1800 } 1839 }
1801 } 1840 }
1802 1841
@@ -1807,13 +1846,28 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1807 goto bad; 1846 goto bad;
1808 } 1847 }
1809 1848
1810 cc->crypt_queue = alloc_workqueue("kcryptd", 1849 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
1811 WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1); 1850 cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
1851 else
1852 cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
1853 num_online_cpus());
1812 if (!cc->crypt_queue) { 1854 if (!cc->crypt_queue) {
1813 ti->error = "Couldn't create kcryptd queue"; 1855 ti->error = "Couldn't create kcryptd queue";
1814 goto bad; 1856 goto bad;
1815 } 1857 }
1816 1858
1859 init_waitqueue_head(&cc->write_thread_wait);
1860 cc->write_tree = RB_ROOT;
1861
1862 cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
1863 if (IS_ERR(cc->write_thread)) {
1864 ret = PTR_ERR(cc->write_thread);
1865 cc->write_thread = NULL;
1866 ti->error = "Couldn't spawn write thread";
1867 goto bad;
1868 }
1869 wake_up_process(cc->write_thread);
1870
1817 ti->num_flush_bios = 1; 1871 ti->num_flush_bios = 1;
1818 ti->discard_zeroes_data_unsupported = true; 1872 ti->discard_zeroes_data_unsupported = true;
1819 1873
@@ -1848,7 +1902,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
1848 1902
1849 if (bio_data_dir(io->base_bio) == READ) { 1903 if (bio_data_dir(io->base_bio) == READ) {
1850 if (kcryptd_io_read(io, GFP_NOWAIT)) 1904 if (kcryptd_io_read(io, GFP_NOWAIT))
1851 kcryptd_queue_io(io); 1905 kcryptd_queue_read(io);
1852 } else 1906 } else
1853 kcryptd_queue_crypt(io); 1907 kcryptd_queue_crypt(io);
1854 1908
@@ -1860,6 +1914,7 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
1860{ 1914{
1861 struct crypt_config *cc = ti->private; 1915 struct crypt_config *cc = ti->private;
1862 unsigned i, sz = 0; 1916 unsigned i, sz = 0;
1917 int num_feature_args = 0;
1863 1918
1864 switch (type) { 1919 switch (type) {
1865 case STATUSTYPE_INFO: 1920 case STATUSTYPE_INFO:
@@ -1878,8 +1933,18 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
1878 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, 1933 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
1879 cc->dev->name, (unsigned long long)cc->start); 1934 cc->dev->name, (unsigned long long)cc->start);
1880 1935
1881 if (ti->num_discard_bios) 1936 num_feature_args += !!ti->num_discard_bios;
1882 DMEMIT(" 1 allow_discards"); 1937 num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
1938 num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
1939 if (num_feature_args) {
1940 DMEMIT(" %d", num_feature_args);
1941 if (ti->num_discard_bios)
1942 DMEMIT(" allow_discards");
1943 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
1944 DMEMIT(" same_cpu_crypt");
1945 if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
1946 DMEMIT(" submit_from_crypt_cpus");
1947 }
1883 1948
1884 break; 1949 break;
1885 } 1950 }
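
Note: the status path mirrors the constructor, emitting the feature count first and then the keywords, so the table read back from "dmsetup table" can be fed straight back in. A hypothetical STATUSTYPE_TABLE tail with all three features active, device and offsets as placeholders:

    ... 0 /dev/sdb 0 3 allow_discards same_cpu_crypt submit_from_crypt_cpus
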
@@ -1976,7 +2041,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
1976 2041
1977static struct target_type crypt_target = { 2042static struct target_type crypt_target = {
1978 .name = "crypt", 2043 .name = "crypt",
1979 .version = {1, 13, 0}, 2044 .version = {1, 14, 0},
1980 .module = THIS_MODULE, 2045 .module = THIS_MODULE,
1981 .ctr = crypt_ctr, 2046 .ctr = crypt_ctr,
1982 .dtr = crypt_dtr, 2047 .dtr = crypt_dtr,
@@ -1994,15 +2059,9 @@ static int __init dm_crypt_init(void)
1994{ 2059{
1995 int r; 2060 int r;
1996 2061
1997 _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
1998 if (!_crypt_io_pool)
1999 return -ENOMEM;
2000
2001 r = dm_register_target(&crypt_target); 2062 r = dm_register_target(&crypt_target);
2002 if (r < 0) { 2063 if (r < 0)
2003 DMERR("register failed %d", r); 2064 DMERR("register failed %d", r);
2004 kmem_cache_destroy(_crypt_io_pool);
2005 }
2006 2065
2007 return r; 2066 return r;
2008} 2067}
@@ -2010,7 +2069,6 @@ static int __init dm_crypt_init(void)
2010static void __exit dm_crypt_exit(void) 2069static void __exit dm_crypt_exit(void)
2011{ 2070{
2012 dm_unregister_target(&crypt_target); 2071 dm_unregister_target(&crypt_target);
2013 kmem_cache_destroy(_crypt_io_pool);
2014} 2072}
2015 2073
2016module_init(dm_crypt_init); 2074module_init(dm_crypt_init);
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index c09359db3a90..37de0173b6d2 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -290,6 +290,12 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
290 unsigned short logical_block_size = queue_logical_block_size(q); 290 unsigned short logical_block_size = queue_logical_block_size(q);
291 sector_t num_sectors; 291 sector_t num_sectors;
292 292
293 /* Reject unsupported discard requests */
294 if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) {
295 dec_count(io, region, -EOPNOTSUPP);
296 return;
297 }
298
293 /* 299 /*
294 * where->count may be zero if rw holds a flush and we need to 300 * where->count may be zero if rw holds a flush and we need to
295 * send a zero-sized flush. 301 * send a zero-sized flush.
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 7dfdb5c746d6..089d62751f7f 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -604,6 +604,15 @@ static void write_callback(unsigned long error, void *context)
604 return; 604 return;
605 } 605 }
606 606
607 /*
 608 * If the bio is a discard, return an error, but do not
609 * degrade the array.
610 */
611 if (bio->bi_rw & REQ_DISCARD) {
612 bio_endio(bio, -EOPNOTSUPP);
613 return;
614 }
615
607 for (i = 0; i < ms->nr_mirrors; i++) 616 for (i = 0; i < ms->nr_mirrors; i++)
608 if (test_bit(i, &error)) 617 if (test_bit(i, &error))
609 fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR); 618 fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 864b03f47727..8b204ae216ab 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1432,8 +1432,6 @@ out:
1432 full_bio->bi_private = pe->full_bio_private; 1432 full_bio->bi_private = pe->full_bio_private;
1433 atomic_inc(&full_bio->bi_remaining); 1433 atomic_inc(&full_bio->bi_remaining);
1434 } 1434 }
1435 free_pending_exception(pe);
1436
1437 increment_pending_exceptions_done_count(); 1435 increment_pending_exceptions_done_count();
1438 1436
1439 up_write(&s->lock); 1437 up_write(&s->lock);
@@ -1450,6 +1448,8 @@ out:
1450 } 1448 }
1451 1449
1452 retry_origin_bios(s, origin_bios); 1450 retry_origin_bios(s, origin_bios);
1451
1452 free_pending_exception(pe);
1453} 1453}
1454 1454
1455static void commit_callback(void *context, int success) 1455static void commit_callback(void *context, int success)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ec1444f49de1..73f28802dc7a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2571,7 +2571,7 @@ int dm_setup_md_queue(struct mapped_device *md)
2571 return 0; 2571 return 0;
2572} 2572}
2573 2573
2574static struct mapped_device *dm_find_md(dev_t dev) 2574struct mapped_device *dm_get_md(dev_t dev)
2575{ 2575{
2576 struct mapped_device *md; 2576 struct mapped_device *md;
2577 unsigned minor = MINOR(dev); 2577 unsigned minor = MINOR(dev);
@@ -2582,12 +2582,15 @@ static struct mapped_device *dm_find_md(dev_t dev)
2582 spin_lock(&_minor_lock); 2582 spin_lock(&_minor_lock);
2583 2583
2584 md = idr_find(&_minor_idr, minor); 2584 md = idr_find(&_minor_idr, minor);
2585 if (md && (md == MINOR_ALLOCED || 2585 if (md) {
2586 (MINOR(disk_devt(dm_disk(md))) != minor) || 2586 if ((md == MINOR_ALLOCED ||
2587 dm_deleting_md(md) || 2587 (MINOR(disk_devt(dm_disk(md))) != minor) ||
2588 test_bit(DMF_FREEING, &md->flags))) { 2588 dm_deleting_md(md) ||
2589 md = NULL; 2589 test_bit(DMF_FREEING, &md->flags))) {
2590 goto out; 2590 md = NULL;
2591 goto out;
2592 }
2593 dm_get(md);
2591 } 2594 }
2592 2595
2593out: 2596out:
@@ -2595,16 +2598,6 @@ out:
2595 2598
2596 return md; 2599 return md;
2597} 2600}
2598
2599struct mapped_device *dm_get_md(dev_t dev)
2600{
2601 struct mapped_device *md = dm_find_md(dev);
2602
2603 if (md)
2604 dm_get(md);
2605
2606 return md;
2607}
2608EXPORT_SYMBOL_GPL(dm_get_md); 2601EXPORT_SYMBOL_GPL(dm_get_md);
2609 2602
2610void *dm_get_mdptr(struct mapped_device *md) 2603void *dm_get_mdptr(struct mapped_device *md)
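
Note: folding dm_find_md() into dm_get_md() is a lookup-race fix. The old wrapper took the reference after _minor_lock had been dropped, leaving a window in which the device could be freed between idr_find() and dm_get(). The safe shape, reduced to a sketch where dying() stands in for the real MINOR_ALLOCED / deleting / DMF_FREEING checks:

	spin_lock(&_minor_lock);
	md = idr_find(&_minor_idr, minor);
	if (md && !dying(md))
		dm_get(md);	/* reference taken before the lock drops */
	else
		md = NULL;
	spin_unlock(&_minor_lock);
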
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c8d2bac4e28b..cadf9cc02b25 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2555,7 +2555,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
2555 return err ? err : len; 2555 return err ? err : len;
2556} 2556}
2557static struct rdev_sysfs_entry rdev_state = 2557static struct rdev_sysfs_entry rdev_state =
2558__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store); 2558__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
2559 2559
2560static ssize_t 2560static ssize_t
2561errors_show(struct md_rdev *rdev, char *page) 2561errors_show(struct md_rdev *rdev, char *page)
@@ -3638,7 +3638,8 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len)
3638 return err ?: len; 3638 return err ?: len;
3639} 3639}
3640static struct md_sysfs_entry md_resync_start = 3640static struct md_sysfs_entry md_resync_start =
3641__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store); 3641__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
3642 resync_start_show, resync_start_store);
3642 3643
3643/* 3644/*
3644 * The array state can be: 3645 * The array state can be:
@@ -3851,7 +3852,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
3851 return err ?: len; 3852 return err ?: len;
3852} 3853}
3853static struct md_sysfs_entry md_array_state = 3854static struct md_sysfs_entry md_array_state =
3854__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 3855__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
3855 3856
3856static ssize_t 3857static ssize_t
3857max_corrected_read_errors_show(struct mddev *mddev, char *page) { 3858max_corrected_read_errors_show(struct mddev *mddev, char *page) {
@@ -4101,7 +4102,7 @@ out_unlock:
4101} 4102}
4102 4103
4103static struct md_sysfs_entry md_metadata = 4104static struct md_sysfs_entry md_metadata =
4104__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 4105__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
4105 4106
4106static ssize_t 4107static ssize_t
4107action_show(struct mddev *mddev, char *page) 4108action_show(struct mddev *mddev, char *page)
@@ -4189,7 +4190,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
4189} 4190}
4190 4191
4191static struct md_sysfs_entry md_scan_mode = 4192static struct md_sysfs_entry md_scan_mode =
4192__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 4193__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
4193 4194
4194static ssize_t 4195static ssize_t
4195last_sync_action_show(struct mddev *mddev, char *page) 4196last_sync_action_show(struct mddev *mddev, char *page)
@@ -4335,7 +4336,8 @@ sync_completed_show(struct mddev *mddev, char *page)
4335 return sprintf(page, "%llu / %llu\n", resync, max_sectors); 4336 return sprintf(page, "%llu / %llu\n", resync, max_sectors);
4336} 4337}
4337 4338
4338static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed); 4339static struct md_sysfs_entry md_sync_completed =
4340 __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
4339 4341
4340static ssize_t 4342static ssize_t
4341min_sync_show(struct mddev *mddev, char *page) 4343min_sync_show(struct mddev *mddev, char *page)
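
Note: every md change above is the same one-liner, __ATTR() to __ATTR_PREALLOC(). The helper just ORs SYSFS_PREALLOC into .mode, which makes kernfs allocate the transfer buffer at open() time instead of on each write, so poking e.g. array_state cannot fail (or recurse into reclaim) for lack of memory while the md device itself is needed to make progress. Roughly what the expansion looks like, per the stock definition in include/linux/sysfs.h:

	static struct md_sysfs_entry md_array_state = {
		.attr  = { .name = "array_state",
			   .mode = SYSFS_PREALLOC | S_IRUGO | S_IWUSR },
		.show  = array_state_show,
		.store = array_state_store,
	};
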
diff --git a/drivers/md/persistent-data/Kconfig b/drivers/md/persistent-data/Kconfig
index 0c2dec7aec20..78c74bb71ba4 100644
--- a/drivers/md/persistent-data/Kconfig
+++ b/drivers/md/persistent-data/Kconfig
@@ -8,7 +8,7 @@ config DM_PERSISTENT_DATA
8 device-mapper targets such as the thin provisioning target. 8 device-mapper targets such as the thin provisioning target.
9 9
10config DM_DEBUG_BLOCK_STACK_TRACING 10config DM_DEBUG_BLOCK_STACK_TRACING
11 boolean "Keep stack trace of persistent data block lock holders" 11 bool "Keep stack trace of persistent data block lock holders"
12 depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA 12 depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA
13 select STACKTRACE 13 select STACKTRACE
14 ---help--- 14 ---help---
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index cfbf9617e465..ebb280a14325 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -78,7 +78,9 @@ static int sm_disk_count_is_more_than_one(struct dm_space_map *sm, dm_block_t b,
78 if (r) 78 if (r)
79 return r; 79 return r;
80 80
81 return count > 1; 81 *result = count > 1;
82
83 return 0;
82} 84}
83 85
84static int sm_disk_set_count(struct dm_space_map *sm, dm_block_t b, 86static int sm_disk_set_count(struct dm_space_map *sm, dm_block_t b,
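
Note: the contract in sm_disk_count_is_more_than_one() is 0/-errno through the return value and the boolean answer through the out-parameter, but the old code returned "count > 1" directly, so every shared block looked like an error to callers. The restored convention, as a sketch with illustrative names:

	static int count_is_more_than_one(dm_block_t b, int *result)
	{
		uint32_t count;
		int r = get_count(b, &count);	/* 0 or -errno only */

		if (r)
			return r;
		*result = count > 1;	/* the answer goes here... */
		return 0;		/* ...never into the return value */
	}
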
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4153da5d4011..d34e238afa54 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -560,7 +560,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
560 if (test_bit(WriteMostly, &rdev->flags)) { 560 if (test_bit(WriteMostly, &rdev->flags)) {
561 /* Don't balance among write-mostly, just 561 /* Don't balance among write-mostly, just
562 * use the first as a last resort */ 562 * use the first as a last resort */
563 if (best_disk < 0) { 563 if (best_dist_disk < 0) {
564 if (is_badblock(rdev, this_sector, sectors, 564 if (is_badblock(rdev, this_sector, sectors,
565 &first_bad, &bad_sectors)) { 565 &first_bad, &bad_sectors)) {
566 if (first_bad < this_sector) 566 if (first_bad < this_sector)
@@ -569,7 +569,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
569 best_good_sectors = first_bad - this_sector; 569 best_good_sectors = first_bad - this_sector;
570 } else 570 } else
571 best_good_sectors = sectors; 571 best_good_sectors = sectors;
572 best_disk = disk; 572 best_dist_disk = disk;
573 best_pending_disk = disk;
573 } 574 }
574 continue; 575 continue;
575 } 576 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e75d48c0421a..cd2f96b2c572 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5121,12 +5121,17 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int
5121 schedule_timeout_uninterruptible(1); 5121 schedule_timeout_uninterruptible(1);
5122 } 5122 }
5123 /* Need to check if array will still be degraded after recovery/resync 5123 /* Need to check if array will still be degraded after recovery/resync
5124 * We don't need to check the 'failed' flag as when that gets set, 5124 * Note in case of > 1 drive failures it's possible we're rebuilding
5125 * recovery aborts. 5125 * one drive while leaving another faulty drive in array.
5126 */ 5126 */
5127 for (i = 0; i < conf->raid_disks; i++) 5127 rcu_read_lock();
5128 if (conf->disks[i].rdev == NULL) 5128 for (i = 0; i < conf->raid_disks; i++) {
5129 struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
5130
5131 if (rdev == NULL || test_bit(Faulty, &rdev->flags))
5129 still_degraded = 1; 5132 still_degraded = 1;
5133 }
5134 rcu_read_unlock();
5130 5135
5131 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 5136 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
5132 5137
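
Note: the rewritten comment explains the semantic half of this hunk; with more than one failed drive, an rdev can be present yet Faulty, so presence alone no longer means "not degraded". The mechanical half is the access pattern: the rdev pointer is RCU-published, so it is loaded exactly once with ACCESS_ONCE() (this era's READ_ONCE()) and only the local copy is used while rcu_read_lock() is held. In sketch form, use() being a stand-in:

	rcu_read_lock();
	rdev = ACCESS_ONCE(conf->disks[i].rdev);	/* one marked load */
	if (rdev && !test_bit(Faulty, &rdev->flags))
		use(rdev);	/* valid until rcu_read_unlock() */
	rcu_read_unlock();
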
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 3a2604580164..d2a85cde68da 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -1111,7 +1111,7 @@ static int verify_addr(struct i2c_client *i2c)
1111 return 0; 1111 return 0;
1112} 1112}
1113 1113
1114static struct regmap_config pm860x_regmap_config = { 1114static const struct regmap_config pm860x_regmap_config = {
1115 .reg_bits = 8, 1115 .reg_bits = 8,
1116 .val_bits = 8, 1116 .val_bits = 8,
1117}; 1117};
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 2e6b7311fabc..38356e39adba 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -195,6 +195,18 @@ config MFD_DA9063
195 Additional drivers must be enabled in order to use the functionality 195 Additional drivers must be enabled in order to use the functionality
196 of the device. 196 of the device.
197 197
198config MFD_DA9150
199 tristate "Dialog Semiconductor DA9150 Charger Fuel-Gauge chip"
200 depends on I2C=y
201 select MFD_CORE
202 select REGMAP_I2C
203 select REGMAP_IRQ
204 help
205 This adds support for the DA9150 integrated charger and fuel-gauge
206 chip. This driver provides common support for accessing the device.
207 Additional drivers must be enabled in order to use the specific
208 features of the device.
209
198config MFD_DLN2 210config MFD_DLN2
199 tristate "Diolan DLN2 support" 211 tristate "Diolan DLN2 support"
200 select MFD_CORE 212 select MFD_CORE
@@ -417,6 +429,7 @@ config MFD_MAX14577
417config MFD_MAX77686 429config MFD_MAX77686
418 bool "Maxim Semiconductor MAX77686/802 PMIC Support" 430 bool "Maxim Semiconductor MAX77686/802 PMIC Support"
419 depends on I2C=y 431 depends on I2C=y
432 depends on OF
420 select MFD_CORE 433 select MFD_CORE
421 select REGMAP_I2C 434 select REGMAP_I2C
422 select REGMAP_IRQ 435 select REGMAP_IRQ
@@ -589,6 +602,20 @@ config MFD_PM8921_CORE
589 Say M here if you want to include support for PM8921 chip as a module. 602 Say M here if you want to include support for PM8921 chip as a module.
590 This will build a module called "pm8921-core". 603 This will build a module called "pm8921-core".
591 604
605config MFD_QCOM_RPM
606 tristate "Qualcomm Resource Power Manager (RPM)"
607 depends on ARCH_QCOM && OF
608 help
609 If you say yes to this option, support will be included for the
610 Resource Power Manager system found in the Qualcomm 8660, 8960 and
611 8064 based devices.
612
613 This is required to access many regulators, clocks and bus
614 frequencies controlled by the RPM on these devices.
615
616 Say M here if you want to include support for the Qualcomm RPM as a
617 module. This will build a module called "qcom_rpm".
618
592config MFD_SPMI_PMIC 619config MFD_SPMI_PMIC
593 tristate "Qualcomm SPMI PMICs" 620 tristate "Qualcomm SPMI PMICs"
594 depends on ARCH_QCOM || COMPILE_TEST 621 depends on ARCH_QCOM || COMPILE_TEST
@@ -623,6 +650,18 @@ config MFD_RTSX_PCI
623 types of memory cards, such as Memory Stick, Memory Stick Pro, 650 types of memory cards, such as Memory Stick, Memory Stick Pro,
624 Secure Digital and MultiMediaCard. 651 Secure Digital and MultiMediaCard.
625 652
653config MFD_RT5033
654 tristate "Richtek RT5033 Power Management IC"
655 depends on I2C=y
656 select MFD_CORE
657 select REGMAP_I2C
658 help
659 This driver provides for the Richtek RT5033 Power Management IC,
660 which includes the I2C driver and the Core APIs. This driver provides
661 common support for accessing the device. The device supports multiple
662 sub-devices like charger, fuel gauge, flash LED, current source,
663 LDO and Buck.
664
626config MFD_RTSX_USB 665config MFD_RTSX_USB
627 tristate "Realtek USB card reader" 666 tristate "Realtek USB card reader"
628 depends on USB 667 depends on USB
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 53467e211381..19f3d744e3bd 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -113,7 +113,7 @@ obj-$(CONFIG_MFD_DA9055) += da9055.o
113 113
114da9063-objs := da9063-core.o da9063-irq.o da9063-i2c.o 114da9063-objs := da9063-core.o da9063-irq.o da9063-i2c.o
115obj-$(CONFIG_MFD_DA9063) += da9063.o 115obj-$(CONFIG_MFD_DA9063) += da9063.o
116 116obj-$(CONFIG_MFD_DA9150) += da9150-core.o
117obj-$(CONFIG_MFD_MAX14577) += max14577.o 117obj-$(CONFIG_MFD_MAX14577) += max14577.o
118obj-$(CONFIG_MFD_MAX77686) += max77686.o 118obj-$(CONFIG_MFD_MAX77686) += max77686.o
119obj-$(CONFIG_MFD_MAX77693) += max77693.o 119obj-$(CONFIG_MFD_MAX77693) += max77693.o
@@ -153,6 +153,7 @@ obj-$(CONFIG_MFD_SI476X_CORE) += si476x-core.o
153obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o 153obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o
154obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o omap-usb-tll.o 154obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o omap-usb-tll.o
155obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o ssbi.o 155obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o ssbi.o
156obj-$(CONFIG_MFD_QCOM_RPM) += qcom_rpm.o
156obj-$(CONFIG_MFD_SPMI_PMIC) += qcom-spmi-pmic.o 157obj-$(CONFIG_MFD_SPMI_PMIC) += qcom-spmi-pmic.o
157obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o 158obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
158obj-$(CONFIG_MFD_TPS65090) += tps65090.o 159obj-$(CONFIG_MFD_TPS65090) += tps65090.o
@@ -176,6 +177,7 @@ obj-$(CONFIG_MFD_IPAQ_MICRO) += ipaq-micro.o
176obj-$(CONFIG_MFD_MENF21BMC) += menf21bmc.o 177obj-$(CONFIG_MFD_MENF21BMC) += menf21bmc.o
177obj-$(CONFIG_MFD_HI6421_PMIC) += hi6421-pmic-core.o 178obj-$(CONFIG_MFD_HI6421_PMIC) += hi6421-pmic-core.o
178obj-$(CONFIG_MFD_DLN2) += dln2.o 179obj-$(CONFIG_MFD_DLN2) += dln2.o
180obj-$(CONFIG_MFD_RT5033) += rt5033.o
179 181
180intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o 182intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o
181obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o 183obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
diff --git a/drivers/mfd/da9063-core.c b/drivers/mfd/da9063-core.c
index f38bc98a3c57..facd3610ac77 100644
--- a/drivers/mfd/da9063-core.c
+++ b/drivers/mfd/da9063-core.c
@@ -86,6 +86,7 @@ static const struct mfd_cell da9063_devs[] = {
86 }, 86 },
87 { 87 {
88 .name = DA9063_DRVNAME_WATCHDOG, 88 .name = DA9063_DRVNAME_WATCHDOG,
89 .of_compatible = "dlg,da9063-watchdog",
89 }, 90 },
90 { 91 {
91 .name = DA9063_DRVNAME_HWMON, 92 .name = DA9063_DRVNAME_HWMON,
@@ -101,6 +102,7 @@ static const struct mfd_cell da9063_devs[] = {
101 .name = DA9063_DRVNAME_RTC, 102 .name = DA9063_DRVNAME_RTC,
102 .num_resources = ARRAY_SIZE(da9063_rtc_resources), 103 .num_resources = ARRAY_SIZE(da9063_rtc_resources),
103 .resources = da9063_rtc_resources, 104 .resources = da9063_rtc_resources,
105 .of_compatible = "dlg,da9063-rtc",
104 }, 106 },
105 { 107 {
106 .name = DA9063_DRVNAME_VIBRATION, 108 .name = DA9063_DRVNAME_VIBRATION,
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
index 21fd8d9a217b..6f3a7c0001f9 100644
--- a/drivers/mfd/da9063-i2c.c
+++ b/drivers/mfd/da9063-i2c.c
@@ -25,6 +25,9 @@
25#include <linux/mfd/da9063/pdata.h> 25#include <linux/mfd/da9063/pdata.h>
26#include <linux/mfd/da9063/registers.h> 26#include <linux/mfd/da9063/registers.h>
27 27
28#include <linux/of.h>
29#include <linux/regulator/of_regulator.h>
30
28static const struct regmap_range da9063_ad_readable_ranges[] = { 31static const struct regmap_range da9063_ad_readable_ranges[] = {
29 { 32 {
30 .range_min = DA9063_REG_PAGE_CON, 33 .range_min = DA9063_REG_PAGE_CON,
@@ -203,6 +206,11 @@ static struct regmap_config da9063_regmap_config = {
203 .cache_type = REGCACHE_RBTREE, 206 .cache_type = REGCACHE_RBTREE,
204}; 207};
205 208
209static const struct of_device_id da9063_dt_ids[] = {
210 { .compatible = "dlg,da9063", },
211 { }
212};
213MODULE_DEVICE_TABLE(of, da9063_dt_ids);
206static int da9063_i2c_probe(struct i2c_client *i2c, 214static int da9063_i2c_probe(struct i2c_client *i2c,
207 const struct i2c_device_id *id) 215 const struct i2c_device_id *id)
208{ 216{
@@ -257,6 +265,7 @@ static struct i2c_driver da9063_i2c_driver = {
257 .driver = { 265 .driver = {
258 .name = "da9063", 266 .name = "da9063",
259 .owner = THIS_MODULE, 267 .owner = THIS_MODULE,
268 .of_match_table = of_match_ptr(da9063_dt_ids),
260 }, 269 },
261 .probe = da9063_i2c_probe, 270 .probe = da9063_i2c_probe,
262 .remove = da9063_i2c_remove, 271 .remove = da9063_i2c_remove,
diff --git a/drivers/mfd/da9150-core.c b/drivers/mfd/da9150-core.c
new file mode 100644
index 000000000000..4d757b97ef9a
--- /dev/null
+++ b/drivers/mfd/da9150-core.c
@@ -0,0 +1,413 @@
1/*
2 * DA9150 Core MFD Driver
3 *
4 * Copyright (c) 2014 Dialog Semiconductor
5 *
6 * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/i2c.h>
18#include <linux/regmap.h>
19#include <linux/slab.h>
20#include <linux/irq.h>
21#include <linux/interrupt.h>
22#include <linux/mfd/core.h>
23#include <linux/mfd/da9150/core.h>
24#include <linux/mfd/da9150/registers.h>
25
26static bool da9150_volatile_reg(struct device *dev, unsigned int reg)
27{
28 switch (reg) {
29 case DA9150_PAGE_CON:
30 case DA9150_STATUS_A:
31 case DA9150_STATUS_B:
32 case DA9150_STATUS_C:
33 case DA9150_STATUS_D:
34 case DA9150_STATUS_E:
35 case DA9150_STATUS_F:
36 case DA9150_STATUS_G:
37 case DA9150_STATUS_H:
38 case DA9150_STATUS_I:
39 case DA9150_STATUS_J:
40 case DA9150_STATUS_K:
41 case DA9150_STATUS_L:
42 case DA9150_STATUS_N:
43 case DA9150_FAULT_LOG_A:
44 case DA9150_FAULT_LOG_B:
45 case DA9150_EVENT_E:
46 case DA9150_EVENT_F:
47 case DA9150_EVENT_G:
48 case DA9150_EVENT_H:
49 case DA9150_CONTROL_B:
50 case DA9150_CONTROL_C:
51 case DA9150_GPADC_MAN:
52 case DA9150_GPADC_RES_A:
53 case DA9150_GPADC_RES_B:
54 case DA9150_ADETVB_CFG_C:
55 case DA9150_ADETD_STAT:
56 case DA9150_ADET_CMPSTAT:
57 case DA9150_ADET_CTRL_A:
58 case DA9150_PPR_TCTR_B:
59 case DA9150_COREBTLD_STAT_A:
60 case DA9150_CORE_DATA_A:
61 case DA9150_CORE_DATA_B:
62 case DA9150_CORE_DATA_C:
63 case DA9150_CORE_DATA_D:
64 case DA9150_CORE2WIRE_STAT_A:
65 case DA9150_FW_CTRL_C:
66 case DA9150_FG_CTRL_B:
67 case DA9150_FW_CTRL_B:
68 case DA9150_GPADC_CMAN:
69 case DA9150_GPADC_CRES_A:
70 case DA9150_GPADC_CRES_B:
71 case DA9150_CC_ICHG_RES_A:
72 case DA9150_CC_ICHG_RES_B:
73 case DA9150_CC_IAVG_RES_A:
74 case DA9150_CC_IAVG_RES_B:
75 case DA9150_TAUX_CTRL_A:
76 case DA9150_TAUX_VALUE_H:
77 case DA9150_TAUX_VALUE_L:
78 case DA9150_TBAT_RES_A:
79 case DA9150_TBAT_RES_B:
80 return true;
81 default:
82 return false;
83 }
84}
85
86static const struct regmap_range_cfg da9150_range_cfg[] = {
87 {
88 .range_min = DA9150_PAGE_CON,
89 .range_max = DA9150_TBAT_RES_B,
90 .selector_reg = DA9150_PAGE_CON,
91 .selector_mask = DA9150_I2C_PAGE_MASK,
92 .selector_shift = DA9150_I2C_PAGE_SHIFT,
93 .window_start = 0,
94 .window_len = 256,
95 },
96};
97
98static struct regmap_config da9150_regmap_config = {
99 .reg_bits = 8,
100 .val_bits = 8,
101 .ranges = da9150_range_cfg,
102 .num_ranges = ARRAY_SIZE(da9150_range_cfg),
103 .max_register = DA9150_TBAT_RES_B,
104
105 .cache_type = REGCACHE_RBTREE,
106
107 .volatile_reg = da9150_volatile_reg,
108};
109
110u8 da9150_reg_read(struct da9150 *da9150, u16 reg)
111{
112 int val, ret;
113
114 ret = regmap_read(da9150->regmap, reg, &val);
115 if (ret)
116 dev_err(da9150->dev, "Failed to read from reg 0x%x: %d\n",
117 reg, ret);
118
119 return (u8) val;
120}
121EXPORT_SYMBOL_GPL(da9150_reg_read);
122
123void da9150_reg_write(struct da9150 *da9150, u16 reg, u8 val)
124{
125 int ret;
126
127 ret = regmap_write(da9150->regmap, reg, val);
128 if (ret)
129 dev_err(da9150->dev, "Failed to write to reg 0x%x: %d\n",
130 reg, ret);
131}
132EXPORT_SYMBOL_GPL(da9150_reg_write);
133
134void da9150_set_bits(struct da9150 *da9150, u16 reg, u8 mask, u8 val)
135{
136 int ret;
137
138 ret = regmap_update_bits(da9150->regmap, reg, mask, val);
139 if (ret)
140 dev_err(da9150->dev, "Failed to set bits in reg 0x%x: %d\n",
141 reg, ret);
142}
143EXPORT_SYMBOL_GPL(da9150_set_bits);
144
145void da9150_bulk_read(struct da9150 *da9150, u16 reg, int count, u8 *buf)
146{
147 int ret;
148
149 ret = regmap_bulk_read(da9150->regmap, reg, buf, count);
150 if (ret)
151 dev_err(da9150->dev, "Failed to bulk read from reg 0x%x: %d\n",
152 reg, ret);
153}
154EXPORT_SYMBOL_GPL(da9150_bulk_read);
155
156void da9150_bulk_write(struct da9150 *da9150, u16 reg, int count, const u8 *buf)
157{
158 int ret;
159
160 ret = regmap_raw_write(da9150->regmap, reg, buf, count);
161 if (ret)
 162 dev_err(da9150->dev, "Failed to bulk write to reg 0x%x: %d\n",
163 reg, ret);
164}
165EXPORT_SYMBOL_GPL(da9150_bulk_write);
166
167static struct regmap_irq da9150_irqs[] = {
168 [DA9150_IRQ_VBUS] = {
169 .reg_offset = 0,
170 .mask = DA9150_E_VBUS_MASK,
171 },
172 [DA9150_IRQ_CHG] = {
173 .reg_offset = 0,
174 .mask = DA9150_E_CHG_MASK,
175 },
176 [DA9150_IRQ_TCLASS] = {
177 .reg_offset = 0,
178 .mask = DA9150_E_TCLASS_MASK,
179 },
180 [DA9150_IRQ_TJUNC] = {
181 .reg_offset = 0,
182 .mask = DA9150_E_TJUNC_MASK,
183 },
184 [DA9150_IRQ_VFAULT] = {
185 .reg_offset = 0,
186 .mask = DA9150_E_VFAULT_MASK,
187 },
188 [DA9150_IRQ_CONF] = {
189 .reg_offset = 1,
190 .mask = DA9150_E_CONF_MASK,
191 },
192 [DA9150_IRQ_DAT] = {
193 .reg_offset = 1,
194 .mask = DA9150_E_DAT_MASK,
195 },
196 [DA9150_IRQ_DTYPE] = {
197 .reg_offset = 1,
198 .mask = DA9150_E_DTYPE_MASK,
199 },
200 [DA9150_IRQ_ID] = {
201 .reg_offset = 1,
202 .mask = DA9150_E_ID_MASK,
203 },
204 [DA9150_IRQ_ADP] = {
205 .reg_offset = 1,
206 .mask = DA9150_E_ADP_MASK,
207 },
208 [DA9150_IRQ_SESS_END] = {
209 .reg_offset = 1,
210 .mask = DA9150_E_SESS_END_MASK,
211 },
212 [DA9150_IRQ_SESS_VLD] = {
213 .reg_offset = 1,
214 .mask = DA9150_E_SESS_VLD_MASK,
215 },
216 [DA9150_IRQ_FG] = {
217 .reg_offset = 2,
218 .mask = DA9150_E_FG_MASK,
219 },
220 [DA9150_IRQ_GP] = {
221 .reg_offset = 2,
222 .mask = DA9150_E_GP_MASK,
223 },
224 [DA9150_IRQ_TBAT] = {
225 .reg_offset = 2,
226 .mask = DA9150_E_TBAT_MASK,
227 },
228 [DA9150_IRQ_GPIOA] = {
229 .reg_offset = 2,
230 .mask = DA9150_E_GPIOA_MASK,
231 },
232 [DA9150_IRQ_GPIOB] = {
233 .reg_offset = 2,
234 .mask = DA9150_E_GPIOB_MASK,
235 },
236 [DA9150_IRQ_GPIOC] = {
237 .reg_offset = 2,
238 .mask = DA9150_E_GPIOC_MASK,
239 },
240 [DA9150_IRQ_GPIOD] = {
241 .reg_offset = 2,
242 .mask = DA9150_E_GPIOD_MASK,
243 },
244 [DA9150_IRQ_GPADC] = {
245 .reg_offset = 2,
246 .mask = DA9150_E_GPADC_MASK,
247 },
248 [DA9150_IRQ_WKUP] = {
249 .reg_offset = 3,
250 .mask = DA9150_E_WKUP_MASK,
251 },
252};
253
254static struct regmap_irq_chip da9150_regmap_irq_chip = {
255 .name = "da9150_irq",
256 .status_base = DA9150_EVENT_E,
257 .mask_base = DA9150_IRQ_MASK_E,
258 .ack_base = DA9150_EVENT_E,
259 .num_regs = DA9150_NUM_IRQ_REGS,
260 .irqs = da9150_irqs,
261 .num_irqs = ARRAY_SIZE(da9150_irqs),
262};
263
264static struct resource da9150_gpadc_resources[] = {
265 {
266 .name = "GPADC",
267 .start = DA9150_IRQ_GPADC,
268 .end = DA9150_IRQ_GPADC,
269 .flags = IORESOURCE_IRQ,
270 },
271};
272
273static struct resource da9150_charger_resources[] = {
274 {
275 .name = "CHG_STATUS",
276 .start = DA9150_IRQ_CHG,
277 .end = DA9150_IRQ_CHG,
278 .flags = IORESOURCE_IRQ,
279 },
280 {
281 .name = "CHG_TJUNC",
282 .start = DA9150_IRQ_TJUNC,
283 .end = DA9150_IRQ_TJUNC,
284 .flags = IORESOURCE_IRQ,
285 },
286 {
287 .name = "CHG_VFAULT",
288 .start = DA9150_IRQ_VFAULT,
289 .end = DA9150_IRQ_VFAULT,
290 .flags = IORESOURCE_IRQ,
291 },
292 {
293 .name = "CHG_VBUS",
294 .start = DA9150_IRQ_VBUS,
295 .end = DA9150_IRQ_VBUS,
296 .flags = IORESOURCE_IRQ,
297 },
298};
299
300static struct mfd_cell da9150_devs[] = {
301 {
302 .name = "da9150-gpadc",
303 .of_compatible = "dlg,da9150-gpadc",
304 .resources = da9150_gpadc_resources,
305 .num_resources = ARRAY_SIZE(da9150_gpadc_resources),
306 },
307 {
308 .name = "da9150-charger",
309 .of_compatible = "dlg,da9150-charger",
310 .resources = da9150_charger_resources,
311 .num_resources = ARRAY_SIZE(da9150_charger_resources),
312 },
313};
314
315static int da9150_probe(struct i2c_client *client,
316 const struct i2c_device_id *id)
317{
318 struct da9150 *da9150;
319 struct da9150_pdata *pdata = dev_get_platdata(&client->dev);
320 int ret;
321
322 da9150 = devm_kzalloc(&client->dev, sizeof(*da9150), GFP_KERNEL);
323 if (!da9150)
324 return -ENOMEM;
325
326 da9150->dev = &client->dev;
327 da9150->irq = client->irq;
328 i2c_set_clientdata(client, da9150);
329
330 da9150->regmap = devm_regmap_init_i2c(client, &da9150_regmap_config);
331 if (IS_ERR(da9150->regmap)) {
332 ret = PTR_ERR(da9150->regmap);
333 dev_err(da9150->dev, "Failed to allocate register map: %d\n",
334 ret);
335 return ret;
336 }
337
338 da9150->irq_base = pdata ? pdata->irq_base : -1;
339
340 ret = regmap_add_irq_chip(da9150->regmap, da9150->irq,
341 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
342 da9150->irq_base, &da9150_regmap_irq_chip,
343 &da9150->regmap_irq_data);
344 if (ret)
345 return ret;
346
347 da9150->irq_base = regmap_irq_chip_get_base(da9150->regmap_irq_data);
348 enable_irq_wake(da9150->irq);
349
350 ret = mfd_add_devices(da9150->dev, -1, da9150_devs,
351 ARRAY_SIZE(da9150_devs), NULL,
352 da9150->irq_base, NULL);
353 if (ret) {
354 dev_err(da9150->dev, "Failed to add child devices: %d\n", ret);
355 regmap_del_irq_chip(da9150->irq, da9150->regmap_irq_data);
356 return ret;
357 }
358
359 return 0;
360}
361
362static int da9150_remove(struct i2c_client *client)
363{
364 struct da9150 *da9150 = i2c_get_clientdata(client);
365
366 regmap_del_irq_chip(da9150->irq, da9150->regmap_irq_data);
367 mfd_remove_devices(da9150->dev);
368
369 return 0;
370}
371
372static void da9150_shutdown(struct i2c_client *client)
373{
374 struct da9150 *da9150 = i2c_get_clientdata(client);
375
 376 /* Make sure we have a wakeup source for the device */
377 da9150_set_bits(da9150, DA9150_CONFIG_D,
378 DA9150_WKUP_PM_EN_MASK,
379 DA9150_WKUP_PM_EN_MASK);
380
381 /* Set device to DISABLED mode */
382 da9150_set_bits(da9150, DA9150_CONTROL_C,
383 DA9150_DISABLE_MASK, DA9150_DISABLE_MASK);
384}
385
386static const struct i2c_device_id da9150_i2c_id[] = {
387 { "da9150", },
388 { }
389};
390MODULE_DEVICE_TABLE(i2c, da9150_i2c_id);
391
392static const struct of_device_id da9150_of_match[] = {
393 { .compatible = "dlg,da9150", },
394 { }
395};
396MODULE_DEVICE_TABLE(of, da9150_of_match);
397
398static struct i2c_driver da9150_driver = {
399 .driver = {
400 .name = "da9150",
401 .of_match_table = of_match_ptr(da9150_of_match),
402 },
403 .probe = da9150_probe,
404 .remove = da9150_remove,
405 .shutdown = da9150_shutdown,
406 .id_table = da9150_i2c_id,
407};
408
409module_i2c_driver(da9150_driver);
410
411MODULE_DESCRIPTION("MFD Core Driver for DA9150");
412MODULE_AUTHOR("Adam Thomson <Adam.Thomson.Opensource@diasemi.com>");
413MODULE_LICENSE("GPL");
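
Note: the most interesting piece of the new da9150 core is the paged register map. The regmap_range_cfg above tells the regmap core that flat 16-bit register numbers beyond the 8-bit window are reached by first writing a selector into DA9150_PAGE_CON; callers then use flat addresses and the core pages transparently, as in this hedged sketch built on the driver's own helper:

	/* Any paging write to DA9150_PAGE_CON happens inside regmap;
	 * the caller only sees the flat register number. */
	u8 status = da9150_reg_read(da9150, DA9150_STATUS_A);

Likewise, regmap_irq_chip_get_base() yields the virtual IRQ base that mfd_add_devices() uses to remap the IORESOURCE_IRQ entries, so a child driver should be able to fetch its interrupt with something like platform_get_irq_byname(pdev, "CHG_STATUS").
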
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index c835e85539b2..9bbc642a7b9d 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -33,7 +33,7 @@
33 33
34#include <linux/mfd/davinci_voicecodec.h> 34#include <linux/mfd/davinci_voicecodec.h>
35 35
36static struct regmap_config davinci_vc_regmap = { 36static const struct regmap_config davinci_vc_regmap = {
37 .reg_bits = 32, 37 .reg_bits = 32,
38 .val_bits = 32, 38 .val_bits = 32,
39}; 39};
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 16162bf43656..cc1a404328c2 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -675,15 +675,6 @@ bool prcmu_has_arm_maxopp(void)
675} 675}
676 676
677/** 677/**
678 * prcmu_get_boot_status - PRCMU boot status checking
679 * Returns: the current PRCMU boot status
680 */
681int prcmu_get_boot_status(void)
682{
683 return readb(tcdm_base + PRCM_BOOT_STATUS);
684}
685
686/**
687 * prcmu_set_rc_a2p - This function is used to run few power state sequences 678 * prcmu_set_rc_a2p - This function is used to run few power state sequences
688 * @val: Value to be set, i.e. transition requested 679 * @val: Value to be set, i.e. transition requested
689 * Returns: 0 on success, -EINVAL on invalid argument 680 * Returns: 0 on success, -EINVAL on invalid argument
diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
index 6d49685d4ee4..1be9bd1c046d 100644
--- a/drivers/mfd/dln2.c
+++ b/drivers/mfd/dln2.c
@@ -587,12 +587,19 @@ static void dln2_free_rx_urbs(struct dln2_dev *dln2)
587 int i; 587 int i;
588 588
589 for (i = 0; i < DLN2_MAX_URBS; i++) { 589 for (i = 0; i < DLN2_MAX_URBS; i++) {
590 usb_kill_urb(dln2->rx_urb[i]);
591 usb_free_urb(dln2->rx_urb[i]); 590 usb_free_urb(dln2->rx_urb[i]);
592 kfree(dln2->rx_buf[i]); 591 kfree(dln2->rx_buf[i]);
593 } 592 }
594} 593}
595 594
595static void dln2_stop_rx_urbs(struct dln2_dev *dln2)
596{
597 int i;
598
599 for (i = 0; i < DLN2_MAX_URBS; i++)
600 usb_kill_urb(dln2->rx_urb[i]);
601}
602
596static void dln2_free(struct dln2_dev *dln2) 603static void dln2_free(struct dln2_dev *dln2)
597{ 604{
598 dln2_free_rx_urbs(dln2); 605 dln2_free_rx_urbs(dln2);
@@ -604,9 +611,7 @@ static int dln2_setup_rx_urbs(struct dln2_dev *dln2,
604 struct usb_host_interface *hostif) 611 struct usb_host_interface *hostif)
605{ 612{
606 int i; 613 int i;
607 int ret;
608 const int rx_max_size = DLN2_RX_BUF_SIZE; 614 const int rx_max_size = DLN2_RX_BUF_SIZE;
609 struct device *dev = &dln2->interface->dev;
610 615
611 for (i = 0; i < DLN2_MAX_URBS; i++) { 616 for (i = 0; i < DLN2_MAX_URBS; i++) {
612 dln2->rx_buf[i] = kmalloc(rx_max_size, GFP_KERNEL); 617 dln2->rx_buf[i] = kmalloc(rx_max_size, GFP_KERNEL);
@@ -620,8 +625,19 @@ static int dln2_setup_rx_urbs(struct dln2_dev *dln2,
620 usb_fill_bulk_urb(dln2->rx_urb[i], dln2->usb_dev, 625 usb_fill_bulk_urb(dln2->rx_urb[i], dln2->usb_dev,
621 usb_rcvbulkpipe(dln2->usb_dev, dln2->ep_in), 626 usb_rcvbulkpipe(dln2->usb_dev, dln2->ep_in),
622 dln2->rx_buf[i], rx_max_size, dln2_rx, dln2); 627 dln2->rx_buf[i], rx_max_size, dln2_rx, dln2);
628 }
623 629
624 ret = usb_submit_urb(dln2->rx_urb[i], GFP_KERNEL); 630 return 0;
631}
632
633static int dln2_start_rx_urbs(struct dln2_dev *dln2, gfp_t gfp)
634{
635 struct device *dev = &dln2->interface->dev;
636 int ret;
637 int i;
638
639 for (i = 0; i < DLN2_MAX_URBS; i++) {
640 ret = usb_submit_urb(dln2->rx_urb[i], gfp);
625 if (ret < 0) { 641 if (ret < 0) {
626 dev_err(dev, "failed to submit RX URB: %d\n", ret); 642 dev_err(dev, "failed to submit RX URB: %d\n", ret);
627 return ret; 643 return ret;
@@ -665,9 +681,8 @@ static const struct mfd_cell dln2_devs[] = {
665 }, 681 },
666}; 682};
667 683
668static void dln2_disconnect(struct usb_interface *interface) 684static void dln2_stop(struct dln2_dev *dln2)
669{ 685{
670 struct dln2_dev *dln2 = usb_get_intfdata(interface);
671 int i, j; 686 int i, j;
672 687
673 /* don't allow starting new transfers */ 688 /* don't allow starting new transfers */
@@ -696,6 +711,15 @@ static void dln2_disconnect(struct usb_interface *interface)
696 /* wait for transfers to end */ 711 /* wait for transfers to end */
697 wait_event(dln2->disconnect_wq, !dln2->active_transfers); 712 wait_event(dln2->disconnect_wq, !dln2->active_transfers);
698 713
714 dln2_stop_rx_urbs(dln2);
715}
716
717static void dln2_disconnect(struct usb_interface *interface)
718{
719 struct dln2_dev *dln2 = usb_get_intfdata(interface);
720
721 dln2_stop(dln2);
722
699 mfd_remove_devices(&interface->dev); 723 mfd_remove_devices(&interface->dev);
700 724
701 dln2_free(dln2); 725 dln2_free(dln2);
@@ -738,28 +762,53 @@ static int dln2_probe(struct usb_interface *interface,
738 762
739 ret = dln2_setup_rx_urbs(dln2, hostif); 763 ret = dln2_setup_rx_urbs(dln2, hostif);
740 if (ret) 764 if (ret)
741 goto out_cleanup; 765 goto out_free;
766
767 ret = dln2_start_rx_urbs(dln2, GFP_KERNEL);
768 if (ret)
769 goto out_stop_rx;
742 770
743 ret = dln2_hw_init(dln2); 771 ret = dln2_hw_init(dln2);
744 if (ret < 0) { 772 if (ret < 0) {
745 dev_err(dev, "failed to initialize hardware\n"); 773 dev_err(dev, "failed to initialize hardware\n");
746 goto out_cleanup; 774 goto out_stop_rx;
747 } 775 }
748 776
749 ret = mfd_add_hotplug_devices(dev, dln2_devs, ARRAY_SIZE(dln2_devs)); 777 ret = mfd_add_hotplug_devices(dev, dln2_devs, ARRAY_SIZE(dln2_devs));
750 if (ret != 0) { 778 if (ret != 0) {
751 dev_err(dev, "failed to add mfd devices to core\n"); 779 dev_err(dev, "failed to add mfd devices to core\n");
752 goto out_cleanup; 780 goto out_stop_rx;
753 } 781 }
754 782
755 return 0; 783 return 0;
756 784
757out_cleanup: 785out_stop_rx:
786 dln2_stop_rx_urbs(dln2);
787
788out_free:
758 dln2_free(dln2); 789 dln2_free(dln2);
759 790
760 return ret; 791 return ret;
761} 792}
762 793
794static int dln2_suspend(struct usb_interface *iface, pm_message_t message)
795{
796 struct dln2_dev *dln2 = usb_get_intfdata(iface);
797
798 dln2_stop(dln2);
799
800 return 0;
801}
802
803static int dln2_resume(struct usb_interface *iface)
804{
805 struct dln2_dev *dln2 = usb_get_intfdata(iface);
806
807 dln2->disconnect = false;
808
809 return dln2_start_rx_urbs(dln2, GFP_NOIO);
810}
811
763static const struct usb_device_id dln2_table[] = { 812static const struct usb_device_id dln2_table[] = {
764 { USB_DEVICE(0xa257, 0x2013) }, 813 { USB_DEVICE(0xa257, 0x2013) },
765 { } 814 { }
@@ -772,6 +821,8 @@ static struct usb_driver dln2_driver = {
772 .probe = dln2_probe, 821 .probe = dln2_probe,
773 .disconnect = dln2_disconnect, 822 .disconnect = dln2_disconnect,
774 .id_table = dln2_table, 823 .id_table = dln2_table,
824 .suspend = dln2_suspend,
825 .resume = dln2_resume,
775}; 826};
776 827
777module_usb_driver(dln2_driver); 828module_usb_driver(dln2_driver);
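
Note: the dln2 refactor separates killing URBs (cancelling and waiting) from freeing them, precisely so suspend/resume can reuse the stop/start halves while keeping the buffers allocated. The resume path resubmits with GFP_NOIO: allocations during system resume must not wait on I/O to devices that may themselves still be suspended, whereas GFP_KERNEL remains fine at probe time. Condensed, using the patch's own helpers:

	static int dln2_suspend(struct usb_interface *iface, pm_message_t msg)
	{
		dln2_stop(usb_get_intfdata(iface));	/* quiesce, kill RX URBs */
		return 0;
	}

	static int dln2_resume(struct usb_interface *iface)
	{
		struct dln2_dev *dln2 = usb_get_intfdata(iface);

		dln2->disconnect = false;	/* accept transfers again */
		return dln2_start_rx_urbs(dln2, GFP_NOIO);
	}
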
diff --git a/drivers/mfd/hi6421-pmic-core.c b/drivers/mfd/hi6421-pmic-core.c
index 321a2656fd00..7210ae28bf81 100644
--- a/drivers/mfd/hi6421-pmic-core.c
+++ b/drivers/mfd/hi6421-pmic-core.c
@@ -35,7 +35,7 @@ static const struct mfd_cell hi6421_devs[] = {
35 { .name = "hi6421-regulator", }, 35 { .name = "hi6421-regulator", },
36}; 36};
37 37
38static struct regmap_config hi6421_regmap_config = { 38static const struct regmap_config hi6421_regmap_config = {
39 .reg_bits = 32, 39 .reg_bits = 32,
40 .reg_stride = 4, 40 .reg_stride = 4,
41 .val_bits = 8, 41 .val_bits = 8,
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index df7b0642a5b4..80cef048b904 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -64,6 +64,9 @@ static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c,
64 config = (struct intel_soc_pmic_config *)id->driver_data; 64 config = (struct intel_soc_pmic_config *)id->driver_data;
65 65
66 pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL); 66 pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
67 if (!pmic)
68 return -ENOMEM;
69
67 dev_set_drvdata(dev, pmic); 70 dev_set_drvdata(dev, pmic);
68 71
69 pmic->regmap = devm_regmap_init_i2c(i2c, config->regmap_config); 72 pmic->regmap = devm_regmap_init_i2c(i2c, config->regmap_config);
diff --git a/drivers/mfd/intel_soc_pmic_core.h b/drivers/mfd/intel_soc_pmic_core.h
index 33aacd9baddc..9498d6719847 100644
--- a/drivers/mfd/intel_soc_pmic_core.h
+++ b/drivers/mfd/intel_soc_pmic_core.h
@@ -23,7 +23,7 @@ struct intel_soc_pmic_config {
23 unsigned long irq_flags; 23 unsigned long irq_flags;
24 struct mfd_cell *cell_dev; 24 struct mfd_cell *cell_dev;
25 int n_cell_devs; 25 int n_cell_devs;
26 struct regmap_config *regmap_config; 26 const struct regmap_config *regmap_config;
27 struct regmap_irq_chip *irq_chip; 27 struct regmap_irq_chip *irq_chip;
28}; 28};
29 29
diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c
index c85e2ecb868a..4cc1b324e971 100644
--- a/drivers/mfd/intel_soc_pmic_crc.c
+++ b/drivers/mfd/intel_soc_pmic_crc.c
@@ -111,7 +111,7 @@ static struct mfd_cell crystal_cove_dev[] = {
111 }, 111 },
112}; 112};
113 113
114static struct regmap_config crystal_cove_regmap_config = { 114static const struct regmap_config crystal_cove_regmap_config = {
115 .reg_bits = 8, 115 .reg_bits = 8,
116 .val_bits = 8, 116 .val_bits = 8,
117 117
diff --git a/drivers/mfd/lm3533-core.c b/drivers/mfd/lm3533-core.c
index 8c29f7b27324..d42fbb667d8c 100644
--- a/drivers/mfd/lm3533-core.c
+++ b/drivers/mfd/lm3533-core.c
@@ -583,7 +583,7 @@ static bool lm3533_precious_register(struct device *dev, unsigned int reg)
583 } 583 }
584} 584}
585 585
586static struct regmap_config regmap_config = { 586static const struct regmap_config regmap_config = {
587 .reg_bits = 8, 587 .reg_bits = 8,
588 .val_bits = 8, 588 .val_bits = 8,
589 .max_register = LM3533_REG_MAX, 589 .max_register = LM3533_REG_MAX,
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index 5c38df35a84d..a56e4ba5227b 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -75,6 +75,7 @@ static struct lpc_sch_info sch_chipset_info[] = {
75 [LPC_QUARK_X1000] = { 75 [LPC_QUARK_X1000] = {
76 .io_size_gpio = GPIO_IO_SIZE, 76 .io_size_gpio = GPIO_IO_SIZE,
77 .irq_gpio = GPIO_IRQ_QUARK_X1000, 77 .irq_gpio = GPIO_IRQ_QUARK_X1000,
78 .io_size_wdt = WDT_IO_SIZE,
78 }, 79 },
79}; 80};
80 81
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index 929795eae9fc..760d08d7923d 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -111,17 +111,17 @@ static bool max77802_is_volatile_reg(struct device *dev, unsigned int reg)
111 max77802_rtc_is_volatile_reg(dev, reg)); 111 max77802_rtc_is_volatile_reg(dev, reg));
112} 112}
113 113
114static struct regmap_config max77686_regmap_config = { 114static const struct regmap_config max77686_regmap_config = {
115 .reg_bits = 8, 115 .reg_bits = 8,
116 .val_bits = 8, 116 .val_bits = 8,
117}; 117};
118 118
119static struct regmap_config max77686_rtc_regmap_config = { 119static const struct regmap_config max77686_rtc_regmap_config = {
120 .reg_bits = 8, 120 .reg_bits = 8,
121 .val_bits = 8, 121 .val_bits = 8,
122}; 122};
123 123
124static struct regmap_config max77802_regmap_config = { 124static const struct regmap_config max77802_regmap_config = {
125 .reg_bits = 8, 125 .reg_bits = 8,
126 .val_bits = 8, 126 .val_bits = 8,
127 .writeable_reg = max77802_is_accessible_reg, 127 .writeable_reg = max77802_is_accessible_reg,
@@ -205,24 +205,10 @@ static const struct of_device_id max77686_pmic_dt_match[] = {
205 { }, 205 { },
206}; 206};
207 207
208static struct max77686_platform_data *max77686_i2c_parse_dt_pdata(struct device
209 *dev)
210{
211 struct max77686_platform_data *pd;
212
213 pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
214 if (!pd)
215 return NULL;
216
217 dev->platform_data = pd;
218 return pd;
219}
220
221static int max77686_i2c_probe(struct i2c_client *i2c, 208static int max77686_i2c_probe(struct i2c_client *i2c,
222 const struct i2c_device_id *id) 209 const struct i2c_device_id *id)
223{ 210{
224 struct max77686_dev *max77686 = NULL; 211 struct max77686_dev *max77686 = NULL;
225 struct max77686_platform_data *pdata = dev_get_platdata(&i2c->dev);
226 const struct of_device_id *match; 212 const struct of_device_id *match;
227 unsigned int data; 213 unsigned int data;
228 int ret = 0; 214 int ret = 0;
@@ -233,14 +219,6 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
233 const struct mfd_cell *cells; 219 const struct mfd_cell *cells;
234 int n_devs; 220 int n_devs;
235 221
236 if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node && !pdata)
237 pdata = max77686_i2c_parse_dt_pdata(&i2c->dev);
238
239 if (!pdata) {
240 dev_err(&i2c->dev, "No platform data found.\n");
241 return -EINVAL;
242 }
243
244 max77686 = devm_kzalloc(&i2c->dev, 222 max77686 = devm_kzalloc(&i2c->dev,
245 sizeof(struct max77686_dev), GFP_KERNEL); 223 sizeof(struct max77686_dev), GFP_KERNEL);
246 if (!max77686) 224 if (!max77686)
@@ -259,7 +237,6 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
259 max77686->dev = &i2c->dev; 237 max77686->dev = &i2c->dev;
260 max77686->i2c = i2c; 238 max77686->i2c = i2c;
261 239
262 max77686->wakeup = pdata->wakeup;
263 max77686->irq = i2c->irq; 240 max77686->irq = i2c->irq;
264 241
265 if (max77686->type == TYPE_MAX77686) { 242 if (max77686->type == TYPE_MAX77686) {
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
index ae3addb153a2..68b844811566 100644
--- a/drivers/mfd/mc13xxx-i2c.c
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -46,7 +46,7 @@ static const struct of_device_id mc13xxx_dt_ids[] = {
46}; 46};
47MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids); 47MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
48 48
49static struct regmap_config mc13xxx_regmap_i2c_config = { 49static const struct regmap_config mc13xxx_regmap_i2c_config = {
50 .reg_bits = 8, 50 .reg_bits = 8,
51 .val_bits = 24, 51 .val_bits = 24,
52 52
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
index 702925e242c9..58a170e45d88 100644
--- a/drivers/mfd/mc13xxx-spi.c
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -48,7 +48,7 @@ static const struct of_device_id mc13xxx_dt_ids[] = {
48}; 48};
49MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids); 49MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
50 50
51static struct regmap_config mc13xxx_regmap_spi_config = { 51static const struct regmap_config mc13xxx_regmap_spi_config = {
52 .reg_bits = 7, 52 .reg_bits = 7,
53 .pad_bits = 1, 53 .pad_bits = 1,
54 .val_bits = 24, 54 .val_bits = 24,
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 04cd54dd507c..1d924d1533c0 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -129,16 +129,6 @@ static inline u32 usbhs_read(void __iomem *base, u32 reg)
129 return readl_relaxed(base + reg); 129 return readl_relaxed(base + reg);
130} 130}
131 131
132static inline void usbhs_writeb(void __iomem *base, u8 reg, u8 val)
133{
134 writeb_relaxed(val, base + reg);
135}
136
137static inline u8 usbhs_readb(void __iomem *base, u8 reg)
138{
139 return readb_relaxed(base + reg);
140}
141
142/*-------------------------------------------------------------------------*/ 132/*-------------------------------------------------------------------------*/
143 133
144/** 134/**
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 43664eb69c93..6155d123a84e 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -183,7 +183,7 @@ static int pcf50633_resume(struct device *dev)
183 183
184static SIMPLE_DEV_PM_OPS(pcf50633_pm, pcf50633_suspend, pcf50633_resume); 184static SIMPLE_DEV_PM_OPS(pcf50633_pm, pcf50633_suspend, pcf50633_resume);
185 185
186static struct regmap_config pcf50633_regmap_config = { 186static const struct regmap_config pcf50633_regmap_config = {
187 .reg_bits = 8, 187 .reg_bits = 8,
188 .val_bits = 8, 188 .val_bits = 8,
189}; 189};
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
new file mode 100644
index 000000000000..f696328c2933
--- /dev/null
+++ b/drivers/mfd/qcom_rpm.c
@@ -0,0 +1,581 @@
1/*
2 * Copyright (c) 2014, Sony Mobile Communications AB.
3 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
4 * Author: Bjorn Andersson <bjorn.andersson@sonymobile.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/module.h>
17#include <linux/platform_device.h>
18#include <linux/of_platform.h>
19#include <linux/io.h>
20#include <linux/interrupt.h>
21#include <linux/mfd/qcom_rpm.h>
22#include <linux/mfd/syscon.h>
23#include <linux/regmap.h>
24
25#include <dt-bindings/mfd/qcom-rpm.h>
26
27struct qcom_rpm_resource {
28 unsigned target_id;
29 unsigned status_id;
30 unsigned select_id;
31 unsigned size;
32};
33
34struct qcom_rpm_data {
35 u32 version;
36 const struct qcom_rpm_resource *resource_table;
37 unsigned n_resources;
38};
39
40struct qcom_rpm {
41 struct device *dev;
42 struct regmap *ipc_regmap;
43 unsigned ipc_offset;
44 unsigned ipc_bit;
45
46 struct completion ack;
47 struct mutex lock;
48
49 void __iomem *status_regs;
50 void __iomem *ctrl_regs;
51 void __iomem *req_regs;
52
53 u32 ack_status;
54
55 const struct qcom_rpm_data *data;
56};
57
58#define RPM_STATUS_REG(rpm, i) ((rpm)->status_regs + (i) * 4)
59#define RPM_CTRL_REG(rpm, i) ((rpm)->ctrl_regs + (i) * 4)
60#define RPM_REQ_REG(rpm, i) ((rpm)->req_regs + (i) * 4)
61
62#define RPM_REQUEST_TIMEOUT (5 * HZ)
63
64#define RPM_REQUEST_CONTEXT 3
65#define RPM_REQ_SELECT 11
66#define RPM_ACK_CONTEXT 15
67#define RPM_ACK_SELECTOR 23
68#define RPM_SELECT_SIZE 7
69
70#define RPM_NOTIFICATION BIT(30)
71#define RPM_REJECTED BIT(31)
72
73#define RPM_SIGNAL BIT(2)
74
75static const struct qcom_rpm_resource apq8064_rpm_resource_table[] = {
76 [QCOM_RPM_CXO_CLK] = { 25, 9, 5, 1 },
77 [QCOM_RPM_PXO_CLK] = { 26, 10, 6, 1 },
78 [QCOM_RPM_APPS_FABRIC_CLK] = { 27, 11, 8, 1 },
79 [QCOM_RPM_SYS_FABRIC_CLK] = { 28, 12, 9, 1 },
80 [QCOM_RPM_MM_FABRIC_CLK] = { 29, 13, 10, 1 },
81 [QCOM_RPM_DAYTONA_FABRIC_CLK] = { 30, 14, 11, 1 },
82 [QCOM_RPM_SFPB_CLK] = { 31, 15, 12, 1 },
83 [QCOM_RPM_CFPB_CLK] = { 32, 16, 13, 1 },
84 [QCOM_RPM_MMFPB_CLK] = { 33, 17, 14, 1 },
85 [QCOM_RPM_EBI1_CLK] = { 34, 18, 16, 1 },
86 [QCOM_RPM_APPS_FABRIC_HALT] = { 35, 19, 18, 1 },
87 [QCOM_RPM_APPS_FABRIC_MODE] = { 37, 20, 19, 1 },
88 [QCOM_RPM_APPS_FABRIC_IOCTL] = { 40, 21, 20, 1 },
89 [QCOM_RPM_APPS_FABRIC_ARB] = { 41, 22, 21, 12 },
90 [QCOM_RPM_SYS_FABRIC_HALT] = { 53, 23, 22, 1 },
91 [QCOM_RPM_SYS_FABRIC_MODE] = { 55, 24, 23, 1 },
92 [QCOM_RPM_SYS_FABRIC_IOCTL] = { 58, 25, 24, 1 },
93 [QCOM_RPM_SYS_FABRIC_ARB] = { 59, 26, 25, 30 },
94 [QCOM_RPM_MM_FABRIC_HALT] = { 89, 27, 26, 1 },
95 [QCOM_RPM_MM_FABRIC_MODE] = { 91, 28, 27, 1 },
96 [QCOM_RPM_MM_FABRIC_IOCTL] = { 94, 29, 28, 1 },
97 [QCOM_RPM_MM_FABRIC_ARB] = { 95, 30, 29, 21 },
98 [QCOM_RPM_PM8921_SMPS1] = { 116, 31, 30, 2 },
99 [QCOM_RPM_PM8921_SMPS2] = { 118, 33, 31, 2 },
100 [QCOM_RPM_PM8921_SMPS3] = { 120, 35, 32, 2 },
101 [QCOM_RPM_PM8921_SMPS4] = { 122, 37, 33, 2 },
102 [QCOM_RPM_PM8921_SMPS5] = { 124, 39, 34, 2 },
103 [QCOM_RPM_PM8921_SMPS6] = { 126, 41, 35, 2 },
104 [QCOM_RPM_PM8921_SMPS7] = { 128, 43, 36, 2 },
105 [QCOM_RPM_PM8921_SMPS8] = { 130, 45, 37, 2 },
106 [QCOM_RPM_PM8921_LDO1] = { 132, 47, 38, 2 },
107 [QCOM_RPM_PM8921_LDO2] = { 134, 49, 39, 2 },
108 [QCOM_RPM_PM8921_LDO3] = { 136, 51, 40, 2 },
109 [QCOM_RPM_PM8921_LDO4] = { 138, 53, 41, 2 },
110 [QCOM_RPM_PM8921_LDO5] = { 140, 55, 42, 2 },
111 [QCOM_RPM_PM8921_LDO6] = { 142, 57, 43, 2 },
112 [QCOM_RPM_PM8921_LDO7] = { 144, 59, 44, 2 },
113 [QCOM_RPM_PM8921_LDO8] = { 146, 61, 45, 2 },
114 [QCOM_RPM_PM8921_LDO9] = { 148, 63, 46, 2 },
115 [QCOM_RPM_PM8921_LDO10] = { 150, 65, 47, 2 },
116 [QCOM_RPM_PM8921_LDO11] = { 152, 67, 48, 2 },
117 [QCOM_RPM_PM8921_LDO12] = { 154, 69, 49, 2 },
118 [QCOM_RPM_PM8921_LDO13] = { 156, 71, 50, 2 },
119 [QCOM_RPM_PM8921_LDO14] = { 158, 73, 51, 2 },
120 [QCOM_RPM_PM8921_LDO15] = { 160, 75, 52, 2 },
121 [QCOM_RPM_PM8921_LDO16] = { 162, 77, 53, 2 },
122 [QCOM_RPM_PM8921_LDO17] = { 164, 79, 54, 2 },
123 [QCOM_RPM_PM8921_LDO18] = { 166, 81, 55, 2 },
124 [QCOM_RPM_PM8921_LDO19] = { 168, 83, 56, 2 },
125 [QCOM_RPM_PM8921_LDO20] = { 170, 85, 57, 2 },
126 [QCOM_RPM_PM8921_LDO21] = { 172, 87, 58, 2 },
127 [QCOM_RPM_PM8921_LDO22] = { 174, 89, 59, 2 },
128 [QCOM_RPM_PM8921_LDO23] = { 176, 91, 60, 2 },
129 [QCOM_RPM_PM8921_LDO24] = { 178, 93, 61, 2 },
130 [QCOM_RPM_PM8921_LDO25] = { 180, 95, 62, 2 },
131 [QCOM_RPM_PM8921_LDO26] = { 182, 97, 63, 2 },
132 [QCOM_RPM_PM8921_LDO27] = { 184, 99, 64, 2 },
133 [QCOM_RPM_PM8921_LDO28] = { 186, 101, 65, 2 },
134 [QCOM_RPM_PM8921_LDO29] = { 188, 103, 66, 2 },
135 [QCOM_RPM_PM8921_CLK1] = { 190, 105, 67, 2 },
136 [QCOM_RPM_PM8921_CLK2] = { 192, 107, 68, 2 },
137 [QCOM_RPM_PM8921_LVS1] = { 194, 109, 69, 1 },
138 [QCOM_RPM_PM8921_LVS2] = { 195, 110, 70, 1 },
139 [QCOM_RPM_PM8921_LVS3] = { 196, 111, 71, 1 },
140 [QCOM_RPM_PM8921_LVS4] = { 197, 112, 72, 1 },
141 [QCOM_RPM_PM8921_LVS5] = { 198, 113, 73, 1 },
142 [QCOM_RPM_PM8921_LVS6] = { 199, 114, 74, 1 },
143 [QCOM_RPM_PM8921_LVS7] = { 200, 115, 75, 1 },
144 [QCOM_RPM_PM8821_SMPS1] = { 201, 116, 76, 2 },
145 [QCOM_RPM_PM8821_SMPS2] = { 203, 118, 77, 2 },
146 [QCOM_RPM_PM8821_LDO1] = { 205, 120, 78, 2 },
147 [QCOM_RPM_PM8921_NCP] = { 207, 122, 80, 2 },
148 [QCOM_RPM_CXO_BUFFERS] = { 209, 124, 81, 1 },
149 [QCOM_RPM_USB_OTG_SWITCH] = { 210, 125, 82, 1 },
150 [QCOM_RPM_HDMI_SWITCH] = { 211, 126, 83, 1 },
151 [QCOM_RPM_DDR_DMM] = { 212, 127, 84, 2 },
152 [QCOM_RPM_VDDMIN_GPIO] = { 215, 131, 89, 1 },
153};
154
155static const struct qcom_rpm_data apq8064_template = {
156 .version = 3,
157 .resource_table = apq8064_rpm_resource_table,
158 .n_resources = ARRAY_SIZE(apq8064_rpm_resource_table),
159};
160
161static const struct qcom_rpm_resource msm8660_rpm_resource_table[] = {
162 [QCOM_RPM_CXO_CLK] = { 32, 12, 5, 1 },
163 [QCOM_RPM_PXO_CLK] = { 33, 13, 6, 1 },
164 [QCOM_RPM_PLL_4] = { 34, 14, 7, 1 },
165 [QCOM_RPM_APPS_FABRIC_CLK] = { 35, 15, 8, 1 },
166 [QCOM_RPM_SYS_FABRIC_CLK] = { 36, 16, 9, 1 },
167 [QCOM_RPM_MM_FABRIC_CLK] = { 37, 17, 10, 1 },
168 [QCOM_RPM_DAYTONA_FABRIC_CLK] = { 38, 18, 11, 1 },
169 [QCOM_RPM_SFPB_CLK] = { 39, 19, 12, 1 },
170 [QCOM_RPM_CFPB_CLK] = { 40, 20, 13, 1 },
171 [QCOM_RPM_MMFPB_CLK] = { 41, 21, 14, 1 },
172 [QCOM_RPM_SMI_CLK] = { 42, 22, 15, 1 },
173 [QCOM_RPM_EBI1_CLK] = { 43, 23, 16, 1 },
174 [QCOM_RPM_APPS_L2_CACHE_CTL] = { 44, 24, 17, 1 },
175 [QCOM_RPM_APPS_FABRIC_HALT] = { 45, 25, 18, 2 },
176 [QCOM_RPM_APPS_FABRIC_MODE] = { 47, 26, 19, 3 },
177 [QCOM_RPM_APPS_FABRIC_ARB] = { 51, 28, 21, 6 },
178 [QCOM_RPM_SYS_FABRIC_HALT] = { 63, 29, 22, 2 },
179 [QCOM_RPM_SYS_FABRIC_MODE] = { 65, 30, 23, 3 },
180 [QCOM_RPM_SYS_FABRIC_ARB] = { 69, 32, 25, 22 },
181 [QCOM_RPM_MM_FABRIC_HALT] = { 105, 33, 26, 2 },
182 [QCOM_RPM_MM_FABRIC_MODE] = { 107, 34, 27, 3 },
183 [QCOM_RPM_MM_FABRIC_ARB] = { 111, 36, 29, 23 },
184 [QCOM_RPM_PM8901_SMPS0] = { 134, 37, 30, 2 },
185 [QCOM_RPM_PM8901_SMPS1] = { 136, 39, 31, 2 },
186 [QCOM_RPM_PM8901_SMPS2] = { 138, 41, 32, 2 },
187 [QCOM_RPM_PM8901_SMPS3] = { 140, 43, 33, 2 },
188 [QCOM_RPM_PM8901_SMPS4] = { 142, 45, 34, 2 },
189 [QCOM_RPM_PM8901_LDO0] = { 144, 47, 35, 2 },
190 [QCOM_RPM_PM8901_LDO1] = { 146, 49, 36, 2 },
191 [QCOM_RPM_PM8901_LDO2] = { 148, 51, 37, 2 },
192 [QCOM_RPM_PM8901_LDO3] = { 150, 53, 38, 2 },
193 [QCOM_RPM_PM8901_LDO4] = { 152, 55, 39, 2 },
194 [QCOM_RPM_PM8901_LDO5] = { 154, 57, 40, 2 },
195 [QCOM_RPM_PM8901_LDO6] = { 156, 59, 41, 2 },
196 [QCOM_RPM_PM8901_LVS0] = { 158, 61, 42, 1 },
197 [QCOM_RPM_PM8901_LVS1] = { 159, 62, 43, 1 },
198 [QCOM_RPM_PM8901_LVS2] = { 160, 63, 44, 1 },
199 [QCOM_RPM_PM8901_LVS3] = { 161, 64, 45, 1 },
200 [QCOM_RPM_PM8901_MVS] = { 162, 65, 46, 1 },
201 [QCOM_RPM_PM8058_SMPS0] = { 163, 66, 47, 2 },
202 [QCOM_RPM_PM8058_SMPS1] = { 165, 68, 48, 2 },
203 [QCOM_RPM_PM8058_SMPS2] = { 167, 70, 49, 2 },
204 [QCOM_RPM_PM8058_SMPS3] = { 169, 72, 50, 2 },
205 [QCOM_RPM_PM8058_SMPS4] = { 171, 74, 51, 2 },
206 [QCOM_RPM_PM8058_LDO0] = { 173, 76, 52, 2 },
207 [QCOM_RPM_PM8058_LDO1] = { 175, 78, 53, 2 },
208 [QCOM_RPM_PM8058_LDO2] = { 177, 80, 54, 2 },
209 [QCOM_RPM_PM8058_LDO3] = { 179, 82, 55, 2 },
210 [QCOM_RPM_PM8058_LDO4] = { 181, 84, 56, 2 },
211 [QCOM_RPM_PM8058_LDO5] = { 183, 86, 57, 2 },
212 [QCOM_RPM_PM8058_LDO6] = { 185, 88, 58, 2 },
213 [QCOM_RPM_PM8058_LDO7] = { 187, 90, 59, 2 },
214 [QCOM_RPM_PM8058_LDO8] = { 189, 92, 60, 2 },
215 [QCOM_RPM_PM8058_LDO9] = { 191, 94, 61, 2 },
216 [QCOM_RPM_PM8058_LDO10] = { 193, 96, 62, 2 },
217 [QCOM_RPM_PM8058_LDO11] = { 195, 98, 63, 2 },
218 [QCOM_RPM_PM8058_LDO12] = { 197, 100, 64, 2 },
219 [QCOM_RPM_PM8058_LDO13] = { 199, 102, 65, 2 },
220 [QCOM_RPM_PM8058_LDO14] = { 201, 104, 66, 2 },
221 [QCOM_RPM_PM8058_LDO15] = { 203, 106, 67, 2 },
222 [QCOM_RPM_PM8058_LDO16] = { 205, 108, 68, 2 },
223 [QCOM_RPM_PM8058_LDO17] = { 207, 110, 69, 2 },
224 [QCOM_RPM_PM8058_LDO18] = { 209, 112, 70, 2 },
225 [QCOM_RPM_PM8058_LDO19] = { 211, 114, 71, 2 },
226 [QCOM_RPM_PM8058_LDO20] = { 213, 116, 72, 2 },
227 [QCOM_RPM_PM8058_LDO21] = { 215, 118, 73, 2 },
228 [QCOM_RPM_PM8058_LDO22] = { 217, 120, 74, 2 },
229 [QCOM_RPM_PM8058_LDO23] = { 219, 122, 75, 2 },
230 [QCOM_RPM_PM8058_LDO24] = { 221, 124, 76, 2 },
231 [QCOM_RPM_PM8058_LDO25] = { 223, 126, 77, 2 },
232 [QCOM_RPM_PM8058_LVS0] = { 225, 128, 78, 1 },
233 [QCOM_RPM_PM8058_LVS1] = { 226, 129, 79, 1 },
234 [QCOM_RPM_PM8058_NCP] = { 227, 130, 80, 2 },
235 [QCOM_RPM_CXO_BUFFERS] = { 229, 132, 81, 1 },
236};
237
238static const struct qcom_rpm_data msm8660_template = {
239 .version = 2,
240 .resource_table = msm8660_rpm_resource_table,
241 .n_resources = ARRAY_SIZE(msm8660_rpm_resource_table),
242};
243
244static const struct qcom_rpm_resource msm8960_rpm_resource_table[] = {
245 [QCOM_RPM_CXO_CLK] = { 25, 9, 5, 1 },
246 [QCOM_RPM_PXO_CLK] = { 26, 10, 6, 1 },
247 [QCOM_RPM_APPS_FABRIC_CLK] = { 27, 11, 8, 1 },
248 [QCOM_RPM_SYS_FABRIC_CLK] = { 28, 12, 9, 1 },
249 [QCOM_RPM_MM_FABRIC_CLK] = { 29, 13, 10, 1 },
250 [QCOM_RPM_DAYTONA_FABRIC_CLK] = { 30, 14, 11, 1 },
251 [QCOM_RPM_SFPB_CLK] = { 31, 15, 12, 1 },
252 [QCOM_RPM_CFPB_CLK] = { 32, 16, 13, 1 },
253 [QCOM_RPM_MMFPB_CLK] = { 33, 17, 14, 1 },
254 [QCOM_RPM_EBI1_CLK] = { 34, 18, 16, 1 },
255 [QCOM_RPM_APPS_FABRIC_HALT] = { 35, 19, 18, 1 },
256 [QCOM_RPM_APPS_FABRIC_MODE] = { 37, 20, 19, 1 },
257 [QCOM_RPM_APPS_FABRIC_IOCTL] = { 40, 21, 20, 1 },
258 [QCOM_RPM_APPS_FABRIC_ARB] = { 41, 22, 21, 12 },
259 [QCOM_RPM_SYS_FABRIC_HALT] = { 53, 23, 22, 1 },
260 [QCOM_RPM_SYS_FABRIC_MODE] = { 55, 24, 23, 1 },
261 [QCOM_RPM_SYS_FABRIC_IOCTL] = { 58, 25, 24, 1 },
262 [QCOM_RPM_SYS_FABRIC_ARB] = { 59, 26, 25, 29 },
263 [QCOM_RPM_MM_FABRIC_HALT] = { 88, 27, 26, 1 },
264 [QCOM_RPM_MM_FABRIC_MODE] = { 90, 28, 27, 1 },
265 [QCOM_RPM_MM_FABRIC_IOCTL] = { 93, 29, 28, 1 },
266 [QCOM_RPM_MM_FABRIC_ARB] = { 94, 30, 29, 23 },
267 [QCOM_RPM_PM8921_SMPS1] = { 117, 31, 30, 2 },
268 [QCOM_RPM_PM8921_SMPS2] = { 119, 33, 31, 2 },
269 [QCOM_RPM_PM8921_SMPS3] = { 121, 35, 32, 2 },
270 [QCOM_RPM_PM8921_SMPS4] = { 123, 37, 33, 2 },
271 [QCOM_RPM_PM8921_SMPS5] = { 125, 39, 34, 2 },
272 [QCOM_RPM_PM8921_SMPS6] = { 127, 41, 35, 2 },
273 [QCOM_RPM_PM8921_SMPS7] = { 129, 43, 36, 2 },
274 [QCOM_RPM_PM8921_SMPS8] = { 131, 45, 37, 2 },
275 [QCOM_RPM_PM8921_LDO1] = { 133, 47, 38, 2 },
276 [QCOM_RPM_PM8921_LDO2] = { 135, 49, 39, 2 },
277 [QCOM_RPM_PM8921_LDO3] = { 137, 51, 40, 2 },
278 [QCOM_RPM_PM8921_LDO4] = { 139, 53, 41, 2 },
279 [QCOM_RPM_PM8921_LDO5] = { 141, 55, 42, 2 },
280 [QCOM_RPM_PM8921_LDO6] = { 143, 57, 43, 2 },
281 [QCOM_RPM_PM8921_LDO7] = { 145, 59, 44, 2 },
282 [QCOM_RPM_PM8921_LDO8] = { 147, 61, 45, 2 },
283 [QCOM_RPM_PM8921_LDO9] = { 149, 63, 46, 2 },
284 [QCOM_RPM_PM8921_LDO10] = { 151, 65, 47, 2 },
285 [QCOM_RPM_PM8921_LDO11] = { 153, 67, 48, 2 },
286 [QCOM_RPM_PM8921_LDO12] = { 155, 69, 49, 2 },
287 [QCOM_RPM_PM8921_LDO13] = { 157, 71, 50, 2 },
288 [QCOM_RPM_PM8921_LDO14] = { 159, 73, 51, 2 },
289 [QCOM_RPM_PM8921_LDO15] = { 161, 75, 52, 2 },
290 [QCOM_RPM_PM8921_LDO16] = { 163, 77, 53, 2 },
291 [QCOM_RPM_PM8921_LDO17] = { 165, 79, 54, 2 },
292 [QCOM_RPM_PM8921_LDO18] = { 167, 81, 55, 2 },
293 [QCOM_RPM_PM8921_LDO19] = { 169, 83, 56, 2 },
294 [QCOM_RPM_PM8921_LDO20] = { 171, 85, 57, 2 },
295 [QCOM_RPM_PM8921_LDO21] = { 173, 87, 58, 2 },
296 [QCOM_RPM_PM8921_LDO22] = { 175, 89, 59, 2 },
297 [QCOM_RPM_PM8921_LDO23] = { 177, 91, 60, 2 },
298 [QCOM_RPM_PM8921_LDO24] = { 179, 93, 61, 2 },
299 [QCOM_RPM_PM8921_LDO25] = { 181, 95, 62, 2 },
300 [QCOM_RPM_PM8921_LDO26] = { 183, 97, 63, 2 },
301 [QCOM_RPM_PM8921_LDO27] = { 185, 99, 64, 2 },
302 [QCOM_RPM_PM8921_LDO28] = { 187, 101, 65, 2 },
303 [QCOM_RPM_PM8921_LDO29] = { 189, 103, 66, 2 },
304 [QCOM_RPM_PM8921_CLK1] = { 191, 105, 67, 2 },
305 [QCOM_RPM_PM8921_CLK2] = { 193, 107, 68, 2 },
306 [QCOM_RPM_PM8921_LVS1] = { 195, 109, 69, 1 },
307 [QCOM_RPM_PM8921_LVS2] = { 196, 110, 70, 1 },
308 [QCOM_RPM_PM8921_LVS3] = { 197, 111, 71, 1 },
309 [QCOM_RPM_PM8921_LVS4] = { 198, 112, 72, 1 },
310 [QCOM_RPM_PM8921_LVS5] = { 199, 113, 73, 1 },
311 [QCOM_RPM_PM8921_LVS6] = { 200, 114, 74, 1 },
312 [QCOM_RPM_PM8921_LVS7] = { 201, 115, 75, 1 },
313 [QCOM_RPM_PM8921_NCP] = { 202, 116, 80, 2 },
314 [QCOM_RPM_CXO_BUFFERS] = { 204, 118, 81, 1 },
315 [QCOM_RPM_USB_OTG_SWITCH] = { 205, 119, 82, 1 },
316 [QCOM_RPM_HDMI_SWITCH] = { 206, 120, 83, 1 },
317 [QCOM_RPM_DDR_DMM] = { 207, 121, 84, 2 },
318};
319
320static const struct qcom_rpm_data msm8960_template = {
321 .version = 3,
322 .resource_table = msm8960_rpm_resource_table,
323 .n_resources = ARRAY_SIZE(msm8960_rpm_resource_table),
324};
325
326static const struct of_device_id qcom_rpm_of_match[] = {
327 { .compatible = "qcom,rpm-apq8064", .data = &apq8064_template },
328 { .compatible = "qcom,rpm-msm8660", .data = &msm8660_template },
329 { .compatible = "qcom,rpm-msm8960", .data = &msm8960_template },
330 { }
331};
332MODULE_DEVICE_TABLE(of, qcom_rpm_of_match);
333
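/*
 * Request flow, as implemented below: write the payload into the
 * resource's slot in the request region, flag the resource in the
 * request-select bitmap, latch the target state into the request-context
 * register, then ring the IPC doorbell and wait for the RPM to accept or
 * reject the request via the ack interrupt.
 */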
334int qcom_rpm_write(struct qcom_rpm *rpm,
335 int state,
336 int resource,
337 u32 *buf, size_t count)
338{
339 const struct qcom_rpm_resource *res;
340 const struct qcom_rpm_data *data = rpm->data;
341 u32 sel_mask[RPM_SELECT_SIZE] = { 0 };
342 int left;
343 int ret = 0;
344 int i;
345
346 if (WARN_ON(resource < 0 || resource >= data->n_resources))
347 return -EINVAL;
348
349 res = &data->resource_table[resource];
350 if (WARN_ON(res->size != count))
351 return -EINVAL;
352
353 mutex_lock(&rpm->lock);
354
355 for (i = 0; i < res->size; i++)
356 writel_relaxed(buf[i], RPM_REQ_REG(rpm, res->target_id + i));
357
358 bitmap_set((unsigned long *)sel_mask, res->select_id, 1);
359 for (i = 0; i < ARRAY_SIZE(sel_mask); i++) {
360 writel_relaxed(sel_mask[i],
361 RPM_CTRL_REG(rpm, RPM_REQ_SELECT + i));
362 }
363
364 writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, RPM_REQUEST_CONTEXT));
365
366 reinit_completion(&rpm->ack);
367 regmap_write(rpm->ipc_regmap, rpm->ipc_offset, BIT(rpm->ipc_bit));
368
369 left = wait_for_completion_timeout(&rpm->ack, RPM_REQUEST_TIMEOUT);
370 if (!left)
371 ret = -ETIMEDOUT;
372 else if (rpm->ack_status & RPM_REJECTED)
373 ret = -EIO;
374
375 mutex_unlock(&rpm->lock);
376
377 return ret;
378}
379EXPORT_SYMBOL(qcom_rpm_write);
380
381static irqreturn_t qcom_rpm_ack_interrupt(int irq, void *dev)
382{
383 struct qcom_rpm *rpm = dev;
384 u32 ack;
385 int i;
386
387 ack = readl_relaxed(RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
388 for (i = 0; i < RPM_SELECT_SIZE; i++)
389 writel_relaxed(0, RPM_CTRL_REG(rpm, RPM_ACK_SELECTOR + i));
390 writel(0, RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
391
392 if (ack & RPM_NOTIFICATION) {
393 dev_warn(rpm->dev, "ignoring notification!\n");
394 } else {
395 rpm->ack_status = ack;
396 complete(&rpm->ack);
397 }
398
399 return IRQ_HANDLED;
400}
401
402static irqreturn_t qcom_rpm_err_interrupt(int irq, void *dev)
403{
404 struct qcom_rpm *rpm = dev;
405
406 regmap_write(rpm->ipc_regmap, rpm->ipc_offset, BIT(rpm->ipc_bit));
407 dev_err(rpm->dev, "RPM triggered fatal error\n");
408
409 return IRQ_HANDLED;
410}
411
412static irqreturn_t qcom_rpm_wakeup_interrupt(int irq, void *dev)
413{
414 return IRQ_HANDLED;
415}
416
417static int qcom_rpm_probe(struct platform_device *pdev)
418{
419 const struct of_device_id *match;
420 struct device_node *syscon_np;
421 struct resource *res;
422 struct qcom_rpm *rpm;
423 u32 fw_version[3];
424 int irq_wakeup;
425 int irq_ack;
426 int irq_err;
427 int ret;
428
429 rpm = devm_kzalloc(&pdev->dev, sizeof(*rpm), GFP_KERNEL);
430 if (!rpm)
431 return -ENOMEM;
432
433 rpm->dev = &pdev->dev;
434 mutex_init(&rpm->lock);
435 init_completion(&rpm->ack);
436
437 irq_ack = platform_get_irq_byname(pdev, "ack");
438 if (irq_ack < 0) {
439 dev_err(&pdev->dev, "required ack interrupt missing\n");
440 return irq_ack;
441 }
442
443 irq_err = platform_get_irq_byname(pdev, "err");
444 if (irq_err < 0) {
445 dev_err(&pdev->dev, "required err interrupt missing\n");
446 return irq_err;
447 }
448
449 irq_wakeup = platform_get_irq_byname(pdev, "wakeup");
450 if (irq_wakeup < 0) {
451 dev_err(&pdev->dev, "required wakeup interrupt missing\n");
452 return irq_wakeup;
453 }
454
455 match = of_match_device(qcom_rpm_of_match, &pdev->dev);
456 rpm->data = match->data;
457
458 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
459 rpm->status_regs = devm_ioremap_resource(&pdev->dev, res);
460 if (IS_ERR(rpm->status_regs))
461 return PTR_ERR(rpm->status_regs);
462 rpm->ctrl_regs = rpm->status_regs + 0x400;
463 rpm->req_regs = rpm->status_regs + 0x600;
464
465 syscon_np = of_parse_phandle(pdev->dev.of_node, "qcom,ipc", 0);
466 if (!syscon_np) {
467 dev_err(&pdev->dev, "no qcom,ipc node\n");
468 return -ENODEV;
469 }
470
471 rpm->ipc_regmap = syscon_node_to_regmap(syscon_np);
472 if (IS_ERR(rpm->ipc_regmap))
473 return PTR_ERR(rpm->ipc_regmap);
474
475 ret = of_property_read_u32_index(pdev->dev.of_node, "qcom,ipc", 1,
476 &rpm->ipc_offset);
477 if (ret < 0) {
478 dev_err(&pdev->dev, "no offset in qcom,ipc\n");
479 return -EINVAL;
480 }
481
482 ret = of_property_read_u32_index(pdev->dev.of_node, "qcom,ipc", 2,
483 &rpm->ipc_bit);
484 if (ret < 0) {
485 dev_err(&pdev->dev, "no bit in qcom,ipc\n");
486 return -EINVAL;
487 }
488
489 dev_set_drvdata(&pdev->dev, rpm);
490
491 fw_version[0] = readl(RPM_STATUS_REG(rpm, 0));
492 fw_version[1] = readl(RPM_STATUS_REG(rpm, 1));
493 fw_version[2] = readl(RPM_STATUS_REG(rpm, 2));
494 if (fw_version[0] != rpm->data->version) {
495 dev_err(&pdev->dev,
496 "RPM version %u.%u.%u incompatible with driver version %u",
497 fw_version[0],
498 fw_version[1],
499 fw_version[2],
500 rpm->data->version);
501 return -EFAULT;
502 }
503
504 dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0],
505 fw_version[1],
506 fw_version[2]);
507
508 ret = devm_request_irq(&pdev->dev,
509 irq_ack,
510 qcom_rpm_ack_interrupt,
511 IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND,
512 "qcom_rpm_ack",
513 rpm);
514 if (ret) {
515 dev_err(&pdev->dev, "failed to request ack interrupt\n");
516 return ret;
517 }
518
519 ret = irq_set_irq_wake(irq_ack, 1);
520 if (ret)
521 dev_warn(&pdev->dev, "failed to mark ack irq as wakeup\n");
522
523 ret = devm_request_irq(&pdev->dev,
524 irq_err,
525 qcom_rpm_err_interrupt,
526 IRQF_TRIGGER_RISING,
527 "qcom_rpm_err",
528 rpm);
529 if (ret) {
530 dev_err(&pdev->dev, "failed to request err interrupt\n");
531 return ret;
532 }
533
534 ret = devm_request_irq(&pdev->dev,
535 irq_wakeup,
536 qcom_rpm_wakeup_interrupt,
537 IRQF_TRIGGER_RISING,
538 "qcom_rpm_wakeup",
539 rpm);
540 if (ret) {
541 dev_err(&pdev->dev, "failed to request wakeup interrupt\n");
542 return ret;
543 }
544
545 ret = irq_set_irq_wake(irq_wakeup, 1);
546 if (ret)
547 dev_warn(&pdev->dev, "failed to mark wakeup irq as wakeup\n");
548
549 return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
550}
551
552static int qcom_rpm_remove(struct platform_device *pdev)
553{
554 of_platform_depopulate(&pdev->dev);
555 return 0;
556}
557
558static struct platform_driver qcom_rpm_driver = {
559 .probe = qcom_rpm_probe,
560 .remove = qcom_rpm_remove,
561 .driver = {
562 .name = "qcom_rpm",
563 .of_match_table = qcom_rpm_of_match,
564 },
565};
566
567static int __init qcom_rpm_init(void)
568{
569 return platform_driver_register(&qcom_rpm_driver);
570}
571arch_initcall(qcom_rpm_init);
572
573static void __exit qcom_rpm_exit(void)
574{
575 platform_driver_unregister(&qcom_rpm_driver);
576}
577module_exit(qcom_rpm_exit);
578
579MODULE_DESCRIPTION("Qualcomm Resource Power Manager driver");
580MODULE_LICENSE("GPL v2");
581MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
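For context, a consumer drives this interface roughly as follows. This is a minimal sketch, not part of the patch; it assumes a struct qcom_rpm handle obtained from the parent device, the QCOM_RPM_ACTIVE_STATE constant from the qcom-rpm dt-bindings header, and an illustrative resource/value pairing:

	u32 val = 1;	/* single-word request: QCOM_RPM_HDMI_SWITCH has size 1 */
	int ret;

	ret = qcom_rpm_write(rpm, QCOM_RPM_ACTIVE_STATE,
			     QCOM_RPM_HDMI_SWITCH, &val, 1);
	if (ret)
		dev_err(dev, "RPM request failed: %d\n", ret);

A count that disagrees with the table's size field, or an out-of-range resource index, fails fast with -EINVAL before anything is written to the request region.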
diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c
index 663f8a37aa6b..2d64430c719b 100644
--- a/drivers/mfd/retu-mfd.c
+++ b/drivers/mfd/retu-mfd.c
@@ -222,7 +222,7 @@ static struct regmap_bus retu_bus = {
222 .val_format_endian_default = REGMAP_ENDIAN_NATIVE, 222 .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
223}; 223};
224 224
225static struct regmap_config retu_config = { 225static const struct regmap_config retu_config = {
226 .reg_bits = 8, 226 .reg_bits = 8,
227 .val_bits = 16, 227 .val_bits = 16,
228}; 228};
diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c
new file mode 100644
index 000000000000..db395a6c52bc
--- /dev/null
+++ b/drivers/mfd/rt5033.c
@@ -0,0 +1,142 @@
1/*
2 * MFD core driver for the Richtek RT5033.
3 *
4 * RT5033 comprises multiple sub-devices: switching charger, fuel gauge,
5 * flash LED, current source, and LDO and BUCK regulators.
6 *
7 * Copyright (C) 2014 Samsung Electronics, Co., Ltd.
8 * Author: Beomho Seo <beomho.seo@samsung.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/err.h>
16#include <linux/module.h>
17#include <linux/interrupt.h>
18#include <linux/of_device.h>
19#include <linux/mfd/core.h>
20#include <linux/mfd/rt5033.h>
21#include <linux/mfd/rt5033-private.h>
22
23static const struct regmap_irq rt5033_irqs[] = {
24 { .mask = RT5033_PMIC_IRQ_BUCKOCP, },
25 { .mask = RT5033_PMIC_IRQ_BUCKLV, },
26 { .mask = RT5033_PMIC_IRQ_SAFELDOLV, },
27 { .mask = RT5033_PMIC_IRQ_LDOLV, },
28 { .mask = RT5033_PMIC_IRQ_OT, },
29 { .mask = RT5033_PMIC_IRQ_VDDA_UV, },
30};
31
32static const struct regmap_irq_chip rt5033_irq_chip = {
33 .name = "rt5033",
34 .status_base = RT5033_REG_PMIC_IRQ_STAT,
35 .mask_base = RT5033_REG_PMIC_IRQ_CTRL,
36 .mask_invert = true,
37 .num_regs = 1,
38 .irqs = rt5033_irqs,
39 .num_irqs = ARRAY_SIZE(rt5033_irqs),
40};
41
42static const struct mfd_cell rt5033_devs[] = {
43 { .name = "rt5033-regulator", },
44 {
45 .name = "rt5033-charger",
46 .of_compatible = "richtek,rt5033-charger",
47 }, {
48 .name = "rt5033-battery",
49 .of_compatible = "richtek,rt5033-battery",
50 },
51};
52
53static const struct regmap_config rt5033_regmap_config = {
54 .reg_bits = 8,
55 .val_bits = 8,
56 .max_register = RT5033_REG_END,
57};
58
59static int rt5033_i2c_probe(struct i2c_client *i2c,
60 const struct i2c_device_id *id)
61{
62 struct rt5033_dev *rt5033;
63 unsigned int dev_id;
64 int ret;
65
66 rt5033 = devm_kzalloc(&i2c->dev, sizeof(*rt5033), GFP_KERNEL);
67 if (!rt5033)
68 return -ENOMEM;
69
70 i2c_set_clientdata(i2c, rt5033);
71 rt5033->dev = &i2c->dev;
72 rt5033->irq = i2c->irq;
73 rt5033->wakeup = true;
74
75 rt5033->regmap = devm_regmap_init_i2c(i2c, &rt5033_regmap_config);
76 if (IS_ERR(rt5033->regmap)) {
77 dev_err(&i2c->dev, "Failed to allocate register map.\n");
78 return PTR_ERR(rt5033->regmap);
79 }
80
81 ret = regmap_read(rt5033->regmap, RT5033_REG_DEVICE_ID, &dev_id);
82 if (ret) {
83 dev_err(&i2c->dev, "Device not found\n");
84 return -ENODEV;
85 }
86	dev_info(&i2c->dev, "Device found (Device ID: %04x)\n", dev_id);
87
88 ret = regmap_add_irq_chip(rt5033->regmap, rt5033->irq,
89 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
90 0, &rt5033_irq_chip, &rt5033->irq_data);
91 if (ret) {
92 dev_err(&i2c->dev, "Failed to request IRQ %d: %d\n",
93 rt5033->irq, ret);
94 return ret;
95 }
96
97 ret = mfd_add_devices(rt5033->dev, -1, rt5033_devs,
98 ARRAY_SIZE(rt5033_devs), NULL, 0,
99 regmap_irq_get_domain(rt5033->irq_data));
100 if (ret < 0) {
101 dev_err(&i2c->dev, "Failed to add RT5033 child devices.\n");
102 return ret;
103 }
104
105 device_init_wakeup(rt5033->dev, rt5033->wakeup);
106
107 return 0;
108}
109
110static int rt5033_i2c_remove(struct i2c_client *i2c)
111{
112 mfd_remove_devices(&i2c->dev);
113
114 return 0;
115}
116
117static const struct i2c_device_id rt5033_i2c_id[] = {
118 { "rt5033", },
119 { }
120};
121MODULE_DEVICE_TABLE(i2c, rt5033_i2c_id);
122
123static const struct of_device_id rt5033_dt_match[] = {
124 { .compatible = "richtek,rt5033", },
125 { }
126};
127
128static struct i2c_driver rt5033_driver = {
129 .driver = {
130 .name = "rt5033",
131 .of_match_table = of_match_ptr(rt5033_dt_match),
132 },
133 .probe = rt5033_i2c_probe,
134 .remove = rt5033_i2c_remove,
135 .id_table = rt5033_i2c_id,
136};
137module_i2c_driver(rt5033_driver);
138
139MODULE_ALIAS("i2c:rt5033");
140MODULE_DESCRIPTION("Richtek RT5033 multi-function core driver");
141MODULE_AUTHOR("Beomho Seo <beomho.seo@samsung.com>");
142MODULE_LICENSE("GPL");
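A note on the IRQ plumbing above: with mask_invert set, the bits written to RT5033_REG_PMIC_IRQ_CTRL act as enable bits rather than mask bits. Sub-device drivers reach the shared state through their parent device; a hypothetical sketch (the cell drivers themselves land in separate patches):

	/* in a child platform driver's probe() */
	struct rt5033_dev *rt5033 = dev_get_drvdata(pdev->dev.parent);
	int virq;

	/* index into rt5033_irqs[] above; 0 is BUCKOCP */
	virq = regmap_irq_get_virq(rt5033->irq_data, 0);

mfd_add_devices() is handed the regmap IRQ domain, so any IRQ resources the cells declare are mapped through it automatically.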
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
index 210d1f85679e..ede50244f265 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/mfd/rtsx_usb.c
@@ -681,9 +681,27 @@ static void rtsx_usb_disconnect(struct usb_interface *intf)
681#ifdef CONFIG_PM 681#ifdef CONFIG_PM
682static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message) 682static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message)
683{ 683{
684 struct rtsx_ucr *ucr =
685 (struct rtsx_ucr *)usb_get_intfdata(intf);
686 u16 val = 0;
687
684 dev_dbg(&intf->dev, "%s called with pm message 0x%04x\n", 688 dev_dbg(&intf->dev, "%s called with pm message 0x%04x\n",
685 __func__, message.event); 689 __func__, message.event);
686 690
691 if (PMSG_IS_AUTO(message)) {
692 if (mutex_trylock(&ucr->dev_mutex)) {
693 rtsx_usb_get_card_status(ucr, &val);
694 mutex_unlock(&ucr->dev_mutex);
695
696		/* Defer the autosuspend if a card is present */
697 if (val & (SD_CD | MS_CD))
698 return -EAGAIN;
699 } else {
700		/* There is an ongoing operation */
701 return -EAGAIN;
702 }
703 }
704
687 return 0; 705 return 0;
688} 706}
689 707
diff --git a/drivers/mfd/smsc-ece1099.c b/drivers/mfd/smsc-ece1099.c
index 90112d4cc905..03246880d484 100644
--- a/drivers/mfd/smsc-ece1099.c
+++ b/drivers/mfd/smsc-ece1099.c
@@ -24,7 +24,7 @@
24#include <linux/mfd/smsc.h> 24#include <linux/mfd/smsc.h>
25#include <linux/of_platform.h> 25#include <linux/of_platform.h>
26 26
27static struct regmap_config smsc_regmap_config = { 27static const struct regmap_config smsc_regmap_config = {
28 .reg_bits = 8, 28 .reg_bits = 8,
29 .val_bits = 8, 29 .val_bits = 8,
30 .max_register = SMSC_VEN_ID_H, 30 .max_register = SMSC_VEN_ID_H,
diff --git a/drivers/mfd/sun6i-prcm.c b/drivers/mfd/sun6i-prcm.c
index 2f2e9f062571..191173166d65 100644
--- a/drivers/mfd/sun6i-prcm.c
+++ b/drivers/mfd/sun6i-prcm.c
@@ -41,6 +41,14 @@ static const struct resource sun6i_a31_apb0_gates_clk_res[] = {
41 }, 41 },
42}; 42};
43 43
44static const struct resource sun6i_a31_ir_clk_res[] = {
45 {
46 .start = 0x54,
47 .end = 0x57,
48 .flags = IORESOURCE_MEM,
49 },
50};
51
44static const struct resource sun6i_a31_apb0_rstc_res[] = { 52static const struct resource sun6i_a31_apb0_rstc_res[] = {
45 { 53 {
46 .start = 0xb0, 54 .start = 0xb0,
@@ -69,6 +77,12 @@ static const struct mfd_cell sun6i_a31_prcm_subdevs[] = {
69 .resources = sun6i_a31_apb0_gates_clk_res, 77 .resources = sun6i_a31_apb0_gates_clk_res,
70 }, 78 },
71 { 79 {
80 .name = "sun6i-a31-ir-clk",
81 .of_compatible = "allwinner,sun4i-a10-mod0-clk",
82 .num_resources = ARRAY_SIZE(sun6i_a31_ir_clk_res),
83 .resources = sun6i_a31_ir_clk_res,
84 },
85 {
72 .name = "sun6i-a31-apb0-clock-reset", 86 .name = "sun6i-a31-apb0-clock-reset",
73 .of_compatible = "allwinner,sun6i-a31-clock-reset", 87 .of_compatible = "allwinner,sun6i-a31-clock-reset",
74 .num_resources = ARRAY_SIZE(sun6i_a31_apb0_rstc_res), 88 .num_resources = ARRAY_SIZE(sun6i_a31_apb0_rstc_res),
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 80a919a8ca97..7d1cfc1d3ce0 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -145,7 +145,7 @@ int tps65217_clear_bits(struct tps65217 *tps, unsigned int reg,
145} 145}
146EXPORT_SYMBOL_GPL(tps65217_clear_bits); 146EXPORT_SYMBOL_GPL(tps65217_clear_bits);
147 147
148static struct regmap_config tps65217_regmap_config = { 148static const struct regmap_config tps65217_regmap_config = {
149 .reg_bits = 8, 149 .reg_bits = 8,
150 .val_bits = 8, 150 .val_bits = 8,
151 151
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index d6b764349f9d..7af11a8b9753 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -135,7 +135,7 @@ static const struct regmap_access_table tps65218_volatile_table = {
135 .n_yes_ranges = ARRAY_SIZE(tps65218_yes_ranges), 135 .n_yes_ranges = ARRAY_SIZE(tps65218_yes_ranges),
136}; 136};
137 137
138static struct regmap_config tps65218_regmap_config = { 138static const struct regmap_config tps65218_regmap_config = {
139 .reg_bits = 8, 139 .reg_bits = 8,
140 .val_bits = 8, 140 .val_bits = 8,
141 .cache_type = REGCACHE_RBTREE, 141 .cache_type = REGCACHE_RBTREE,
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index db11b4f40611..489674a2497e 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -207,7 +207,7 @@ static struct twl_mapping twl4030_map[] = {
207 { 2, TWL5031_BASEADD_INTERRUPTS }, 207 { 2, TWL5031_BASEADD_INTERRUPTS },
208}; 208};
209 209
210static struct reg_default twl4030_49_defaults[] = { 210static const struct reg_default twl4030_49_defaults[] = {
211 /* Audio Registers */ 211 /* Audio Registers */
212 { 0x01, 0x00}, /* CODEC_MODE */ 212 { 0x01, 0x00}, /* CODEC_MODE */
213 { 0x02, 0x00}, /* OPTION */ 213 { 0x02, 0x00}, /* OPTION */
@@ -306,7 +306,7 @@ static const struct regmap_access_table twl4030_49_volatile_table = {
306 .n_yes_ranges = ARRAY_SIZE(twl4030_49_volatile_ranges), 306 .n_yes_ranges = ARRAY_SIZE(twl4030_49_volatile_ranges),
307}; 307};
308 308
309static struct regmap_config twl4030_regmap_config[4] = { 309static const struct regmap_config twl4030_regmap_config[4] = {
310 { 310 {
311 /* Address 0x48 */ 311 /* Address 0x48 */
312 .reg_bits = 8, 312 .reg_bits = 8,
@@ -369,7 +369,7 @@ static struct twl_mapping twl6030_map[] = {
369 { 1, TWL6030_BASEADD_GASGAUGE }, 369 { 1, TWL6030_BASEADD_GASGAUGE },
370}; 370};
371 371
372static struct regmap_config twl6030_regmap_config[3] = { 372static const struct regmap_config twl6030_regmap_config[3] = {
373 { 373 {
374 /* Address 0x48 */ 374 /* Address 0x48 */
375 .reg_bits = 8, 375 .reg_bits = 8,
@@ -1087,7 +1087,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
1087 struct twl4030_platform_data *pdata = dev_get_platdata(&client->dev); 1087 struct twl4030_platform_data *pdata = dev_get_platdata(&client->dev);
1088 struct device_node *node = client->dev.of_node; 1088 struct device_node *node = client->dev.of_node;
1089 struct platform_device *pdev; 1089 struct platform_device *pdev;
1090 struct regmap_config *twl_regmap_config; 1090 const struct regmap_config *twl_regmap_config;
1091 int irq_base = 0; 1091 int irq_base = 0;
1092 int status; 1092 int status;
1093 unsigned i, num_slaves; 1093 unsigned i, num_slaves;
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index 9687645162ae..f71ee3dbc2a2 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -44,7 +44,7 @@
44#define VIBRACTRL_MEMBER(reg) ((reg == TWL6040_REG_VIBCTLL) ? 0 : 1) 44#define VIBRACTRL_MEMBER(reg) ((reg == TWL6040_REG_VIBCTLL) ? 0 : 1)
45#define TWL6040_NUM_SUPPLIES (2) 45#define TWL6040_NUM_SUPPLIES (2)
46 46
47static struct reg_default twl6040_defaults[] = { 47static const struct reg_default twl6040_defaults[] = {
48 { 0x01, 0x4B }, /* REG_ASICID (ro) */ 48 { 0x01, 0x4B }, /* REG_ASICID (ro) */
49 { 0x02, 0x00 }, /* REG_ASICREV (ro) */ 49 { 0x02, 0x00 }, /* REG_ASICREV (ro) */
50 { 0x03, 0x00 }, /* REG_INTID */ 50 { 0x03, 0x00 }, /* REG_INTID */
@@ -580,7 +580,7 @@ static bool twl6040_writeable_reg(struct device *dev, unsigned int reg)
580 } 580 }
581} 581}
582 582
583static struct regmap_config twl6040_regmap_config = { 583static const struct regmap_config twl6040_regmap_config = {
584 .reg_bits = 8, 584 .reg_bits = 8,
585 .val_bits = 8, 585 .val_bits = 8,
586 586
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 6ca9d25cc3f0..53ae5af5d6e4 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -36,12 +36,12 @@
36static const struct mfd_cell wm8994_regulator_devs[] = { 36static const struct mfd_cell wm8994_regulator_devs[] = {
37 { 37 {
38 .name = "wm8994-ldo", 38 .name = "wm8994-ldo",
39 .id = 1, 39 .id = 0,
40 .pm_runtime_no_callbacks = true, 40 .pm_runtime_no_callbacks = true,
41 }, 41 },
42 { 42 {
43 .name = "wm8994-ldo", 43 .name = "wm8994-ldo",
44 .id = 2, 44 .id = 1,
45 .pm_runtime_no_callbacks = true, 45 .pm_runtime_no_callbacks = true,
46 }, 46 },
47}; 47};
@@ -344,7 +344,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
344 dev_set_drvdata(wm8994->dev, wm8994); 344 dev_set_drvdata(wm8994->dev, wm8994);
345 345
346 /* Add the on-chip regulators first for bootstrapping */ 346 /* Add the on-chip regulators first for bootstrapping */
347 ret = mfd_add_devices(wm8994->dev, -1, 347 ret = mfd_add_devices(wm8994->dev, 0,
348 wm8994_regulator_devs, 348 wm8994_regulator_devs,
349 ARRAY_SIZE(wm8994_regulator_devs), 349 ARRAY_SIZE(wm8994_regulator_devs),
350 NULL, 0, NULL); 350 NULL, 0, NULL);
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 6af0a28ba37d..e8a4218b5726 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -21,8 +21,6 @@
21#include <linux/err.h> 21#include <linux/err.h>
22 22
23#include <linux/clk.h> 23#include <linux/clk.h>
24#include <linux/clk/sunxi.h>
25
26#include <linux/gpio.h> 24#include <linux/gpio.h>
27#include <linux/platform_device.h> 25#include <linux/platform_device.h>
28#include <linux/spinlock.h> 26#include <linux/spinlock.h>
@@ -229,6 +227,8 @@ struct sunxi_mmc_host {
229 /* clock management */ 227 /* clock management */
230 struct clk *clk_ahb; 228 struct clk *clk_ahb;
231 struct clk *clk_mmc; 229 struct clk *clk_mmc;
230 struct clk *clk_sample;
231 struct clk *clk_output;
232 232
233 /* irq */ 233 /* irq */
234 spinlock_t lock; 234 spinlock_t lock;
@@ -653,26 +653,31 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
653 653
654 /* determine delays */ 654 /* determine delays */
655 if (rate <= 400000) { 655 if (rate <= 400000) {
656 oclk_dly = 0; 656 oclk_dly = 180;
657 sclk_dly = 7; 657 sclk_dly = 42;
658 } else if (rate <= 25000000) { 658 } else if (rate <= 25000000) {
659 oclk_dly = 0; 659 oclk_dly = 180;
660 sclk_dly = 5; 660 sclk_dly = 75;
661 } else if (rate <= 50000000) { 661 } else if (rate <= 50000000) {
662 if (ios->timing == MMC_TIMING_UHS_DDR50) { 662 if (ios->timing == MMC_TIMING_UHS_DDR50) {
663 oclk_dly = 2; 663 oclk_dly = 60;
664 sclk_dly = 4; 664 sclk_dly = 120;
665 } else { 665 } else {
666 oclk_dly = 3; 666 oclk_dly = 90;
667 sclk_dly = 5; 667 sclk_dly = 150;
668 } 668 }
669 } else if (rate <= 100000000) {
670 oclk_dly = 6;
671 sclk_dly = 24;
672 } else if (rate <= 200000000) {
673 oclk_dly = 3;
674 sclk_dly = 12;
669 } else { 675 } else {
670 /* rate > 50000000 */ 676 return -EINVAL;
671 oclk_dly = 2;
672 sclk_dly = 4;
673 } 677 }
674 678
675 clk_sunxi_mmc_phase_control(host->clk_mmc, sclk_dly, oclk_dly); 679 clk_set_phase(host->clk_sample, sclk_dly);
680 clk_set_phase(host->clk_output, oclk_dly);
676 681
677 return sunxi_mmc_oclk_onoff(host, 1); 682 return sunxi_mmc_oclk_onoff(host, 1);
678} 683}
@@ -913,6 +918,18 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
913 return PTR_ERR(host->clk_mmc); 918 return PTR_ERR(host->clk_mmc);
914 } 919 }
915 920
921 host->clk_output = devm_clk_get(&pdev->dev, "output");
922 if (IS_ERR(host->clk_output)) {
923 dev_err(&pdev->dev, "Could not get output clock\n");
924 return PTR_ERR(host->clk_output);
925 }
926
927 host->clk_sample = devm_clk_get(&pdev->dev, "sample");
928 if (IS_ERR(host->clk_sample)) {
929 dev_err(&pdev->dev, "Could not get sample clock\n");
930 return PTR_ERR(host->clk_sample);
931 }
932
916 host->reset = devm_reset_control_get(&pdev->dev, "ahb"); 933 host->reset = devm_reset_control_get(&pdev->dev, "ahb");
917 934
918 ret = clk_prepare_enable(host->clk_ahb); 935 ret = clk_prepare_enable(host->clk_ahb);
@@ -927,11 +944,23 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
927 goto error_disable_clk_ahb; 944 goto error_disable_clk_ahb;
928 } 945 }
929 946
947 ret = clk_prepare_enable(host->clk_output);
948 if (ret) {
949 dev_err(&pdev->dev, "Enable output clk err %d\n", ret);
950 goto error_disable_clk_mmc;
951 }
952
953 ret = clk_prepare_enable(host->clk_sample);
954 if (ret) {
955 dev_err(&pdev->dev, "Enable sample clk err %d\n", ret);
956 goto error_disable_clk_output;
957 }
958
930 if (!IS_ERR(host->reset)) { 959 if (!IS_ERR(host->reset)) {
931 ret = reset_control_deassert(host->reset); 960 ret = reset_control_deassert(host->reset);
932 if (ret) { 961 if (ret) {
933 dev_err(&pdev->dev, "reset err %d\n", ret); 962 dev_err(&pdev->dev, "reset err %d\n", ret);
934 goto error_disable_clk_mmc; 963 goto error_disable_clk_sample;
935 } 964 }
936 } 965 }
937 966
@@ -950,6 +979,10 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
950error_assert_reset: 979error_assert_reset:
951 if (!IS_ERR(host->reset)) 980 if (!IS_ERR(host->reset))
952 reset_control_assert(host->reset); 981 reset_control_assert(host->reset);
982error_disable_clk_sample:
983 clk_disable_unprepare(host->clk_sample);
984error_disable_clk_output:
985 clk_disable_unprepare(host->clk_output);
953error_disable_clk_mmc: 986error_disable_clk_mmc:
954 clk_disable_unprepare(host->clk_mmc); 987 clk_disable_unprepare(host->clk_mmc);
955error_disable_clk_ahb: 988error_disable_clk_ahb:
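Note that the delay values above change meaning with this conversion: clk_set_phase() takes an angle in degrees, so the raw delay-cell settings previously programmed through the removed clk_sunxi_mmc_phase_control() helper become phase angles on the new "sample" and "output" clocks, and rates above 200 MHz now fail with -EINVAL rather than being driven with a guessed delay.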
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index cc13ea5ce4d5..c0720c1ee4c9 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -15,6 +15,8 @@
15#include <linux/mtd/mtd.h> 15#include <linux/mtd/mtd.h>
16#include <linux/mtd/partitions.h> 16#include <linux/mtd/partitions.h>
17 17
18#include <uapi/linux/magic.h>
19
18/* 20/*
19 * NAND flash on Netgear R6250 was verified to contain 15 partitions. 21 * NAND flash on Netgear R6250 was verified to contain 15 partitions.
20 * This will result in allocating too big array for some old devices, but the 22 * This will result in allocating too big array for some old devices, but the
@@ -39,7 +41,8 @@
39#define ML_MAGIC1 0x39685a42 41#define ML_MAGIC1 0x39685a42
40#define ML_MAGIC2 0x26594131 42#define ML_MAGIC2 0x26594131
41#define TRX_MAGIC 0x30524448 43#define TRX_MAGIC 0x30524448
42#define SQSH_MAGIC 0x71736873 /* shsq */ 44#define SHSQ_MAGIC 0x71736873 /* shsq (weird ZTE H218N endianness) */
45#define UBI_EC_MAGIC 0x23494255 /* UBI# */
43 46
44struct trx_header { 47struct trx_header {
45 uint32_t magic; 48 uint32_t magic;
@@ -50,7 +53,7 @@ struct trx_header {
50 uint32_t offset[3]; 53 uint32_t offset[3];
51} __packed; 54} __packed;
52 55
53static void bcm47xxpart_add_part(struct mtd_partition *part, char *name, 56static void bcm47xxpart_add_part(struct mtd_partition *part, const char *name,
54 u64 offset, uint32_t mask_flags) 57 u64 offset, uint32_t mask_flags)
55{ 58{
56 part->name = name; 59 part->name = name;
@@ -58,6 +61,26 @@ static void bcm47xxpart_add_part(struct mtd_partition *part, char *name,
58 part->mask_flags = mask_flags; 61 part->mask_flags = mask_flags;
59} 62}
60 63
64static const char *bcm47xxpart_trx_data_part_name(struct mtd_info *master,
65 size_t offset)
66{
67 uint32_t buf;
68 size_t bytes_read;
69
70 if (mtd_read(master, offset, sizeof(buf), &bytes_read,
71 (uint8_t *)&buf) < 0) {
72 pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
73 offset);
74 goto out_default;
75 }
76
77 if (buf == UBI_EC_MAGIC)
78 return "ubi";
79
80out_default:
81 return "rootfs";
82}
83
61static int bcm47xxpart_parse(struct mtd_info *master, 84static int bcm47xxpart_parse(struct mtd_info *master,
62 struct mtd_partition **pparts, 85 struct mtd_partition **pparts,
63 struct mtd_part_parser_data *data) 86 struct mtd_part_parser_data *data)
@@ -73,8 +96,12 @@ static int bcm47xxpart_parse(struct mtd_info *master,
73 int last_trx_part = -1; 96 int last_trx_part = -1;
74 int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, }; 97 int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
75 98
76 if (blocksize <= 0x10000) 99 /*
77		blocksize = 0x10000; 100	 * Some really old flashes (like AT45DB*) had smaller erase sizes, but
101 * partitions were aligned to at least 0x1000 anyway.
102 */
103 if (blocksize < 0x1000)
104 blocksize = 0x1000;
78 105
79 /* Alloc */ 106 /* Alloc */
80 parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS, 107 parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,
@@ -186,8 +213,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
186 * we want to have jffs2 (overlay) in the same mtd. 213 * we want to have jffs2 (overlay) in the same mtd.
187 */ 214 */
188 if (trx->offset[i]) { 215 if (trx->offset[i]) {
216 const char *name;
217
218 name = bcm47xxpart_trx_data_part_name(master, offset + trx->offset[i]);
189 bcm47xxpart_add_part(&parts[curr_part++], 219 bcm47xxpart_add_part(&parts[curr_part++],
190 "rootfs", 220 name,
191 offset + trx->offset[i], 221 offset + trx->offset[i],
192 0); 222 0);
193 i++; 223 i++;
@@ -205,7 +235,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
205 } 235 }
206 236
207 /* Squashfs on devices not using TRX */ 237 /* Squashfs on devices not using TRX */
208 if (buf[0x000 / 4] == SQSH_MAGIC) { 238 if (le32_to_cpu(buf[0x000 / 4]) == SQUASHFS_MAGIC ||
239 buf[0x000 / 4] == SHSQ_MAGIC) {
209 bcm47xxpart_add_part(&parts[curr_part++], "rootfs", 240 bcm47xxpart_add_part(&parts[curr_part++], "rootfs",
210 offset, 0); 241 offset, 0);
211 continue; 242 continue;
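Two behavioural notes on this parser update: partition scanning now honours erase blocks as small as 4 KiB instead of rounding everything up to 64 KiB, and the data partition of a TRX image is probed for the UBI erase-counter magic so UBI-formatted firmware is exported as "ubi" rather than "rootfs".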
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index 991c2a1c05d3..afb43d5e1782 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -68,6 +68,7 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
68 mtd->_get_unmapped_area = mapram_unmapped_area; 68 mtd->_get_unmapped_area = mapram_unmapped_area;
69 mtd->_read = mapram_read; 69 mtd->_read = mapram_read;
70 mtd->_write = mapram_write; 70 mtd->_write = mapram_write;
71 mtd->_panic_write = mapram_write;
71 mtd->_sync = mapram_nop; 72 mtd->_sync = mapram_nop;
72 mtd->flags = MTD_CAP_RAM; 73 mtd->flags = MTD_CAP_RAM;
73 mtd->writesize = 1; 74 mtd->writesize = 1;
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
index 47a43cf7e5c6..e67f73ab44c9 100644
--- a/drivers/mtd/chips/map_rom.c
+++ b/drivers/mtd/chips/map_rom.c
@@ -11,6 +11,7 @@
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/of.h>
14#include <linux/mtd/mtd.h> 15#include <linux/mtd/mtd.h>
15#include <linux/mtd/map.h> 16#include <linux/mtd/map.h>
16 17
@@ -28,6 +29,15 @@ static struct mtd_chip_driver maprom_chipdrv = {
28 .module = THIS_MODULE 29 .module = THIS_MODULE
29}; 30};
30 31
32static unsigned int default_erasesize(struct map_info *map)
33{
34 const __be32 *erase_size = NULL;
35
36 erase_size = of_get_property(map->device_node, "erase-size", NULL);
37
38 return !erase_size ? map->size : be32_to_cpu(*erase_size);
39}
40
31static struct mtd_info *map_rom_probe(struct map_info *map) 41static struct mtd_info *map_rom_probe(struct map_info *map)
32{ 42{
33 struct mtd_info *mtd; 43 struct mtd_info *mtd;
@@ -47,8 +57,9 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
47 mtd->_sync = maprom_nop; 57 mtd->_sync = maprom_nop;
48 mtd->_erase = maprom_erase; 58 mtd->_erase = maprom_erase;
49 mtd->flags = MTD_CAP_ROM; 59 mtd->flags = MTD_CAP_ROM;
50 mtd->erasesize = map->size; 60 mtd->erasesize = default_erasesize(map);
51 mtd->writesize = 1; 61 mtd->writesize = 1;
62 mtd->writebufsize = 1;
52 63
53 __module_get(THIS_MODULE); 64 __module_get(THIS_MODULE);
54 return mtd; 65 return mtd;
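The erase size of a ROM mapping can now be set through an optional "erase-size" device-tree property, with the previous whole-window default kept as the fallback; writebufsize is also initialised, presumably so layers that require it (UBI, for one) can sit on top of a map_rom device.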
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index 54ffe5223e64..3060025c8af4 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -24,6 +24,7 @@
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/io.h> 25#include <linux/io.h>
26#include <linux/of.h> 26#include <linux/of.h>
27#include <linux/clk.h>
27 28
28#include "serial_flash_cmds.h" 29#include "serial_flash_cmds.h"
29 30
@@ -262,6 +263,7 @@ struct stfsm {
262 struct mtd_info mtd; 263 struct mtd_info mtd;
263 struct mutex lock; 264 struct mutex lock;
264 struct flash_info *info; 265 struct flash_info *info;
266 struct clk *clk;
265 267
266 uint32_t configuration; 268 uint32_t configuration;
267 uint32_t fifo_dir_delay; 269 uint32_t fifo_dir_delay;
@@ -663,6 +665,23 @@ static struct stfsm_seq stfsm_seq_write_status = {
663 SEQ_CFG_STARTSEQ), 665 SEQ_CFG_STARTSEQ),
664}; 666};
665 667
668/* Dummy sequence to read one byte of data from flash into the FIFO */
669static const struct stfsm_seq stfsm_seq_load_fifo_byte = {
670 .data_size = TRANSFER_SIZE(1),
671 .seq_opc[0] = (SEQ_OPC_PADS_1 |
672 SEQ_OPC_CYCLES(8) |
673 SEQ_OPC_OPCODE(SPINOR_OP_RDID)),
674 .seq = {
675 STFSM_INST_CMD1,
676 STFSM_INST_DATA_READ,
677 STFSM_INST_STOP,
678 },
679 .seq_cfg = (SEQ_CFG_PADS_1 |
680 SEQ_CFG_READNOTWRITE |
681 SEQ_CFG_CSDEASSERT |
682 SEQ_CFG_STARTSEQ),
683};
684
666static int stfsm_n25q_en_32bit_addr_seq(struct stfsm_seq *seq) 685static int stfsm_n25q_en_32bit_addr_seq(struct stfsm_seq *seq)
667{ 686{
668 seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | 687 seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
@@ -695,22 +714,6 @@ static inline uint32_t stfsm_fifo_available(struct stfsm *fsm)
695 return (readl(fsm->base + SPI_FAST_SEQ_STA) >> 5) & 0x7f; 714 return (readl(fsm->base + SPI_FAST_SEQ_STA) >> 5) & 0x7f;
696} 715}
697 716
698static void stfsm_clear_fifo(struct stfsm *fsm)
699{
700 uint32_t avail;
701
702 for (;;) {
703 avail = stfsm_fifo_available(fsm);
704 if (!avail)
705 break;
706
707 while (avail) {
708 readl(fsm->base + SPI_FAST_SEQ_DATA_REG);
709 avail--;
710 }
711 }
712}
713
714static inline void stfsm_load_seq(struct stfsm *fsm, 717static inline void stfsm_load_seq(struct stfsm *fsm,
715 const struct stfsm_seq *seq) 718 const struct stfsm_seq *seq)
716{ 719{
@@ -772,6 +775,68 @@ static void stfsm_read_fifo(struct stfsm *fsm, uint32_t *buf, uint32_t size)
772 } 775 }
773} 776}
774 777
778/*
779 * Clear the data FIFO
780 *
781 * Typically, this is only required during driver initialisation, where no
782 * assumptions can be made regarding the state of the FIFO.
783 *
784 * The process of clearing the FIFO is complicated by the fact that while it is
785 * possible for the FIFO to contain an arbitrary number of bytes [1], the
786 * SPI_FAST_SEQ_STA register only reports the number of complete 32-bit words
787 * present. Furthermore, data can only be drained from the FIFO by reading
788 * complete 32-bit words.
789 *
790 * With this in mind, a two-stage process is used to clear the FIFO:
791 *
792 * 1. Read any complete 32-bit words from the FIFO, as reported by the
793 * SPI_FAST_SEQ_STA register.
794 *
795 * 2. Mop up any remaining bytes. At this point, it is not known if there
796 * are 0, 1, 2, or 3 bytes in the FIFO. To handle all cases, a dummy FSM
797 * sequence is used to load one byte at a time, until a complete 32-bit
798 * word is formed; at most, 4 bytes will need to be loaded.
799 *
800 * [1] It is theoretically possible for the FIFO to contain an arbitrary number
801 * of bits. However, since there are no known use-cases that leave
802 * incomplete bytes in the FIFO, only words and bytes are considered here.
803 */
804static void stfsm_clear_fifo(struct stfsm *fsm)
805{
806 const struct stfsm_seq *seq = &stfsm_seq_load_fifo_byte;
807 uint32_t words, i;
808
809 /* 1. Clear any 32-bit words */
810 words = stfsm_fifo_available(fsm);
811 if (words) {
812 for (i = 0; i < words; i++)
813 readl(fsm->base + SPI_FAST_SEQ_DATA_REG);
814 dev_dbg(fsm->dev, "cleared %d words from FIFO\n", words);
815 }
816
817 /*
818 * 2. Clear any remaining bytes
819 * - Load the FIFO, one byte at a time, until a complete 32-bit word
820 * is available.
821 */
822 for (i = 0, words = 0; i < 4 && !words; i++) {
823 stfsm_load_seq(fsm, seq);
824 stfsm_wait_seq(fsm);
825 words = stfsm_fifo_available(fsm);
826 }
827
828 /* - A single word must be available now */
829 if (words != 1) {
830 dev_err(fsm->dev, "failed to clear bytes from the data FIFO\n");
831 return;
832 }
833
834 /* - Read the 32-bit word */
835 readl(fsm->base + SPI_FAST_SEQ_DATA_REG);
836
837 dev_dbg(fsm->dev, "cleared %d byte(s) from the data FIFO\n", 4 - i);
838}
839
775static int stfsm_write_fifo(struct stfsm *fsm, const uint32_t *buf, 840static int stfsm_write_fifo(struct stfsm *fsm, const uint32_t *buf,
776 uint32_t size) 841 uint32_t size)
777{ 842{
@@ -1521,11 +1586,11 @@ static int stfsm_write(struct stfsm *fsm, const uint8_t *buf,
1521 uint32_t size_lb; 1586 uint32_t size_lb;
1522 uint32_t size_mop; 1587 uint32_t size_mop;
1523 uint32_t tmp[4]; 1588 uint32_t tmp[4];
1589 uint32_t i;
1524 uint32_t page_buf[FLASH_PAGESIZE_32]; 1590 uint32_t page_buf[FLASH_PAGESIZE_32];
1525 uint8_t *t = (uint8_t *)&tmp; 1591 uint8_t *t = (uint8_t *)&tmp;
1526 const uint8_t *p; 1592 const uint8_t *p;
1527 int ret; 1593 int ret;
1528 int i;
1529 1594
1530 dev_dbg(fsm->dev, "writing %d bytes to 0x%08x\n", size, offset); 1595 dev_dbg(fsm->dev, "writing %d bytes to 0x%08x\n", size, offset);
1531 1596
@@ -1843,8 +1908,7 @@ static void stfsm_set_freq(struct stfsm *fsm, uint32_t spi_freq)
1843 uint32_t emi_freq; 1908 uint32_t emi_freq;
1844 uint32_t clk_div; 1909 uint32_t clk_div;
1845 1910
1846 /* TODO: Make this dynamic */ 1911 emi_freq = clk_get_rate(fsm->clk);
1847 emi_freq = STFSM_DEFAULT_EMI_FREQ;
1848 1912
1849 /* 1913 /*
1850 * Calculate clk_div - values between 2 and 128 1914 * Calculate clk_div - values between 2 and 128
@@ -1994,6 +2058,18 @@ static int stfsm_probe(struct platform_device *pdev)
1994 return PTR_ERR(fsm->base); 2058 return PTR_ERR(fsm->base);
1995 } 2059 }
1996 2060
2061 fsm->clk = devm_clk_get(&pdev->dev, NULL);
2062 if (IS_ERR(fsm->clk)) {
2063 dev_err(fsm->dev, "Couldn't find EMI clock.\n");
2064 return PTR_ERR(fsm->clk);
2065 }
2066
2067 ret = clk_prepare_enable(fsm->clk);
2068 if (ret) {
2069 dev_err(fsm->dev, "Failed to enable EMI clock.\n");
2070 return ret;
2071 }
2072
1997 mutex_init(&fsm->lock); 2073 mutex_init(&fsm->lock);
1998 2074
1999 ret = stfsm_init(fsm); 2075 ret = stfsm_init(fsm);
@@ -2058,6 +2134,28 @@ static int stfsm_remove(struct platform_device *pdev)
2058 return mtd_device_unregister(&fsm->mtd); 2134 return mtd_device_unregister(&fsm->mtd);
2059} 2135}
2060 2136
2137#ifdef CONFIG_PM_SLEEP
2138static int stfsm_suspend(struct device *dev)
2139{
2140 struct stfsm *fsm = dev_get_drvdata(dev);
2141
2142 clk_disable_unprepare(fsm->clk);
2143
2144 return 0;
2145}
2146
2147static int stfsm_resume(struct device *dev)
2148{
2149 struct stfsm *fsm = dev_get_drvdata(dev);
2150
2151 clk_prepare_enable(fsm->clk);
2152
2153 return 0;
2154}
2155#endif
2156
2157static SIMPLE_DEV_PM_OPS(stfsm_pm_ops, stfsm_suspend, stfsm_resume);
2158
2061static const struct of_device_id stfsm_match[] = { 2159static const struct of_device_id stfsm_match[] = {
2062 { .compatible = "st,spi-fsm", }, 2160 { .compatible = "st,spi-fsm", },
2063 {}, 2161 {},
@@ -2070,6 +2168,7 @@ static struct platform_driver stfsm_driver = {
2070 .driver = { 2168 .driver = {
2071 .name = "st-spi-fsm", 2169 .name = "st-spi-fsm",
2072 .of_match_table = stfsm_match, 2170 .of_match_table = stfsm_match,
2171 .pm = &stfsm_pm_ops,
2073 }, 2172 },
2074}; 2173};
2075module_platform_driver(stfsm_driver); 2174module_platform_driver(stfsm_driver);
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index f35cd2081314..ff26e979b1a1 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -269,6 +269,16 @@ static int of_flash_probe(struct platform_device *dev)
269 info->list[i].mtd = obsolete_probe(dev, 269 info->list[i].mtd = obsolete_probe(dev,
270 &info->list[i].map); 270 &info->list[i].map);
271 } 271 }
272
273 /* Fall back to mapping region as ROM */
274 if (!info->list[i].mtd) {
275 dev_warn(&dev->dev,
276 "do_map_probe() failed for type %s\n",
277 probe_type);
278
279 info->list[i].mtd = do_map_probe("map_rom",
280 &info->list[i].map);
281 }
272 mtd_list[i] = info->list[i].mtd; 282 mtd_list[i] = info->list[i].mtd;
273 283
274 err = -ENXIO; 284 err = -ENXIO;
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 485ea751c7f9..bb4c14f83c75 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -45,8 +45,6 @@ struct mtdblk_dev {
45 enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state; 45 enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
46}; 46};
47 47
48static DEFINE_MUTEX(mtdblks_lock);
49
50/* 48/*
51 * Cache stuff... 49 * Cache stuff...
52 * 50 *
@@ -286,10 +284,8 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
286 284
287 pr_debug("mtdblock_open\n"); 285 pr_debug("mtdblock_open\n");
288 286
289 mutex_lock(&mtdblks_lock);
290 if (mtdblk->count) { 287 if (mtdblk->count) {
291 mtdblk->count++; 288 mtdblk->count++;
292 mutex_unlock(&mtdblks_lock);
293 return 0; 289 return 0;
294 } 290 }
295 291
@@ -302,8 +298,6 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
302 mtdblk->cache_data = NULL; 298 mtdblk->cache_data = NULL;
303 } 299 }
304 300
305 mutex_unlock(&mtdblks_lock);
306
307 pr_debug("ok\n"); 301 pr_debug("ok\n");
308 302
309 return 0; 303 return 0;
@@ -315,8 +309,6 @@ static void mtdblock_release(struct mtd_blktrans_dev *mbd)
315 309
316 pr_debug("mtdblock_release\n"); 310 pr_debug("mtdblock_release\n");
317 311
318 mutex_lock(&mtdblks_lock);
319
320 mutex_lock(&mtdblk->cache_mutex); 312 mutex_lock(&mtdblk->cache_mutex);
321 write_cached_data(mtdblk); 313 write_cached_data(mtdblk);
322 mutex_unlock(&mtdblk->cache_mutex); 314 mutex_unlock(&mtdblk->cache_mutex);
@@ -331,8 +323,6 @@ static void mtdblock_release(struct mtd_blktrans_dev *mbd)
331 vfree(mtdblk->cache_data); 323 vfree(mtdblk->cache_data);
332 } 324 }
333 325
334 mutex_unlock(&mtdblks_lock);
335
336 pr_debug("ok\n"); 326 pr_debug("ok\n");
337} 327}
338 328
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index eacc3aac7327..239a8c806b67 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -311,7 +311,8 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
311 devops.len = subdev->size - to; 311 devops.len = subdev->size - to;
312 312
313 err = mtd_write_oob(subdev, to, &devops); 313 err = mtd_write_oob(subdev, to, &devops);
314 ops->retlen += devops.oobretlen; 314 ops->retlen += devops.retlen;
315 ops->oobretlen += devops.oobretlen;
315 if (err) 316 if (err)
316 return err; 317 return err;
317 318
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 0ec4d6ea1e4b..11883bd26d9d 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -37,6 +37,7 @@
37#include <linux/backing-dev.h> 37#include <linux/backing-dev.h>
38#include <linux/gfp.h> 38#include <linux/gfp.h>
39#include <linux/slab.h> 39#include <linux/slab.h>
40#include <linux/reboot.h>
40 41
41#include <linux/mtd/mtd.h> 42#include <linux/mtd/mtd.h>
42#include <linux/mtd/partitions.h> 43#include <linux/mtd/partitions.h>
@@ -356,6 +357,17 @@ unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
356EXPORT_SYMBOL_GPL(mtd_mmap_capabilities); 357EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
357#endif 358#endif
358 359
360static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
361 void *cmd)
362{
363 struct mtd_info *mtd;
364
365 mtd = container_of(n, struct mtd_info, reboot_notifier);
366 mtd->_reboot(mtd);
367
368 return NOTIFY_DONE;
369}
370
359/** 371/**
360 * add_mtd_device - register an MTD device 372 * add_mtd_device - register an MTD device
361 * @mtd: pointer to new MTD device info structure 373 * @mtd: pointer to new MTD device info structure
@@ -544,6 +556,19 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
544 err = -ENODEV; 556 err = -ENODEV;
545 } 557 }
546 558
559 /*
560 * FIXME: some drivers unfortunately call this function more than once.
561 * So we have to check if we've already assigned the reboot notifier.
562 *
563 * Generally, we can make multiple calls work for most cases, but it
564 * does cause problems with parse_mtd_partitions() above (e.g.,
565 * cmdlineparts will register partitions more than once).
566 */
567 if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
568 mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
569 register_reboot_notifier(&mtd->reboot_notifier);
570 }
571
547 return err; 572 return err;
548} 573}
549EXPORT_SYMBOL_GPL(mtd_device_parse_register); 574EXPORT_SYMBOL_GPL(mtd_device_parse_register);
@@ -558,6 +583,9 @@ int mtd_device_unregister(struct mtd_info *master)
558{ 583{
559 int err; 584 int err;
560 585
586 if (master->_reboot)
587 unregister_reboot_notifier(&master->reboot_notifier);
588
561 err = del_mtd_partitions(master); 589 err = del_mtd_partitions(master);
562 if (err) 590 if (err)
563 return err; 591 return err;
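The new _reboot hook gives a driver one last chance to put its chip back into a sane state (stopping an in-flight operation, returning to array-read mode, and the like) before the machine reboots; a driver opts in simply by filling in mtd->_reboot.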
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 7d0150d20432..5b76a173cd95 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -421,7 +421,7 @@ config MTD_NAND_ORION
421 421
422config MTD_NAND_FSL_ELBC 422config MTD_NAND_FSL_ELBC
423 tristate "NAND support for Freescale eLBC controllers" 423 tristate "NAND support for Freescale eLBC controllers"
424 depends on PPC_OF 424 depends on PPC
425 select FSL_LBC 425 select FSL_LBC
426 help 426 help
427 Various Freescale chips, including the 8313, include a NAND Flash 427 Various Freescale chips, including the 8313, include a NAND Flash
@@ -524,4 +524,9 @@ config MTD_NAND_SUNXI
524 help 524 help
525 Enables support for NAND Flash chips on Allwinner SoCs. 525 Enables support for NAND Flash chips on Allwinner SoCs.
526 526
527config MTD_NAND_HISI504
528 tristate "Support for NAND controller on Hisilicon SoC Hip04"
529 help
530 Enables support for NAND controller on Hisilicon SoC Hip04.
531
527endif # MTD_NAND 532endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index bd38f21d2e28..582bbd05aff7 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -51,5 +51,6 @@ obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
51obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o 51obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
52obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/ 52obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
53obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o 53obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
54obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
54 55
55nand-objs := nand_base.o nand_bbt.o nand_timings.o 56nand-objs := nand_base.o nand_bbt.o nand_timings.o
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index f1d555cfb332..842f8fe91b56 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -183,7 +183,7 @@ static int ams_delta_init(struct platform_device *pdev)
183 return -ENXIO; 183 return -ENXIO;
184 184
185 /* Allocate memory for MTD device structure and private data */ 185 /* Allocate memory for MTD device structure and private data */
186 ams_delta_mtd = kmalloc(sizeof(struct mtd_info) + 186 ams_delta_mtd = kzalloc(sizeof(struct mtd_info) +
187 sizeof(struct nand_chip), GFP_KERNEL); 187 sizeof(struct nand_chip), GFP_KERNEL);
188 if (!ams_delta_mtd) { 188 if (!ams_delta_mtd) {
189 printk (KERN_WARNING "Unable to allocate E3 NAND MTD device structure.\n"); 189 printk (KERN_WARNING "Unable to allocate E3 NAND MTD device structure.\n");
@@ -196,10 +196,6 @@ static int ams_delta_init(struct platform_device *pdev)
196 /* Get pointer to private data */ 196 /* Get pointer to private data */
197 this = (struct nand_chip *) (&ams_delta_mtd[1]); 197 this = (struct nand_chip *) (&ams_delta_mtd[1]);
198 198
199 /* Initialize structures */
200 memset(ams_delta_mtd, 0, sizeof(struct mtd_info));
201 memset(this, 0, sizeof(struct nand_chip));
202
203 /* Link the private data with the MTD structure */ 199 /* Link the private data with the MTD structure */
204 ams_delta_mtd->priv = this; 200 ams_delta_mtd->priv = this;
205 201
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index a345e7b2463a..d93c849b70b5 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -63,6 +63,10 @@ module_param(on_flash_bbt, int, 0);
63#include "atmel_nand_ecc.h" /* Hardware ECC registers */ 63#include "atmel_nand_ecc.h" /* Hardware ECC registers */
64#include "atmel_nand_nfc.h" /* Nand Flash Controller definition */ 64#include "atmel_nand_nfc.h" /* Nand Flash Controller definition */
65 65
66struct atmel_nand_caps {
67 bool pmecc_correct_erase_page;
68};
69
66/* oob layout for large page size 70/* oob layout for large page size
67 * bad block info is on bytes 0 and 1 71 * bad block info is on bytes 0 and 1
68 * the bytes have to be consecutives to avoid 72 * the bytes have to be consecutives to avoid
@@ -124,6 +128,7 @@ struct atmel_nand_host {
124 128
125 struct atmel_nfc *nfc; 129 struct atmel_nfc *nfc;
126 130
131 struct atmel_nand_caps *caps;
127 bool has_pmecc; 132 bool has_pmecc;
128 u8 pmecc_corr_cap; 133 u8 pmecc_corr_cap;
129 u16 pmecc_sector_size; 134 u16 pmecc_sector_size;
@@ -847,7 +852,11 @@ static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf,
847 struct atmel_nand_host *host = nand_chip->priv; 852 struct atmel_nand_host *host = nand_chip->priv;
848 int i, err_nbr; 853 int i, err_nbr;
849 uint8_t *buf_pos; 854 uint8_t *buf_pos;
850 int total_err = 0; 855 int max_bitflips = 0;
856
857	/* If bitflips in an erased page can be corrected, do the normal check */
858 if (host->caps->pmecc_correct_erase_page)
859 goto normal_check;
851 860
852 for (i = 0; i < nand_chip->ecc.total; i++) 861 for (i = 0; i < nand_chip->ecc.total; i++)
853 if (ecc[i] != 0xff) 862 if (ecc[i] != 0xff)
@@ -874,13 +883,13 @@ normal_check:
874 pmecc_correct_data(mtd, buf_pos, ecc, i, 883 pmecc_correct_data(mtd, buf_pos, ecc, i,
875 nand_chip->ecc.bytes, err_nbr); 884 nand_chip->ecc.bytes, err_nbr);
876 mtd->ecc_stats.corrected += err_nbr; 885 mtd->ecc_stats.corrected += err_nbr;
877 total_err += err_nbr; 886 max_bitflips = max_t(int, max_bitflips, err_nbr);
878 } 887 }
879 } 888 }
880 pmecc_stat >>= 1; 889 pmecc_stat >>= 1;
881 } 890 }
882 891
883 return total_err; 892 return max_bitflips;
884} 893}
885 894
886static void pmecc_enable(struct atmel_nand_host *host, int ecc_op) 895static void pmecc_enable(struct atmel_nand_host *host, int ecc_op)
@@ -1474,6 +1483,8 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
1474 ecc_writel(host->ecc, CR, ATMEL_ECC_RST); 1483 ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
1475} 1484}
1476 1485
1486static const struct of_device_id atmel_nand_dt_ids[];
1487
1477static int atmel_of_init_port(struct atmel_nand_host *host, 1488static int atmel_of_init_port(struct atmel_nand_host *host,
1478 struct device_node *np) 1489 struct device_node *np)
1479{ 1490{
@@ -1483,6 +1494,9 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
1483 struct atmel_nand_data *board = &host->board; 1494 struct atmel_nand_data *board = &host->board;
1484 enum of_gpio_flags flags = 0; 1495 enum of_gpio_flags flags = 0;
1485 1496
1497 host->caps = (struct atmel_nand_caps *)
1498 of_match_device(atmel_nand_dt_ids, host->dev)->data;
1499
1486 if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) { 1500 if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
1487 if (val >= 32) { 1501 if (val >= 32) {
1488 dev_err(host->dev, "invalid addr-offset %u\n", val); 1502 dev_err(host->dev, "invalid addr-offset %u\n", val);
@@ -2288,8 +2302,17 @@ static int atmel_nand_remove(struct platform_device *pdev)
2288 return 0; 2302 return 0;
2289} 2303}
2290 2304
2305static struct atmel_nand_caps at91rm9200_caps = {
2306 .pmecc_correct_erase_page = false,
2307};
2308
2309static struct atmel_nand_caps sama5d4_caps = {
2310 .pmecc_correct_erase_page = true,
2311};
2312
2291static const struct of_device_id atmel_nand_dt_ids[] = { 2313static const struct of_device_id atmel_nand_dt_ids[] = {
2292 { .compatible = "atmel,at91rm9200-nand" }, 2314 { .compatible = "atmel,at91rm9200-nand", .data = &at91rm9200_caps },
2315 { .compatible = "atmel,sama5d4-nand", .data = &sama5d4_caps },
2293 { /* sentinel */ } 2316 { /* sentinel */ }
2294}; 2317};
2295 2318
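The per-compatible caps introduced above follow the usual pattern of hanging SoC-specific data off the of_device_id table and fetching it back at probe time with of_match_device(). A condensed sketch of the round trip (names are illustrative):

	static const struct of_device_id xxx_dt_ids[] = {
		{ .compatible = "vendor,old-soc", .data = &old_caps },
		{ .compatible = "vendor,new-soc", .data = &new_caps },
		{ /* sentinel */ }
	};

	/* at probe time */
	const struct of_device_id *match = of_match_device(xxx_dt_ids, dev);

	if (match)
		host->caps = match->data;

Note the atmel hunk dereferences the of_match_device() result directly; a NULL check as above is cheap insurance if the driver can also be bound without a matching OF node.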
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index b3b7ca1bafb8..f44c6061536a 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1041,7 +1041,7 @@ static void denali_setup_dma(struct denali_nand_info *denali, int op)
1041 index_addr(denali, mode | ((addr >> 16) << 8), 0x2200); 1041 index_addr(denali, mode | ((addr >> 16) << 8), 0x2200);
1042 1042
1043 /* 3. set memory low address bits 23:8 */ 1043 /* 3. set memory low address bits 23:8 */
1044 index_addr(denali, mode | ((addr & 0xff) << 8), 0x2300); 1044 index_addr(denali, mode | ((addr & 0xffff) << 8), 0x2300);
1045 1045
1046 /* 4. interrupt when complete, burst len = 64 bytes */ 1046 /* 4. interrupt when complete, burst len = 64 bytes */
1047 index_addr(denali, mode | 0x14000, 0x2400); 1047 index_addr(denali, mode | 0x14000, 0x2400);
@@ -1328,35 +1328,6 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
1328 break; 1328 break;
1329 } 1329 }
1330} 1330}
1331
1332/* stubs for ECC functions not used by the NAND core */
1333static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
1334 uint8_t *ecc_code)
1335{
1336 struct denali_nand_info *denali = mtd_to_denali(mtd);
1337
1338 dev_err(denali->dev, "denali_ecc_calculate called unexpectedly\n");
1339 BUG();
1340 return -EIO;
1341}
1342
1343static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
1344 uint8_t *read_ecc, uint8_t *calc_ecc)
1345{
1346 struct denali_nand_info *denali = mtd_to_denali(mtd);
1347
1348 dev_err(denali->dev, "denali_ecc_correct called unexpectedly\n");
1349 BUG();
1350 return -EIO;
1351}
1352
1353static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
1354{
1355 struct denali_nand_info *denali = mtd_to_denali(mtd);
1356
1357 dev_err(denali->dev, "denali_ecc_hwctl called unexpectedly\n");
1358 BUG();
1359}
1360/* end NAND core entry points */ 1331/* end NAND core entry points */
1361 1332
1362/* Initialization code to bring the device up to a known good state */ 1333/* Initialization code to bring the device up to a known good state */
@@ -1609,15 +1580,6 @@ int denali_init(struct denali_nand_info *denali)
1609 denali->totalblks = denali->mtd.size >> denali->nand.phys_erase_shift; 1580 denali->totalblks = denali->mtd.size >> denali->nand.phys_erase_shift;
1610 denali->blksperchip = denali->totalblks / denali->nand.numchips; 1581 denali->blksperchip = denali->totalblks / denali->nand.numchips;
1611 1582
1612 /*
1613 * These functions are required by the NAND core framework, otherwise,
1614 * the NAND core will assert. However, we don't need them, so we'll stub
1615 * them out.
1616 */
1617 denali->nand.ecc.calculate = denali_ecc_calculate;
1618 denali->nand.ecc.correct = denali_ecc_correct;
1619 denali->nand.ecc.hwctl = denali_ecc_hwctl;
1620
1621 /* override the default read operations */ 1583 /* override the default read operations */
1622 denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum; 1584 denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum;
1623 denali->nand.ecc.read_page = denali_read_page; 1585 denali->nand.ecc.read_page = denali_read_page;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 4f3851a24bb2..33f3c3c54dbc 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1294,14 +1294,6 @@ exit_auxiliary:
1294 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an 1294 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
1295 * ECC-based or raw view of the page is implicit in which function it calls 1295 * ECC-based or raw view of the page is implicit in which function it calls
1296 * (there is a similar pair of ECC-based/raw functions for writing). 1296 * (there is a similar pair of ECC-based/raw functions for writing).
1297 *
1298 * FIXME: The following paragraph is incorrect, now that there exist
1299 * ecc.read_oob_raw and ecc.write_oob_raw functions.
1300 *
1301 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
1302 * ECC-based/raw functions for reading or or writing the OOB. The fact that the
1303 * caller wants an ECC-based or raw view of the page is not propagated down to
1304 * this driver.
1305 */ 1297 */
1306static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 1298static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1307 int page) 1299 int page)
@@ -2029,7 +2021,6 @@ static int gpmi_nand_probe(struct platform_device *pdev)
2029exit_nfc_init: 2021exit_nfc_init:
2030 release_resources(this); 2022 release_resources(this);
2031exit_acquire_resources: 2023exit_acquire_resources:
2032 dev_err(this->dev, "driver registration failed: %d\n", ret);
2033 2024
2034 return ret; 2025 return ret;
2035} 2026}
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
new file mode 100644
index 000000000000..289ad3ac3e80
--- /dev/null
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -0,0 +1,891 @@
1/*
2 * HiSilicon NAND Flash controller driver
3 *
4 * Copyright © 2012-2014 HiSilicon Technologies Co., Ltd.
5 * http://www.hisilicon.com
6 *
7 * Author: Zhou Wang <wangzhou.bry@gmail.com>
8 * The initial developer of the original code is Zhiyong Cai
9 * <caizhiyong@huawei.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 */
21#include <linux/of.h>
22#include <linux/of_mtd.h>
23#include <linux/mtd/mtd.h>
24#include <linux/sizes.h>
25#include <linux/clk.h>
26#include <linux/slab.h>
27#include <linux/module.h>
28#include <linux/delay.h>
29#include <linux/interrupt.h>
30#include <linux/mtd/nand.h>
31#include <linux/dma-mapping.h>
32#include <linux/platform_device.h>
33#include <linux/mtd/partitions.h>
34
35#define HINFC504_MAX_CHIP (4)
36#define HINFC504_W_LATCH (5)
37#define HINFC504_R_LATCH (7)
38#define HINFC504_RW_LATCH (3)
39
40#define HINFC504_NFC_TIMEOUT (2 * HZ)
41#define HINFC504_NFC_PM_TIMEOUT (1 * HZ)
42#define HINFC504_NFC_DMA_TIMEOUT (5 * HZ)
43#define HINFC504_CHIP_DELAY (25)
44
45#define HINFC504_REG_BASE_ADDRESS_LEN (0x100)
46#define HINFC504_BUFFER_BASE_ADDRESS_LEN (2048 + 128)
47
48#define HINFC504_ADDR_CYCLE_MASK 0x4
49
50#define HINFC504_CON 0x00
51#define HINFC504_CON_OP_MODE_NORMAL BIT(0)
52#define HINFC504_CON_PAGEISZE_SHIFT (1)
53#define HINFC504_CON_PAGESIZE_MASK (0x07)
54#define HINFC504_CON_BUS_WIDTH BIT(4)
55#define HINFC504_CON_READY_BUSY_SEL BIT(8)
56#define HINFC504_CON_ECCTYPE_SHIFT (9)
57#define HINFC504_CON_ECCTYPE_MASK (0x07)
58
59#define HINFC504_PWIDTH 0x04
60#define SET_HINFC504_PWIDTH(_w_lcnt, _r_lcnt, _rw_hcnt) \
61 ((_w_lcnt) | (((_r_lcnt) & 0x0F) << 4) | (((_rw_hcnt) & 0x0F) << 8))
62
63#define HINFC504_CMD 0x0C
64#define HINFC504_ADDRL 0x10
65#define HINFC504_ADDRH 0x14
66#define HINFC504_DATA_NUM 0x18
67
68#define HINFC504_OP 0x1C
69#define HINFC504_OP_READ_DATA_EN BIT(1)
70#define HINFC504_OP_WAIT_READY_EN BIT(2)
71#define HINFC504_OP_CMD2_EN BIT(3)
72#define HINFC504_OP_WRITE_DATA_EN BIT(4)
73#define HINFC504_OP_ADDR_EN BIT(5)
74#define HINFC504_OP_CMD1_EN BIT(6)
75#define HINFC504_OP_NF_CS_SHIFT (7)
76#define HINFC504_OP_NF_CS_MASK (3)
77#define HINFC504_OP_ADDR_CYCLE_SHIFT (9)
78#define HINFC504_OP_ADDR_CYCLE_MASK (7)
79
80#define HINFC504_STATUS 0x20
81#define HINFC504_READY BIT(0)
82
83#define HINFC504_INTEN 0x24
84#define HINFC504_INTEN_DMA BIT(9)
85#define HINFC504_INTEN_UE BIT(6)
86#define HINFC504_INTEN_CE BIT(5)
87
88#define HINFC504_INTS 0x28
89#define HINFC504_INTS_DMA BIT(9)
90#define HINFC504_INTS_UE BIT(6)
91#define HINFC504_INTS_CE BIT(5)
92
93#define HINFC504_INTCLR 0x2C
94#define HINFC504_INTCLR_DMA BIT(9)
95#define HINFC504_INTCLR_UE BIT(6)
96#define HINFC504_INTCLR_CE BIT(5)
97
98#define HINFC504_ECC_STATUS 0x5C
99#define HINFC504_ECC_16_BIT_SHIFT 12
100
101#define HINFC504_DMA_CTRL 0x60
102#define HINFC504_DMA_CTRL_DMA_START BIT(0)
103#define HINFC504_DMA_CTRL_WE BIT(1)
104#define HINFC504_DMA_CTRL_DATA_AREA_EN BIT(2)
105#define HINFC504_DMA_CTRL_OOB_AREA_EN BIT(3)
106#define HINFC504_DMA_CTRL_BURST4_EN BIT(4)
107#define HINFC504_DMA_CTRL_BURST8_EN BIT(5)
108#define HINFC504_DMA_CTRL_BURST16_EN BIT(6)
109#define HINFC504_DMA_CTRL_ADDR_NUM_SHIFT (7)
110#define HINFC504_DMA_CTRL_ADDR_NUM_MASK (1)
111#define HINFC504_DMA_CTRL_CS_SHIFT (8)
112#define HINFC504_DMA_CTRL_CS_MASK (0x03)
113
114#define HINFC504_DMA_ADDR_DATA 0x64
115#define HINFC504_DMA_ADDR_OOB 0x68
116
117#define HINFC504_DMA_LEN 0x6C
118#define HINFC504_DMA_LEN_OOB_SHIFT (16)
119#define HINFC504_DMA_LEN_OOB_MASK (0xFFF)
120
121#define HINFC504_DMA_PARA 0x70
122#define HINFC504_DMA_PARA_DATA_RW_EN BIT(0)
123#define HINFC504_DMA_PARA_OOB_RW_EN BIT(1)
124#define HINFC504_DMA_PARA_DATA_EDC_EN BIT(2)
125#define HINFC504_DMA_PARA_OOB_EDC_EN BIT(3)
126#define HINFC504_DMA_PARA_DATA_ECC_EN BIT(4)
127#define HINFC504_DMA_PARA_OOB_ECC_EN BIT(5)
128
129#define HINFC_VERSION 0x74
130#define HINFC504_LOG_READ_ADDR 0x7C
131#define HINFC504_LOG_READ_LEN 0x80
132
133#define HINFC504_NANDINFO_LEN 0x10
134
135struct hinfc_host {
136 struct nand_chip chip;
137 struct mtd_info mtd;
138 struct device *dev;
139 void __iomem *iobase;
140 void __iomem *mmio;
141 struct completion cmd_complete;
142 unsigned int offset;
143 unsigned int command;
144 int chipselect;
145 unsigned int addr_cycle;
146 u32 addr_value[2];
147 u32 cache_addr_value[2];
148 char *buffer;
149 dma_addr_t dma_buffer;
150 dma_addr_t dma_oob;
151 int version;
152 unsigned int irq_status; /* interrupt status */
153};
154
155static inline unsigned int hinfc_read(struct hinfc_host *host, unsigned int reg)
156{
157 return readl(host->iobase + reg);
158}
159
160static inline void hinfc_write(struct hinfc_host *host, unsigned int value,
161 unsigned int reg)
162{
163 writel(value, host->iobase + reg);
164}
165
166static void wait_controller_finished(struct hinfc_host *host)
167{
168 unsigned long timeout = jiffies + HINFC504_NFC_TIMEOUT;
169 int val;
170
171 while (time_before(jiffies, timeout)) {
172 val = hinfc_read(host, HINFC504_STATUS);
173 if (host->command == NAND_CMD_ERASE2) {
174 /* wait for the nfc to become ready */
175 while (!(val & HINFC504_READY)) {
176 usleep_range(500, 1000);
177 val = hinfc_read(host, HINFC504_STATUS);
178 }
179 return;
180 }
181
182 if (val & HINFC504_READY)
183 return;
184 }
185
186 /* the command timed out */
187 dev_err(host->dev, "Timed out waiting for the NAND controller to finish the command.\n");
188}
189
190static void hisi_nfc_dma_transfer(struct hinfc_host *host, int todev)
191{
192 struct mtd_info *mtd = &host->mtd;
193 struct nand_chip *chip = mtd->priv;
194 unsigned long val;
195 int ret;
196
197 hinfc_write(host, host->dma_buffer, HINFC504_DMA_ADDR_DATA);
198 hinfc_write(host, host->dma_oob, HINFC504_DMA_ADDR_OOB);
199
200 if (chip->ecc.mode == NAND_ECC_NONE) {
201 hinfc_write(host, ((mtd->oobsize & HINFC504_DMA_LEN_OOB_MASK)
202 << HINFC504_DMA_LEN_OOB_SHIFT), HINFC504_DMA_LEN);
203
204 hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN
205 | HINFC504_DMA_PARA_OOB_RW_EN, HINFC504_DMA_PARA);
206 } else {
207 if (host->command == NAND_CMD_READOOB)
208 hinfc_write(host, HINFC504_DMA_PARA_OOB_RW_EN
209 | HINFC504_DMA_PARA_OOB_EDC_EN
210 | HINFC504_DMA_PARA_OOB_ECC_EN, HINFC504_DMA_PARA);
211 else
212 hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN
213 | HINFC504_DMA_PARA_OOB_RW_EN
214 | HINFC504_DMA_PARA_DATA_EDC_EN
215 | HINFC504_DMA_PARA_OOB_EDC_EN
216 | HINFC504_DMA_PARA_DATA_ECC_EN
217 | HINFC504_DMA_PARA_OOB_ECC_EN, HINFC504_DMA_PARA);
218
219 }
220
221 val = (HINFC504_DMA_CTRL_DMA_START | HINFC504_DMA_CTRL_BURST4_EN
222 | HINFC504_DMA_CTRL_BURST8_EN | HINFC504_DMA_CTRL_BURST16_EN
223 | HINFC504_DMA_CTRL_DATA_AREA_EN | HINFC504_DMA_CTRL_OOB_AREA_EN
224 | ((host->addr_cycle == 4 ? 1 : 0)
225 << HINFC504_DMA_CTRL_ADDR_NUM_SHIFT)
226 | ((host->chipselect & HINFC504_DMA_CTRL_CS_MASK)
227 << HINFC504_DMA_CTRL_CS_SHIFT));
228
229 if (todev)
230 val |= HINFC504_DMA_CTRL_WE;
231
232 init_completion(&host->cmd_complete);
233
234 hinfc_write(host, val, HINFC504_DMA_CTRL);
235 ret = wait_for_completion_timeout(&host->cmd_complete,
236 HINFC504_NFC_DMA_TIMEOUT);
237
238 if (!ret) {
239 dev_err(host->dev, "DMA operation (irq) timed out!\n");
240 /* sanity check */
241 val = hinfc_read(host, HINFC504_DMA_CTRL);
242 if (!(val & HINFC504_DMA_CTRL_DMA_START))
243 dev_err(host->dev, "DMA already finished, but the irq was never acknowledged!\n");
244 else
245 dev_err(host->dev, "DMA really did time out!\n");
246 }
247}
248
249static int hisi_nfc_send_cmd_pageprog(struct hinfc_host *host)
250{
251 host->addr_value[0] &= 0xffff0000;
252
253 hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
254 hinfc_write(host, host->addr_value[1], HINFC504_ADDRH);
255 hinfc_write(host, NAND_CMD_PAGEPROG << 8 | NAND_CMD_SEQIN,
256 HINFC504_CMD);
257
258 hisi_nfc_dma_transfer(host, 1);
259
260 return 0;
261}
262
263static int hisi_nfc_send_cmd_readstart(struct hinfc_host *host)
264{
265 struct mtd_info *mtd = &host->mtd;
266
267 if ((host->addr_value[0] == host->cache_addr_value[0]) &&
268 (host->addr_value[1] == host->cache_addr_value[1]))
269 return 0;
270
271 host->addr_value[0] &= 0xffff0000;
272
273 hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
274 hinfc_write(host, host->addr_value[1], HINFC504_ADDRH);
275 hinfc_write(host, NAND_CMD_READSTART << 8 | NAND_CMD_READ0,
276 HINFC504_CMD);
277
278 hinfc_write(host, 0, HINFC504_LOG_READ_ADDR);
279 hinfc_write(host, mtd->writesize + mtd->oobsize,
280 HINFC504_LOG_READ_LEN);
281
282 hisi_nfc_dma_transfer(host, 0);
283
284 host->cache_addr_value[0] = host->addr_value[0];
285 host->cache_addr_value[1] = host->addr_value[1];
286
287 return 0;
288}
289
290static int hisi_nfc_send_cmd_erase(struct hinfc_host *host)
291{
292 hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
293 hinfc_write(host, (NAND_CMD_ERASE2 << 8) | NAND_CMD_ERASE1,
294 HINFC504_CMD);
295
296 hinfc_write(host, HINFC504_OP_WAIT_READY_EN
297 | HINFC504_OP_CMD2_EN
298 | HINFC504_OP_CMD1_EN
299 | HINFC504_OP_ADDR_EN
300 | ((host->chipselect & HINFC504_OP_NF_CS_MASK)
301 << HINFC504_OP_NF_CS_SHIFT)
302 | ((host->addr_cycle & HINFC504_OP_ADDR_CYCLE_MASK)
303 << HINFC504_OP_ADDR_CYCLE_SHIFT),
304 HINFC504_OP);
305
306 wait_controller_finished(host);
307
308 return 0;
309}
310
311static int hisi_nfc_send_cmd_readid(struct hinfc_host *host)
312{
313 hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM);
314 hinfc_write(host, NAND_CMD_READID, HINFC504_CMD);
315 hinfc_write(host, 0, HINFC504_ADDRL);
316
317 hinfc_write(host, HINFC504_OP_CMD1_EN | HINFC504_OP_ADDR_EN
318 | HINFC504_OP_READ_DATA_EN
319 | ((host->chipselect & HINFC504_OP_NF_CS_MASK)
320 << HINFC504_OP_NF_CS_SHIFT)
321 | 1 << HINFC504_OP_ADDR_CYCLE_SHIFT, HINFC504_OP);
322
323 wait_controller_finished(host);
324
325 return 0;
326}
327
328static int hisi_nfc_send_cmd_status(struct hinfc_host *host)
329{
330 hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM);
331 hinfc_write(host, NAND_CMD_STATUS, HINFC504_CMD);
332 hinfc_write(host, HINFC504_OP_CMD1_EN
333 | HINFC504_OP_READ_DATA_EN
334 | ((host->chipselect & HINFC504_OP_NF_CS_MASK)
335 << HINFC504_OP_NF_CS_SHIFT),
336 HINFC504_OP);
337
338 wait_controller_finished(host);
339
340 return 0;
341}
342
343static int hisi_nfc_send_cmd_reset(struct hinfc_host *host, int chipselect)
344{
345 hinfc_write(host, NAND_CMD_RESET, HINFC504_CMD);
346
347 hinfc_write(host, HINFC504_OP_CMD1_EN
348 | ((chipselect & HINFC504_OP_NF_CS_MASK)
349 << HINFC504_OP_NF_CS_SHIFT)
350 | HINFC504_OP_WAIT_READY_EN,
351 HINFC504_OP);
352
353 wait_controller_finished(host);
354
355 return 0;
356}
357
358static void hisi_nfc_select_chip(struct mtd_info *mtd, int chipselect)
359{
360 struct nand_chip *chip = mtd->priv;
361 struct hinfc_host *host = chip->priv;
362
363 if (chipselect < 0)
364 return;
365
366 host->chipselect = chipselect;
367}
368
369static uint8_t hisi_nfc_read_byte(struct mtd_info *mtd)
370{
371 struct nand_chip *chip = mtd->priv;
372 struct hinfc_host *host = chip->priv;
373
374 if (host->command == NAND_CMD_STATUS)
375 return *(uint8_t *)(host->mmio);
376
377 host->offset++;
378
379 if (host->command == NAND_CMD_READID)
380 return *(uint8_t *)(host->mmio + host->offset - 1);
381
382 return *(uint8_t *)(host->buffer + host->offset - 1);
383}
384
385static u16 hisi_nfc_read_word(struct mtd_info *mtd)
386{
387 struct nand_chip *chip = mtd->priv;
388 struct hinfc_host *host = chip->priv;
389
390 host->offset += 2;
391 return *(u16 *)(host->buffer + host->offset - 2);
392}
393
394static void
395hisi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
396{
397 struct nand_chip *chip = mtd->priv;
398 struct hinfc_host *host = chip->priv;
399
400 memcpy(host->buffer + host->offset, buf, len);
401 host->offset += len;
402}
403
404static void hisi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
405{
406 struct nand_chip *chip = mtd->priv;
407 struct hinfc_host *host = chip->priv;
408
409 memcpy(buf, host->buffer + host->offset, len);
410 host->offset += len;
411}
412
413static void set_addr(struct mtd_info *mtd, int column, int page_addr)
414{
415 struct nand_chip *chip = mtd->priv;
416 struct hinfc_host *host = chip->priv;
417 unsigned int command = host->command;
418
419 host->addr_cycle = 0;
420 host->addr_value[0] = 0;
421 host->addr_value[1] = 0;
422
423 /* Serially input address */
424 if (column != -1) {
425 /* Adjust columns for 16 bit buswidth */
426 if (chip->options & NAND_BUSWIDTH_16 &&
427 !nand_opcode_8bits(command))
428 column >>= 1;
429
430 host->addr_value[0] = column & 0xffff;
431 host->addr_cycle = 2;
432 }
433 if (page_addr != -1) {
434 host->addr_value[0] |= (page_addr & 0xffff)
435 << (host->addr_cycle * 8);
436 host->addr_cycle += 2;
437 /* One more address cycle for devices > 128MiB */
438 if (chip->chipsize > (128 << 20)) {
439 host->addr_cycle += 1;
440 if (host->command == NAND_CMD_ERASE1)
441 host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16;
442 else
443 host->addr_value[1] |= ((page_addr >> 16) & 0xff);
444 }
445 }
446}
447
448static void hisi_nfc_cmdfunc(struct mtd_info *mtd, unsigned command, int column,
449 int page_addr)
450{
451 struct nand_chip *chip = mtd->priv;
452 struct hinfc_host *host = chip->priv;
453 int is_cache_invalid = 1;
454 unsigned int flag = 0;
455
456 host->command = command;
457
458 switch (command) {
459 case NAND_CMD_READ0:
460 case NAND_CMD_READOOB:
461 if (command == NAND_CMD_READ0)
462 host->offset = column;
463 else
464 host->offset = column + mtd->writesize;
465
466 is_cache_invalid = 0;
467 set_addr(mtd, column, page_addr);
468 hisi_nfc_send_cmd_readstart(host);
469 break;
470
471 case NAND_CMD_SEQIN:
472 host->offset = column;
473 set_addr(mtd, column, page_addr);
474 break;
475
476 case NAND_CMD_ERASE1:
477 set_addr(mtd, column, page_addr);
478 break;
479
480 case NAND_CMD_PAGEPROG:
481 hisi_nfc_send_cmd_pageprog(host);
482 break;
483
484 case NAND_CMD_ERASE2:
485 hisi_nfc_send_cmd_erase(host);
486 break;
487
488 case NAND_CMD_READID:
489 host->offset = column;
490 memset(host->mmio, 0, 0x10);
491 hisi_nfc_send_cmd_readid(host);
492 break;
493
494 case NAND_CMD_STATUS:
495 flag = hinfc_read(host, HINFC504_CON);
496 if (chip->ecc.mode == NAND_ECC_HW)
497 hinfc_write(host,
498 flag & ~(HINFC504_CON_ECCTYPE_MASK <<
499 HINFC504_CON_ECCTYPE_SHIFT), HINFC504_CON);
500
501 host->offset = 0;
502 memset(host->mmio, 0, 0x10);
503 hisi_nfc_send_cmd_status(host);
504 hinfc_write(host, flag, HINFC504_CON);
505 break;
506
507 case NAND_CMD_RESET:
508 hisi_nfc_send_cmd_reset(host, host->chipselect);
509 break;
510
511 default:
512 dev_err(host->dev, "Error: unsupported cmd (cmd=%x, col=%x, page=%x)\n",
513 command, column, page_addr);
514 }
515
516 if (is_cache_invalid) {
517 host->cache_addr_value[0] = ~0;
518 host->cache_addr_value[1] = ~0;
519 }
520}
521
522static irqreturn_t hinfc_irq_handle(int irq, void *devid)
523{
524 struct hinfc_host *host = devid;
525 unsigned int flag;
526
527 flag = hinfc_read(host, HINFC504_INTS);
528 /* store interrupts state */
529 host->irq_status |= flag;
530
531 if (flag & HINFC504_INTS_DMA) {
532 hinfc_write(host, HINFC504_INTCLR_DMA, HINFC504_INTCLR);
533 complete(&host->cmd_complete);
534 } else if (flag & HINFC504_INTS_CE) {
535 hinfc_write(host, HINFC504_INTCLR_CE, HINFC504_INTCLR);
536 } else if (flag & HINFC504_INTS_UE) {
537 hinfc_write(host, HINFC504_INTCLR_UE, HINFC504_INTCLR);
538 }
539
540 return IRQ_HANDLED;
541}
542
543static int hisi_nand_read_page_hwecc(struct mtd_info *mtd,
544 struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
545{
546 struct hinfc_host *host = chip->priv;
547 int max_bitflips = 0, stat = 0, stat_max = 0, status_ecc;
548 int stat_1, stat_2;
549
550 chip->read_buf(mtd, buf, mtd->writesize);
551 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
552
553 /* errors which can not be corrected by ECC */
554 if (host->irq_status & HINFC504_INTS_UE) {
555 mtd->ecc_stats.failed++;
556 } else if (host->irq_status & HINFC504_INTS_CE) {
557 /* TODO: add support for other ECC modes! */
558 switch (chip->ecc.strength) {
559 case 16:
560 status_ecc = hinfc_read(host, HINFC504_ECC_STATUS) >>
561 HINFC504_ECC_16_BIT_SHIFT & 0x0fff;
562 stat_2 = status_ecc & 0x3f;
563 stat_1 = status_ecc >> 6 & 0x3f;
564 stat = stat_1 + stat_2;
565 stat_max = max_t(int, stat_1, stat_2);
566 }
567 mtd->ecc_stats.corrected += stat;
568 max_bitflips = max_t(int, max_bitflips, stat_max);
569 }
570 host->irq_status = 0;
571
572 return max_bitflips;
573}
574
575static int hisi_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
576 int page)
577{
578 struct hinfc_host *host = chip->priv;
579
580 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
581 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
582
583 if (host->irq_status & HINFC504_INTS_UE) {
584 host->irq_status = 0;
585 return -EBADMSG;
586 }
587
588 host->irq_status = 0;
589 return 0;
590}
591
592static int hisi_nand_write_page_hwecc(struct mtd_info *mtd,
593 struct nand_chip *chip, const uint8_t *buf, int oob_required)
594{
595 chip->write_buf(mtd, buf, mtd->writesize);
596 if (oob_required)
597 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
598
599 return 0;
600}
601
602static void hisi_nfc_host_init(struct hinfc_host *host)
603{
604 struct nand_chip *chip = &host->chip;
605 unsigned int flag = 0;
606
607 host->version = hinfc_read(host, HINFC_VERSION);
608 host->addr_cycle = 0;
609 host->addr_value[0] = 0;
610 host->addr_value[1] = 0;
611 host->cache_addr_value[0] = ~0;
612 host->cache_addr_value[1] = ~0;
613 host->chipselect = 0;
614
615 /* default: 2K page size, ECC disabled; reconfigured later as needed */
616 flag = HINFC504_CON_OP_MODE_NORMAL | HINFC504_CON_READY_BUSY_SEL
617 | ((0x001 & HINFC504_CON_PAGESIZE_MASK)
618 << HINFC504_CON_PAGEISZE_SHIFT)
619 | ((0x0 & HINFC504_CON_ECCTYPE_MASK)
620 << HINFC504_CON_ECCTYPE_SHIFT)
621 | ((chip->options & NAND_BUSWIDTH_16) ?
622 HINFC504_CON_BUS_WIDTH : 0);
623 hinfc_write(host, flag, HINFC504_CON);
624
625 memset(host->mmio, 0xff, HINFC504_BUFFER_BASE_ADDRESS_LEN);
626
627 hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH,
628 HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH);
629
630 /* enable DMA irq */
631 hinfc_write(host, HINFC504_INTEN_DMA, HINFC504_INTEN);
632}
633
634static struct nand_ecclayout nand_ecc_2K_16bits = {
635 .oobavail = 6,
636 .oobfree = { {2, 6} },
637};
638
639static int hisi_nfc_ecc_probe(struct hinfc_host *host)
640{
641 unsigned int flag;
642 int size, strength, ecc_bits;
643 struct device *dev = host->dev;
644 struct nand_chip *chip = &host->chip;
645 struct mtd_info *mtd = &host->mtd;
646 struct device_node *np = host->dev->of_node;
647
648 size = of_get_nand_ecc_step_size(np);
649 strength = of_get_nand_ecc_strength(np);
650 if (size != 1024) {
651 dev_err(dev, "invalid ecc size: %d\n", size);
652 return -EINVAL;
653 }
654
655 if ((size == 1024) && ((strength != 8) && (strength != 16) &&
656 (strength != 24) && (strength != 40))) {
657 dev_err(dev, "ecc size and strength do not match\n");
658 return -EINVAL;
659 }
660
661 chip->ecc.size = size;
662 chip->ecc.strength = strength;
663
664 chip->ecc.read_page = hisi_nand_read_page_hwecc;
665 chip->ecc.read_oob = hisi_nand_read_oob;
666 chip->ecc.write_page = hisi_nand_write_page_hwecc;
667
668 switch (chip->ecc.strength) {
669 case 16:
670 ecc_bits = 6;
671 if (mtd->writesize == 2048)
672 chip->ecc.layout = &nand_ecc_2K_16bits;
673
674 /* TODO: add more page size support */
675 break;
676
677 /* TODO: add more ecc strength support */
678 default:
679 dev_err(dev, "unsupported strength: %d\n", chip->ecc.strength);
680 return -EINVAL;
681 }
682
683 flag = hinfc_read(host, HINFC504_CON);
684 /* add ecc type configure */
685 flag |= ((ecc_bits & HINFC504_CON_ECCTYPE_MASK)
686 << HINFC504_CON_ECCTYPE_SHIFT);
687 hinfc_write(host, flag, HINFC504_CON);
688
689 /* enable ecc irq */
690 flag = hinfc_read(host, HINFC504_INTEN) & 0xfff;
691 hinfc_write(host, flag | HINFC504_INTEN_UE | HINFC504_INTEN_CE,
692 HINFC504_INTEN);
693
694 return 0;
695}
696
697static int hisi_nfc_probe(struct platform_device *pdev)
698{
699 int ret = 0, irq, buswidth, flag, max_chips = HINFC504_MAX_CHIP;
700 struct device *dev = &pdev->dev;
701 struct hinfc_host *host;
702 struct nand_chip *chip;
703 struct mtd_info *mtd;
704 struct resource *res;
705 struct device_node *np = dev->of_node;
706 struct mtd_part_parser_data ppdata;
707
708 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
709 if (!host)
710 return -ENOMEM;
711 host->dev = dev;
712
713 platform_set_drvdata(pdev, host);
714 chip = &host->chip;
715 mtd = &host->mtd;
716
717 irq = platform_get_irq(pdev, 0);
718 if (irq < 0) {
719 dev_err(dev, "no IRQ resource defined\n");
720 ret = -ENXIO;
721 goto err_res;
722 }
723
724 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
725 host->iobase = devm_ioremap_resource(dev, res);
726 if (IS_ERR(host->iobase)) {
727 ret = PTR_ERR(host->iobase);
728 goto err_res;
729 }
730
731 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
732 host->mmio = devm_ioremap_resource(dev, res);
733 if (IS_ERR(host->mmio)) {
734 ret = PTR_ERR(host->mmio);
735 dev_err(dev, "devm_ioremap_resource[1] failed\n");
736 goto err_res;
737 }
738
739 mtd->priv = chip;
740 mtd->owner = THIS_MODULE;
741 mtd->name = "hisi_nand";
742 mtd->dev.parent = &pdev->dev;
743
744 chip->priv = host;
745 chip->cmdfunc = hisi_nfc_cmdfunc;
746 chip->select_chip = hisi_nfc_select_chip;
747 chip->read_byte = hisi_nfc_read_byte;
748 chip->read_word = hisi_nfc_read_word;
749 chip->write_buf = hisi_nfc_write_buf;
750 chip->read_buf = hisi_nfc_read_buf;
751 chip->chip_delay = HINFC504_CHIP_DELAY;
752
753 chip->ecc.mode = of_get_nand_ecc_mode(np);
754
755 buswidth = of_get_nand_bus_width(np);
756 if (buswidth == 16)
757 chip->options |= NAND_BUSWIDTH_16;
758
759 hisi_nfc_host_init(host);
760
761 ret = devm_request_irq(dev, irq, hinfc_irq_handle, IRQF_DISABLED,
762 "nandc", host);
763 if (ret) {
764 dev_err(dev, "failed to request IRQ\n");
765 goto err_res;
766 }
767
768 ret = nand_scan_ident(mtd, max_chips, NULL);
769 if (ret) {
770 ret = -ENODEV;
771 goto err_res;
772 }
773
774 host->buffer = dmam_alloc_coherent(dev, mtd->writesize + mtd->oobsize,
775 &host->dma_buffer, GFP_KERNEL);
776 if (!host->buffer) {
777 ret = -ENOMEM;
778 goto err_res;
779 }
780
781 host->dma_oob = host->dma_buffer + mtd->writesize;
782 memset(host->buffer, 0xff, mtd->writesize + mtd->oobsize);
783
784 flag = hinfc_read(host, HINFC504_CON);
785 flag &= ~(HINFC504_CON_PAGESIZE_MASK << HINFC504_CON_PAGEISZE_SHIFT);
786 switch (mtd->writesize) {
787 case 2048:
788 flag |= (0x001 << HINFC504_CON_PAGEISZE_SHIFT); break;
789 /*
790 * TODO: add more pagesize support,
791 * default pagesize has been set in hisi_nfc_host_init
792 */
793 default:
794 dev_err(dev, "unsupported page size; only 2KB-page NAND flash is supported\n");
795 ret = -EINVAL;
796 goto err_res;
797 }
798 hinfc_write(host, flag, HINFC504_CON);
799
800 if (chip->ecc.mode == NAND_ECC_HW)
801 hisi_nfc_ecc_probe(host);
802
803 ret = nand_scan_tail(mtd);
804 if (ret) {
805 dev_err(dev, "nand_scan_tail failed: %d\n", ret);
806 goto err_res;
807 }
808
809 ppdata.of_node = np;
810 ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
811 if (ret) {
812 dev_err(dev, "Err MTD partition=%d\n", ret);
813 goto err_mtd;
814 }
815
816 return 0;
817
818err_mtd:
819 nand_release(mtd);
820err_res:
821 return ret;
822}
823
824static int hisi_nfc_remove(struct platform_device *pdev)
825{
826 struct hinfc_host *host = platform_get_drvdata(pdev);
827 struct mtd_info *mtd = &host->mtd;
828
829 nand_release(mtd);
830
831 return 0;
832}
833
834#ifdef CONFIG_PM_SLEEP
835static int hisi_nfc_suspend(struct device *dev)
836{
837 struct hinfc_host *host = dev_get_drvdata(dev);
838 unsigned long timeout = jiffies + HINFC504_NFC_PM_TIMEOUT;
839
840 while (time_before(jiffies, timeout)) {
841 if (((hinfc_read(host, HINFC504_STATUS) & 0x1) == 0x0) &&
842 (hinfc_read(host, HINFC504_DMA_CTRL) &
843 HINFC504_DMA_CTRL_DMA_START)) {
844 cond_resched();
845 return 0;
846 }
847 }
848
849 dev_err(host->dev, "NAND controller suspend timed out.\n");
850
851 return -EAGAIN;
852}
853
854static int hisi_nfc_resume(struct device *dev)
855{
856 int cs;
857 struct hinfc_host *host = dev_get_drvdata(dev);
858 struct nand_chip *chip = &host->chip;
859
860 for (cs = 0; cs < chip->numchips; cs++)
861 hisi_nfc_send_cmd_reset(host, cs);
862 hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH,
863 HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH);
864
865 return 0;
866}
867#endif
868static SIMPLE_DEV_PM_OPS(hisi_nfc_pm_ops, hisi_nfc_suspend, hisi_nfc_resume);
869
870static const struct of_device_id nfc_id_table[] = {
871 { .compatible = "hisilicon,504-nfc" },
872 {}
873};
874MODULE_DEVICE_TABLE(of, nfc_id_table);
875
876static struct platform_driver hisi_nfc_driver = {
877 .driver = {
878 .name = "hisi_nand",
879 .of_match_table = nfc_id_table,
880 .pm = &hisi_nfc_pm_ops,
881 },
882 .probe = hisi_nfc_probe,
883 .remove = hisi_nfc_remove,
884};
885
886module_platform_driver(hisi_nfc_driver);
887
888MODULE_LICENSE("GPL");
889MODULE_AUTHOR("Zhou Wang");
890MODULE_AUTHOR("Zhiyong Cai");
891MODULE_DESCRIPTION("HiSilicon NAND Flash Controller Driver");
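One detail of the new driver worth unpacking: in hisi_nand_read_page_hwecc(), the 16-bit-strength case extracts two 6-bit per-region bitflip counters packed into HINFC504_ECC_STATUS above bit 12. A standalone toy demo of that decode (the register value is made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned int reg = 0x0083000;          /* hypothetical status value */
		unsigned int status_ecc = (reg >> 12) & 0x0fff;
		int stat_2 = status_ecc & 0x3f;        /* region 2: low 6 bits  */
		int stat_1 = (status_ecc >> 6) & 0x3f; /* region 1: next 6 bits */

		/* prints: corrected=5, max per region=3 */
		printf("corrected=%d, max per region=%d\n",
		       stat_1 + stat_2, stat_1 > stat_2 ? stat_1 : stat_2);
		return 0;
	}

The sum feeds mtd->ecc_stats.corrected, while the per-region maximum is what the function returns, consistent with the max-bitflips convention noted earlier.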
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index 1633ec9c5108..ebf2cce04cba 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -69,7 +69,7 @@ struct jz_nand {
69 69
70 int selected_bank; 70 int selected_bank;
71 71
72 struct jz_nand_platform_data *pdata; 72 struct gpio_desc *busy_gpio;
73 bool is_reading; 73 bool is_reading;
74}; 74};
75 75
@@ -131,7 +131,7 @@ static void jz_nand_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
131static int jz_nand_dev_ready(struct mtd_info *mtd) 131static int jz_nand_dev_ready(struct mtd_info *mtd)
132{ 132{
133 struct jz_nand *nand = mtd_to_jz_nand(mtd); 133 struct jz_nand *nand = mtd_to_jz_nand(mtd);
134 return gpio_get_value_cansleep(nand->pdata->busy_gpio); 134 return gpiod_get_value_cansleep(nand->busy_gpio);
135} 135}
136 136
137static void jz_nand_hwctl(struct mtd_info *mtd, int mode) 137static void jz_nand_hwctl(struct mtd_info *mtd, int mode)
@@ -423,14 +423,12 @@ static int jz_nand_probe(struct platform_device *pdev)
423 if (ret) 423 if (ret)
424 goto err_free; 424 goto err_free;
425 425
426 if (pdata && gpio_is_valid(pdata->busy_gpio)) { 426 nand->busy_gpio = devm_gpiod_get_optional(&pdev->dev, "busy", GPIOD_IN);
427 ret = gpio_request(pdata->busy_gpio, "NAND busy pin"); 427 if (IS_ERR(nand->busy_gpio)) {
428 if (ret) { 428 ret = PTR_ERR(nand->busy_gpio);
429 dev_err(&pdev->dev, 429 dev_err(&pdev->dev, "Failed to request busy gpio %d\n",
430 "Failed to request busy gpio %d: %d\n", 430 ret);
431 pdata->busy_gpio, ret); 431 goto err_iounmap_mmio;
432 goto err_iounmap_mmio;
433 }
434 } 432 }
435 433
436 mtd = &nand->mtd; 434 mtd = &nand->mtd;
@@ -454,10 +452,9 @@ static int jz_nand_probe(struct platform_device *pdev)
454 chip->cmd_ctrl = jz_nand_cmd_ctrl; 452 chip->cmd_ctrl = jz_nand_cmd_ctrl;
455 chip->select_chip = jz_nand_select_chip; 453 chip->select_chip = jz_nand_select_chip;
456 454
457 if (pdata && gpio_is_valid(pdata->busy_gpio)) 455 if (nand->busy_gpio)
458 chip->dev_ready = jz_nand_dev_ready; 456 chip->dev_ready = jz_nand_dev_ready;
459 457
460 nand->pdata = pdata;
461 platform_set_drvdata(pdev, nand); 458 platform_set_drvdata(pdev, nand);
462 459
463 /* We are going to autodetect NAND chips in the banks specified in the 460 /* We are going to autodetect NAND chips in the banks specified in the
@@ -496,7 +493,7 @@ static int jz_nand_probe(struct platform_device *pdev)
496 } 493 }
497 if (chipnr == 0) { 494 if (chipnr == 0) {
498 dev_err(&pdev->dev, "No NAND chips found\n"); 495 dev_err(&pdev->dev, "No NAND chips found\n");
499 goto err_gpio_busy; 496 goto err_iounmap_mmio;
500 } 497 }
501 498
502 if (pdata && pdata->ident_callback) { 499 if (pdata && pdata->ident_callback) {
@@ -533,9 +530,6 @@ err_unclaim_banks:
533 nand->bank_base[bank - 1]); 530 nand->bank_base[bank - 1]);
534 } 531 }
535 writel(0, nand->base + JZ_REG_NAND_CTRL); 532 writel(0, nand->base + JZ_REG_NAND_CTRL);
536err_gpio_busy:
537 if (pdata && gpio_is_valid(pdata->busy_gpio))
538 gpio_free(pdata->busy_gpio);
539err_iounmap_mmio: 533err_iounmap_mmio:
540 jz_nand_iounmap_resource(nand->mem, nand->base); 534 jz_nand_iounmap_resource(nand->mem, nand->base);
541err_free: 535err_free:
@@ -546,7 +540,6 @@ err_free:
546static int jz_nand_remove(struct platform_device *pdev) 540static int jz_nand_remove(struct platform_device *pdev)
547{ 541{
548 struct jz_nand *nand = platform_get_drvdata(pdev); 542 struct jz_nand *nand = platform_get_drvdata(pdev);
549 struct jz_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
550 size_t i; 543 size_t i;
551 544
552 nand_release(&nand->mtd); 545 nand_release(&nand->mtd);
@@ -562,8 +555,6 @@ static int jz_nand_remove(struct platform_device *pdev)
562 gpio_free(JZ_GPIO_MEM_CS0 + bank - 1); 555 gpio_free(JZ_GPIO_MEM_CS0 + bank - 1);
563 } 556 }
564 } 557 }
565 if (pdata && gpio_is_valid(pdata->busy_gpio))
566 gpio_free(pdata->busy_gpio);
567 558
568 jz_nand_iounmap_resource(nand->mem, nand->base); 559 jz_nand_iounmap_resource(nand->mem, nand->base);
569 560
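The jz4740 hunks above are a straight conversion from legacy integer GPIOs to the descriptor API, and the deleted gpio_free() unwind paths are not an oversight: devm_gpiod_get_optional() is device-managed, returns NULL when no "busy" GPIO is described (hence the plain if (nand->busy_gpio) guard), and returns an ERR_PTR only on a real error. A minimal sketch of the pattern:

	struct gpio_desc *busy;

	busy = devm_gpiod_get_optional(&pdev->dev, "busy", GPIOD_IN);
	if (IS_ERR(busy))
		return PTR_ERR(busy);  /* real failure, e.g. -EPROBE_DEFER */
	if (busy)                      /* NULL just means "not wired up" */
		chip->dev_ready = jz_nand_dev_ready;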
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 41585dfb206f..df7eb4ff07d1 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -157,7 +157,6 @@ static uint8_t nand_read_byte(struct mtd_info *mtd)
157 157
158/** 158/**
159 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip 159 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
160 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
161 * @mtd: MTD device structure 160 * @mtd: MTD device structure
162 * 161 *
163 * Default read function for 16bit buswidth with endianness conversion. 162 * Default read function for 16bit buswidth with endianness conversion.
@@ -1751,11 +1750,10 @@ static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1751static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1750static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1752 int page) 1751 int page)
1753{ 1752{
1754 uint8_t *buf = chip->oob_poi;
1755 int length = mtd->oobsize; 1753 int length = mtd->oobsize;
1756 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad; 1754 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
1757 int eccsize = chip->ecc.size; 1755 int eccsize = chip->ecc.size;
1758 uint8_t *bufpoi = buf; 1756 uint8_t *bufpoi = chip->oob_poi;
1759 int i, toread, sndrnd = 0, pos; 1757 int i, toread, sndrnd = 0, pos;
1760 1758
1761 chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page); 1759 chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
@@ -2944,6 +2942,16 @@ static void nand_resume(struct mtd_info *mtd)
2944 __func__); 2942 __func__);
2945} 2943}
2946 2944
2945/**
2946 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
2947 * prevent further operations
2948 * @mtd: MTD device structure
2949 */
2950static void nand_shutdown(struct mtd_info *mtd)
2951{
2952 nand_get_device(mtd, FL_SHUTDOWN);
2953}
2954
2947/* Set default functions */ 2955/* Set default functions */
2948static void nand_set_defaults(struct nand_chip *chip, int busw) 2956static void nand_set_defaults(struct nand_chip *chip, int busw)
2949{ 2957{
@@ -4028,22 +4036,24 @@ int nand_scan_tail(struct mtd_info *mtd)
4028 ecc->read_oob = nand_read_oob_std; 4036 ecc->read_oob = nand_read_oob_std;
4029 ecc->write_oob = nand_write_oob_std; 4037 ecc->write_oob = nand_write_oob_std;
4030 /* 4038 /*
4031 * Board driver should supply ecc.size and ecc.bytes values to 4039 * Board driver should supply ecc.size and ecc.strength values
4032 * select how many bits are correctable; see nand_bch_init() 4040 * to select how many bits are correctable. Otherwise, default
4033 * for details. Otherwise, default to 4 bits for large page 4041 * to 4 bits for large page devices.
4034 * devices.
4035 */ 4042 */
4036 if (!ecc->size && (mtd->oobsize >= 64)) { 4043 if (!ecc->size && (mtd->oobsize >= 64)) {
4037 ecc->size = 512; 4044 ecc->size = 512;
4038 ecc->bytes = DIV_ROUND_UP(13 * ecc->strength, 8); 4045 ecc->strength = 4;
4039 } 4046 }
4047
4048 /* See nand_bch_init() for details. */
4049 ecc->bytes = DIV_ROUND_UP(
4050 ecc->strength * fls(8 * ecc->size), 8);
4040 ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes, 4051 ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes,
4041 &ecc->layout); 4052 &ecc->layout);
4042 if (!ecc->priv) { 4053 if (!ecc->priv) {
4043 pr_warn("BCH ECC initialization failed!\n"); 4054 pr_warn("BCH ECC initialization failed!\n");
4044 BUG(); 4055 BUG();
4045 } 4056 }
4046 ecc->strength = ecc->bytes * 8 / fls(8 * ecc->size);
4047 break; 4057 break;
4048 4058
4049 case NAND_ECC_NONE: 4059 case NAND_ECC_NONE:
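To make the relocated formula concrete: with the defaults chosen above (ecc->size = 512 and ecc->strength = 4), 8 * 512 = 4096 gives fls() = 13, meaning each correctable bit costs 13 bits of parity over a 512-byte step, so ecc->bytes = DIV_ROUND_UP(4 * 13, 8) = 7. Hoisting the computation into nand_scan_tail() is also what allows the sunxi hunk further down to delete its private copy of the identical expression.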
@@ -4146,6 +4156,7 @@ int nand_scan_tail(struct mtd_info *mtd)
4146 mtd->_unlock = NULL; 4156 mtd->_unlock = NULL;
4147 mtd->_suspend = nand_suspend; 4157 mtd->_suspend = nand_suspend;
4148 mtd->_resume = nand_resume; 4158 mtd->_resume = nand_resume;
4159 mtd->_reboot = nand_shutdown;
4149 mtd->_block_isreserved = nand_block_isreserved; 4160 mtd->_block_isreserved = nand_block_isreserved;
4150 mtd->_block_isbad = nand_block_isbad; 4161 mtd->_block_isbad = nand_block_isbad;
4151 mtd->_block_markbad = nand_block_markbad; 4162 mtd->_block_markbad = nand_block_markbad;
@@ -4161,7 +4172,7 @@ int nand_scan_tail(struct mtd_info *mtd)
4161 * properly set. 4172 * properly set.
4162 */ 4173 */
4163 if (!mtd->bitflip_threshold) 4174 if (!mtd->bitflip_threshold)
4164 mtd->bitflip_threshold = mtd->ecc_strength; 4175 mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
4165 4176
4166 /* Check, if we should skip the bad block table scan */ 4177 /* Check, if we should skip the bad block table scan */
4167 if (chip->options & NAND_SKIP_BBTSCAN) 4178 if (chip->options & NAND_SKIP_BBTSCAN)
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index ab5bbf567439..f2324271b94e 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -245,7 +245,6 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
245#define STATE_DATAOUT 0x00001000 /* waiting for page data output */ 245#define STATE_DATAOUT 0x00001000 /* waiting for page data output */
246#define STATE_DATAOUT_ID 0x00002000 /* waiting for ID bytes output */ 246#define STATE_DATAOUT_ID 0x00002000 /* waiting for ID bytes output */
247#define STATE_DATAOUT_STATUS 0x00003000 /* waiting for status output */ 247#define STATE_DATAOUT_STATUS 0x00003000 /* waiting for status output */
248#define STATE_DATAOUT_STATUS_M 0x00004000 /* waiting for multi-plane status output */
249#define STATE_DATAOUT_MASK 0x00007000 /* data output states mask */ 248#define STATE_DATAOUT_MASK 0x00007000 /* data output states mask */
250 249
251/* Previous operation is done, ready to accept new requests */ 250/* Previous operation is done, ready to accept new requests */
@@ -269,7 +268,6 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
269#define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */ 268#define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */
270#define OPT_PAGE512 0x00000002 /* 512-byte page chips */ 269#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
271#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */ 270#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
272#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
273#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */ 271#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
274#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */ 272#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
275#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */ 273#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
@@ -1096,8 +1094,6 @@ static char *get_state_name(uint32_t state)
1096 return "STATE_DATAOUT_ID"; 1094 return "STATE_DATAOUT_ID";
1097 case STATE_DATAOUT_STATUS: 1095 case STATE_DATAOUT_STATUS:
1098 return "STATE_DATAOUT_STATUS"; 1096 return "STATE_DATAOUT_STATUS";
1099 case STATE_DATAOUT_STATUS_M:
1100 return "STATE_DATAOUT_STATUS_M";
1101 case STATE_READY: 1097 case STATE_READY:
1102 return "STATE_READY"; 1098 return "STATE_READY";
1103 case STATE_UNKNOWN: 1099 case STATE_UNKNOWN:
@@ -1865,7 +1861,6 @@ static void switch_state(struct nandsim *ns)
1865 break; 1861 break;
1866 1862
1867 case STATE_DATAOUT_STATUS: 1863 case STATE_DATAOUT_STATUS:
1868 case STATE_DATAOUT_STATUS_M:
1869 ns->regs.count = ns->regs.num = 0; 1864 ns->regs.count = ns->regs.num = 0;
1870 break; 1865 break;
1871 1866
@@ -2005,7 +2000,6 @@ static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
2005 } 2000 }
2006 2001
2007 if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS 2002 if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS
2008 || NS_STATE(ns->state) == STATE_DATAOUT_STATUS_M
2009 || NS_STATE(ns->state) == STATE_DATAOUT) { 2003 || NS_STATE(ns->state) == STATE_DATAOUT) {
2010 int row = ns->regs.row; 2004 int row = ns->regs.row;
2011 2005
@@ -2343,6 +2337,7 @@ static int __init ns_init_module(void)
2343 } 2337 }
2344 chip->ecc.mode = NAND_ECC_SOFT_BCH; 2338 chip->ecc.mode = NAND_ECC_SOFT_BCH;
2345 chip->ecc.size = 512; 2339 chip->ecc.size = 512;
2340 chip->ecc.strength = bch;
2346 chip->ecc.bytes = eccbytes; 2341 chip->ecc.bytes = eccbytes;
2347 NS_INFO("using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size); 2342 NS_INFO("using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size);
2348 } 2343 }
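The one-line nandsim addition dovetails with the nand_base hunk above: nand_scan_tail() now derives ecc->bytes for soft BCH from ecc->strength, so any NAND_ECC_SOFT_BCH user must populate strength (here from the bch module parameter) before the scan, where previously strength was back-computed from the byte count.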
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 63f858e6bf39..60fa89939c24 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1048,10 +1048,9 @@ static int omap_dev_ready(struct mtd_info *mtd)
1048 * @mtd: MTD device structure 1048 * @mtd: MTD device structure
1049 * @mode: Read/Write mode 1049 * @mode: Read/Write mode
1050 * 1050 *
1051 * When using BCH, sector size is hardcoded to 512 bytes. 1051 * When using BCH with SW correction (i.e. no ELM), sector size is set
1052 * Using wrapping mode 6 both for reading and writing if ELM module not uses 1052 * to 512 bytes and we use BCH_WRAPMODE_6 wrapping mode
1053 * for error correction. 1053 * for both reading and writing with:
1054 * On writing,
1055 * eccsize0 = 0 (no additional protected byte in spare area) 1054 * eccsize0 = 0 (no additional protected byte in spare area)
1056 * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area) 1055 * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
1057 */ 1056 */
@@ -1071,15 +1070,9 @@ static void __maybe_unused omap_enable_hwecc_bch(struct mtd_info *mtd, int mode)
1071 case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: 1070 case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1072 bch_type = 0; 1071 bch_type = 0;
1073 nsectors = 1; 1072 nsectors = 1;
1074 if (mode == NAND_ECC_READ) { 1073 wr_mode = BCH_WRAPMODE_6;
1075 wr_mode = BCH_WRAPMODE_6; 1074 ecc_size0 = BCH_ECC_SIZE0;
1076 ecc_size0 = BCH_ECC_SIZE0; 1075 ecc_size1 = BCH_ECC_SIZE1;
1077 ecc_size1 = BCH_ECC_SIZE1;
1078 } else {
1079 wr_mode = BCH_WRAPMODE_6;
1080 ecc_size0 = BCH_ECC_SIZE0;
1081 ecc_size1 = BCH_ECC_SIZE1;
1082 }
1083 break; 1076 break;
1084 case OMAP_ECC_BCH4_CODE_HW: 1077 case OMAP_ECC_BCH4_CODE_HW:
1085 bch_type = 0; 1078 bch_type = 0;
@@ -1097,15 +1090,9 @@ static void __maybe_unused omap_enable_hwecc_bch(struct mtd_info *mtd, int mode)
1097 case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: 1090 case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1098 bch_type = 1; 1091 bch_type = 1;
1099 nsectors = 1; 1092 nsectors = 1;
1100 if (mode == NAND_ECC_READ) { 1093 wr_mode = BCH_WRAPMODE_6;
1101 wr_mode = BCH_WRAPMODE_6; 1094 ecc_size0 = BCH_ECC_SIZE0;
1102 ecc_size0 = BCH_ECC_SIZE0; 1095 ecc_size1 = BCH_ECC_SIZE1;
1103 ecc_size1 = BCH_ECC_SIZE1;
1104 } else {
1105 wr_mode = BCH_WRAPMODE_6;
1106 ecc_size0 = BCH_ECC_SIZE0;
1107 ecc_size1 = BCH_ECC_SIZE1;
1108 }
1109 break; 1096 break;
1110 case OMAP_ECC_BCH8_CODE_HW: 1097 case OMAP_ECC_BCH8_CODE_HW:
1111 bch_type = 1; 1098 bch_type = 1;
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index ccaa8e283388..6f93b2990d25 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -1110,8 +1110,6 @@ static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
1110 1110
1111 switch (ecc->mode) { 1111 switch (ecc->mode) {
1112 case NAND_ECC_SOFT_BCH: 1112 case NAND_ECC_SOFT_BCH:
1113 ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * ecc->size),
1114 8);
1115 break; 1113 break;
1116 case NAND_ECC_HW: 1114 case NAND_ECC_HW:
1117 ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np); 1115 ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np);
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index 51b9d6af307f..a5dfbfbebfca 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -89,9 +89,10 @@ static int find_boot_record(struct NFTLrecord *nftl)
89 } 89 }
90 90
91 /* To be safer with BIOS, also use erase mark as discriminant */ 91 /* To be safer with BIOS, also use erase mark as discriminant */
92 if ((ret = nftl_read_oob(mtd, block * nftl->EraseSize + 92 ret = nftl_read_oob(mtd, block * nftl->EraseSize +
93 SECTORSIZE + 8, 8, &retlen, 93 SECTORSIZE + 8, 8, &retlen,
94 (char *)&h1) < 0)) { 94 (char *)&h1);
95 if (ret < 0) {
95 printk(KERN_WARNING "ANAND header found at 0x%x in mtd%d, but OOB data read failed (err %d)\n", 96 printk(KERN_WARNING "ANAND header found at 0x%x in mtd%d, but OOB data read failed (err %d)\n",
96 block * nftl->EraseSize, nftl->mbd.mtd->index, ret); 97 block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
97 continue; 98 continue;
@@ -109,8 +110,9 @@ static int find_boot_record(struct NFTLrecord *nftl)
109 } 110 }
110 111
111 /* Finally reread to check ECC */ 112 /* Finally reread to check ECC */
112 if ((ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE, 113 ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE,
113 &retlen, buf) < 0)) { 114 &retlen, buf);
115 if (ret < 0) {
114 printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but ECC read failed (err %d)\n", 116 printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but ECC read failed (err %d)\n",
115 block * nftl->EraseSize, nftl->mbd.mtd->index, ret); 117 block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
116 continue; 118 continue;
@@ -228,9 +230,11 @@ device is already correct.
228The new DiskOnChip driver already scanned the bad block table. Just query it. 230The new DiskOnChip driver already scanned the bad block table. Just query it.
229 if ((i & (SECTORSIZE - 1)) == 0) { 231 if ((i & (SECTORSIZE - 1)) == 0) {
230 /* read one sector for every SECTORSIZE of blocks */ 232 /* read one sector for every SECTORSIZE of blocks */
231 if ((ret = mtd->read(nftl->mbd.mtd, block * nftl->EraseSize + 233 ret = mtd->read(nftl->mbd.mtd,
232 i + SECTORSIZE, SECTORSIZE, &retlen, 234 block * nftl->EraseSize + i +
233 buf)) < 0) { 235 SECTORSIZE, SECTORSIZE,
236 &retlen, buf);
237 if (ret < 0) {
234 printk(KERN_NOTICE "Read of bad sector table failed (err %d)\n", 238 printk(KERN_NOTICE "Read of bad sector table failed (err %d)\n",
235 ret); 239 ret);
236 kfree(nftl->ReplUnitTable); 240 kfree(nftl->ReplUnitTable);
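All three nftlmount hunks cure the same operator-precedence bug: in if ((ret = mtd->read(...) < 0)), the < binds tighter than =, so ret received the boolean outcome of the comparison (0 or 1) rather than the error code, and the subsequent printk reported "err 1" instead of the real errno. A runnable two-path demo of the trap:

	#include <stdio.h>

	static int fails(void) { return -5; } /* stand-in for mtd->read() */

	int main(void)
	{
		int ret;

		if ((ret = fails() < 0))              /* buggy: ret = (fails() < 0) */
			printf("buggy ret = %d\n", ret);  /* prints 1 */

		ret = fails();                        /* fixed: assign, then test */
		if (ret < 0)
			printf("fixed ret = %d\n", ret);  /* prints -5 */
		return 0;
	}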
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index 39763b94f67d..1c7308c2c77d 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -57,7 +57,9 @@
57 57
58#define QUADSPI_BUF3CR 0x1c 58#define QUADSPI_BUF3CR 0x1c
59#define QUADSPI_BUF3CR_ALLMST_SHIFT 31 59#define QUADSPI_BUF3CR_ALLMST_SHIFT 31
60#define QUADSPI_BUF3CR_ALLMST (1 << QUADSPI_BUF3CR_ALLMST_SHIFT) 60#define QUADSPI_BUF3CR_ALLMST_MASK (1 << QUADSPI_BUF3CR_ALLMST_SHIFT)
61#define QUADSPI_BUF3CR_ADATSZ_SHIFT 8
62#define QUADSPI_BUF3CR_ADATSZ_MASK (0xFF << QUADSPI_BUF3CR_ADATSZ_SHIFT)
61 63
62#define QUADSPI_BFGENCR 0x20 64#define QUADSPI_BFGENCR 0x20
63#define QUADSPI_BFGENCR_PAR_EN_SHIFT 16 65#define QUADSPI_BFGENCR_PAR_EN_SHIFT 16
@@ -198,18 +200,21 @@ struct fsl_qspi_devtype_data {
198 enum fsl_qspi_devtype devtype; 200 enum fsl_qspi_devtype devtype;
199 int rxfifo; 201 int rxfifo;
200 int txfifo; 202 int txfifo;
203 int ahb_buf_size;
201}; 204};
202 205
203static struct fsl_qspi_devtype_data vybrid_data = { 206static struct fsl_qspi_devtype_data vybrid_data = {
204 .devtype = FSL_QUADSPI_VYBRID, 207 .devtype = FSL_QUADSPI_VYBRID,
205 .rxfifo = 128, 208 .rxfifo = 128,
206 .txfifo = 64 209 .txfifo = 64,
210 .ahb_buf_size = 1024
207}; 211};
208 212
209static struct fsl_qspi_devtype_data imx6sx_data = { 213static struct fsl_qspi_devtype_data imx6sx_data = {
210 .devtype = FSL_QUADSPI_IMX6SX, 214 .devtype = FSL_QUADSPI_IMX6SX,
211 .rxfifo = 128, 215 .rxfifo = 128,
212 .txfifo = 512 216 .txfifo = 512,
217 .ahb_buf_size = 1024
213}; 218};
214 219
215#define FSL_QSPI_MAX_CHIP 4 220#define FSL_QSPI_MAX_CHIP 4
@@ -227,6 +232,7 @@ struct fsl_qspi {
227 u32 nor_num; 232 u32 nor_num;
228 u32 clk_rate; 233 u32 clk_rate;
229 unsigned int chip_base_addr; /* We may support two chips. */ 234 unsigned int chip_base_addr; /* We may support two chips. */
235 bool has_second_chip;
230}; 236};
231 237
232static inline int is_vybrid_qspi(struct fsl_qspi *q) 238static inline int is_vybrid_qspi(struct fsl_qspi *q)
@@ -583,7 +589,12 @@ static void fsl_qspi_init_abh_read(struct fsl_qspi *q)
583 writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR); 589 writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR);
584 writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR); 590 writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR);
585 writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR); 591 writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR);
586 writel(QUADSPI_BUF3CR_ALLMST, base + QUADSPI_BUF3CR); 592 /*
593 * Set ADATSZ with the maximum AHB buffer size to improve the
594 * read performance.
595 */
596 writel(QUADSPI_BUF3CR_ALLMST_MASK | ((q->devtype_data->ahb_buf_size / 8)
597 << QUADSPI_BUF3CR_ADATSZ_SHIFT), base + QUADSPI_BUF3CR);
587 598
588 /* We only use the buffer3 */ 599 /* We only use the buffer3 */
589 writel(0, base + QUADSPI_BUF0IND); 600 writel(0, base + QUADSPI_BUF0IND);
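For the devtype data added above (ahb_buf_size = 1024 on both Vybrid and i.MX6SX), the value programmed into ADATSZ is 1024 / 8 = 128 (0x80), comfortably inside the 0xFF field mask; judging by that divide, the field counts in 8-byte units, capping the expressible prefetch size at 255 * 8 = 2040 bytes.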
@@ -783,7 +794,6 @@ static int fsl_qspi_probe(struct platform_device *pdev)
783 struct spi_nor *nor; 794 struct spi_nor *nor;
784 struct mtd_info *mtd; 795 struct mtd_info *mtd;
785 int ret, i = 0; 796 int ret, i = 0;
786 bool has_second_chip = false;
787 const struct of_device_id *of_id = 797 const struct of_device_id *of_id =
788 of_match_device(fsl_qspi_dt_ids, &pdev->dev); 798 of_match_device(fsl_qspi_dt_ids, &pdev->dev);
789 799
@@ -798,37 +808,30 @@ static int fsl_qspi_probe(struct platform_device *pdev)
798 /* find the resources */ 808 /* find the resources */
799 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI"); 809 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI");
800 q->iobase = devm_ioremap_resource(dev, res); 810 q->iobase = devm_ioremap_resource(dev, res);
801 if (IS_ERR(q->iobase)) { 811 if (IS_ERR(q->iobase))
802 ret = PTR_ERR(q->iobase); 812 return PTR_ERR(q->iobase);
803 goto map_failed;
804 }
805 813
806 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 814 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
807 "QuadSPI-memory"); 815 "QuadSPI-memory");
808 q->ahb_base = devm_ioremap_resource(dev, res); 816 q->ahb_base = devm_ioremap_resource(dev, res);
809 if (IS_ERR(q->ahb_base)) { 817 if (IS_ERR(q->ahb_base))
810 ret = PTR_ERR(q->ahb_base); 818 return PTR_ERR(q->ahb_base);
811 goto map_failed; 819
812 }
813 q->memmap_phy = res->start; 820 q->memmap_phy = res->start;
814 821
815 /* find the clocks */ 822 /* find the clocks */
816 q->clk_en = devm_clk_get(dev, "qspi_en"); 823 q->clk_en = devm_clk_get(dev, "qspi_en");
817 if (IS_ERR(q->clk_en)) { 824 if (IS_ERR(q->clk_en))
818 ret = PTR_ERR(q->clk_en); 825 return PTR_ERR(q->clk_en);
819 goto map_failed;
820 }
821 826
822 q->clk = devm_clk_get(dev, "qspi"); 827 q->clk = devm_clk_get(dev, "qspi");
823 if (IS_ERR(q->clk)) { 828 if (IS_ERR(q->clk))
824 ret = PTR_ERR(q->clk); 829 return PTR_ERR(q->clk);
825 goto map_failed;
826 }
827 830
828 ret = clk_prepare_enable(q->clk_en); 831 ret = clk_prepare_enable(q->clk_en);
829 if (ret) { 832 if (ret) {
830 dev_err(dev, "can not enable the qspi_en clock\n"); 833 dev_err(dev, "can not enable the qspi_en clock\n");
831 goto map_failed; 834 return ret;
832 } 835 }
833 836
834 ret = clk_prepare_enable(q->clk); 837 ret = clk_prepare_enable(q->clk);
@@ -860,14 +863,14 @@ static int fsl_qspi_probe(struct platform_device *pdev)
860 goto irq_failed; 863 goto irq_failed;
861 864
862 if (of_get_property(np, "fsl,qspi-has-second-chip", NULL)) 865 if (of_get_property(np, "fsl,qspi-has-second-chip", NULL))
863 has_second_chip = true; 866 q->has_second_chip = true;
864 867
865 /* iterate the subnodes. */ 868 /* iterate the subnodes. */
866 for_each_available_child_of_node(dev->of_node, np) { 869 for_each_available_child_of_node(dev->of_node, np) {
867 char modalias[40]; 870 char modalias[40];
868 871
869 /* skip the holes */ 872 /* skip the holes */
870 if (!has_second_chip) 873 if (!q->has_second_chip)
871 i *= 2; 874 i *= 2;
872 875
873 nor = &q->nor[i]; 876 nor = &q->nor[i];
@@ -890,24 +893,24 @@ static int fsl_qspi_probe(struct platform_device *pdev)
890 893
891 ret = of_modalias_node(np, modalias, sizeof(modalias)); 894 ret = of_modalias_node(np, modalias, sizeof(modalias));
892 if (ret < 0) 895 if (ret < 0)
893 goto map_failed; 896 goto irq_failed;
894 897
895 ret = of_property_read_u32(np, "spi-max-frequency", 898 ret = of_property_read_u32(np, "spi-max-frequency",
896 &q->clk_rate); 899 &q->clk_rate);
897 if (ret < 0) 900 if (ret < 0)
898 goto map_failed; 901 goto irq_failed;
899 902
900 /* set the chip address for READID */ 903 /* set the chip address for READID */
901 fsl_qspi_set_base_addr(q, nor); 904 fsl_qspi_set_base_addr(q, nor);
902 905
903 ret = spi_nor_scan(nor, modalias, SPI_NOR_QUAD); 906 ret = spi_nor_scan(nor, modalias, SPI_NOR_QUAD);
904 if (ret) 907 if (ret)
905 goto map_failed; 908 goto irq_failed;
906 909
907 ppdata.of_node = np; 910 ppdata.of_node = np;
908 ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0); 911 ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
909 if (ret) 912 if (ret)
910 goto map_failed; 913 goto irq_failed;
911 914
912 /* Set the correct NOR size now. */ 915 /* Set the correct NOR size now. */
913 if (q->nor_size == 0) { 916 if (q->nor_size == 0) {
@@ -939,19 +942,19 @@ static int fsl_qspi_probe(struct platform_device *pdev)
939 942
940 clk_disable(q->clk); 943 clk_disable(q->clk);
941 clk_disable(q->clk_en); 944 clk_disable(q->clk_en);
942 dev_info(dev, "QuadSPI SPI NOR flash driver\n");
943 return 0; 945 return 0;
944 946
945last_init_failed: 947last_init_failed:
946 for (i = 0; i < q->nor_num; i++) 948 for (i = 0; i < q->nor_num; i++) {
949 /* skip the holes */
950 if (!q->has_second_chip)
951 i *= 2;
947 mtd_device_unregister(&q->mtd[i]); 952 mtd_device_unregister(&q->mtd[i]);
948 953 }
949irq_failed: 954irq_failed:
950 clk_disable_unprepare(q->clk); 955 clk_disable_unprepare(q->clk);
951clk_failed: 956clk_failed:
952 clk_disable_unprepare(q->clk_en); 957 clk_disable_unprepare(q->clk_en);
953map_failed:
954 dev_err(dev, "Freescale QuadSPI probe failed\n");
955 return ret; 958 return ret;
956} 959}
957 960
@@ -960,8 +963,12 @@ static int fsl_qspi_remove(struct platform_device *pdev)
960 struct fsl_qspi *q = platform_get_drvdata(pdev); 963 struct fsl_qspi *q = platform_get_drvdata(pdev);
961 int i; 964 int i;
962 965
963 for (i = 0; i < q->nor_num; i++) 966 for (i = 0; i < q->nor_num; i++) {
967 /* skip the holes */
968 if (!q->has_second_chip)
969 i *= 2;
964 mtd_device_unregister(&q->mtd[i]); 970 mtd_device_unregister(&q->mtd[i]);
971 }
965 972
966 /* disable the hardware */ 973 /* disable the hardware */
967 writel(QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); 974 writel(QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
@@ -972,6 +979,22 @@ static int fsl_qspi_remove(struct platform_device *pdev)
972 return 0; 979 return 0;
973} 980}
974 981
982static int fsl_qspi_suspend(struct platform_device *pdev, pm_message_t state)
983{
984 return 0;
985}
986
987static int fsl_qspi_resume(struct platform_device *pdev)
988{
989 struct fsl_qspi *q = platform_get_drvdata(pdev);
990
991 fsl_qspi_nor_setup(q);
992 fsl_qspi_set_map_addr(q);
993 fsl_qspi_nor_setup_last(q);
994
995 return 0;
996}
997
975static struct platform_driver fsl_qspi_driver = { 998static struct platform_driver fsl_qspi_driver = {
976 .driver = { 999 .driver = {
977 .name = "fsl-quadspi", 1000 .name = "fsl-quadspi",
@@ -980,6 +1003,8 @@ static struct platform_driver fsl_qspi_driver = {
980 }, 1003 },
981 .probe = fsl_qspi_probe, 1004 .probe = fsl_qspi_probe,
982 .remove = fsl_qspi_remove, 1005 .remove = fsl_qspi_remove,
1006 .suspend = fsl_qspi_suspend,
1007 .resume = fsl_qspi_resume,
983}; 1008};
984module_platform_driver(fsl_qspi_driver); 1009module_platform_driver(fsl_qspi_driver);
985 1010
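
The error-path simplification in the fsl-quadspi hunks works because every resource involved is device-managed: devm_ioremap_resource() and devm_clk_get() tie the mapping and the clock references to the device's lifetime, so a bare return leaks nothing and the old map_failed label becomes dead weight. The new resume hook leans on the same idea, simply reprogramming the controller. A minimal sketch of the pattern; example_probe and the "core" clock name are illustrative, not the real driver's:

/* Sketch only: early returns are safe because the device core owns
 * the mapping and the clock reference. */
static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	void __iomem *base;
	struct clk *clk;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* nothing to unwind */

	clk = devm_clk_get(dev, "core");
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* still nothing to unwind */

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;
	/* from here on, failures must clk_disable_unprepare() by hand */
	return 0;
}
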
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 0f8ec3c2d015..b6a5a0c269e1 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -538,6 +538,7 @@ static const struct spi_device_id spi_nor_ids[] = {
538 /* GigaDevice */ 538 /* GigaDevice */
539 { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) }, 539 { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
540 { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) }, 540 { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
541 { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, SECT_4K) },
541 542
542 /* Intel/Numonyx -- xxxs33b */ 543 /* Intel/Numonyx -- xxxs33b */
543 { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, 544 { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
@@ -560,14 +561,14 @@ static const struct spi_device_id spi_nor_ids[] = {
560 { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) }, 561 { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
561 562
562 /* Micron */ 563 /* Micron */
563 { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) }, 564 { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
564 { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) }, 565 { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SPI_NOR_QUAD_READ) },
565 { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) }, 566 { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
566 { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) }, 567 { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
567 { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) }, 568 { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
568 { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K) }, 569 { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
569 { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, USE_FSR) }, 570 { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
570 { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, USE_FSR) }, 571 { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
571 572
572 /* PMC */ 573 /* PMC */
573 { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) }, 574 { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
@@ -891,6 +892,45 @@ static int spansion_quad_enable(struct spi_nor *nor)
891 return 0; 892 return 0;
892} 893}
893 894
895static int micron_quad_enable(struct spi_nor *nor)
896{
897 int ret;
898 u8 val;
899
900 ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
901 if (ret < 0) {
902 dev_err(nor->dev, "error %d reading EVCR\n", ret);
903 return ret;
904 }
905
906 write_enable(nor);
907
908 /* set EVCR, enable quad I/O */
909 nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
910 ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1, 0);
911 if (ret < 0) {
912 dev_err(nor->dev, "error while writing EVCR register\n");
913 return ret;
914 }
915
916 ret = spi_nor_wait_till_ready(nor);
917 if (ret)
918 return ret;
919
920 /* read EVCR and check it */
921 ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
922 if (ret < 0) {
923 dev_err(nor->dev, "error %d reading EVCR\n", ret);
924 return ret;
925 }
926 if (val & EVCR_QUAD_EN_MICRON) {
927 dev_err(nor->dev, "Micron EVCR Quad bit not clear\n");
928 return -EINVAL;
929 }
930
931 return 0;
932}
933
894static int set_quad_mode(struct spi_nor *nor, struct flash_info *info) 934static int set_quad_mode(struct spi_nor *nor, struct flash_info *info)
895{ 935{
896 int status; 936 int status;
@@ -903,6 +943,13 @@ static int set_quad_mode(struct spi_nor *nor, struct flash_info *info)
903 return -EINVAL; 943 return -EINVAL;
904 } 944 }
905 return status; 945 return status;
946 case CFI_MFR_ST:
947 status = micron_quad_enable(nor);
948 if (status) {
949 dev_err(nor->dev, "Micron quad-read not enabled\n");
950 return -EINVAL;
951 }
952 return status;
906 default: 953 default:
907 status = spansion_quad_enable(nor); 954 status = spansion_quad_enable(nor);
908 if (status) { 955 if (status) {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 84673ebcf428..df51d6025a90 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -157,7 +157,7 @@ config IPVLAN
157 making it transparent to the connected L2 switch. 157 making it transparent to the connected L2 switch.
158 158
159 Ipvlan devices can be added using the "ip" command from the 159 Ipvlan devices can be added using the "ip" command from the
160 iproute2 package starting with the iproute2-X.Y.ZZ release: 160 iproute2 package starting with the iproute2-3.19 release:
161 161
162 "ip link add link <main-dev> [ NAME ] type ipvlan" 162 "ip link add link <main-dev> [ NAME ] type ipvlan"
163 163
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index 4ce6ca5f3d36..dc6b78e5342f 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -40,7 +40,7 @@ config DEV_APPLETALK
40 40
41config LTPC 41config LTPC
42 tristate "Apple/Farallon LocalTalk PC support" 42 tristate "Apple/Farallon LocalTalk PC support"
43 depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API 43 depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API && VIRT_TO_BUS
44 help 44 help
45 This allows you to use the AppleTalk PC card to connect to LocalTalk 45 This allows you to use the AppleTalk PC card to connect to LocalTalk
46 networks. The card is also known as the Farallon PhoneNet PC card. 46 networks. The card is also known as the Farallon PhoneNet PC card.
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 0f217e99904f..22e2ebf31333 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -107,8 +107,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \
107{ \ 107{ \
108 u32 indir, dir; \ 108 u32 indir, dir; \
109 spin_lock(&priv->indir_lock); \ 109 spin_lock(&priv->indir_lock); \
110 indir = reg_readl(priv, REG_DIR_DATA_READ); \
111 dir = __raw_readl(priv->name + off); \ 110 dir = __raw_readl(priv->name + off); \
111 indir = reg_readl(priv, REG_DIR_DATA_READ); \
112 spin_unlock(&priv->indir_lock); \ 112 spin_unlock(&priv->indir_lock); \
113 return (u64)indir << 32 | dir; \ 113 return (u64)indir << 32 | dir; \
114} \ 114} \
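
The bcm_sf2 reordering matters if, as the swap suggests, reading the directly-mapped low word is what latches the upper half into REG_DIR_DATA_READ; sampling the halves the other way round can splice two different 64-bit values together. A generic sketch of the latched-read order — read64_latched and the latching model itself are assumptions inferred from the fix, not documented hardware behaviour:

#include <stdint.h>

/* Assumed model: the hardware snapshots the high 32 bits into a side
 * register at the moment the low word is read. */
static inline uint64_t read64_latched(volatile uint32_t *lo,
				      volatile uint32_t *latched_hi)
{
	uint32_t l = *lo;		/* read low half first: latches high */
	uint32_t h = *latched_hi;	/* then fetch the latched snapshot   */

	return (uint64_t)h << 32 | l;
}
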
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 7769c05543f1..ec6eac1f8c95 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -484,11 +484,8 @@ static int axnet_open(struct net_device *dev)
484 link->open++; 484 link->open++;
485 485
486 info->link_status = 0x00; 486 info->link_status = 0x00;
487 init_timer(&info->watchdog); 487 setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
488 info->watchdog.function = ei_watchdog; 488 mod_timer(&info->watchdog, jiffies + HZ);
489 info->watchdog.data = (u_long)dev;
490 info->watchdog.expires = jiffies + HZ;
491 add_timer(&info->watchdog);
492 489
493 return ax_open(dev); 490 return ax_open(dev);
494} /* axnet_open */ 491} /* axnet_open */
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 9fb7b9d4fd6c..2777289a26c0 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -918,11 +918,8 @@ static int pcnet_open(struct net_device *dev)
918 918
919 info->phy_id = info->eth_phy; 919 info->phy_id = info->eth_phy;
920 info->link_status = 0x00; 920 info->link_status = 0x00;
921 init_timer(&info->watchdog); 921 setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
922 info->watchdog.function = ei_watchdog; 922 mod_timer(&info->watchdog, jiffies + HZ);
923 info->watchdog.data = (u_long)dev;
924 info->watchdog.expires = jiffies + HZ;
925 add_timer(&info->watchdog);
926 923
927 return ei_open(dev); 924 return ei_open(dev);
928} /* pcnet_open */ 925} /* pcnet_open */
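
Both timer hunks above (axnet_cs and pcnet_cs) apply the same mechanical conversion: setup_timer() folds init_timer() plus the .function/.data assignments into one call, and mod_timer() replaces setting .expires followed by add_timer(). Side by side, using the drivers' own callback:

/* Old, open-coded form: */
init_timer(&info->watchdog);
info->watchdog.function = ei_watchdog;
info->watchdog.data     = (u_long)dev;
info->watchdog.expires  = jiffies + HZ;
add_timer(&info->watchdog);

/* Equivalent two-liner: */
setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
mod_timer(&info->watchdog, jiffies + HZ);	/* first fire in one second */
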
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index a1ee261bff5c..fd9296a5014d 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -376,7 +376,8 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
376 u16 pktlength; 376 u16 pktlength;
377 u16 pktstatus; 377 u16 pktstatus;
378 378
379 while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) { 379 while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) &&
380 (count < limit)) {
380 pktstatus = rxstatus >> 16; 381 pktstatus = rxstatus >> 16;
381 pktlength = rxstatus & 0xffff; 382 pktlength = rxstatus & 0xffff;
382 383
@@ -491,28 +492,27 @@ static int tse_poll(struct napi_struct *napi, int budget)
491 struct altera_tse_private *priv = 492 struct altera_tse_private *priv =
492 container_of(napi, struct altera_tse_private, napi); 493 container_of(napi, struct altera_tse_private, napi);
493 int rxcomplete = 0; 494 int rxcomplete = 0;
494 int txcomplete = 0;
495 unsigned long int flags; 495 unsigned long int flags;
496 496
497 txcomplete = tse_tx_complete(priv); 497 tse_tx_complete(priv);
498 498
499 rxcomplete = tse_rx(priv, budget); 499 rxcomplete = tse_rx(priv, budget);
500 500
501 if (rxcomplete >= budget || txcomplete > 0) 501 if (rxcomplete < budget) {
502 return rxcomplete;
503 502
504 napi_gro_flush(napi, false); 503 napi_gro_flush(napi, false);
505 __napi_complete(napi); 504 __napi_complete(napi);
506 505
507 netdev_dbg(priv->dev, 506 netdev_dbg(priv->dev,
508 "NAPI Complete, did %d packets with budget %d\n", 507 "NAPI Complete, did %d packets with budget %d\n",
509 txcomplete+rxcomplete, budget); 508 rxcomplete, budget);
510 509
511 spin_lock_irqsave(&priv->rxdma_irq_lock, flags); 510 spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
512 priv->dmaops->enable_rxirq(priv); 511 priv->dmaops->enable_rxirq(priv);
513 priv->dmaops->enable_txirq(priv); 512 priv->dmaops->enable_txirq(priv);
514 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); 513 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
515 return rxcomplete + txcomplete; 514 }
515 return rxcomplete;
516} 516}
517 517
518/* DMA TX & RX FIFO interrupt routing 518/* DMA TX & RX FIFO interrupt routing
@@ -521,7 +521,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
521{ 521{
522 struct net_device *dev = dev_id; 522 struct net_device *dev = dev_id;
523 struct altera_tse_private *priv; 523 struct altera_tse_private *priv;
524 unsigned long int flags;
525 524
526 if (unlikely(!dev)) { 525 if (unlikely(!dev)) {
527 pr_err("%s: invalid dev pointer\n", __func__); 526 pr_err("%s: invalid dev pointer\n", __func__);
@@ -529,20 +528,20 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
529 } 528 }
530 priv = netdev_priv(dev); 529 priv = netdev_priv(dev);
531 530
532 /* turn off desc irqs and enable napi rx */ 531 spin_lock(&priv->rxdma_irq_lock);
533 spin_lock_irqsave(&priv->rxdma_irq_lock, flags); 532 /* reset IRQs */
533 priv->dmaops->clear_rxirq(priv);
534 priv->dmaops->clear_txirq(priv);
535 spin_unlock(&priv->rxdma_irq_lock);
534 536
535 if (likely(napi_schedule_prep(&priv->napi))) { 537 if (likely(napi_schedule_prep(&priv->napi))) {
538 spin_lock(&priv->rxdma_irq_lock);
536 priv->dmaops->disable_rxirq(priv); 539 priv->dmaops->disable_rxirq(priv);
537 priv->dmaops->disable_txirq(priv); 540 priv->dmaops->disable_txirq(priv);
541 spin_unlock(&priv->rxdma_irq_lock);
538 __napi_schedule(&priv->napi); 542 __napi_schedule(&priv->napi);
539 } 543 }
540 544
541 /* reset IRQs */
542 priv->dmaops->clear_rxirq(priv);
543 priv->dmaops->clear_txirq(priv);
544
545 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
546 545
547 return IRQ_HANDLED; 546 return IRQ_HANDLED;
548} 547}
@@ -1407,7 +1406,7 @@ static int altera_tse_probe(struct platform_device *pdev)
1407 } 1406 }
1408 1407
1409 if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", 1408 if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
1410 &priv->rx_fifo_depth)) { 1409 &priv->tx_fifo_depth)) {
1411 dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n"); 1410 dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
1412 ret = -ENXIO; 1411 ret = -ENXIO;
1413 goto err_free_netdev; 1412 goto err_free_netdev;
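
The tse_rx() and tse_poll() rework brings altera_tse back to the standard NAPI contract: rx work is bounded by the budget, and a poll routine may complete and re-arm interrupts only when it used less than its budget; consuming the whole budget means staying on the poll list. In outline — example_poll, example_tx_clean, example_rx and example_enable_irqs are generic stand-ins, not the driver's names:

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv =
		container_of(napi, struct example_priv, napi);
	int done;

	example_tx_clean(priv);			/* tx work is not budgeted */
	done = example_rx(priv, budget);	/* never exceeds budget    */

	if (done < budget) {
		napi_complete(napi);		/* off the poll list       */
		example_enable_irqs(priv);	/* re-arm only now         */
	}
	return done;
}
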
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index b93d4404d975..885b02b5be07 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -609,6 +609,68 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
609 } 609 }
610} 610}
611 611
612static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
613{
614 struct xgbe_channel *channel;
615 struct net_device *netdev = pdata->netdev;
616 unsigned int i;
617 int ret;
618
619 ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
620 netdev->name, pdata);
621 if (ret) {
622 netdev_alert(netdev, "error requesting irq %d\n",
623 pdata->dev_irq);
624 return ret;
625 }
626
627 if (!pdata->per_channel_irq)
628 return 0;
629
630 channel = pdata->channel;
631 for (i = 0; i < pdata->channel_count; i++, channel++) {
632 snprintf(channel->dma_irq_name,
633 sizeof(channel->dma_irq_name) - 1,
634 "%s-TxRx-%u", netdev_name(netdev),
635 channel->queue_index);
636
637 ret = devm_request_irq(pdata->dev, channel->dma_irq,
638 xgbe_dma_isr, 0,
639 channel->dma_irq_name, channel);
640 if (ret) {
641 netdev_alert(netdev, "error requesting irq %d\n",
642 channel->dma_irq);
643 goto err_irq;
644 }
645 }
646
647 return 0;
648
649err_irq:
650 /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
651 for (i--, channel--; i < pdata->channel_count; i--, channel--)
652 devm_free_irq(pdata->dev, channel->dma_irq, channel);
653
654 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
655
656 return ret;
657}
658
659static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
660{
661 struct xgbe_channel *channel;
662 unsigned int i;
663
664 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
665
666 if (!pdata->per_channel_irq)
667 return;
668
669 channel = pdata->channel;
670 for (i = 0; i < pdata->channel_count; i++, channel++)
671 devm_free_irq(pdata->dev, channel->dma_irq, channel);
672}
673
612void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) 674void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
613{ 675{
614 struct xgbe_hw_if *hw_if = &pdata->hw_if; 676 struct xgbe_hw_if *hw_if = &pdata->hw_if;
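
The err_irq unwind in xgbe_request_irqs() uses an idiom worth spelling out: with an unsigned counter, decrementing past zero wraps to UINT_MAX, so "i < count" doubles as the termination test while walking back over the entries already set up. A standalone demonstration of just the loop shape:

#include <stdio.h>

int main(void)
{
	unsigned int count = 5;
	unsigned int i = 3;	/* setup of entry 3 just failed */

	/* Undo entries 2, 1, 0; after 0, i-- wraps to UINT_MAX and the
	 * i < count test ends the loop. */
	for (i--; i < count; i--)
		printf("undo entry %u\n", i);

	return 0;
}
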
@@ -810,20 +872,20 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
810 return -EINVAL; 872 return -EINVAL;
811 } 873 }
812 874
813 phy_stop(pdata->phydev);
814
815 spin_lock_irqsave(&pdata->lock, flags); 875 spin_lock_irqsave(&pdata->lock, flags);
816 876
817 if (caller == XGMAC_DRIVER_CONTEXT) 877 if (caller == XGMAC_DRIVER_CONTEXT)
818 netif_device_detach(netdev); 878 netif_device_detach(netdev);
819 879
820 netif_tx_stop_all_queues(netdev); 880 netif_tx_stop_all_queues(netdev);
821 xgbe_napi_disable(pdata, 0);
822 881
823 /* Powerdown Tx/Rx */
824 hw_if->powerdown_tx(pdata); 882 hw_if->powerdown_tx(pdata);
825 hw_if->powerdown_rx(pdata); 883 hw_if->powerdown_rx(pdata);
826 884
885 xgbe_napi_disable(pdata, 0);
886
887 phy_stop(pdata->phydev);
888
827 pdata->power_down = 1; 889 pdata->power_down = 1;
828 890
829 spin_unlock_irqrestore(&pdata->lock, flags); 891 spin_unlock_irqrestore(&pdata->lock, flags);
@@ -854,14 +916,14 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
854 916
855 phy_start(pdata->phydev); 917 phy_start(pdata->phydev);
856 918
857 /* Enable Tx/Rx */ 919 xgbe_napi_enable(pdata, 0);
920
858 hw_if->powerup_tx(pdata); 921 hw_if->powerup_tx(pdata);
859 hw_if->powerup_rx(pdata); 922 hw_if->powerup_rx(pdata);
860 923
861 if (caller == XGMAC_DRIVER_CONTEXT) 924 if (caller == XGMAC_DRIVER_CONTEXT)
862 netif_device_attach(netdev); 925 netif_device_attach(netdev);
863 926
864 xgbe_napi_enable(pdata, 0);
865 netif_tx_start_all_queues(netdev); 927 netif_tx_start_all_queues(netdev);
866 928
867 spin_unlock_irqrestore(&pdata->lock, flags); 929 spin_unlock_irqrestore(&pdata->lock, flags);
@@ -875,6 +937,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
875{ 937{
876 struct xgbe_hw_if *hw_if = &pdata->hw_if; 938 struct xgbe_hw_if *hw_if = &pdata->hw_if;
877 struct net_device *netdev = pdata->netdev; 939 struct net_device *netdev = pdata->netdev;
940 int ret;
878 941
879 DBGPR("-->xgbe_start\n"); 942 DBGPR("-->xgbe_start\n");
880 943
@@ -884,17 +947,31 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
884 947
885 phy_start(pdata->phydev); 948 phy_start(pdata->phydev);
886 949
950 xgbe_napi_enable(pdata, 1);
951
952 ret = xgbe_request_irqs(pdata);
953 if (ret)
954 goto err_napi;
955
887 hw_if->enable_tx(pdata); 956 hw_if->enable_tx(pdata);
888 hw_if->enable_rx(pdata); 957 hw_if->enable_rx(pdata);
889 958
890 xgbe_init_tx_timers(pdata); 959 xgbe_init_tx_timers(pdata);
891 960
892 xgbe_napi_enable(pdata, 1);
893 netif_tx_start_all_queues(netdev); 961 netif_tx_start_all_queues(netdev);
894 962
895 DBGPR("<--xgbe_start\n"); 963 DBGPR("<--xgbe_start\n");
896 964
897 return 0; 965 return 0;
966
967err_napi:
968 xgbe_napi_disable(pdata, 1);
969
970 phy_stop(pdata->phydev);
971
972 hw_if->exit(pdata);
973
974 return ret;
898} 975}
899 976
900static void xgbe_stop(struct xgbe_prv_data *pdata) 977static void xgbe_stop(struct xgbe_prv_data *pdata)
@@ -907,16 +984,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
907 984
908 DBGPR("-->xgbe_stop\n"); 985 DBGPR("-->xgbe_stop\n");
909 986
910 phy_stop(pdata->phydev);
911
912 netif_tx_stop_all_queues(netdev); 987 netif_tx_stop_all_queues(netdev);
913 xgbe_napi_disable(pdata, 1);
914 988
915 xgbe_stop_tx_timers(pdata); 989 xgbe_stop_tx_timers(pdata);
916 990
917 hw_if->disable_tx(pdata); 991 hw_if->disable_tx(pdata);
918 hw_if->disable_rx(pdata); 992 hw_if->disable_rx(pdata);
919 993
994 xgbe_free_irqs(pdata);
995
996 xgbe_napi_disable(pdata, 1);
997
998 phy_stop(pdata->phydev);
999
1000 hw_if->exit(pdata);
1001
920 channel = pdata->channel; 1002 channel = pdata->channel;
921 for (i = 0; i < pdata->channel_count; i++, channel++) { 1003 for (i = 0; i < pdata->channel_count; i++, channel++) {
922 if (!channel->tx_ring) 1004 if (!channel->tx_ring)
@@ -931,10 +1013,6 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
931 1013
932static void xgbe_restart_dev(struct xgbe_prv_data *pdata) 1014static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
933{ 1015{
934 struct xgbe_channel *channel;
935 struct xgbe_hw_if *hw_if = &pdata->hw_if;
936 unsigned int i;
937
938 DBGPR("-->xgbe_restart_dev\n"); 1016 DBGPR("-->xgbe_restart_dev\n");
939 1017
940 /* If not running, "restart" will happen on open */ 1018 /* If not running, "restart" will happen on open */
@@ -942,19 +1020,10 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
942 return; 1020 return;
943 1021
944 xgbe_stop(pdata); 1022 xgbe_stop(pdata);
945 synchronize_irq(pdata->dev_irq);
946 if (pdata->per_channel_irq) {
947 channel = pdata->channel;
948 for (i = 0; i < pdata->channel_count; i++, channel++)
949 synchronize_irq(channel->dma_irq);
950 }
951 1023
952 xgbe_free_tx_data(pdata); 1024 xgbe_free_tx_data(pdata);
953 xgbe_free_rx_data(pdata); 1025 xgbe_free_rx_data(pdata);
954 1026
955 /* Issue software reset to device */
956 hw_if->exit(pdata);
957
958 xgbe_start(pdata); 1027 xgbe_start(pdata);
959 1028
960 DBGPR("<--xgbe_restart_dev\n"); 1029 DBGPR("<--xgbe_restart_dev\n");
@@ -1283,10 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
1283static int xgbe_open(struct net_device *netdev) 1352static int xgbe_open(struct net_device *netdev)
1284{ 1353{
1285 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1354 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1286 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1287 struct xgbe_desc_if *desc_if = &pdata->desc_if; 1355 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1288 struct xgbe_channel *channel = NULL;
1289 unsigned int i = 0;
1290 int ret; 1356 int ret;
1291 1357
1292 DBGPR("-->xgbe_open\n"); 1358 DBGPR("-->xgbe_open\n");
@@ -1329,55 +1395,14 @@ static int xgbe_open(struct net_device *netdev)
1329 INIT_WORK(&pdata->restart_work, xgbe_restart); 1395 INIT_WORK(&pdata->restart_work, xgbe_restart);
1330 INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); 1396 INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
1331 1397
1332 /* Request interrupts */
1333 ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
1334 netdev->name, pdata);
1335 if (ret) {
1336 netdev_alert(netdev, "error requesting irq %d\n",
1337 pdata->dev_irq);
1338 goto err_rings;
1339 }
1340
1341 if (pdata->per_channel_irq) {
1342 channel = pdata->channel;
1343 for (i = 0; i < pdata->channel_count; i++, channel++) {
1344 snprintf(channel->dma_irq_name,
1345 sizeof(channel->dma_irq_name) - 1,
1346 "%s-TxRx-%u", netdev_name(netdev),
1347 channel->queue_index);
1348
1349 ret = devm_request_irq(pdata->dev, channel->dma_irq,
1350 xgbe_dma_isr, 0,
1351 channel->dma_irq_name, channel);
1352 if (ret) {
1353 netdev_alert(netdev,
1354 "error requesting irq %d\n",
1355 channel->dma_irq);
1356 goto err_irq;
1357 }
1358 }
1359 }
1360
1361 ret = xgbe_start(pdata); 1398 ret = xgbe_start(pdata);
1362 if (ret) 1399 if (ret)
1363 goto err_start; 1400 goto err_rings;
1364 1401
1365 DBGPR("<--xgbe_open\n"); 1402 DBGPR("<--xgbe_open\n");
1366 1403
1367 return 0; 1404 return 0;
1368 1405
1369err_start:
1370 hw_if->exit(pdata);
1371
1372err_irq:
1373 if (pdata->per_channel_irq) {
1374 /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
1375 for (i--, channel--; i < pdata->channel_count; i--, channel--)
1376 devm_free_irq(pdata->dev, channel->dma_irq, channel);
1377 }
1378
1379 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
1380
1381err_rings: 1406err_rings:
1382 desc_if->free_ring_resources(pdata); 1407 desc_if->free_ring_resources(pdata);
1383 1408
@@ -1399,30 +1424,16 @@ err_phy_init:
1399static int xgbe_close(struct net_device *netdev) 1424static int xgbe_close(struct net_device *netdev)
1400{ 1425{
1401 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1426 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1402 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1403 struct xgbe_desc_if *desc_if = &pdata->desc_if; 1427 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1404 struct xgbe_channel *channel;
1405 unsigned int i;
1406 1428
1407 DBGPR("-->xgbe_close\n"); 1429 DBGPR("-->xgbe_close\n");
1408 1430
1409 /* Stop the device */ 1431 /* Stop the device */
1410 xgbe_stop(pdata); 1432 xgbe_stop(pdata);
1411 1433
1412 /* Issue software reset to device */
1413 hw_if->exit(pdata);
1414
1415 /* Free the ring descriptors and buffers */ 1434 /* Free the ring descriptors and buffers */
1416 desc_if->free_ring_resources(pdata); 1435 desc_if->free_ring_resources(pdata);
1417 1436
1418 /* Release the interrupts */
1419 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
1420 if (pdata->per_channel_irq) {
1421 channel = pdata->channel;
1422 for (i = 0; i < pdata->channel_count; i++, channel++)
1423 devm_free_irq(pdata->dev, channel->dma_irq, channel);
1424 }
1425
1426 /* Free the channel and ring structures */ 1437 /* Free the channel and ring structures */
1427 xgbe_free_channels(pdata); 1438 xgbe_free_channels(pdata);
1428 1439
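
Taken together, the xgbe hunks move every acquire into xgbe_start() and every release into xgbe_stop(), in mirror-image order, so that open/close, powerdown/powerup and restart share one bring-up/tear-down sequence instead of each rolling their own. A skeleton of that symmetry — napi_on, request_irqs, hw_enable and friends are placeholders; the real code also manages tx timers and queue state:

static int example_start(struct example_pdata *pdata)
{
	int ret;

	napi_on(pdata);			/* 1. softirq context ready */
	ret = request_irqs(pdata);	/* 2. sources may fire now  */
	if (ret)
		goto err_napi;
	hw_enable(pdata);		/* 3. finally start the hw  */
	return 0;

err_napi:
	napi_off(pdata);		/* unwind in reverse order  */
	hw_reset(pdata);
	return ret;
}

static void example_stop(struct example_pdata *pdata)
{
	hw_disable(pdata);		/* exact mirror: 3, 2, 1    */
	free_irqs(pdata);
	napi_off(pdata);
	hw_reset(pdata);
}
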
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 5b308a4a4d0e..783543ad1fcf 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -274,9 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
274 /* RBUF misc statistics */ 274 /* RBUF misc statistics */
275 STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), 275 STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
276 STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), 276 STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
277 STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), 277 STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
278 STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed), 278 STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
279 STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed), 279 STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
280}; 280};
281 281
282#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) 282#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
@@ -345,6 +345,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
345 s = &bcm_sysport_gstrings_stats[i]; 345 s = &bcm_sysport_gstrings_stats[i];
346 switch (s->type) { 346 switch (s->type) {
347 case BCM_SYSPORT_STAT_NETDEV: 347 case BCM_SYSPORT_STAT_NETDEV:
348 case BCM_SYSPORT_STAT_SOFT:
348 continue; 349 continue;
349 case BCM_SYSPORT_STAT_MIB_RX: 350 case BCM_SYSPORT_STAT_MIB_RX:
350 case BCM_SYSPORT_STAT_MIB_TX: 351 case BCM_SYSPORT_STAT_MIB_TX:
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index fc19417d82a5..7e3d87a88c76 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -570,6 +570,7 @@ enum bcm_sysport_stat_type {
570 BCM_SYSPORT_STAT_RUNT, 570 BCM_SYSPORT_STAT_RUNT,
571 BCM_SYSPORT_STAT_RXCHK, 571 BCM_SYSPORT_STAT_RXCHK,
572 BCM_SYSPORT_STAT_RBUF, 572 BCM_SYSPORT_STAT_RBUF,
573 BCM_SYSPORT_STAT_SOFT,
573}; 574};
574 575
575/* Macros to help define ethtool statistics */ 576/* Macros to help define ethtool statistics */
@@ -590,6 +591,7 @@ enum bcm_sysport_stat_type {
590#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX) 591#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
591#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX) 592#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
592#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT) 593#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
594#define STAT_MIB_SOFT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_SOFT)
593 595
594#define STAT_RXCHK(str, m, ofs) { \ 596#define STAT_RXCHK(str, m, ofs) { \
595 .stat_string = str, \ 597 .stat_string = str, \
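
The new SOFT type exists because the ethtool MIB-refresh loop copies hardware counters over the in-memory mib struct; a counter the driver itself increments (alloc_rx_buff_failed and friends) has no register behind it and would be clobbered by that copy, so it is tagged and skipped with the same continue the NETDEV stats already use. A runnable toy model with a stand-in counter read:

#include <stdio.h>

enum stat_type { STAT_HW, STAT_SOFT };

struct stat {
	const char *name;
	enum stat_type type;
	unsigned int val;
};

static unsigned int read_hw_counter(int i) { return 100 + i; } /* stand-in */

int main(void)
{
	struct stat stats[] = {
		{ "rx_pkts",              STAT_HW,   0 },
		{ "alloc_rx_buff_failed", STAT_SOFT, 7 },  /* driver-owned */
	};

	for (int i = 0; i < 2; i++) {
		if (stats[i].type == STAT_SOFT)
			continue;	/* never overwrite from hardware */
		stats[i].val = read_hw_counter(i);
	}

	printf("%s=%u, %s=%u\n", stats[0].name, stats[0].val,
	       stats[1].name, stats[1].val);	/* soft value survives: 7 */
	return 0;
}
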
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 51300532ec26..84feb241d60b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -487,6 +487,7 @@ enum bcmgenet_stat_type {
487 BCMGENET_STAT_MIB_TX, 487 BCMGENET_STAT_MIB_TX,
488 BCMGENET_STAT_RUNT, 488 BCMGENET_STAT_RUNT,
489 BCMGENET_STAT_MISC, 489 BCMGENET_STAT_MISC,
490 BCMGENET_STAT_SOFT,
490}; 491};
491 492
492struct bcmgenet_stats { 493struct bcmgenet_stats {
@@ -515,6 +516,7 @@ struct bcmgenet_stats {
515#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) 516#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
516#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) 517#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
517#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) 518#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
519#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
518 520
519#define STAT_GENET_MISC(str, m, offset) { \ 521#define STAT_GENET_MISC(str, m, offset) { \
520 .stat_string = str, \ 522 .stat_string = str, \
@@ -614,9 +616,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
614 UMAC_RBUF_OVFL_CNT), 616 UMAC_RBUF_OVFL_CNT),
615 STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), 617 STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
616 STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), 618 STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
617 STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), 619 STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
618 STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed), 620 STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
619 STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed), 621 STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
620}; 622};
621 623
622#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) 624#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -668,6 +670,7 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
668 s = &bcmgenet_gstrings_stats[i]; 670 s = &bcmgenet_gstrings_stats[i];
669 switch (s->type) { 671 switch (s->type) {
670 case BCMGENET_STAT_NETDEV: 672 case BCMGENET_STAT_NETDEV:
673 case BCMGENET_STAT_SOFT:
671 continue; 674 continue;
672 case BCMGENET_STAT_MIB_RX: 675 case BCMGENET_STAT_MIB_RX:
673 case BCMGENET_STAT_MIB_TX: 676 case BCMGENET_STAT_MIB_TX:
@@ -971,13 +974,14 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
971} 974}
972 975
973/* Unlocked version of the reclaim routine */ 976/* Unlocked version of the reclaim routine */
974static void __bcmgenet_tx_reclaim(struct net_device *dev, 977static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
975 struct bcmgenet_tx_ring *ring) 978 struct bcmgenet_tx_ring *ring)
976{ 979{
977 struct bcmgenet_priv *priv = netdev_priv(dev); 980 struct bcmgenet_priv *priv = netdev_priv(dev);
978 int last_tx_cn, last_c_index, num_tx_bds; 981 int last_tx_cn, last_c_index, num_tx_bds;
979 struct enet_cb *tx_cb_ptr; 982 struct enet_cb *tx_cb_ptr;
980 struct netdev_queue *txq; 983 struct netdev_queue *txq;
984 unsigned int pkts_compl = 0;
981 unsigned int bds_compl; 985 unsigned int bds_compl;
982 unsigned int c_index; 986 unsigned int c_index;
983 987
@@ -1005,6 +1009,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
1005 tx_cb_ptr = ring->cbs + last_c_index; 1009 tx_cb_ptr = ring->cbs + last_c_index;
1006 bds_compl = 0; 1010 bds_compl = 0;
1007 if (tx_cb_ptr->skb) { 1011 if (tx_cb_ptr->skb) {
1012 pkts_compl++;
1008 bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1; 1013 bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
1009 dev->stats.tx_bytes += tx_cb_ptr->skb->len; 1014 dev->stats.tx_bytes += tx_cb_ptr->skb->len;
1010 dma_unmap_single(&dev->dev, 1015 dma_unmap_single(&dev->dev,
@@ -1028,23 +1033,45 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
1028 last_c_index &= (num_tx_bds - 1); 1033 last_c_index &= (num_tx_bds - 1);
1029 } 1034 }
1030 1035
1031 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) 1036 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
1032 ring->int_disable(priv, ring); 1037 if (netif_tx_queue_stopped(txq))
1033 1038 netif_tx_wake_queue(txq);
1034 if (netif_tx_queue_stopped(txq)) 1039 }
1035 netif_tx_wake_queue(txq);
1036 1040
1037 ring->c_index = c_index; 1041 ring->c_index = c_index;
1042
1043 return pkts_compl;
1038} 1044}
1039 1045
1040static void bcmgenet_tx_reclaim(struct net_device *dev, 1046static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
1041 struct bcmgenet_tx_ring *ring) 1047 struct bcmgenet_tx_ring *ring)
1042{ 1048{
1049 unsigned int released;
1043 unsigned long flags; 1050 unsigned long flags;
1044 1051
1045 spin_lock_irqsave(&ring->lock, flags); 1052 spin_lock_irqsave(&ring->lock, flags);
1046 __bcmgenet_tx_reclaim(dev, ring); 1053 released = __bcmgenet_tx_reclaim(dev, ring);
1047 spin_unlock_irqrestore(&ring->lock, flags); 1054 spin_unlock_irqrestore(&ring->lock, flags);
1055
1056 return released;
1057}
1058
1059static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
1060{
1061 struct bcmgenet_tx_ring *ring =
1062 container_of(napi, struct bcmgenet_tx_ring, napi);
1063 unsigned int work_done = 0;
1064
1065 work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
1066
1067 if (work_done == 0) {
1068 napi_complete(napi);
1069 ring->int_enable(ring->priv, ring);
1070
1071 return 0;
1072 }
1073
1074 return budget;
1048} 1075}
1049 1076
1050static void bcmgenet_tx_reclaim_all(struct net_device *dev) 1077static void bcmgenet_tx_reclaim_all(struct net_device *dev)
@@ -1302,10 +1329,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1302 bcmgenet_tdma_ring_writel(priv, ring->index, 1329 bcmgenet_tdma_ring_writel(priv, ring->index,
1303 ring->prod_index, TDMA_PROD_INDEX); 1330 ring->prod_index, TDMA_PROD_INDEX);
1304 1331
1305 if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) { 1332 if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
1306 netif_tx_stop_queue(txq); 1333 netif_tx_stop_queue(txq);
1307 ring->int_enable(priv, ring);
1308 }
1309 1334
1310out: 1335out:
1311 spin_unlock_irqrestore(&ring->lock, flags); 1336 spin_unlock_irqrestore(&ring->lock, flags);
@@ -1621,6 +1646,7 @@ static int init_umac(struct bcmgenet_priv *priv)
1621 struct device *kdev = &priv->pdev->dev; 1646 struct device *kdev = &priv->pdev->dev;
1622 int ret; 1647 int ret;
1623 u32 reg, cpu_mask_clear; 1648 u32 reg, cpu_mask_clear;
1649 int index;
1624 1650
1625 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); 1651 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
1626 1652
@@ -1647,7 +1673,7 @@ static int init_umac(struct bcmgenet_priv *priv)
1647 1673
1648 bcmgenet_intr_disable(priv); 1674 bcmgenet_intr_disable(priv);
1649 1675
1650 cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE; 1676 cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
1651 1677
1652 dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__); 1678 dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
1653 1679
@@ -1674,6 +1700,10 @@ static int init_umac(struct bcmgenet_priv *priv)
1674 1700
1675 bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR); 1701 bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
1676 1702
1703 for (index = 0; index < priv->hw_params->tx_queues; index++)
1704 bcmgenet_intrl2_1_writel(priv, (1 << index),
1705 INTRL2_CPU_MASK_CLEAR);
1706
1677 /* Enable rx/tx engine.*/ 1707 /* Enable rx/tx engine.*/
1678 dev_dbg(kdev, "done init umac\n"); 1708 dev_dbg(kdev, "done init umac\n");
1679 1709
@@ -1690,6 +1720,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
1690 u32 flow_period_val = 0; 1720 u32 flow_period_val = 0;
1691 1721
1692 spin_lock_init(&ring->lock); 1722 spin_lock_init(&ring->lock);
1723 ring->priv = priv;
1724 netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
1693 ring->index = index; 1725 ring->index = index;
1694 if (index == DESC_INDEX) { 1726 if (index == DESC_INDEX) {
1695 ring->queue = 0; 1727 ring->queue = 0;
@@ -1732,6 +1764,17 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
1732 TDMA_WRITE_PTR); 1764 TDMA_WRITE_PTR);
1733 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, 1765 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
1734 DMA_END_ADDR); 1766 DMA_END_ADDR);
1767
1768 napi_enable(&ring->napi);
1769}
1770
1771static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
1772 unsigned int index)
1773{
1774 struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
1775
1776 napi_disable(&ring->napi);
1777 netif_napi_del(&ring->napi);
1735} 1778}
1736 1779
1737/* Initialize a RDMA ring */ 1780/* Initialize a RDMA ring */
@@ -1896,7 +1939,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
1896 return ret; 1939 return ret;
1897} 1940}
1898 1941
1899static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) 1942static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
1900{ 1943{
1901 int i; 1944 int i;
1902 1945
@@ -1915,6 +1958,18 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
1915 kfree(priv->tx_cbs); 1958 kfree(priv->tx_cbs);
1916} 1959}
1917 1960
1961static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
1962{
1963 int i;
1964
1965 bcmgenet_fini_tx_ring(priv, DESC_INDEX);
1966
1967 for (i = 0; i < priv->hw_params->tx_queues; i++)
1968 bcmgenet_fini_tx_ring(priv, i);
1969
1970 __bcmgenet_fini_dma(priv);
1971}
1972
1918/* init_edma: Initialize DMA control register */ 1973/* init_edma: Initialize DMA control register */
1919static int bcmgenet_init_dma(struct bcmgenet_priv *priv) 1974static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
1920{ 1975{
@@ -1943,7 +1998,7 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
1943 priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), 1998 priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
1944 GFP_KERNEL); 1999 GFP_KERNEL);
1945 if (!priv->tx_cbs) { 2000 if (!priv->tx_cbs) {
1946 bcmgenet_fini_dma(priv); 2001 __bcmgenet_fini_dma(priv);
1947 return -ENOMEM; 2002 return -ENOMEM;
1948 } 2003 }
1949 2004
@@ -1965,9 +2020,6 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget)
1965 struct bcmgenet_priv, napi); 2020 struct bcmgenet_priv, napi);
1966 unsigned int work_done; 2021 unsigned int work_done;
1967 2022
1968 /* tx reclaim */
1969 bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
1970
1971 work_done = bcmgenet_desc_rx(priv, budget); 2023 work_done = bcmgenet_desc_rx(priv, budget);
1972 2024
1973 /* Advancing our consumer index*/ 2025 /* Advancing our consumer index*/
@@ -2012,28 +2064,34 @@ static void bcmgenet_irq_task(struct work_struct *work)
2012static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) 2064static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
2013{ 2065{
2014 struct bcmgenet_priv *priv = dev_id; 2066 struct bcmgenet_priv *priv = dev_id;
2067 struct bcmgenet_tx_ring *ring;
2015 unsigned int index; 2068 unsigned int index;
2016 2069
2017 /* Save irq status for bottom-half processing. */ 2070 /* Save irq status for bottom-half processing. */
2018 priv->irq1_stat = 2071 priv->irq1_stat =
2019 bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & 2072 bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
2020 ~priv->int1_mask; 2073 ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
2021 /* clear interrupts */ 2074 /* clear interrupts */
2022 bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); 2075 bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
2023 2076
2024 netif_dbg(priv, intr, priv->dev, 2077 netif_dbg(priv, intr, priv->dev,
2025 "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); 2078 "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
2079
2026 /* Check the MBDONE interrupts. 2080 /* Check the MBDONE interrupts.
2027 * packet is done, reclaim descriptors 2081 * packet is done, reclaim descriptors
2028 */ 2082 */
2029 if (priv->irq1_stat & 0x0000ffff) { 2083 for (index = 0; index < priv->hw_params->tx_queues; index++) {
2030 index = 0; 2084 if (!(priv->irq1_stat & BIT(index)))
2031 for (index = 0; index < 16; index++) { 2085 continue;
2032 if (priv->irq1_stat & (1 << index)) 2086
2033 bcmgenet_tx_reclaim(priv->dev, 2087 ring = &priv->tx_rings[index];
2034 &priv->tx_rings[index]); 2088
2089 if (likely(napi_schedule_prep(&ring->napi))) {
2090 ring->int_disable(priv, ring);
2091 __napi_schedule(&ring->napi);
2035 } 2092 }
2036 } 2093 }
2094
2037 return IRQ_HANDLED; 2095 return IRQ_HANDLED;
2038} 2096}
2039 2097
@@ -2065,8 +2123,12 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
2065 } 2123 }
2066 if (priv->irq0_stat & 2124 if (priv->irq0_stat &
2067 (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) { 2125 (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
2068 /* Tx reclaim */ 2126 struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
2069 bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); 2127
2128 if (likely(napi_schedule_prep(&ring->napi))) {
2129 ring->int_disable(priv, ring);
2130 __napi_schedule(&ring->napi);
2131 }
2070 } 2132 }
2071 if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | 2133 if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
2072 UMAC_IRQ_PHY_DET_F | 2134 UMAC_IRQ_PHY_DET_F |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 3a8a90f95365..016bd12bf493 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -520,6 +520,7 @@ struct bcmgenet_hw_params {
520 520
521struct bcmgenet_tx_ring { 521struct bcmgenet_tx_ring {
522 spinlock_t lock; /* ring lock */ 522 spinlock_t lock; /* ring lock */
523 struct napi_struct napi; /* NAPI per tx queue */
523 unsigned int index; /* ring index */ 524 unsigned int index; /* ring index */
524 unsigned int queue; /* queue index */ 525 unsigned int queue; /* queue index */
525 struct enet_cb *cbs; /* tx ring buffer control block*/ 526 struct enet_cb *cbs; /* tx ring buffer control block*/
@@ -534,6 +535,7 @@ struct bcmgenet_tx_ring {
534 struct bcmgenet_tx_ring *); 535 struct bcmgenet_tx_ring *);
535 void (*int_disable)(struct bcmgenet_priv *priv, 536 void (*int_disable)(struct bcmgenet_priv *priv,
536 struct bcmgenet_tx_ring *); 537 struct bcmgenet_tx_ring *);
538 struct bcmgenet_priv *priv;
537}; 539};
538 540
539/* device context */ 541/* device context */
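
The two halves the GENET patch introduces follow the usual mask-and-schedule handshake, now per Tx ring: the ISR masks that ring's interrupt and hands the ring's own NAPI context to the softirq, and the poll routine reclaims, unmasking only once a pass completes no work. Sketched with generic helpers — ring_irq_mask, ring_irq_unmask and ring_reclaim stand in for the driver's int_disable/int_enable hooks and bcmgenet_tx_reclaim():

static irqreturn_t ring_isr(struct tx_ring *ring)
{
	if (likely(napi_schedule_prep(&ring->napi))) {
		ring_irq_mask(ring);		/* quiet until poll is done */
		__napi_schedule(&ring->napi);
	}
	return IRQ_HANDLED;
}

static int ring_poll(struct napi_struct *napi, int budget)
{
	struct tx_ring *ring = container_of(napi, struct tx_ring, napi);

	if (ring_reclaim(ring) == 0) {		/* nothing completed     */
		napi_complete(napi);
		ring_irq_unmask(ring);		/* re-arm the interrupt  */
		return 0;
	}
	return budget;				/* more work: poll again */
}
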
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 9062a8434246..c308429dd9c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -35,10 +35,10 @@ static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
35} 35}
36 36
37static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr, 37static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
38 int addr_len) 38 u8 v6)
39{ 39{
40 return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) : 40 return v6 ? ipv6_clip_hash(ctbl, addr) :
41 ipv6_clip_hash(ctbl, addr); 41 ipv4_clip_hash(ctbl, addr);
42} 42}
43 43
44static int clip6_get_mbox(const struct net_device *dev, 44static int clip6_get_mbox(const struct net_device *dev,
@@ -78,23 +78,22 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
78 struct clip_entry *ce, *cte; 78 struct clip_entry *ce, *cte;
79 u32 *addr = (u32 *)lip; 79 u32 *addr = (u32 *)lip;
80 int hash; 80 int hash;
81 int addr_len; 81 int ret = -1;
82 int ret = 0;
83 82
84 if (!ctbl) 83 if (!ctbl)
85 return 0; 84 return 0;
86 85
87 if (v6) 86 hash = clip_addr_hash(ctbl, addr, v6);
88 addr_len = 16;
89 else
90 addr_len = 4;
91
92 hash = clip_addr_hash(ctbl, addr, addr_len);
93 87
94 read_lock_bh(&ctbl->lock); 88 read_lock_bh(&ctbl->lock);
95 list_for_each_entry(cte, &ctbl->hash_list[hash], list) { 89 list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
96 if (addr_len == cte->addr_len && 90 if (cte->addr6.sin6_family == AF_INET6 && v6)
97 memcmp(lip, cte->addr, cte->addr_len) == 0) { 91 ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
92 sizeof(struct in6_addr));
93 else if (cte->addr.sin_family == AF_INET && !v6)
94 ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
95 sizeof(struct in_addr));
96 if (!ret) {
98 ce = cte; 97 ce = cte;
99 read_unlock_bh(&ctbl->lock); 98 read_unlock_bh(&ctbl->lock);
100 goto found; 99 goto found;
@@ -111,15 +110,20 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
111 spin_lock_init(&ce->lock); 110 spin_lock_init(&ce->lock);
112 atomic_set(&ce->refcnt, 0); 111 atomic_set(&ce->refcnt, 0);
113 atomic_dec(&ctbl->nfree); 112 atomic_dec(&ctbl->nfree);
114 ce->addr_len = addr_len;
115 memcpy(ce->addr, lip, addr_len);
116 list_add_tail(&ce->list, &ctbl->hash_list[hash]); 113 list_add_tail(&ce->list, &ctbl->hash_list[hash]);
117 if (v6) { 114 if (v6) {
115 ce->addr6.sin6_family = AF_INET6;
116 memcpy(ce->addr6.sin6_addr.s6_addr,
117 lip, sizeof(struct in6_addr));
118 ret = clip6_get_mbox(dev, (const struct in6_addr *)lip); 118 ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
119 if (ret) { 119 if (ret) {
120 write_unlock_bh(&ctbl->lock); 120 write_unlock_bh(&ctbl->lock);
121 return ret; 121 return ret;
122 } 122 }
123 } else {
124 ce->addr.sin_family = AF_INET;
125 memcpy((char *)(&ce->addr.sin_addr), lip,
126 sizeof(struct in_addr));
123 } 127 }
124 } else { 128 } else {
125 write_unlock_bh(&ctbl->lock); 129 write_unlock_bh(&ctbl->lock);
@@ -140,19 +144,19 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
140 struct clip_entry *ce, *cte; 144 struct clip_entry *ce, *cte;
141 u32 *addr = (u32 *)lip; 145 u32 *addr = (u32 *)lip;
142 int hash; 146 int hash;
143 int addr_len; 147 int ret = -1;
144
145 if (v6)
146 addr_len = 16;
147 else
148 addr_len = 4;
149 148
150 hash = clip_addr_hash(ctbl, addr, addr_len); 149 hash = clip_addr_hash(ctbl, addr, v6);
151 150
152 read_lock_bh(&ctbl->lock); 151 read_lock_bh(&ctbl->lock);
153 list_for_each_entry(cte, &ctbl->hash_list[hash], list) { 152 list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
154 if (addr_len == cte->addr_len && 153 if (cte->addr6.sin6_family == AF_INET6 && v6)
155 memcmp(lip, cte->addr, cte->addr_len) == 0) { 154 ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
155 sizeof(struct in6_addr));
156 else if (cte->addr.sin_family == AF_INET && !v6)
157 ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
158 sizeof(struct in_addr));
159 if (!ret) {
156 ce = cte; 160 ce = cte;
157 read_unlock_bh(&ctbl->lock); 161 read_unlock_bh(&ctbl->lock);
158 goto found; 162 goto found;
@@ -249,10 +253,7 @@ int clip_tbl_show(struct seq_file *seq, void *v)
249 for (i = 0 ; i < ctbl->clipt_size; ++i) { 253 for (i = 0 ; i < ctbl->clipt_size; ++i) {
250 list_for_each_entry(ce, &ctbl->hash_list[i], list) { 254 list_for_each_entry(ce, &ctbl->hash_list[i], list) {
251 ip[0] = '\0'; 255 ip[0] = '\0';
252 if (ce->addr_len == 16) 256 sprintf(ip, "%pISc", &ce->addr);
253 sprintf(ip, "%pI6c", ce->addr);
254 else
255 sprintf(ip, "%pI4c", ce->addr);
256 seq_printf(seq, "%-25s %u\n", ip, 257 seq_printf(seq, "%-25s %u\n", ip,
257 atomic_read(&ce->refcnt)); 258 atomic_read(&ce->refcnt));
258 } 259 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
index 2eaba0161cf8..35eb43c6bcbb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -14,8 +14,10 @@ struct clip_entry {
14 spinlock_t lock; /* Hold while modifying clip reference */ 14 spinlock_t lock; /* Hold while modifying clip reference */
15 atomic_t refcnt; 15 atomic_t refcnt;
16 struct list_head list; 16 struct list_head list;
17 u32 addr[4]; 17 union {
18 int addr_len; 18 struct sockaddr_in addr;
19 struct sockaddr_in6 addr6;
20 };
19}; 21};
20 22
21struct clip_tbl { 23struct clip_tbl {
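
The union replaces the raw u32 addr[4] plus a length field with typed sockaddr storage. Since sin_family and sin6_family occupy the same leading bytes in both structures, either view of the union says which member is live, which is exactly what the rewritten lookups match on and what lets clip_tbl_show() print through a single %pISc. A compact, runnable demonstration of the overlap:

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>

union clip_addr {
	struct sockaddr_in  v4;
	struct sockaddr_in6 v6;
};

int main(void)
{
	union clip_addr a;

	memset(&a, 0, sizeof(a));
	a.v6.sin6_family = AF_INET6;

	/* The family field overlaps, so the v4 view sees it too: */
	printf("stored family is AF_INET6: %s\n",
	       a.v4.sin_family == AF_INET6 ? "yes" : "no");
	return 0;
}
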
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index d6cda17efe6e..97842d03675b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1103,7 +1103,7 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
1103#define T4_MEMORY_WRITE 0 1103#define T4_MEMORY_WRITE 0
1104#define T4_MEMORY_READ 1 1104#define T4_MEMORY_READ 1
1105int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len, 1105int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
1106 __be32 *buf, int dir); 1106 void *buf, int dir);
1107static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr, 1107static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
1108 u32 len, __be32 *buf) 1108 u32 len, __be32 *buf)
1109{ 1109{
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 4d643b65265e..853c38997c82 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -449,7 +449,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
449 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC 449 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
450 * @addr: address within indicated memory type 450 * @addr: address within indicated memory type
451 * @len: amount of memory to transfer 451 * @len: amount of memory to transfer
452 * @buf: host memory buffer 452 * @hbuf: host memory buffer
453 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0) 453 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
454 * 454 *
455 * Reads/writes an [almost] arbitrary memory region in the firmware: the 455 * Reads/writes an [almost] arbitrary memory region in the firmware: the
@@ -460,15 +460,17 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
460 * caller's responsibility to perform appropriate byte order conversions. 460 * caller's responsibility to perform appropriate byte order conversions.
461 */ 461 */
462int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, 462int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
463 u32 len, __be32 *buf, int dir) 463 u32 len, void *hbuf, int dir)
464{ 464{
465 u32 pos, offset, resid, memoffset; 465 u32 pos, offset, resid, memoffset;
466 u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base; 466 u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
467 u32 *buf;
467 468
468 /* Argument sanity checks ... 469 /* Argument sanity checks ...
469 */ 470 */
470 if (addr & 0x3) 471 if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
471 return -EINVAL; 472 return -EINVAL;
473 buf = (u32 *)hbuf;
472 474
473 /* It's convenient to be able to handle lengths which aren't a 475 /* It's convenient to be able to handle lengths which aren't a
474 * multiple of 32-bits because we often end up transferring files to 476 * multiple of 32-bits because we often end up transferring files to
@@ -532,14 +534,45 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
532 534
533 /* Transfer data to/from the adapter as long as there's an integral 535 /* Transfer data to/from the adapter as long as there's an integral
534 * number of 32-bit transfers to complete. 536 * number of 32-bit transfers to complete.
537 *
538 * A note on Endianness issues:
539 *
540 * The "register" reads and writes below from/to the PCI-E Memory
541 * Window invoke the standard adapter Big-Endian to PCI-E Link
542 * Little-Endian "swizzel." As a result, if we have the following
543 * data in adapter memory:
544 *
545 * Memory: ... | b0 | b1 | b2 | b3 | ...
546 * Address: i+0 i+1 i+2 i+3
547 *
548 * Then a read of the adapter memory via the PCI-E Memory Window
549 * will yield:
550 *
551 * x = readl(i)
552 * 31 0
553 * [ b3 | b2 | b1 | b0 ]
554 *
555 * If this value is stored into local memory on a Little-Endian system
556 * it will show up correctly in local memory as:
557 *
558 * ( ..., b0, b1, b2, b3, ... )
559 *
560 * But on a Big-Endian system, the store will show up in memory
561 * incorrectly swizzled as:
562 *
563 * ( ..., b3, b2, b1, b0, ... )
564 *
565 * So we need to account for this in the reads and writes to the
566 * PCI-E Memory Window below by undoing the register read/write
567 * swizzels.
535 */ 568 */
536 while (len > 0) { 569 while (len > 0) {
537 if (dir == T4_MEMORY_READ) 570 if (dir == T4_MEMORY_READ)
538 *buf++ = (__force __be32) t4_read_reg(adap, 571 *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
539 mem_base + offset); 572 mem_base + offset));
540 else 573 else
541 t4_write_reg(adap, mem_base + offset, 574 t4_write_reg(adap, mem_base + offset,
542 (__force u32) *buf++); 575 (__force u32)cpu_to_le32(*buf++));
543 offset += sizeof(__be32); 576 offset += sizeof(__be32);
544 len -= sizeof(__be32); 577 len -= sizeof(__be32);
545 578
@@ -568,15 +601,16 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
568 */ 601 */
569 if (resid) { 602 if (resid) {
570 union { 603 union {
571 __be32 word; 604 u32 word;
572 char byte[4]; 605 char byte[4];
573 } last; 606 } last;
574 unsigned char *bp; 607 unsigned char *bp;
575 int i; 608 int i;
576 609
577 if (dir == T4_MEMORY_READ) { 610 if (dir == T4_MEMORY_READ) {
578 last.word = (__force __be32) t4_read_reg(adap, 611 last.word = le32_to_cpu(
579 mem_base + offset); 612 (__force __le32)t4_read_reg(adap,
613 mem_base + offset));
580 for (bp = (unsigned char *)buf, i = resid; i < 4; i++) 614 for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
581 bp[i] = last.byte[i]; 615 bp[i] = last.byte[i];
582 } else { 616 } else {
@@ -584,7 +618,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
584 for (i = resid; i < 4; i++) 618 for (i = resid; i < 4; i++)
585 last.byte[i] = 0; 619 last.byte[i] = 0;
586 t4_write_reg(adap, mem_base + offset, 620 t4_write_reg(adap, mem_base + offset,
587 (__force u32) last.word); 621 (__force u32)cpu_to_le32(last.word));
588 } 622 }
589 } 623 }
590 624
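
The long comment in the hunk above is the whole story; the code change just applies it. Storing a readl()-style value so the byte stream survives on any host means emitting bytes in explicit little-endian order, which is cpu_to_le32()'s effect. This standalone snippet models that with shifts; window_readl is a stand-in for the PCI-E window read:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Model of a PCI-E window read: bytes decoded little-endian into a
 * CPU integer, which is what readl() hands back. */
static uint32_t window_readl(const uint8_t *m)
{
	return (uint32_t)m[0] | (uint32_t)m[1] << 8 |
	       (uint32_t)m[2] << 16 | (uint32_t)m[3] << 24;
}

int main(void)
{
	const uint8_t adapter[4] = { 0xb0, 0xb1, 0xb2, 0xb3 };
	uint8_t host[4];
	uint32_t x = window_readl(adapter);

	/* cpu_to_le32() effect: write bytes back out low byte first,
	 * regardless of host endianness. */
	for (int i = 0; i < 4; i++)
		host[i] = (uint8_t)(x >> (8 * i));

	printf("byte stream preserved: %s\n",
	       memcmp(adapter, host, sizeof(host)) == 0 ? "yes" : "no");
	return 0;
}
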
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index a368c0a96ec7..204bd182473b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -272,8 +272,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
272 } 272 }
273 273
274 if (ENIC_TEST_INTR(pba, notify_intr)) { 274 if (ENIC_TEST_INTR(pba, notify_intr)) {
275 vnic_intr_return_all_credits(&enic->intr[notify_intr]);
276 enic_notify_check(enic); 275 enic_notify_check(enic);
276 vnic_intr_return_all_credits(&enic->intr[notify_intr]);
277 } 277 }
278 278
279 if (ENIC_TEST_INTR(pba, err_intr)) { 279 if (ENIC_TEST_INTR(pba, err_intr)) {
@@ -346,8 +346,8 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
346 struct enic *enic = data; 346 struct enic *enic = data;
347 unsigned int intr = enic_msix_notify_intr(enic); 347 unsigned int intr = enic_msix_notify_intr(enic);
348 348
349 vnic_intr_return_all_credits(&enic->intr[intr]);
350 enic_notify_check(enic); 349 enic_notify_check(enic);
350 vnic_intr_return_all_credits(&enic->intr[intr]);
351 351
352 return IRQ_HANDLED; 352 return IRQ_HANDLED;
353} 353}
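
Both enic hunks are the same two-line swap, and the point is ordering: consume the notify event first, return the interrupt credits (the acknowledge) second. Acknowledge first and an event landing in the window is absorbed by the ack; consume first and a late event still raises a fresh interrupt. That race rationale is an inference from the swap itself, and consume_notify/ack_interrupt below are hypothetical stand-ins for enic_notify_check() and vnic_intr_return_all_credits():

static irqreturn_t example_notify_isr(struct example_dev *dev)
{
	consume_notify(dev);	/* read the event data first ...         */
	ack_interrupt(dev);	/* ... then acknowledge; an event racing
				 * the ack re-raises the interrupt       */
	return IRQ_HANDLED;
}
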
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 43df78882e48..178e54028d10 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -3162,8 +3162,8 @@ static void adjust_link(struct net_device *dev)
3162 struct phy_device *phydev = priv->phydev; 3162 struct phy_device *phydev = priv->phydev;
3163 3163
3164 if (unlikely(phydev->link != priv->oldlink || 3164 if (unlikely(phydev->link != priv->oldlink ||
3165 phydev->duplex != priv->oldduplex || 3165 (phydev->link && (phydev->duplex != priv->oldduplex ||
3166 phydev->speed != priv->oldspeed)) 3166 phydev->speed != priv->oldspeed))))
3167 gfar_update_link_state(priv); 3167 gfar_update_link_state(priv);
3168} 3168}
3169 3169
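
The gianfar hunk tightens the predicate so that duplex/speed comparisons only count while the link is up, because the cached old values go stale across a link-down period. A small standalone sketch of the corrected logic (names and values are illustrative):

#include <stdio.h>

static int link_state_changed(int link, int oldlink,
			      int duplex, int oldduplex,
			      int speed, int oldspeed)
{
	return link != oldlink ||
	       (link && (duplex != oldduplex || speed != oldspeed));
}

int main(void)
{
	/* link stayed down: a stale duplex mismatch no longer triggers */
	printf("%d\n", link_state_changed(0, 0, 1, 0, 100, 1000)); /* 0 */
	/* link up with a real speed change still triggers */
	printf("%d\n", link_state_changed(1, 1, 1, 1, 100, 1000)); /* 1 */
	return 0;
}
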
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index e8a1adb7a962..c05e50759621 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3262,6 +3262,139 @@ static void ehea_remove_device_sysfs(struct platform_device *dev)
3262 device_remove_file(&dev->dev, &dev_attr_remove_port); 3262 device_remove_file(&dev->dev, &dev_attr_remove_port);
3263} 3263}
3264 3264
3265static int ehea_reboot_notifier(struct notifier_block *nb,
3266 unsigned long action, void *unused)
3267{
3268 if (action == SYS_RESTART) {
3269 pr_info("Reboot: freeing all eHEA resources\n");
3270 ibmebus_unregister_driver(&ehea_driver);
3271 }
3272 return NOTIFY_DONE;
3273}
3274
3275static struct notifier_block ehea_reboot_nb = {
3276 .notifier_call = ehea_reboot_notifier,
3277};
3278
3279static int ehea_mem_notifier(struct notifier_block *nb,
3280 unsigned long action, void *data)
3281{
3282 int ret = NOTIFY_BAD;
3283 struct memory_notify *arg = data;
3284
3285 mutex_lock(&dlpar_mem_lock);
3286
3287 switch (action) {
3288 case MEM_CANCEL_OFFLINE:
3289 pr_info("memory offlining canceled");
3290 /* Fall through: re-add canceled memory block */
3291
3292 case MEM_ONLINE:
3293 pr_info("memory is going online");
3294 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3295 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3296 goto out_unlock;
3297 ehea_rereg_mrs();
3298 break;
3299
3300 case MEM_GOING_OFFLINE:
3301 pr_info("memory is going offline");
3302 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3303 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3304 goto out_unlock;
3305 ehea_rereg_mrs();
3306 break;
3307
3308 default:
3309 break;
3310 }
3311
3312 ehea_update_firmware_handles();
3313 ret = NOTIFY_OK;
3314
3315out_unlock:
3316 mutex_unlock(&dlpar_mem_lock);
3317 return ret;
3318}
3319
3320static struct notifier_block ehea_mem_nb = {
3321 .notifier_call = ehea_mem_notifier,
3322};
3323
3324static void ehea_crash_handler(void)
3325{
3326 int i;
3327
3328 if (ehea_fw_handles.arr)
3329 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3330 ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3331 ehea_fw_handles.arr[i].fwh,
3332 FORCE_FREE);
3333
3334 if (ehea_bcmc_regs.arr)
3335 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3336 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3337 ehea_bcmc_regs.arr[i].port_id,
3338 ehea_bcmc_regs.arr[i].reg_type,
3339 ehea_bcmc_regs.arr[i].macaddr,
3340 0, H_DEREG_BCMC);
3341}
3342
3343static atomic_t ehea_memory_hooks_registered;
3344
3345/* Register memory hooks on probe of first adapter */
3346static int ehea_register_memory_hooks(void)
3347{
3348 int ret = 0;
3349
3350 if (atomic_inc_and_test(&ehea_memory_hooks_registered))
3351 return 0;
3352
3353 ret = ehea_create_busmap();
3354 if (ret) {
3355 pr_info("ehea_create_busmap failed\n");
3356 goto out;
3357 }
3358
3359 ret = register_reboot_notifier(&ehea_reboot_nb);
3360 if (ret) {
3361 pr_info("register_reboot_notifier failed\n");
3362 goto out;
3363 }
3364
3365 ret = register_memory_notifier(&ehea_mem_nb);
3366 if (ret) {
3367 pr_info("register_memory_notifier failed\n");
3368 goto out2;
3369 }
3370
3371 ret = crash_shutdown_register(ehea_crash_handler);
3372 if (ret) {
3373 pr_info("crash_shutdown_register failed\n");
3374 goto out3;
3375 }
3376
3377 return 0;
3378
3379out3:
3380 unregister_memory_notifier(&ehea_mem_nb);
3381out2:
3382 unregister_reboot_notifier(&ehea_reboot_nb);
3383out:
3384 return ret;
3385}
3386
3387static void ehea_unregister_memory_hooks(void)
3388{
3389 if (atomic_read(&ehea_memory_hooks_registered))
3390 return;
3391
3392 unregister_reboot_notifier(&ehea_reboot_nb);
3393 if (crash_shutdown_unregister(ehea_crash_handler))
3394 pr_info("failed unregistering crash handler\n");
3395 unregister_memory_notifier(&ehea_mem_nb);
3396}
3397
3265static int ehea_probe_adapter(struct platform_device *dev) 3398static int ehea_probe_adapter(struct platform_device *dev)
3266{ 3399{
3267 struct ehea_adapter *adapter; 3400 struct ehea_adapter *adapter;
@@ -3269,6 +3402,10 @@ static int ehea_probe_adapter(struct platform_device *dev)
3269 int ret; 3402 int ret;
3270 int i; 3403 int i;
3271 3404
3405 ret = ehea_register_memory_hooks();
3406 if (ret)
3407 return ret;
3408
3272 if (!dev || !dev->dev.of_node) { 3409 if (!dev || !dev->dev.of_node) {
3273 pr_err("Invalid ibmebus device probed\n"); 3410 pr_err("Invalid ibmebus device probed\n");
3274 return -EINVAL; 3411 return -EINVAL;
@@ -3392,81 +3529,6 @@ static int ehea_remove(struct platform_device *dev)
3392 return 0; 3529 return 0;
3393} 3530}
3394 3531
3395static void ehea_crash_handler(void)
3396{
3397 int i;
3398
3399 if (ehea_fw_handles.arr)
3400 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3401 ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3402 ehea_fw_handles.arr[i].fwh,
3403 FORCE_FREE);
3404
3405 if (ehea_bcmc_regs.arr)
3406 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3407 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3408 ehea_bcmc_regs.arr[i].port_id,
3409 ehea_bcmc_regs.arr[i].reg_type,
3410 ehea_bcmc_regs.arr[i].macaddr,
3411 0, H_DEREG_BCMC);
3412}
3413
3414static int ehea_mem_notifier(struct notifier_block *nb,
3415 unsigned long action, void *data)
3416{
3417 int ret = NOTIFY_BAD;
3418 struct memory_notify *arg = data;
3419
3420 mutex_lock(&dlpar_mem_lock);
3421
3422 switch (action) {
3423 case MEM_CANCEL_OFFLINE:
3424 pr_info("memory offlining canceled");
3425 /* Readd canceled memory block */
3426 case MEM_ONLINE:
3427 pr_info("memory is going online");
3428 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3429 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3430 goto out_unlock;
3431 ehea_rereg_mrs();
3432 break;
3433 case MEM_GOING_OFFLINE:
3434 pr_info("memory is going offline");
3435 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3436 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3437 goto out_unlock;
3438 ehea_rereg_mrs();
3439 break;
3440 default:
3441 break;
3442 }
3443
3444 ehea_update_firmware_handles();
3445 ret = NOTIFY_OK;
3446
3447out_unlock:
3448 mutex_unlock(&dlpar_mem_lock);
3449 return ret;
3450}
3451
3452static struct notifier_block ehea_mem_nb = {
3453 .notifier_call = ehea_mem_notifier,
3454};
3455
3456static int ehea_reboot_notifier(struct notifier_block *nb,
3457 unsigned long action, void *unused)
3458{
3459 if (action == SYS_RESTART) {
3460 pr_info("Reboot: freeing all eHEA resources\n");
3461 ibmebus_unregister_driver(&ehea_driver);
3462 }
3463 return NOTIFY_DONE;
3464}
3465
3466static struct notifier_block ehea_reboot_nb = {
3467 .notifier_call = ehea_reboot_notifier,
3468};
3469
3470static int check_module_parm(void) 3532static int check_module_parm(void)
3471{ 3533{
3472 int ret = 0; 3534 int ret = 0;
@@ -3520,26 +3582,10 @@ static int __init ehea_module_init(void)
3520 if (ret) 3582 if (ret)
3521 goto out; 3583 goto out;
3522 3584
3523 ret = ehea_create_busmap();
3524 if (ret)
3525 goto out;
3526
3527 ret = register_reboot_notifier(&ehea_reboot_nb);
3528 if (ret)
3529 pr_info("failed registering reboot notifier\n");
3530
3531 ret = register_memory_notifier(&ehea_mem_nb);
3532 if (ret)
3533 pr_info("failed registering memory remove notifier\n");
3534
3535 ret = crash_shutdown_register(ehea_crash_handler);
3536 if (ret)
3537 pr_info("failed registering crash handler\n");
3538
3539 ret = ibmebus_register_driver(&ehea_driver); 3585 ret = ibmebus_register_driver(&ehea_driver);
3540 if (ret) { 3586 if (ret) {
3541 pr_err("failed registering eHEA device driver on ebus\n"); 3587 pr_err("failed registering eHEA device driver on ebus\n");
3542 goto out2; 3588 goto out;
3543 } 3589 }
3544 3590
3545 ret = driver_create_file(&ehea_driver.driver, 3591 ret = driver_create_file(&ehea_driver.driver,
@@ -3547,32 +3593,22 @@ static int __init ehea_module_init(void)
3547 if (ret) { 3593 if (ret) {
3548 pr_err("failed to register capabilities attribute, ret=%d\n", 3594 pr_err("failed to register capabilities attribute, ret=%d\n",
3549 ret); 3595 ret);
3550 goto out3; 3596 goto out2;
3551 } 3597 }
3552 3598
3553 return ret; 3599 return ret;
3554 3600
3555out3:
3556 ibmebus_unregister_driver(&ehea_driver);
3557out2: 3601out2:
3558 unregister_memory_notifier(&ehea_mem_nb); 3602 ibmebus_unregister_driver(&ehea_driver);
3559 unregister_reboot_notifier(&ehea_reboot_nb);
3560 crash_shutdown_unregister(ehea_crash_handler);
3561out: 3603out:
3562 return ret; 3604 return ret;
3563} 3605}
3564 3606
3565static void __exit ehea_module_exit(void) 3607static void __exit ehea_module_exit(void)
3566{ 3608{
3567 int ret;
3568
3569 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); 3609 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3570 ibmebus_unregister_driver(&ehea_driver); 3610 ibmebus_unregister_driver(&ehea_driver);
3571 unregister_reboot_notifier(&ehea_reboot_nb); 3611 ehea_unregister_memory_hooks();
3572 ret = crash_shutdown_unregister(ehea_crash_handler);
3573 if (ret)
3574 pr_info("failed unregistering crash handler\n");
3575 unregister_memory_notifier(&ehea_mem_nb);
3576 kfree(ehea_fw_handles.arr); 3612 kfree(ehea_fw_handles.arr);
3577 kfree(ehea_bcmc_regs.arr); 3613 kfree(ehea_bcmc_regs.arr);
3578 ehea_destroy_busmap(); 3614 ehea_destroy_busmap();
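
The ehea rework moves the reboot/memory/crash registrations out of module init and behind a guard so that only the first probed adapter performs them. A userspace sketch of the register-once idea using C11 atomics — the driver uses kernel atomic_t, and the names here are illustrative:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int hooks_registered;

static int register_hooks_once(void)
{
	/* fetch_add returns the previous value: 0 only for the first caller */
	if (atomic_fetch_add(&hooks_registered, 1) != 0)
		return 0;               /* already done by an earlier probe */

	puts("registering notifiers");  /* reboot/memory/crash hooks        */
	return 0;
}

int main(void)
{
	register_hooks_once();
	register_hooks_once();          /* second probe: no-op */
	return 0;
}
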
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 21978cc019e7..072426a72745 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1327,6 +1327,28 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1327 return ret; 1327 return ret;
1328} 1328}
1329 1329
1330static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
1331{
1332 struct ibmveth_adapter *adapter = netdev_priv(dev);
1333 struct sockaddr *addr = p;
1334 u64 mac_address;
1335 int rc;
1336
1337 if (!is_valid_ether_addr(addr->sa_data))
1338 return -EADDRNOTAVAIL;
1339
1340 mac_address = ibmveth_encode_mac_addr(addr->sa_data);
1341 rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
1342 if (rc) {
1343 netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
1344 return rc;
1345 }
1346
1347 ether_addr_copy(dev->dev_addr, addr->sa_data);
1348
1349 return 0;
1350}
1351
1330static const struct net_device_ops ibmveth_netdev_ops = { 1352static const struct net_device_ops ibmveth_netdev_ops = {
1331 .ndo_open = ibmveth_open, 1353 .ndo_open = ibmveth_open,
1332 .ndo_stop = ibmveth_close, 1354 .ndo_stop = ibmveth_close,
@@ -1337,7 +1359,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
1337 .ndo_fix_features = ibmveth_fix_features, 1359 .ndo_fix_features = ibmveth_fix_features,
1338 .ndo_set_features = ibmveth_set_features, 1360 .ndo_set_features = ibmveth_set_features,
1339 .ndo_validate_addr = eth_validate_addr, 1361 .ndo_validate_addr = eth_validate_addr,
1340 .ndo_set_mac_address = eth_mac_addr, 1362 .ndo_set_mac_address = ibmveth_set_mac_addr,
1341#ifdef CONFIG_NET_POLL_CONTROLLER 1363#ifdef CONFIG_NET_POLL_CONTROLLER
1342 .ndo_poll_controller = ibmveth_poll_controller, 1364 .ndo_poll_controller = ibmveth_poll_controller,
1343#endif 1365#endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index cb19c377e0cc..1da7d05abd38 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -875,8 +875,9 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
875 * The grst delay value is in 100ms units, and we'll wait a 875 * The grst delay value is in 100ms units, and we'll wait a
876 * couple counts longer to be sure we don't just miss the end. 876 * couple counts longer to be sure we don't just miss the end.
877 */ 877 */
878 grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK 878 grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
879 >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; 879 I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
880 I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
880 for (cnt = 0; cnt < grst_del + 2; cnt++) { 881 for (cnt = 0; cnt < grst_del + 2; cnt++) {
881 reg = rd32(hw, I40E_GLGEN_RSTAT); 882 reg = rd32(hw, I40E_GLGEN_RSTAT);
882 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) 883 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
@@ -2828,7 +2829,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
2828 2829
2829 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2830 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2830 2831
2831 if (!status) 2832 if (!status && filter_index)
2832 *filter_index = resp->index; 2833 *filter_index = resp->index;
2833 2834
2834 return status; 2835 return status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index b0665509eae6..2f583554a260 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -40,7 +40,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
40 u32 val; 40 u32 val;
41 41
42 val = rd32(hw, I40E_PRTDCB_GENC); 42 val = rd32(hw, I40E_PRTDCB_GENC);
43 *delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >> 43 *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
44 I40E_PRTDCB_GENC_PFCLDA_SHIFT); 44 I40E_PRTDCB_GENC_PFCLDA_SHIFT);
45} 45}
46 46
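
The grst_del and PFC-delay hunks above fix the same C pitfall: '>>' binds tighter than '&', so without parentheses the shift applies to the mask constant rather than the masked register value. A compilable demonstration with a made-up field layout:

#include <stdio.h>
#include <stdint.h>

#define FIELD_MASK  0x0000ff00u
#define FIELD_SHIFT 8

int main(void)
{
	uint32_t reg = 0x1234abcd;

	uint32_t wrong = reg & FIELD_MASK >> FIELD_SHIFT;   /* reg & 0xff   */
	uint32_t right = (reg & FIELD_MASK) >> FIELD_SHIFT; /* field value  */

	printf("wrong=0x%x right=0x%x\n", wrong, right);    /* 0xcd vs 0xab */
	return 0;
}
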
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 30cf0be7d1b2..e802b6bc067d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -990,8 +990,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
990 if (!cmd_buf) 990 if (!cmd_buf)
991 return count; 991 return count;
992 bytes_not_copied = copy_from_user(cmd_buf, buffer, count); 992 bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
993 if (bytes_not_copied < 0) 993 if (bytes_not_copied < 0) {
994 kfree(cmd_buf);
994 return bytes_not_copied; 995 return bytes_not_copied;
996 }
995 if (bytes_not_copied > 0) 997 if (bytes_not_copied > 0)
996 count -= bytes_not_copied; 998 count -= bytes_not_copied;
997 cmd_buf[count] = '\0'; 999 cmd_buf[count] = '\0';
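
A hedged sketch of the usual shape of such a write handler, to show why the early return above must free cmd_buf: copy_from_user() reports the number of bytes it could NOT copy (zero on success), and every exit after the allocation must release the buffer. example_write() and its arguments are illustrative, not the debugfs handler itself:

static ssize_t example_write(const char __user *buffer, size_t count)
{
	char *cmd_buf = kzalloc(count + 1, GFP_KERNEL);

	if (!cmd_buf)
		return -ENOMEM;
	if (copy_from_user(cmd_buf, buffer, count)) {
		kfree(cmd_buf);            /* no leak on a faulted copy */
		return -EFAULT;
	}
	cmd_buf[count] = '\0';
	/* ... parse and act on cmd_buf ... */
	kfree(cmd_buf);
	return count;
}
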
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index c3858e7f0e66..56bdaff9f27e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1507,7 +1507,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1507 vsi->tc_config.numtc = numtc; 1507 vsi->tc_config.numtc = numtc;
1508 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; 1508 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1509 /* Number of queues per enabled TC */ 1509 /* Number of queues per enabled TC */
1510 num_tc_qps = vsi->alloc_queue_pairs/numtc; 1510 /* In MFP case we can have a much lower count of MSIx
1511 * vectors available and so we need to lower the used
1512 * q count.
1513 */
1514 qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
1515 num_tc_qps = qcount / numtc;
1511 num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC); 1516 num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
1512 1517
1513 /* Setup queue offset/count for all TCs for given VSI */ 1518 /* Setup queue offset/count for all TCs for given VSI */
@@ -2690,8 +2695,15 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2690 u16 qoffset, qcount; 2695 u16 qoffset, qcount;
2691 int i, n; 2696 int i, n;
2692 2697
2693 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) 2698 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
2694 return; 2699 /* Reset the TC information */
2700 for (i = 0; i < vsi->num_queue_pairs; i++) {
2701 rx_ring = vsi->rx_rings[i];
2702 tx_ring = vsi->tx_rings[i];
2703 rx_ring->dcb_tc = 0;
2704 tx_ring->dcb_tc = 0;
2705 }
2706 }
2695 2707
2696 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { 2708 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2697 if (!(vsi->tc_config.enabled_tc & (1 << n))) 2709 if (!(vsi->tc_config.enabled_tc & (1 << n)))
@@ -3836,6 +3848,12 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3836{ 3848{
3837 int i; 3849 int i;
3838 3850
3851 i40e_stop_misc_vector(pf);
3852 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3853 synchronize_irq(pf->msix_entries[0].vector);
3854 free_irq(pf->msix_entries[0].vector, pf);
3855 }
3856
3839 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); 3857 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3840 for (i = 0; i < pf->num_alloc_vsi; i++) 3858 for (i = 0; i < pf->num_alloc_vsi; i++)
3841 if (pf->vsi[i]) 3859 if (pf->vsi[i])
@@ -5246,8 +5264,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
5246 5264
5247 /* Wait for the PF's Tx queues to be disabled */ 5265 /* Wait for the PF's Tx queues to be disabled */
5248 ret = i40e_pf_wait_txq_disabled(pf); 5266 ret = i40e_pf_wait_txq_disabled(pf);
5249 if (!ret) 5267 if (ret) {
5268 /* Schedule PF reset to recover */
5269 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5270 i40e_service_event_schedule(pf);
5271 } else {
5250 i40e_pf_unquiesce_all_vsi(pf); 5272 i40e_pf_unquiesce_all_vsi(pf);
5273 }
5274
5251exit: 5275exit:
5252 return ret; 5276 return ret;
5253} 5277}
@@ -5579,7 +5603,8 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
5579 int i, v; 5603 int i, v;
5580 5604
5581 /* If we're down or resetting, just bail */ 5605 /* If we're down or resetting, just bail */
5582 if (test_bit(__I40E_CONFIG_BUSY, &pf->state)) 5606 if (test_bit(__I40E_DOWN, &pf->state) ||
5607 test_bit(__I40E_CONFIG_BUSY, &pf->state))
5583 return; 5608 return;
5584 5609
5585 /* for each VSI/netdev 5610 /* for each VSI/netdev
@@ -9849,6 +9874,7 @@ static void i40e_remove(struct pci_dev *pdev)
9849 set_bit(__I40E_DOWN, &pf->state); 9874 set_bit(__I40E_DOWN, &pf->state);
9850 del_timer_sync(&pf->service_timer); 9875 del_timer_sync(&pf->service_timer);
9851 cancel_work_sync(&pf->service_task); 9876 cancel_work_sync(&pf->service_task);
9877 i40e_fdir_teardown(pf);
9852 9878
9853 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { 9879 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
9854 i40e_free_vfs(pf); 9880 i40e_free_vfs(pf);
@@ -9875,12 +9901,6 @@ static void i40e_remove(struct pci_dev *pdev)
9875 if (pf->vsi[pf->lan_vsi]) 9901 if (pf->vsi[pf->lan_vsi])
9876 i40e_vsi_release(pf->vsi[pf->lan_vsi]); 9902 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
9877 9903
9878 i40e_stop_misc_vector(pf);
9879 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
9880 synchronize_irq(pf->msix_entries[0].vector);
9881 free_irq(pf->msix_entries[0].vector, pf);
9882 }
9883
9884 /* shutdown and destroy the HMC */ 9904 /* shutdown and destroy the HMC */
9885 if (pf->hw.hmc.hmc_obj) { 9905 if (pf->hw.hmc.hmc_obj) {
9886 ret_code = i40e_shutdown_lan_hmc(&pf->hw); 9906 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
@@ -10034,6 +10054,8 @@ static void i40e_shutdown(struct pci_dev *pdev)
10034 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); 10054 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
10035 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); 10055 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
10036 10056
10057 i40e_clear_interrupt_scheme(pf);
10058
10037 if (system_state == SYSTEM_POWER_OFF) { 10059 if (system_state == SYSTEM_POWER_OFF) {
10038 pci_wake_from_d3(pdev, pf->wol_en); 10060 pci_wake_from_d3(pdev, pf->wol_en);
10039 pci_set_power_state(pdev, PCI_D3hot); 10061 pci_set_power_state(pdev, PCI_D3hot);
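
The queue-map hunk near the top of this file caps the queue count by the available MSI-X vectors before dividing among traffic classes, so an MFP setup with few vectors no longer over-commits queues. A worked example with illustrative numbers:

#include <stdio.h>

#define MAX_QUEUES_PER_TC 64

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int alloc_queue_pairs = 16, num_lan_msix = 4, numtc = 2;

	int qcount = min_int(alloc_queue_pairs, num_lan_msix);
	int num_tc_qps = min_int(qcount / numtc, MAX_QUEUES_PER_TC);

	printf("%d queues per TC\n", num_tc_qps);   /* 2, not 8 */
	return 0;
}
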
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 28429c8fbc98..039018abad4a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -725,9 +725,11 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
725{ 725{
726 i40e_status status; 726 i40e_status status;
727 enum i40e_nvmupd_cmd upd_cmd; 727 enum i40e_nvmupd_cmd upd_cmd;
728 bool retry_attempt = false;
728 729
729 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); 730 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
730 731
732retry:
731 switch (upd_cmd) { 733 switch (upd_cmd) {
732 case I40E_NVMUPD_WRITE_CON: 734 case I40E_NVMUPD_WRITE_CON:
733 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); 735 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
@@ -771,6 +773,39 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
771 *errno = -ESRCH; 773 *errno = -ESRCH;
772 break; 774 break;
773 } 775 }
776
777 /* In some circumstances, a multi-write transaction takes longer
778 * than the default 3 minute timeout on the write semaphore. If
779 * the write failed with an EBUSY status, this is likely the problem,
780 * so here we try to reacquire the semaphore then retry the write.
781 * We only do one retry, then give up.
782 */
783 if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
784 !retry_attempt) {
785 i40e_status old_status = status;
786 u32 old_asq_status = hw->aq.asq_last_status;
787 u32 gtime;
788
789 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
790 if (gtime >= hw->nvm.hw_semaphore_timeout) {
791 i40e_debug(hw, I40E_DEBUG_ALL,
792 "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
793 gtime, hw->nvm.hw_semaphore_timeout);
794 i40e_release_nvm(hw);
795 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
796 if (status) {
797 i40e_debug(hw, I40E_DEBUG_ALL,
798 "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
799 hw->aq.asq_last_status);
800 status = old_status;
801 hw->aq.asq_last_status = old_asq_status;
802 } else {
803 retry_attempt = true;
804 goto retry;
805 }
806 }
807 }
808
774 return status; 809 return status;
775} 810}
776 811
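
The NVM update hunk adds a retry-once shape: on an EBUSY failure it reacquires the write semaphore and loops back exactly one time. A standalone sketch of the pattern — do_write() and reacquire() are illustrative stand-ins:

#include <stdbool.h>
#include <stdio.h>

static int do_write(int attempt) { return attempt == 0 ? -16 /* EBUSY */ : 0; }
static int reacquire(void)       { puts("reacquired semaphore"); return 0; }

int main(void)
{
	bool retried = false;
	int attempt = 0, status;

retry:
	status = do_write(attempt++);
	if (status && !retried && reacquire() == 0) {
		retried = true;     /* only one retry, then give up */
		goto retry;
	}
	printf("status=%d\n", status);
	return 0;
}
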
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index af350626843e..d4b4aa7c204e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -587,6 +587,20 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
587} 587}
588 588
589/** 589/**
590 * i40e_get_head - Retrieve head from head writeback
591 * @tx_ring: tx ring to fetch head of
592 *
593 * Returns value of Tx ring head based on value stored
594 * in head write-back location
595 **/
596static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
597{
598 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
599
600 return le32_to_cpu(*(volatile __le32 *)head);
601}
602
603/**
590 * i40e_get_tx_pending - how many tx descriptors not processed 604 * i40e_get_tx_pending - how many tx descriptors not processed
591 * @tx_ring: the ring of descriptors 605 * @tx_ring: the ring of descriptors
592 * 606 *
@@ -595,10 +609,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
595 **/ 609 **/
596static u32 i40e_get_tx_pending(struct i40e_ring *ring) 610static u32 i40e_get_tx_pending(struct i40e_ring *ring)
597{ 611{
598 u32 ntu = ((ring->next_to_clean <= ring->next_to_use) 612 u32 head, tail;
599 ? ring->next_to_use 613
600 : ring->next_to_use + ring->count); 614 head = i40e_get_head(ring);
601 return ntu - ring->next_to_clean; 615 tail = readl(ring->tail);
616
617 if (head != tail)
618 return (head < tail) ?
619 tail - head : (tail + ring->count - head);
620
621 return 0;
602} 622}
603 623
604/** 624/**
@@ -607,6 +627,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
607 **/ 627 **/
608static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) 628static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
609{ 629{
630 u32 tx_done = tx_ring->stats.packets;
631 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
610 u32 tx_pending = i40e_get_tx_pending(tx_ring); 632 u32 tx_pending = i40e_get_tx_pending(tx_ring);
611 struct i40e_pf *pf = tx_ring->vsi->back; 633 struct i40e_pf *pf = tx_ring->vsi->back;
612 bool ret = false; 634 bool ret = false;
@@ -624,41 +646,25 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
624 * run the check_tx_hang logic with a transmit completion 646 * run the check_tx_hang logic with a transmit completion
625 * pending but without time to complete it yet. 647 * pending but without time to complete it yet.
626 */ 648 */
627 if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && 649 if ((tx_done_old == tx_done) && tx_pending) {
628 (tx_pending >= I40E_MIN_DESC_PENDING)) {
629 /* make sure it is true for two checks in a row */ 650 /* make sure it is true for two checks in a row */
630 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, 651 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
631 &tx_ring->state); 652 &tx_ring->state);
632 } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && 653 } else if (tx_done_old == tx_done &&
633 (tx_pending < I40E_MIN_DESC_PENDING) && 654 (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
634 (tx_pending > 0)) {
635 if (I40E_DEBUG_FLOW & pf->hw.debug_mask) 655 if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
636 dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d", 656 dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
637 tx_pending, tx_ring->queue_index); 657 tx_pending, tx_ring->queue_index);
638 pf->tx_sluggish_count++; 658 pf->tx_sluggish_count++;
639 } else { 659 } else {
640 /* update completed stats and disarm the hang check */ 660 /* update completed stats and disarm the hang check */
641 tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; 661 tx_ring->tx_stats.tx_done_old = tx_done;
642 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); 662 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
643 } 663 }
644 664
645 return ret; 665 return ret;
646} 666}
647 667
648/**
649 * i40e_get_head - Retrieve head from head writeback
650 * @tx_ring: tx ring to fetch head of
651 *
652 * Returns value of Tx ring head based on value stored
653 * in head write-back location
654 **/
655static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
656{
657 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
658
659 return le32_to_cpu(*(volatile __le32 *)head);
660}
661
662#define WB_STRIDE 0x3 668#define WB_STRIDE 0x3
663 669
664/** 670/**
@@ -2356,6 +2362,67 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2356} 2362}
2357 2363
2358/** 2364/**
2365 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
2366 * @skb: send buffer
2367 * @tx_flags: collected send information
2368 * @hdr_len: size of the packet header
2369 *
2370 * Note: Our HW can't scatter-gather more than 8 fragments to build
2371 * a packet on the wire and so we need to figure out the cases where we
2372 * need to linearize the skb.
2373 **/
2374static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
2375 const u8 hdr_len)
2376{
2377 struct skb_frag_struct *frag;
2378 bool linearize = false;
2379 unsigned int size = 0;
2380 u16 num_frags;
2381 u16 gso_segs;
2382
2383 num_frags = skb_shinfo(skb)->nr_frags;
2384 gso_segs = skb_shinfo(skb)->gso_segs;
2385
2386 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
2387 u16 j = 1;
2388
2389 if (num_frags < (I40E_MAX_BUFFER_TXD))
2390 goto linearize_chk_done;
2391 /* try the simple math, if we have too many frags per segment */
2392 if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
2393 I40E_MAX_BUFFER_TXD) {
2394 linearize = true;
2395 goto linearize_chk_done;
2396 }
2397 frag = &skb_shinfo(skb)->frags[0];
2398 size = hdr_len;
2399 /* we might still have more fragments per segment */
2400 do {
2401 size += skb_frag_size(frag);
2402 frag++; j++;
2403 if (j == I40E_MAX_BUFFER_TXD) {
2404 if (size < skb_shinfo(skb)->gso_size) {
2405 linearize = true;
2406 break;
2407 }
2408 j = 1;
2409 size -= skb_shinfo(skb)->gso_size;
2410 if (size)
2411 j++;
2412 size += hdr_len;
2413 }
2414 num_frags--;
2415 } while (num_frags);
2416 } else {
2417 if (num_frags >= I40E_MAX_BUFFER_TXD)
2418 linearize = true;
2419 }
2420
2421linearize_chk_done:
2422 return linearize;
2423}
2424
2425/**
2359 * i40e_tx_map - Build the Tx descriptor 2426 * i40e_tx_map - Build the Tx descriptor
2360 * @tx_ring: ring to send buffer on 2427 * @tx_ring: ring to send buffer on
2361 * @skb: send buffer 2428 * @skb: send buffer
@@ -2612,6 +2679,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2612 if (tsyn) 2679 if (tsyn)
2613 tx_flags |= I40E_TX_FLAGS_TSYN; 2680 tx_flags |= I40E_TX_FLAGS_TSYN;
2614 2681
2682 if (i40e_chk_linearize(skb, tx_flags, hdr_len))
2683 if (skb_linearize(skb))
2684 goto out_drop;
2685
2615 skb_tx_timestamp(skb); 2686 skb_tx_timestamp(skb);
2616 2687
2617 /* always enable CRC insertion offload */ 2688 /* always enable CRC insertion offload */
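
The i40e_get_tx_pending() rewrite above derives pending work from the hardware head (via head write-back) and the ring tail, with wraparound handled explicitly, instead of the software next_to_* pair. The arithmetic in isolation, as a compilable sketch:

#include <stdio.h>
#include <stdint.h>

static uint32_t tx_pending(uint32_t head, uint32_t tail, uint32_t count)
{
	if (head == tail)
		return 0;
	return head < tail ? tail - head : tail + count - head;
}

int main(void)
{
	printf("%u\n", tx_pending(3, 7, 512));    /* 4          */
	printf("%u\n", tx_pending(510, 2, 512));  /* 4, wrapped */
	return 0;
}
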
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 38449b230d60..4b0b8102cdc3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -120,6 +120,7 @@ enum i40e_dyn_idx_t {
120 120
121#define i40e_rx_desc i40e_32byte_rx_desc 121#define i40e_rx_desc i40e_32byte_rx_desc
122 122
123#define I40E_MAX_BUFFER_TXD 8
123#define I40E_MIN_TX_LEN 17 124#define I40E_MIN_TX_LEN 17
124#define I40E_MAX_DATA_PER_TXD 8192 125#define I40E_MAX_DATA_PER_TXD 8192
125 126
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index d2ff862f0726..fe13ad2def46 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -127,6 +127,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
127} 127}
128 128
129/** 129/**
130 * i40e_get_head - Retrieve head from head writeback
131 * @tx_ring: tx ring to fetch head of
132 *
133 * Returns value of Tx ring head based on value stored
134 * in head write-back location
135 **/
136static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
137{
138 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
139
140 return le32_to_cpu(*(volatile __le32 *)head);
141}
142
143/**
130 * i40e_get_tx_pending - how many tx descriptors not processed 144 * i40e_get_tx_pending - how many tx descriptors not processed
131 * @tx_ring: the ring of descriptors 145 * @tx_ring: the ring of descriptors
132 * 146 *
@@ -135,10 +149,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
135 **/ 149 **/
136static u32 i40e_get_tx_pending(struct i40e_ring *ring) 150static u32 i40e_get_tx_pending(struct i40e_ring *ring)
137{ 151{
138 u32 ntu = ((ring->next_to_clean <= ring->next_to_use) 152 u32 head, tail;
139 ? ring->next_to_use 153
140 : ring->next_to_use + ring->count); 154 head = i40e_get_head(ring);
141 return ntu - ring->next_to_clean; 155 tail = readl(ring->tail);
156
157 if (head != tail)
158 return (head < tail) ?
159 tail - head : (tail + ring->count - head);
160
161 return 0;
142} 162}
143 163
144/** 164/**
@@ -147,6 +167,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
147 **/ 167 **/
148static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) 168static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
149{ 169{
170 u32 tx_done = tx_ring->stats.packets;
171 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
150 u32 tx_pending = i40e_get_tx_pending(tx_ring); 172 u32 tx_pending = i40e_get_tx_pending(tx_ring);
151 bool ret = false; 173 bool ret = false;
152 174
@@ -163,36 +185,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
163 * run the check_tx_hang logic with a transmit completion 185 * run the check_tx_hang logic with a transmit completion
164 * pending but without time to complete it yet. 186 * pending but without time to complete it yet.
165 */ 187 */
166 if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && 188 if ((tx_done_old == tx_done) && tx_pending) {
167 (tx_pending >= I40E_MIN_DESC_PENDING)) {
168 /* make sure it is true for two checks in a row */ 189 /* make sure it is true for two checks in a row */
169 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, 190 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
170 &tx_ring->state); 191 &tx_ring->state);
171 } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) || 192 } else if (tx_done_old == tx_done &&
172 !(tx_pending < I40E_MIN_DESC_PENDING) || 193 (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
173 !(tx_pending > 0)) {
174 /* update completed stats and disarm the hang check */ 194 /* update completed stats and disarm the hang check */
175 tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; 195 tx_ring->tx_stats.tx_done_old = tx_done;
176 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); 196 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
177 } 197 }
178 198
179 return ret; 199 return ret;
180} 200}
181 201
182/**
183 * i40e_get_head - Retrieve head from head writeback
184 * @tx_ring: tx ring to fetch head of
185 *
186 * Returns value of Tx ring head based on value stored
187 * in head write-back location
188 **/
189static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
190{
191 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
192
193 return le32_to_cpu(*(volatile __le32 *)head);
194}
195
196#define WB_STRIDE 0x3 202#define WB_STRIDE 0x3
197 203
198/** 204/**
@@ -1405,17 +1411,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
1405 if (err < 0) 1411 if (err < 0)
1406 return err; 1412 return err;
1407 1413
1408 if (protocol == htons(ETH_P_IP)) { 1414 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
1409 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); 1415 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
1416
1417 if (iph->version == 4) {
1410 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); 1418 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1411 iph->tot_len = 0; 1419 iph->tot_len = 0;
1412 iph->check = 0; 1420 iph->check = 0;
1413 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 1421 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1414 0, IPPROTO_TCP, 0); 1422 0, IPPROTO_TCP, 0);
1415 } else if (skb_is_gso_v6(skb)) { 1423 } else if (ipv6h->version == 6) {
1416
1417 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
1418 : ipv6_hdr(skb);
1419 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); 1424 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1420 ipv6h->payload_len = 0; 1425 ipv6h->payload_len = 0;
1421 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 1426 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1473,13 +1478,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
1473 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; 1478 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1474 } 1479 }
1475 } else if (tx_flags & I40E_TX_FLAGS_IPV6) { 1480 } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
1476 if (tx_flags & I40E_TX_FLAGS_TSO) { 1481 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
1477 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; 1482 if (tx_flags & I40E_TX_FLAGS_TSO)
1478 ip_hdr(skb)->check = 0; 1483 ip_hdr(skb)->check = 0;
1479 } else {
1480 *cd_tunneling |=
1481 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1482 }
1483 } 1484 }
1484 1485
1485 /* Now set the ctx descriptor fields */ 1486 /* Now set the ctx descriptor fields */
@@ -1489,6 +1490,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
1489 ((skb_inner_network_offset(skb) - 1490 ((skb_inner_network_offset(skb) -
1490 skb_transport_offset(skb)) >> 1) << 1491 skb_transport_offset(skb)) >> 1) <<
1491 I40E_TXD_CTX_QW0_NATLEN_SHIFT; 1492 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
1493 if (this_ip_hdr->version == 6) {
1494 tx_flags &= ~I40E_TX_FLAGS_IPV4;
1495 tx_flags |= I40E_TX_FLAGS_IPV6;
1496 }
1497
1492 1498
1493 } else { 1499 } else {
1494 network_hdr_len = skb_network_header_len(skb); 1500 network_hdr_len = skb_network_header_len(skb);
@@ -1579,6 +1585,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1579 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); 1585 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
1580} 1586}
1581 1587
1588 /**
1589 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
1590 * @skb: send buffer
1591 * @tx_flags: collected send information
1592 * @hdr_len: size of the packet header
1593 *
1594 * Note: Our HW can't scatter-gather more than 8 fragments to build
1595 * a packet on the wire and so we need to figure out the cases where we
1596 * need to linearize the skb.
1597 **/
1598static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
1599 const u8 hdr_len)
1600{
1601 struct skb_frag_struct *frag;
1602 bool linearize = false;
1603 unsigned int size = 0;
1604 u16 num_frags;
1605 u16 gso_segs;
1606
1607 num_frags = skb_shinfo(skb)->nr_frags;
1608 gso_segs = skb_shinfo(skb)->gso_segs;
1609
1610 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
1611 u16 j = 1;
1612
1613 if (num_frags < (I40E_MAX_BUFFER_TXD))
1614 goto linearize_chk_done;
1615 /* try the simple math, if we have too many frags per segment */
1616 if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
1617 I40E_MAX_BUFFER_TXD) {
1618 linearize = true;
1619 goto linearize_chk_done;
1620 }
1621 frag = &skb_shinfo(skb)->frags[0];
1622 size = hdr_len;
1623 /* we might still have more fragments per segment */
1624 do {
1625 size += skb_frag_size(frag);
1626 frag++; j++;
1627 if (j == I40E_MAX_BUFFER_TXD) {
1628 if (size < skb_shinfo(skb)->gso_size) {
1629 linearize = true;
1630 break;
1631 }
1632 j = 1;
1633 size -= skb_shinfo(skb)->gso_size;
1634 if (size)
1635 j++;
1636 size += hdr_len;
1637 }
1638 num_frags--;
1639 } while (num_frags);
1640 } else {
1641 if (num_frags >= I40E_MAX_BUFFER_TXD)
1642 linearize = true;
1643 }
1644
1645linearize_chk_done:
1646 return linearize;
1647}
1648
1582/** 1649/**
1583 * i40e_tx_map - Build the Tx descriptor 1650 * i40e_tx_map - Build the Tx descriptor
1584 * @tx_ring: ring to send buffer on 1651 * @tx_ring: ring to send buffer on
@@ -1853,6 +1920,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
1853 else if (tso) 1920 else if (tso)
1854 tx_flags |= I40E_TX_FLAGS_TSO; 1921 tx_flags |= I40E_TX_FLAGS_TSO;
1855 1922
1923 if (i40e_chk_linearize(skb, tx_flags, hdr_len))
1924 if (skb_linearize(skb))
1925 goto out_drop;
1926
1856 skb_tx_timestamp(skb); 1927 skb_tx_timestamp(skb);
1857 1928
1858 /* always enable CRC insertion offload */ 1929 /* always enable CRC insertion offload */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index ffdda716813e..1e49bb1fbac1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -120,6 +120,7 @@ enum i40e_dyn_idx_t {
120 120
121#define i40e_rx_desc i40e_32byte_rx_desc 121#define i40e_rx_desc i40e_32byte_rx_desc
122 122
123#define I40E_MAX_BUFFER_TXD 8
123#define I40E_MIN_TX_LEN 17 124#define I40E_MIN_TX_LEN 17
124#define I40E_MAX_DATA_PER_TXD 8192 125#define I40E_MAX_DATA_PER_TXD 8192
125 126
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 4e789479f00f..b66e03d9711f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -81,12 +81,14 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
81{ 81{
82 u32 loopback_ok = 0; 82 u32 loopback_ok = 0;
83 int i; 83 int i;
84 84 bool gro_enabled;
85 85
86 priv->loopback_ok = 0; 86 priv->loopback_ok = 0;
87 priv->validate_loopback = 1; 87 priv->validate_loopback = 1;
88 gro_enabled = priv->dev->features & NETIF_F_GRO;
88 89
89 mlx4_en_update_loopback_state(priv->dev, priv->dev->features); 90 mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
91 priv->dev->features &= ~NETIF_F_GRO;
90 92
91 /* xmit */ 93 /* xmit */
92 if (mlx4_en_test_loopback_xmit(priv)) { 94 if (mlx4_en_test_loopback_xmit(priv)) {
@@ -108,6 +110,10 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
108mlx4_en_test_loopback_exit: 110mlx4_en_test_loopback_exit:
109 111
110 priv->validate_loopback = 0; 112 priv->validate_loopback = 0;
113
114 if (gro_enabled)
115 priv->dev->features |= NETIF_F_GRO;
116
111 mlx4_en_update_loopback_state(priv->dev, priv->dev->features); 117 mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
112 return !loopback_ok; 118 return !loopback_ok;
113} 119}
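
The mlx4 selftest hunk uses a save/disable/restore pattern: remember whether GRO was on, force it off for the loopback run, and restore it on the way out. A standalone sketch of the shape, with an illustrative flag name:

#include <stdio.h>
#include <stdbool.h>

#define F_GRO (1u << 0)

int main(void)
{
	unsigned int features = F_GRO;
	bool gro_enabled = features & F_GRO;     /* 1. remember      */

	features &= ~F_GRO;                      /* 2. off for test  */
	puts("running loopback test");

	if (gro_enabled)                         /* 3. restore       */
		features |= F_GRO;

	printf("gro restored: %s\n", features & F_GRO ? "yes" : "no");
	return 0;
}
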
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2bb8553bd905..eda29dbbfcd2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -412,7 +412,6 @@ err_icm:
412 412
413EXPORT_SYMBOL_GPL(mlx4_qp_alloc); 413EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
414 414
415#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
416int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, 415int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
417 enum mlx4_update_qp_attr attr, 416 enum mlx4_update_qp_attr attr,
418 struct mlx4_update_qp_params *params) 417 struct mlx4_update_qp_params *params)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 486e3d26cd4a..d97ca88c55b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -713,7 +713,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
713 struct mlx4_vport_oper_state *vp_oper; 713 struct mlx4_vport_oper_state *vp_oper;
714 struct mlx4_priv *priv; 714 struct mlx4_priv *priv;
715 u32 qp_type; 715 u32 qp_type;
716 int port; 716 int port, err = 0;
717 717
718 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; 718 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
719 priv = mlx4_priv(dev); 719 priv = mlx4_priv(dev);
@@ -738,7 +738,9 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
738 } else { 738 } else {
739 struct mlx4_update_qp_params params = {.flags = 0}; 739 struct mlx4_update_qp_params params = {.flags = 0};
740 740
741 mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params); 741 err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
742 if (err)
743 goto out;
742 } 744 }
743 } 745 }
744 746
@@ -773,7 +775,8 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
773 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; 775 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
774 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; 776 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
775 } 777 }
776 return 0; 778out:
779 return err;
777} 780}
778 781
779static int mpt_mask(struct mlx4_dev *dev) 782static int mpt_mask(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 44e8d7d25547..57a6e6cd74fc 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1239,11 +1239,9 @@ static int pasemi_mac_open(struct net_device *dev)
1239 if (mac->phydev) 1239 if (mac->phydev)
1240 phy_start(mac->phydev); 1240 phy_start(mac->phydev);
1241 1241
1242 init_timer(&mac->tx->clean_timer); 1242 setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
1243 mac->tx->clean_timer.function = pasemi_mac_tx_timer; 1243 (unsigned long)mac->tx);
1244 mac->tx->clean_timer.data = (unsigned long)mac->tx; 1244 mod_timer(&mac->tx->clean_timer, jiffies + HZ);
1245 mac->tx->clean_timer.expires = jiffies+HZ;
1246 add_timer(&mac->tx->clean_timer);
1247 1245
1248 return 0; 1246 return 0;
1249 1247
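
The pasemi hunk (and the smc91c92 hunk further down) are the same mechanical conversion; the generic shape, hedged as a sketch of the timer API of this era:

/* Before:                              After:
 *     init_timer(&t);                      setup_timer(&t, fn, data);
 *     t.function = fn;                     mod_timer(&t, jiffies + HZ);
 *     t.data     = data;
 *     t.expires  = jiffies + HZ;
 *     add_timer(&t);
 *
 * setup_timer() folds the three field assignments into one call, and
 * mod_timer() on a not-yet-pending timer activates it, standing in
 * for the .expires assignment plus add_timer(). */
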
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 6e426ae94692..0a5e204a0179 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -354,7 +354,7 @@ struct cmd_desc_type0 {
354 354
355} __attribute__ ((aligned(64))); 355} __attribute__ ((aligned(64)));
356 356
357/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */ 357/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
358struct rcv_desc { 358struct rcv_desc {
359 __le16 reference_handle; 359 __le16 reference_handle;
360 __le16 reserved; 360 __le16 reserved;
@@ -499,7 +499,7 @@ struct uni_data_desc{
499#define NETXEN_IMAGE_START 0x43000 /* compressed image */ 499#define NETXEN_IMAGE_START 0x43000 /* compressed image */
500#define NETXEN_SECONDARY_START 0x200000 /* backup images */ 500#define NETXEN_SECONDARY_START 0x200000 /* backup images */
501#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */ 501#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */
502#define NETXEN_USER_START 0x3E8000 /* Firmare info */ 502#define NETXEN_USER_START 0x3E8000 /* Firmware info */
503#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */ 503#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */
504#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */ 504#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */
505 505
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index fa4317611fd6..f221126a5c4e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -314,7 +314,7 @@ struct qlcnic_fdt {
314#define QLCNIC_BRDCFG_START 0x4000 /* board config */ 314#define QLCNIC_BRDCFG_START 0x4000 /* board config */
315#define QLCNIC_BOOTLD_START 0x10000 /* bootld */ 315#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
316#define QLCNIC_IMAGE_START 0x43000 /* compressed image */ 316#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
317#define QLCNIC_USER_START 0x3E8000 /* Firmare info */ 317#define QLCNIC_USER_START 0x3E8000 /* Firmware info */
318 318
319#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408) 319#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
320#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c) 320#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index ad0020af2193..c70ab40d8698 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2561,7 +2561,7 @@ static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2561 int rc = -EINVAL; 2561 int rc = -EINVAL;
2562 2562
2563 if (!rtl_fw_format_ok(tp, rtl_fw)) { 2563 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2564 netif_err(tp, ifup, dev, "invalid firwmare\n"); 2564 netif_err(tp, ifup, dev, "invalid firmware\n");
2565 goto out; 2565 goto out;
2566 } 2566 }
2567 2567
@@ -5067,8 +5067,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
5067 RTL_W8(ChipCmd, CmdReset); 5067 RTL_W8(ChipCmd, CmdReset);
5068 5068
5069 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); 5069 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
5070
5071 netdev_reset_queue(tp->dev);
5072} 5070}
5073 5071
5074static void rtl_request_uncached_firmware(struct rtl8169_private *tp) 5072static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
@@ -7049,7 +7047,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
7049 u32 status, len; 7047 u32 status, len;
7050 u32 opts[2]; 7048 u32 opts[2];
7051 int frags; 7049 int frags;
7052 bool stop_queue;
7053 7050
7054 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { 7051 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
7055 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); 7052 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
@@ -7090,8 +7087,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
7090 7087
7091 txd->opts2 = cpu_to_le32(opts[1]); 7088 txd->opts2 = cpu_to_le32(opts[1]);
7092 7089
7093 netdev_sent_queue(dev, skb->len);
7094
7095 skb_tx_timestamp(skb); 7090 skb_tx_timestamp(skb);
7096 7091
7097 /* Force memory writes to complete before releasing descriptor */ 7092 /* Force memory writes to complete before releasing descriptor */
@@ -7106,16 +7101,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
7106 7101
7107 tp->cur_tx += frags + 1; 7102 tp->cur_tx += frags + 1;
7108 7103
7109 stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS); 7104 RTL_W8(TxPoll, NPQ);
7110 7105
7111 if (!skb->xmit_more || stop_queue || 7106 mmiowb();
7112 netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) {
7113 RTL_W8(TxPoll, NPQ);
7114
7115 mmiowb();
7116 }
7117 7107
7118 if (stop_queue) { 7108 if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
7119 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must 7109 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
7120 * not miss a ring update when it notices a stopped queue. 7110 * not miss a ring update when it notices a stopped queue.
7121 */ 7111 */
@@ -7198,7 +7188,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
7198static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) 7188static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
7199{ 7189{
7200 unsigned int dirty_tx, tx_left; 7190 unsigned int dirty_tx, tx_left;
7201 unsigned int bytes_compl = 0, pkts_compl = 0;
7202 7191
7203 dirty_tx = tp->dirty_tx; 7192 dirty_tx = tp->dirty_tx;
7204 smp_rmb(); 7193 smp_rmb();
@@ -7222,8 +7211,10 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
7222 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, 7211 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
7223 tp->TxDescArray + entry); 7212 tp->TxDescArray + entry);
7224 if (status & LastFrag) { 7213 if (status & LastFrag) {
7225 pkts_compl++; 7214 u64_stats_update_begin(&tp->tx_stats.syncp);
7226 bytes_compl += tx_skb->skb->len; 7215 tp->tx_stats.packets++;
7216 tp->tx_stats.bytes += tx_skb->skb->len;
7217 u64_stats_update_end(&tp->tx_stats.syncp);
7227 dev_kfree_skb_any(tx_skb->skb); 7218 dev_kfree_skb_any(tx_skb->skb);
7228 tx_skb->skb = NULL; 7219 tx_skb->skb = NULL;
7229 } 7220 }
@@ -7232,13 +7223,6 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
7232 } 7223 }
7233 7224
7234 if (tp->dirty_tx != dirty_tx) { 7225 if (tp->dirty_tx != dirty_tx) {
7235 netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
7236
7237 u64_stats_update_begin(&tp->tx_stats.syncp);
7238 tp->tx_stats.packets += pkts_compl;
7239 tp->tx_stats.bytes += bytes_compl;
7240 u64_stats_update_end(&tp->tx_stats.syncp);
7241
7242 tp->dirty_tx = dirty_tx; 7226 tp->dirty_tx = dirty_tx;
7243 /* Sync with rtl8169_start_xmit: 7227 /* Sync with rtl8169_start_xmit:
7244 * - publish dirty_tx ring index (write barrier) 7228 * - publish dirty_tx ring index (write barrier)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 4da8bd263997..736d5d1624a1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -508,7 +508,6 @@ static struct sh_eth_cpu_data r8a779x_data = {
508 .tpauser = 1, 508 .tpauser = 1,
509 .hw_swap = 1, 509 .hw_swap = 1,
510 .rmiimode = 1, 510 .rmiimode = 1,
511 .shift_rd0 = 1,
512}; 511};
513 512
514static void sh_eth_set_rate_sh7724(struct net_device *ndev) 513static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -1392,6 +1391,9 @@ static void sh_eth_dev_exit(struct net_device *ndev)
1392 msleep(2); /* max frame time at 10 Mbps < 1250 us */ 1391 msleep(2); /* max frame time at 10 Mbps < 1250 us */
1393 sh_eth_get_stats(ndev); 1392 sh_eth_get_stats(ndev);
1394 sh_eth_reset(ndev); 1393 sh_eth_reset(ndev);
1394
1395 /* Set MAC address again */
1396 update_mac_address(ndev);
1395} 1397}
1396 1398
1397/* free Tx skb function */ 1399/* free Tx skb function */
@@ -1407,6 +1409,8 @@ static int sh_eth_txfree(struct net_device *ndev)
1407 txdesc = &mdp->tx_ring[entry]; 1409 txdesc = &mdp->tx_ring[entry];
1408 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) 1410 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
1409 break; 1411 break;
1412 /* TACT bit must be checked before all the following reads */
1413 rmb();
1410 /* Free the original skb. */ 1414 /* Free the original skb. */
1411 if (mdp->tx_skbuff[entry]) { 1415 if (mdp->tx_skbuff[entry]) {
1412 dma_unmap_single(&ndev->dev, txdesc->addr, 1416 dma_unmap_single(&ndev->dev, txdesc->addr,
@@ -1444,6 +1448,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1444 limit = boguscnt; 1448 limit = boguscnt;
1445 rxdesc = &mdp->rx_ring[entry]; 1449 rxdesc = &mdp->rx_ring[entry];
1446 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { 1450 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
1451 /* RACT bit must be checked before all the following reads */
1452 rmb();
1447 desc_status = edmac_to_cpu(mdp, rxdesc->status); 1453 desc_status = edmac_to_cpu(mdp, rxdesc->status);
1448 pkt_len = rxdesc->frame_length; 1454 pkt_len = rxdesc->frame_length;
1449 1455
@@ -1455,8 +1461,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1455 1461
1456 /* In case of almost all GETHER/ETHERs, the Receive Frame State 1462 /* In case of almost all GETHER/ETHERs, the Receive Frame State
1457 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to 1463 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
1458 * bit 0. However, in case of the R8A7740, R8A779x, and 1464 * bit 0. However, in case of the R8A7740 and R7S72100
1459 * R7S72100 the RFS bits are from bit 25 to bit 16. So, the 1465 * the RFS bits are from bit 25 to bit 16. So, the
1460 * driver needs right shifting by 16. 1466 * driver needs right shifting by 16.
1461 */ 1467 */
1462 if (mdp->cd->shift_rd0) 1468 if (mdp->cd->shift_rd0)
@@ -1523,6 +1529,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1523 skb_checksum_none_assert(skb); 1529 skb_checksum_none_assert(skb);
1524 rxdesc->addr = dma_addr; 1530 rxdesc->addr = dma_addr;
1525 } 1531 }
1532 wmb(); /* RACT bit must be set after all the above writes */
1526 if (entry >= mdp->num_rx_ring - 1) 1533 if (entry >= mdp->num_rx_ring - 1)
1527 rxdesc->status |= 1534 rxdesc->status |=
1528 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); 1535 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
@@ -1535,7 +1542,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1535 /* If we don't need to check status, don't. -KDU */ 1542 /* If we don't need to check status, don't. -KDU */
1536 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { 1543 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1537 /* fix the values for the next receiving if RDE is set */ 1544 /* fix the values for the next receiving if RDE is set */
1538 if (intr_status & EESR_RDE) { 1545 if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) {
1539 u32 count = (sh_eth_read(ndev, RDFAR) - 1546 u32 count = (sh_eth_read(ndev, RDFAR) -
1540 sh_eth_read(ndev, RDLAR)) >> 4; 1547 sh_eth_read(ndev, RDLAR)) >> 4;
1541 1548
@@ -2174,7 +2181,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2174 } 2181 }
2175 spin_unlock_irqrestore(&mdp->lock, flags); 2182 spin_unlock_irqrestore(&mdp->lock, flags);
2176 2183
2177 if (skb_padto(skb, ETH_ZLEN)) 2184 if (skb_put_padto(skb, ETH_ZLEN))
2178 return NETDEV_TX_OK; 2185 return NETDEV_TX_OK;
2179 2186
2180 entry = mdp->cur_tx % mdp->num_tx_ring; 2187 entry = mdp->cur_tx % mdp->num_tx_ring;
@@ -2192,6 +2199,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2192 } 2199 }
2193 txdesc->buffer_length = skb->len; 2200 txdesc->buffer_length = skb->len;
2194 2201
2202 wmb(); /* TACT bit must be set after all the above writes */
2195 if (entry >= mdp->num_tx_ring - 1) 2203 if (entry >= mdp->num_tx_ring - 1)
2196 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); 2204 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
2197 else 2205 else
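
A hedged sketch of the descriptor ownership hand-off the new sh_eth barriers protect; the DMA engine owns a descriptor while its ACT bit is set:

/*   CPU consuming a descriptor:        CPU producing a descriptor:
 *     if (desc->status & ACT)            desc->addr   = dma_addr;
 *             break;  // still DMA's     desc->length = len;
 *     rmb();                             wmb();
 *     read desc->addr, desc->length;     desc->status |= ACT;
 *
 * rmb() keeps the payload reads from being reordered ahead of the
 * ownership check; wmb() keeps the payload writes from completing
 * after the ACT bit that hands the descriptor to the hardware. */
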
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index e5a15a4c4e8f..a5d1e6ea7d58 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -1280,9 +1280,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
1280 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); 1280 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1281 1281
1282 if (enable) 1282 if (enable)
1283 val |= 1 << rocker_port->pport; 1283 val |= 1ULL << rocker_port->pport;
1284 else 1284 else
1285 val &= ~(1 << rocker_port->pport); 1285 val &= ~(1ULL << rocker_port->pport);
1286 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); 1286 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1287} 1287}
1288 1288
@@ -4241,6 +4241,8 @@ static int rocker_probe_ports(struct rocker *rocker)
4241 4241
4242 alloc_size = sizeof(struct rocker_port *) * rocker->port_count; 4242 alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
4243 rocker->ports = kmalloc(alloc_size, GFP_KERNEL); 4243 rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
4244 if (!rocker->ports)
4245 return -ENOMEM;
4244 for (i = 0; i < rocker->port_count; i++) { 4246 for (i = 0; i < rocker->port_count; i++) {
4245 err = rocker_probe_port(rocker, i); 4247 err = rocker_probe_port(rocker, i);
4246 if (err) 4248 if (err)
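
The 1ULL change in the first hunk fixes a real width bug: with a plain int constant the shift is evaluated in 32 bits. A stand-alone illustration:

        #include <stdint.h>

        uint64_t set_port_bit(uint64_t val, unsigned int pport)
        {
                /* 1 << pport is a 32-bit signed shift: undefined once pport
                 * reaches 31, and a negative intermediate result would be
                 * sign-extended when widened to 64 bits. 1ULL keeps the
                 * whole operation in the 64-bit register mask.
                 */
                return val | (1ULL << pport);
        }

The second hunk adds the missing kmalloc() failure check so probe fails cleanly with -ENOMEM instead of dereferencing a NULL ports array.
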
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 6b33127ab352..3449893aea8d 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1070,11 +1070,8 @@ static int smc_open(struct net_device *dev)
1070 smc->packets_waiting = 0; 1070 smc->packets_waiting = 0;
1071 1071
1072 smc_reset(dev); 1072 smc_reset(dev);
1073 init_timer(&smc->media); 1073 setup_timer(&smc->media, media_check, (u_long)dev);
1074 smc->media.function = media_check; 1074 mod_timer(&smc->media, jiffies + HZ);
1075 smc->media.data = (u_long) dev;
1076 smc->media.expires = jiffies + HZ;
1077 add_timer(&smc->media);
1078 1075
1079 return 0; 1076 return 0;
1080} /* smc_open */ 1077} /* smc_open */
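
This is a mechanical timer-API conversion: setup_timer() replaces the open-coded init_timer() plus function/data assignments, and mod_timer() sets the expiry and arms the timer in one call. The shape of the conversion:

        /* Before: five statements to initialize and arm the media timer. */
        init_timer(&smc->media);
        smc->media.function = media_check;
        smc->media.data = (u_long)dev;
        smc->media.expires = jiffies + HZ;
        add_timer(&smc->media);

        /* After: identical behavior in two calls. */
        setup_timer(&smc->media, media_check, (u_long)dev);
        mod_timer(&smc->media, jiffies + HZ);

The stmmac hunk further down applies the same conversion to its EEE control timer.
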
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 88a55f95fe09..209ee1b27f8d 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -91,6 +91,10 @@ static const char version[] =
91 91
92#include "smc91x.h" 92#include "smc91x.h"
93 93
94#if defined(CONFIG_ASSABET_NEPONSET)
95#include <mach/neponset.h>
96#endif
97
94#ifndef SMC_NOWAIT 98#ifndef SMC_NOWAIT
95# define SMC_NOWAIT 0 99# define SMC_NOWAIT 0
96#endif 100#endif
@@ -2355,8 +2359,9 @@ static int smc_drv_probe(struct platform_device *pdev)
2355 ret = smc_request_attrib(pdev, ndev); 2359 ret = smc_request_attrib(pdev, ndev);
2356 if (ret) 2360 if (ret)
2357 goto out_release_io; 2361 goto out_release_io;
2358#if defined(CONFIG_SA1100_ASSABET) 2362#if defined(CONFIG_ASSABET_NEPONSET)
2359 neponset_ncr_set(NCR_ENET_OSC_EN); 2363 if (machine_is_assabet() && machine_has_neponset())
2364 neponset_ncr_set(NCR_ENET_OSC_EN);
2360#endif 2365#endif
2361 platform_set_drvdata(pdev, ndev); 2366 platform_set_drvdata(pdev, ndev);
2362 ret = smc_enable_device(pdev); 2367 ret = smc_enable_device(pdev);
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index be67baf5f677..3a18501d1068 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -39,14 +39,7 @@
39 * Define your architecture specific bus configuration parameters here. 39 * Define your architecture specific bus configuration parameters here.
40 */ 40 */
41 41
42#if defined(CONFIG_ARCH_LUBBOCK) ||\ 42#if defined(CONFIG_ARM)
43 defined(CONFIG_MACH_MAINSTONE) ||\
44 defined(CONFIG_MACH_ZYLONITE) ||\
45 defined(CONFIG_MACH_LITTLETON) ||\
46 defined(CONFIG_MACH_ZYLONITE2) ||\
47 defined(CONFIG_ARCH_VIPER) ||\
48 defined(CONFIG_MACH_STARGATE2) ||\
49 defined(CONFIG_ARCH_VERSATILE)
50 43
51#include <asm/mach-types.h> 44#include <asm/mach-types.h>
52 45
@@ -74,95 +67,8 @@
74/* We actually can't write halfwords properly if not word aligned */ 67/* We actually can't write halfwords properly if not word aligned */
75static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) 68static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
76{ 69{
77 if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) { 70 if ((machine_is_mainstone() || machine_is_stargate2() ||
78 unsigned int v = val << 16; 71 machine_is_pxa_idp()) && reg & 2) {
79 v |= readl(ioaddr + (reg & ~2)) & 0xffff;
80 writel(v, ioaddr + (reg & ~2));
81 } else {
82 writew(val, ioaddr + reg);
83 }
84}
85
86#elif defined(CONFIG_SA1100_PLEB)
87/* We can only do 16-bit reads and writes in the static memory space. */
88#define SMC_CAN_USE_8BIT 1
89#define SMC_CAN_USE_16BIT 1
90#define SMC_CAN_USE_32BIT 0
91#define SMC_IO_SHIFT 0
92#define SMC_NOWAIT 1
93
94#define SMC_inb(a, r) readb((a) + (r))
95#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
96#define SMC_inw(a, r) readw((a) + (r))
97#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
98#define SMC_outb(v, a, r) writeb(v, (a) + (r))
99#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
100#define SMC_outw(v, a, r) writew(v, (a) + (r))
101#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
102
103#define SMC_IRQ_FLAGS (-1)
104
105#elif defined(CONFIG_SA1100_ASSABET)
106
107#include <mach/neponset.h>
108
109/* We can only do 8-bit reads and writes in the static memory space. */
110#define SMC_CAN_USE_8BIT 1
111#define SMC_CAN_USE_16BIT 0
112#define SMC_CAN_USE_32BIT 0
113#define SMC_NOWAIT 1
114
115/* The first two address lines aren't connected... */
116#define SMC_IO_SHIFT 2
117
118#define SMC_inb(a, r) readb((a) + (r))
119#define SMC_outb(v, a, r) writeb(v, (a) + (r))
120#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
121#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
122#define SMC_IRQ_FLAGS (-1) /* from resource */
123
124#elif defined(CONFIG_MACH_LOGICPD_PXA270) || \
125 defined(CONFIG_MACH_NOMADIK_8815NHK)
126
127#define SMC_CAN_USE_8BIT 0
128#define SMC_CAN_USE_16BIT 1
129#define SMC_CAN_USE_32BIT 0
130#define SMC_IO_SHIFT 0
131#define SMC_NOWAIT 1
132
133#define SMC_inw(a, r) readw((a) + (r))
134#define SMC_outw(v, a, r) writew(v, (a) + (r))
135#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
136#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
137
138#elif defined(CONFIG_ARCH_INNOKOM) || \
139 defined(CONFIG_ARCH_PXA_IDP) || \
140 defined(CONFIG_ARCH_RAMSES) || \
141 defined(CONFIG_ARCH_PCM027)
142
143#define SMC_CAN_USE_8BIT 1
144#define SMC_CAN_USE_16BIT 1
145#define SMC_CAN_USE_32BIT 1
146#define SMC_IO_SHIFT 0
147#define SMC_NOWAIT 1
148#define SMC_USE_PXA_DMA 1
149
150#define SMC_inb(a, r) readb((a) + (r))
151#define SMC_inw(a, r) readw((a) + (r))
152#define SMC_inl(a, r) readl((a) + (r))
153#define SMC_outb(v, a, r) writeb(v, (a) + (r))
154#define SMC_outl(v, a, r) writel(v, (a) + (r))
155#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
156#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
157#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
158#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
159#define SMC_IRQ_FLAGS (-1) /* from resource */
160
161/* We actually can't write halfwords properly if not word aligned */
162static inline void
163SMC_outw(u16 val, void __iomem *ioaddr, int reg)
164{
165 if (reg & 2) {
166 unsigned int v = val << 16; 72 unsigned int v = val << 16;
167 v |= readl(ioaddr + (reg & ~2)) & 0xffff; 73 v |= readl(ioaddr + (reg & ~2)) & 0xffff;
168 writel(v, ioaddr + (reg & ~2)); 74 writel(v, ioaddr + (reg & ~2));
@@ -237,20 +143,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
237#define RPC_LSA_DEFAULT RPC_LED_100_10 143#define RPC_LSA_DEFAULT RPC_LED_100_10
238#define RPC_LSB_DEFAULT RPC_LED_TX_RX 144#define RPC_LSB_DEFAULT RPC_LED_TX_RX
239 145
240#elif defined(CONFIG_ARCH_MSM)
241
242#define SMC_CAN_USE_8BIT 0
243#define SMC_CAN_USE_16BIT 1
244#define SMC_CAN_USE_32BIT 0
245#define SMC_NOWAIT 1
246
247#define SMC_inw(a, r) readw((a) + (r))
248#define SMC_outw(v, a, r) writew(v, (a) + (r))
249#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
250#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
251
252#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH
253
254#elif defined(CONFIG_COLDFIRE) 146#elif defined(CONFIG_COLDFIRE)
255 147
256#define SMC_CAN_USE_8BIT 0 148#define SMC_CAN_USE_8BIT 0
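
The header consolidation collapses a stack of per-machine #elif blocks into a single CONFIG_ARM section, handling the unaligned-halfword quirk at run time via machine_is_*() checks instead of compile-time config symbols. The quirk itself is a read-modify-write of the containing 32-bit word, as in this condensed form of the SMC_outw() above:

        /* Halfword store on a bus that only does aligned 32-bit writes:
         * read the enclosing word, splice the 16-bit value into the
         * correct half, and write the whole word back.
         */
        static inline void smc_outw_rmw(u16 val, void __iomem *ioaddr, int reg)
        {
                if (reg & 2) {          /* register sits in the upper half */
                        unsigned int v = val << 16;
                        v |= readl(ioaddr + (reg & ~2)) & 0xffff;
                        writel(v, ioaddr + (reg & ~2));
                } else {
                        writew(val, ioaddr + reg);
                }
        }
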
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 55e89b3838f1..a0ea84fe6519 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -310,11 +310,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
310 spin_lock_irqsave(&priv->lock, flags); 310 spin_lock_irqsave(&priv->lock, flags);
311 if (!priv->eee_active) { 311 if (!priv->eee_active) {
312 priv->eee_active = 1; 312 priv->eee_active = 1;
313 init_timer(&priv->eee_ctrl_timer); 313 setup_timer(&priv->eee_ctrl_timer,
314 priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer; 314 stmmac_eee_ctrl_timer,
315 priv->eee_ctrl_timer.data = (unsigned long)priv; 315 (unsigned long)priv);
316 priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer); 316 mod_timer(&priv->eee_ctrl_timer,
317 add_timer(&priv->eee_ctrl_timer); 317 STMMAC_LPI_T(eee_timer));
318 318
319 priv->hw->mac->set_eee_timer(priv->hw, 319 priv->hw->mac->set_eee_timer(priv->hw,
320 STMMAC_DEFAULT_LIT_LS, 320 STMMAC_DEFAULT_LIT_LS,
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 4b51f903fb73..0c5842aeb807 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6989,10 +6989,10 @@ static int niu_class_to_ethflow(u64 class, int *flow_type)
6989 *flow_type = IP_USER_FLOW; 6989 *flow_type = IP_USER_FLOW;
6990 break; 6990 break;
6991 default: 6991 default:
6992 return 0; 6992 return -EINVAL;
6993 } 6993 }
6994 6994
6995 return 1; 6995 return 0;
6996} 6996}
6997 6997
6998static int niu_ethflow_to_class(int flow_type, u64 *class) 6998static int niu_ethflow_to_class(int flow_type, u64 *class)
@@ -7198,11 +7198,9 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
7198 class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> 7198 class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
7199 TCAM_V4KEY0_CLASS_CODE_SHIFT; 7199 TCAM_V4KEY0_CLASS_CODE_SHIFT;
7200 ret = niu_class_to_ethflow(class, &fsp->flow_type); 7200 ret = niu_class_to_ethflow(class, &fsp->flow_type);
7201
7202 if (ret < 0) { 7201 if (ret < 0) {
7203 netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n", 7202 netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
7204 parent->index); 7203 parent->index);
7205 ret = -EINVAL;
7206 goto out; 7204 goto out;
7207 } 7205 }
7208 7206
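
Both niu hunks convert niu_class_to_ethflow() to the usual kernel convention of returning 0 on success and a negative errno on failure, which lets the caller drop its separate ret-to-EINVAL remapping:

        /* Caller after the conversion: the errno propagates as-is. */
        ret = niu_class_to_ethflow(class, &fsp->flow_type);
        if (ret < 0) {
                netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
                            parent->index);
                goto out;       /* ret already holds -EINVAL */
        }
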
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 3bc992cd70b7..f6a71092e135 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -50,7 +50,7 @@ config TI_DAVINCI_CPDMA
50 will be called davinci_cpdma. This is recommended. 50 will be called davinci_cpdma. This is recommended.
51 51
52config TI_CPSW_PHY_SEL 52config TI_CPSW_PHY_SEL
53 boolean "TI CPSW Switch Phy sel Support" 53 bool "TI CPSW Switch Phy sel Support"
54 depends on TI_CPSW 54 depends on TI_CPSW
55 ---help--- 55 ---help---
56 This driver supports configuring of the phy mode connected to 56 This driver supports configuring of the phy mode connected to
@@ -77,7 +77,7 @@ config TI_CPSW
77 will be called cpsw. 77 will be called cpsw.
78 78
79config TI_CPTS 79config TI_CPTS
80 boolean "TI Common Platform Time Sync (CPTS) Support" 80 bool "TI Common Platform Time Sync (CPTS) Support"
81 depends on TI_CPSW 81 depends on TI_CPSW
82 select PTP_1588_CLOCK 82 select PTP_1588_CLOCK
83 ---help--- 83 ---help---
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7d8dd0d2182e..a1bbaf6352ba 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1103,7 +1103,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
1103 cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, 1103 cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
1104 port_mask, ALE_VLAN, slave->port_vlan, 0); 1104 port_mask, ALE_VLAN, slave->port_vlan, 0);
1105 cpsw_ale_add_ucast(priv->ale, priv->mac_addr, 1105 cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
1106 priv->host_port, ALE_VLAN, slave->port_vlan); 1106 priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan);
1107} 1107}
1108 1108
1109static void soft_reset_slave(struct cpsw_slave *slave) 1109static void soft_reset_slave(struct cpsw_slave *slave)
@@ -2466,6 +2466,7 @@ static int cpsw_remove(struct platform_device *pdev)
2466 return 0; 2466 return 0;
2467} 2467}
2468 2468
2469#ifdef CONFIG_PM_SLEEP
2469static int cpsw_suspend(struct device *dev) 2470static int cpsw_suspend(struct device *dev)
2470{ 2471{
2471 struct platform_device *pdev = to_platform_device(dev); 2472 struct platform_device *pdev = to_platform_device(dev);
@@ -2518,11 +2519,9 @@ static int cpsw_resume(struct device *dev)
2518 } 2519 }
2519 return 0; 2520 return 0;
2520} 2521}
2522#endif
2521 2523
2522static const struct dev_pm_ops cpsw_pm_ops = { 2524static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
2523 .suspend = cpsw_suspend,
2524 .resume = cpsw_resume,
2525};
2526 2525
2527static const struct of_device_id cpsw_of_mtable[] = { 2526static const struct of_device_id cpsw_of_mtable[] = {
2528 { .compatible = "ti,cpsw", }, 2527 { .compatible = "ti,cpsw", },
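
The cpsw and davinci_mdio hunks share one recipe: guard the sleep callbacks with #ifdef CONFIG_PM_SLEEP and let SIMPLE_DEV_PM_OPS() (or SET_LATE_SYSTEM_SLEEP_PM_OPS()) expand to an empty dev_pm_ops when sleep support is compiled out, avoiding both dead code and "defined but not used" warnings. A minimal sketch for a hypothetical driver:

        #ifdef CONFIG_PM_SLEEP
        static int foo_suspend(struct device *dev)
        {
                /* quiesce the hardware */
                return 0;
        }

        static int foo_resume(struct device *dev)
        {
                /* bring the hardware back up */
                return 0;
        }
        #endif

        /* Empty when CONFIG_PM_SLEEP is off; wired up otherwise. */
        static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
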
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 98655b44b97e..c00084d689f3 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -423,6 +423,7 @@ static int davinci_mdio_remove(struct platform_device *pdev)
423 return 0; 423 return 0;
424} 424}
425 425
426#ifdef CONFIG_PM_SLEEP
426static int davinci_mdio_suspend(struct device *dev) 427static int davinci_mdio_suspend(struct device *dev)
427{ 428{
428 struct davinci_mdio_data *data = dev_get_drvdata(dev); 429 struct davinci_mdio_data *data = dev_get_drvdata(dev);
@@ -464,10 +465,10 @@ static int davinci_mdio_resume(struct device *dev)
464 465
465 return 0; 466 return 0;
466} 467}
468#endif
467 469
468static const struct dev_pm_ops davinci_mdio_pm_ops = { 470static const struct dev_pm_ops davinci_mdio_pm_ops = {
469 .suspend_late = davinci_mdio_suspend, 471 SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
470 .resume_early = davinci_mdio_resume,
471}; 472};
472 473
473#if IS_ENABLED(CONFIG_OF) 474#if IS_ENABLED(CONFIG_OF)
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 44ff8d7c64a5..5138407941cf 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -938,7 +938,7 @@ static void eth_set_mcast_list(struct net_device *dev)
938 int i; 938 int i;
939 static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; 939 static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
940 940
941 if (dev->flags & IFF_ALLMULTI) { 941 if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
942 for (i = 0; i < ETH_ALEN; i++) { 942 for (i = 0; i < ETH_ALEN; i++) {
943 __raw_writel(allmulti[i], &port->regs->mcast_addr[i]); 943 __raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
944 __raw_writel(allmulti[i], &port->regs->mcast_mask[i]); 944 __raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 1e51c6bf3ae1..8362aef0c15e 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -654,11 +654,14 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
654 } /* else everything is zero */ 654 } /* else everything is zero */
655} 655}
656 656
657/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
658#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
659
657/* Get packet from user space buffer */ 660/* Get packet from user space buffer */
658static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, 661static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
659 struct iov_iter *from, int noblock) 662 struct iov_iter *from, int noblock)
660{ 663{
661 int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN); 664 int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
662 struct sk_buff *skb; 665 struct sk_buff *skb;
663 struct macvlan_dev *vlan; 666 struct macvlan_dev *vlan;
664 unsigned long total_len = iov_iter_count(from); 667 unsigned long total_len = iov_iter_count(from);
@@ -722,7 +725,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
722 linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len); 725 linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
723 } 726 }
724 727
725 skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen, 728 skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
726 linear, noblock, &err); 729 linear, noblock, &err);
727 if (!skb) 730 if (!skb)
728 goto err; 731 goto err;
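
MACVTAP_RESERVE replaces NET_IP_ALIGN with headroom derived from the neighbour code's alignment rule rather than assumed. Paraphrasing the definitions in include/linux/netdevice.h:

        /* HH_DATA_MOD is 16, and (paraphrased)
         *   HH_DATA_OFF(len) = HH_DATA_MOD - (((len - 1) & (HH_DATA_MOD - 1)) + 1)
         * so HH_DATA_OFF(ETH_HLEN) = 16 - ((13 & 15) + 1) = 2: two bytes of
         * reserve make the 14-byte Ethernet header end on a 16-byte
         * boundary, which the hard-header cache assumes.
         */
        #define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
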
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index 9e3af54c9010..32efbd48f326 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -92,6 +92,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
92#define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate" 92#define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
93#define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew" 93#define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
94#define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp" 94#define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp"
95#define XGBE_PHY_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
96#define XGBE_PHY_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
95 97
96#define XGBE_PHY_SPEEDS 3 98#define XGBE_PHY_SPEEDS 3
97#define XGBE_PHY_SPEED_1000 0 99#define XGBE_PHY_SPEED_1000 0
@@ -177,10 +179,12 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
177#define SPEED_10000_BLWC 0 179#define SPEED_10000_BLWC 0
178#define SPEED_10000_CDR 0x7 180#define SPEED_10000_CDR 0x7
179#define SPEED_10000_PLL 0x1 181#define SPEED_10000_PLL 0x1
180#define SPEED_10000_PQ 0x1e 182#define SPEED_10000_PQ 0x12
181#define SPEED_10000_RATE 0x0 183#define SPEED_10000_RATE 0x0
182#define SPEED_10000_TXAMP 0xa 184#define SPEED_10000_TXAMP 0xa
183#define SPEED_10000_WORD 0x7 185#define SPEED_10000_WORD 0x7
186#define SPEED_10000_DFE_TAP_CONFIG 0x1
187#define SPEED_10000_DFE_TAP_ENABLE 0x7f
184 188
185#define SPEED_2500_BLWC 1 189#define SPEED_2500_BLWC 1
186#define SPEED_2500_CDR 0x2 190#define SPEED_2500_CDR 0x2
@@ -189,6 +193,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
189#define SPEED_2500_RATE 0x1 193#define SPEED_2500_RATE 0x1
190#define SPEED_2500_TXAMP 0xf 194#define SPEED_2500_TXAMP 0xf
191#define SPEED_2500_WORD 0x1 195#define SPEED_2500_WORD 0x1
196#define SPEED_2500_DFE_TAP_CONFIG 0x3
197#define SPEED_2500_DFE_TAP_ENABLE 0x0
192 198
193#define SPEED_1000_BLWC 1 199#define SPEED_1000_BLWC 1
194#define SPEED_1000_CDR 0x2 200#define SPEED_1000_CDR 0x2
@@ -197,16 +203,25 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
197#define SPEED_1000_RATE 0x3 203#define SPEED_1000_RATE 0x3
198#define SPEED_1000_TXAMP 0xf 204#define SPEED_1000_TXAMP 0xf
199#define SPEED_1000_WORD 0x1 205#define SPEED_1000_WORD 0x1
206#define SPEED_1000_DFE_TAP_CONFIG 0x3
207#define SPEED_1000_DFE_TAP_ENABLE 0x0
200 208
201/* SerDes RxTx register offsets */ 209/* SerDes RxTx register offsets */
210#define RXTX_REG6 0x0018
202#define RXTX_REG20 0x0050 211#define RXTX_REG20 0x0050
212#define RXTX_REG22 0x0058
203#define RXTX_REG114 0x01c8 213#define RXTX_REG114 0x01c8
214#define RXTX_REG129 0x0204
204 215
205/* SerDes RxTx register entry bit positions and sizes */ 216/* SerDes RxTx register entry bit positions and sizes */
217#define RXTX_REG6_RESETB_RXD_INDEX 8
218#define RXTX_REG6_RESETB_RXD_WIDTH 1
206#define RXTX_REG20_BLWC_ENA_INDEX 2 219#define RXTX_REG20_BLWC_ENA_INDEX 2
207#define RXTX_REG20_BLWC_ENA_WIDTH 1 220#define RXTX_REG20_BLWC_ENA_WIDTH 1
208#define RXTX_REG114_PQ_REG_INDEX 9 221#define RXTX_REG114_PQ_REG_INDEX 9
209#define RXTX_REG114_PQ_REG_WIDTH 7 222#define RXTX_REG114_PQ_REG_WIDTH 7
223#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
224#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
210 225
211/* Bit setting and getting macros 226/* Bit setting and getting macros
212 * The get macro will extract the current bit field value from within 227 * The get macro will extract the current bit field value from within
@@ -333,6 +348,18 @@ static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
333 SPEED_10000_TXAMP, 348 SPEED_10000_TXAMP,
334}; 349};
335 350
351static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
352 SPEED_1000_DFE_TAP_CONFIG,
353 SPEED_2500_DFE_TAP_CONFIG,
354 SPEED_10000_DFE_TAP_CONFIG,
355};
356
357static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
358 SPEED_1000_DFE_TAP_ENABLE,
359 SPEED_2500_DFE_TAP_ENABLE,
360 SPEED_10000_DFE_TAP_ENABLE,
361};
362
336enum amd_xgbe_phy_an { 363enum amd_xgbe_phy_an {
337 AMD_XGBE_AN_READY = 0, 364 AMD_XGBE_AN_READY = 0,
338 AMD_XGBE_AN_PAGE_RECEIVED, 365 AMD_XGBE_AN_PAGE_RECEIVED,
@@ -393,6 +420,8 @@ struct amd_xgbe_phy_priv {
393 u32 serdes_cdr_rate[XGBE_PHY_SPEEDS]; 420 u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
394 u32 serdes_pq_skew[XGBE_PHY_SPEEDS]; 421 u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
395 u32 serdes_tx_amp[XGBE_PHY_SPEEDS]; 422 u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
423 u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
424 u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];
396 425
397 /* Auto-negotiation state machine support */ 426 /* Auto-negotiation state machine support */
398 struct mutex an_mutex; 427 struct mutex an_mutex;
@@ -481,11 +510,16 @@ static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
481 status = XSIR0_IOREAD(priv, SIR0_STATUS); 510 status = XSIR0_IOREAD(priv, SIR0_STATUS);
482 if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) && 511 if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
483 XSIR_GET_BITS(status, SIR0_STATUS, TX_READY)) 512 XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
484 return; 513 goto rx_reset;
485 } 514 }
486 515
487 netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n", 516 netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
488 status); 517 status);
518
519rx_reset:
520 /* Perform Rx reset for the DFE changes */
521 XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
522 XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
489} 523}
490 524
491static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev) 525static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
@@ -534,6 +568,10 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
534 priv->serdes_blwc[XGBE_PHY_SPEED_10000]); 568 priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
535 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, 569 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
536 priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]); 570 priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
571 XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
572 priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
573 XRXTX_IOWRITE(priv, RXTX_REG22,
574 priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);
537 575
538 amd_xgbe_phy_serdes_complete_ratechange(phydev); 576 amd_xgbe_phy_serdes_complete_ratechange(phydev);
539 577
@@ -586,6 +624,10 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
586 priv->serdes_blwc[XGBE_PHY_SPEED_2500]); 624 priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
587 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, 625 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
588 priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]); 626 priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
627 XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
628 priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
629 XRXTX_IOWRITE(priv, RXTX_REG22,
630 priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);
589 631
590 amd_xgbe_phy_serdes_complete_ratechange(phydev); 632 amd_xgbe_phy_serdes_complete_ratechange(phydev);
591 633
@@ -638,6 +680,10 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
638 priv->serdes_blwc[XGBE_PHY_SPEED_1000]); 680 priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
639 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, 681 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
640 priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]); 682 priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
683 XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
684 priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
685 XRXTX_IOWRITE(priv, RXTX_REG22,
686 priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);
641 687
642 amd_xgbe_phy_serdes_complete_ratechange(phydev); 688 amd_xgbe_phy_serdes_complete_ratechange(phydev);
643 689
@@ -1668,6 +1714,38 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
1668 sizeof(priv->serdes_tx_amp)); 1714 sizeof(priv->serdes_tx_amp));
1669 } 1715 }
1670 1716
1717 if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
1718 ret = device_property_read_u32_array(phy_dev,
1719 XGBE_PHY_DFE_CFG_PROPERTY,
1720 priv->serdes_dfe_tap_cfg,
1721 XGBE_PHY_SPEEDS);
1722 if (ret) {
1723 dev_err(dev, "invalid %s property\n",
1724 XGBE_PHY_DFE_CFG_PROPERTY);
1725 goto err_sir1;
1726 }
1727 } else {
1728 memcpy(priv->serdes_dfe_tap_cfg,
1729 amd_xgbe_phy_serdes_dfe_tap_cfg,
1730 sizeof(priv->serdes_dfe_tap_cfg));
1731 }
1732
1733 if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
1734 ret = device_property_read_u32_array(phy_dev,
1735 XGBE_PHY_DFE_ENA_PROPERTY,
1736 priv->serdes_dfe_tap_ena,
1737 XGBE_PHY_SPEEDS);
1738 if (ret) {
1739 dev_err(dev, "invalid %s property\n",
1740 XGBE_PHY_DFE_ENA_PROPERTY);
1741 goto err_sir1;
1742 }
1743 } else {
1744 memcpy(priv->serdes_dfe_tap_ena,
1745 amd_xgbe_phy_serdes_dfe_tap_ena,
1746 sizeof(priv->serdes_dfe_tap_ena));
1747 }
1748
1671 phydev->priv = priv; 1749 phydev->priv = priv;
1672 1750
1673 if (!priv->adev || acpi_disabled) 1751 if (!priv->adev || acpi_disabled)
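
The probe additions follow the standard pattern for optional firmware properties: a present property must parse, while an absent one falls back to built-in per-speed defaults. Condensed from the DFE tap-config hunk above:

        if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
                ret = device_property_read_u32_array(phy_dev,
                                                     XGBE_PHY_DFE_CFG_PROPERTY,
                                                     priv->serdes_dfe_tap_cfg,
                                                     XGBE_PHY_SPEEDS);
                if (ret)
                        goto err_sir1;  /* present but malformed: fail probe */
        } else {
                /* absent: fall back to the compiled-in defaults */
                memcpy(priv->serdes_dfe_tap_cfg,
                       amd_xgbe_phy_serdes_dfe_tap_cfg,
                       sizeof(priv->serdes_dfe_tap_cfg));
        }
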
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index cdcac6aa4260..52cd8db2c57d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -236,6 +236,25 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
236} 236}
237 237
238/** 238/**
239 * phy_check_valid - check if there is a valid PHY setting which matches
240 * speed, duplex, and feature mask
241 * @speed: speed to match
242 * @duplex: duplex to match
243 * @features: A mask of the valid settings
244 *
245 * Description: Returns true if there is a valid setting, false otherwise.
246 */
247static inline bool phy_check_valid(int speed, int duplex, u32 features)
248{
249 unsigned int idx;
250
251 idx = phy_find_valid(phy_find_setting(speed, duplex), features);
252
253 return settings[idx].speed == speed && settings[idx].duplex == duplex &&
254 (settings[idx].setting & features);
255}
256
257/**
239 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex 258 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
240 * @phydev: the target phy_device struct 259 * @phydev: the target phy_device struct
241 * 260 *
@@ -1045,7 +1064,6 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1045 int eee_lp, eee_cap, eee_adv; 1064 int eee_lp, eee_cap, eee_adv;
1046 u32 lp, cap, adv; 1065 u32 lp, cap, adv;
1047 int status; 1066 int status;
1048 unsigned int idx;
1049 1067
1050 /* Read phy status to properly get the right settings */ 1068 /* Read phy status to properly get the right settings */
1051 status = phy_read_status(phydev); 1069 status = phy_read_status(phydev);
@@ -1077,8 +1095,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1077 1095
1078 adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); 1096 adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
1079 lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); 1097 lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
1080 idx = phy_find_setting(phydev->speed, phydev->duplex); 1098 if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
1081 if (!(lp & adv & settings[idx].setting))
1082 goto eee_exit_err; 1099 goto eee_exit_err;
1083 1100
1084 if (clk_stop_enable) { 1101 if (clk_stop_enable) {
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index a7d163bf5bbb..9d3366f7c9ad 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -43,9 +43,7 @@
43 43
44static struct team_port *team_port_get_rcu(const struct net_device *dev) 44static struct team_port *team_port_get_rcu(const struct net_device *dev)
45{ 45{
46 struct team_port *port = rcu_dereference(dev->rx_handler_data); 46 return rcu_dereference(dev->rx_handler_data);
47
48 return team_port_exists(dev) ? port : NULL;
49} 47}
50 48
51static struct team_port *team_port_get_rtnl(const struct net_device *dev) 49static struct team_port *team_port_get_rtnl(const struct net_device *dev)
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 37eed4d84e9c..7ba8d0885f12 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -161,6 +161,7 @@ config USB_NET_AX8817X
161 * Linksys USB200M 161 * Linksys USB200M
162 * Netgear FA120 162 * Netgear FA120
163 * Sitecom LN-029 163 * Sitecom LN-029
164 * Sitecom LN-028
164 * Intellinet USB 2.0 Ethernet 165 * Intellinet USB 2.0 Ethernet
165 * ST Lab USB 2.0 Ethernet 166 * ST Lab USB 2.0 Ethernet
166 * TrendNet TU2-ET100 167 * TrendNet TU2-ET100
@@ -397,14 +398,14 @@ config USB_NET_CDC_SUBSET
397 not generally have permanently assigned Ethernet addresses. 398 not generally have permanently assigned Ethernet addresses.
398 399
399config USB_ALI_M5632 400config USB_ALI_M5632
400 boolean "ALi M5632 based 'USB 2.0 Data Link' cables" 401 bool "ALi M5632 based 'USB 2.0 Data Link' cables"
401 depends on USB_NET_CDC_SUBSET 402 depends on USB_NET_CDC_SUBSET
402 help 403 help
403 Choose this option if you're using a host-to-host cable 404 Choose this option if you're using a host-to-host cable
404 based on this design, which supports USB 2.0 high speed. 405 based on this design, which supports USB 2.0 high speed.
405 406
406config USB_AN2720 407config USB_AN2720
407 boolean "AnchorChips 2720 based cables (Xircom PGUNET, ...)" 408 bool "AnchorChips 2720 based cables (Xircom PGUNET, ...)"
408 depends on USB_NET_CDC_SUBSET 409 depends on USB_NET_CDC_SUBSET
409 help 410 help
410 Choose this option if you're using a host-to-host cable 411 Choose this option if you're using a host-to-host cable
@@ -412,7 +413,7 @@ config USB_AN2720
412 Cypress brand. 413 Cypress brand.
413 414
414config USB_BELKIN 415config USB_BELKIN
415 boolean "eTEK based host-to-host cables (Advance, Belkin, ...)" 416 bool "eTEK based host-to-host cables (Advance, Belkin, ...)"
416 depends on USB_NET_CDC_SUBSET 417 depends on USB_NET_CDC_SUBSET
417 default y 418 default y
418 help 419 help
@@ -421,7 +422,7 @@ config USB_BELKIN
421 microcontroller, with LEDs that indicate traffic. 422 microcontroller, with LEDs that indicate traffic.
422 423
423config USB_ARMLINUX 424config USB_ARMLINUX
424 boolean "Embedded ARM Linux links (iPaq, ...)" 425 bool "Embedded ARM Linux links (iPaq, ...)"
425 depends on USB_NET_CDC_SUBSET 426 depends on USB_NET_CDC_SUBSET
426 default y 427 default y
427 help 428 help
@@ -438,14 +439,14 @@ config USB_ARMLINUX
438 this simpler protocol by installing a different kernel. 439 this simpler protocol by installing a different kernel.
439 440
440config USB_EPSON2888 441config USB_EPSON2888
441 boolean "Epson 2888 based firmware (DEVELOPMENT)" 442 bool "Epson 2888 based firmware (DEVELOPMENT)"
442 depends on USB_NET_CDC_SUBSET 443 depends on USB_NET_CDC_SUBSET
443 help 444 help
444 Choose this option to support the usb networking links used 445 Choose this option to support the usb networking links used
445 by some sample firmware from Epson. 446 by some sample firmware from Epson.
446 447
447config USB_KC2190 448config USB_KC2190
448 boolean "KT Technology KC2190 based cables (InstaNet)" 449 bool "KT Technology KC2190 based cables (InstaNet)"
449 depends on USB_NET_CDC_SUBSET 450 depends on USB_NET_CDC_SUBSET
450 help 451 help
451 Choose this option if you're using a host-to-host cable 452 Choose this option if you're using a host-to-host cable
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index bf49792062a2..1173a24feda3 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -979,6 +979,10 @@ static const struct usb_device_id products [] = {
979 USB_DEVICE (0x0df6, 0x0056), 979 USB_DEVICE (0x0df6, 0x0056),
980 .driver_info = (unsigned long) &ax88178_info, 980 .driver_info = (unsigned long) &ax88178_info,
981}, { 981}, {
982 // Sitecom LN-028 "USB 2.0 10/100/1000 Ethernet adapter"
983 USB_DEVICE (0x0df6, 0x061c),
984 .driver_info = (unsigned long) &ax88178_info,
985}, {
982 // corega FEther USB2-TX 986 // corega FEther USB2-TX
983 USB_DEVICE (0x07aa, 0x0017), 987 USB_DEVICE (0x07aa, 0x0017),
984 .driver_info = (unsigned long) &ax8817x_info, 988 .driver_info = (unsigned long) &ax8817x_info,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 3c8dfe5e46ed..111d907e0c11 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1597,7 +1597,7 @@ hso_wait_modem_status(struct hso_serial *serial, unsigned long arg)
1597 } 1597 }
1598 cprev = cnow; 1598 cprev = cnow;
1599 } 1599 }
1600 current->state = TASK_RUNNING; 1600 __set_current_state(TASK_RUNNING);
1601 remove_wait_queue(&tiocmget->waitq, &wait); 1601 remove_wait_queue(&tiocmget->waitq, &wait);
1602 1602
1603 return ret; 1603 return ret;
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 3d18bb0eee85..1bfe0fcaccf5 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -134,6 +134,11 @@ static const struct usb_device_id products [] = {
134}, { 134}, {
135 USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */ 135 USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */
136 .driver_info = (unsigned long) &prolific_info, 136 .driver_info = (unsigned long) &prolific_info,
137}, {
138 USB_DEVICE(0x3923, 0x7825), /* National Instruments USB
139 * Host-to-Host Cable
140 */
141 .driver_info = (unsigned long) &prolific_info,
137}, 142},
138 143
139 { }, // END 144 { }, // END
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 110a2cf67244..f1ff3666f090 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1710,6 +1710,12 @@ static int virtnet_probe(struct virtio_device *vdev)
1710 struct virtnet_info *vi; 1710 struct virtnet_info *vi;
1711 u16 max_queue_pairs; 1711 u16 max_queue_pairs;
1712 1712
1713 if (!vdev->config->get) {
1714 dev_err(&vdev->dev, "%s failure: config access disabled\n",
1715 __func__);
1716 return -EINVAL;
1717 }
1718
1713 if (!virtnet_validate_features(vdev)) 1719 if (!virtnet_validate_features(vdev))
1714 return -EINVAL; 1720 return -EINVAL;
1715 1721
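
The new guard fails virtnet_probe() early when the transport offers no config-space reads, since every later virtio_cread*() would otherwise call a NULL ->get method:

        /* Reject transports without config space before negotiating features. */
        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }
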
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 83c39e2858bf..88d121d43c08 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -806,21 +806,21 @@ static ssize_t cosa_read(struct file *file,
806 spin_lock_irqsave(&cosa->lock, flags); 806 spin_lock_irqsave(&cosa->lock, flags);
807 add_wait_queue(&chan->rxwaitq, &wait); 807 add_wait_queue(&chan->rxwaitq, &wait);
808 while (!chan->rx_status) { 808 while (!chan->rx_status) {
809 current->state = TASK_INTERRUPTIBLE; 809 set_current_state(TASK_INTERRUPTIBLE);
810 spin_unlock_irqrestore(&cosa->lock, flags); 810 spin_unlock_irqrestore(&cosa->lock, flags);
811 schedule(); 811 schedule();
812 spin_lock_irqsave(&cosa->lock, flags); 812 spin_lock_irqsave(&cosa->lock, flags);
813 if (signal_pending(current) && chan->rx_status == 0) { 813 if (signal_pending(current) && chan->rx_status == 0) {
814 chan->rx_status = 1; 814 chan->rx_status = 1;
815 remove_wait_queue(&chan->rxwaitq, &wait); 815 remove_wait_queue(&chan->rxwaitq, &wait);
816 current->state = TASK_RUNNING; 816 __set_current_state(TASK_RUNNING);
817 spin_unlock_irqrestore(&cosa->lock, flags); 817 spin_unlock_irqrestore(&cosa->lock, flags);
818 mutex_unlock(&chan->rlock); 818 mutex_unlock(&chan->rlock);
819 return -ERESTARTSYS; 819 return -ERESTARTSYS;
820 } 820 }
821 } 821 }
822 remove_wait_queue(&chan->rxwaitq, &wait); 822 remove_wait_queue(&chan->rxwaitq, &wait);
823 current->state = TASK_RUNNING; 823 __set_current_state(TASK_RUNNING);
824 kbuf = chan->rxdata; 824 kbuf = chan->rxdata;
825 count = chan->rxsize; 825 count = chan->rxsize;
826 spin_unlock_irqrestore(&cosa->lock, flags); 826 spin_unlock_irqrestore(&cosa->lock, flags);
@@ -890,14 +890,14 @@ static ssize_t cosa_write(struct file *file,
890 spin_lock_irqsave(&cosa->lock, flags); 890 spin_lock_irqsave(&cosa->lock, flags);
891 add_wait_queue(&chan->txwaitq, &wait); 891 add_wait_queue(&chan->txwaitq, &wait);
892 while (!chan->tx_status) { 892 while (!chan->tx_status) {
893 current->state = TASK_INTERRUPTIBLE; 893 set_current_state(TASK_INTERRUPTIBLE);
894 spin_unlock_irqrestore(&cosa->lock, flags); 894 spin_unlock_irqrestore(&cosa->lock, flags);
895 schedule(); 895 schedule();
896 spin_lock_irqsave(&cosa->lock, flags); 896 spin_lock_irqsave(&cosa->lock, flags);
897 if (signal_pending(current) && chan->tx_status == 0) { 897 if (signal_pending(current) && chan->tx_status == 0) {
898 chan->tx_status = 1; 898 chan->tx_status = 1;
899 remove_wait_queue(&chan->txwaitq, &wait); 899 remove_wait_queue(&chan->txwaitq, &wait);
900 current->state = TASK_RUNNING; 900 __set_current_state(TASK_RUNNING);
901 chan->tx_status = 1; 901 chan->tx_status = 1;
902 spin_unlock_irqrestore(&cosa->lock, flags); 902 spin_unlock_irqrestore(&cosa->lock, flags);
903 up(&chan->wsem); 903 up(&chan->wsem);
@@ -905,7 +905,7 @@ static ssize_t cosa_write(struct file *file,
905 } 905 }
906 } 906 }
907 remove_wait_queue(&chan->txwaitq, &wait); 907 remove_wait_queue(&chan->txwaitq, &wait);
908 current->state = TASK_RUNNING; 908 __set_current_state(TASK_RUNNING);
909 up(&chan->wsem); 909 up(&chan->wsem);
910 spin_unlock_irqrestore(&cosa->lock, flags); 910 spin_unlock_irqrestore(&cosa->lock, flags);
911 kfree(kbuf); 911 kfree(kbuf);
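
The hso and cosa changes replace direct stores to current->state with the proper helpers: set_current_state() includes a memory barrier so the state write cannot be reordered against the subsequent condition check, while __set_current_state() is the barrier-free variant for cases like returning to TASK_RUNNING where ordering does not matter. The generic wait-loop shape (condition, waitq, and ret stand in for the driver's own state):

        add_wait_queue(&waitq, &wait);
        while (!condition) {
                set_current_state(TASK_INTERRUPTIBLE); /* barrier vs. wakeup */
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                schedule();
        }
        __set_current_state(TASK_RUNNING);      /* no barrier needed */
        remove_wait_queue(&waitq, &wait);
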
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index c1947c5915eb..d56b7859a437 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -946,7 +946,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
946 goto nla_put_failure; 946 goto nla_put_failure;
947 947
948 genlmsg_end(skb, msg_head); 948 genlmsg_end(skb, msg_head);
949 genlmsg_unicast(&init_net, skb, dst_portid); 949 if (genlmsg_unicast(&init_net, skb, dst_portid))
950 goto err_free_txskb;
950 951
951 /* Enqueue the packet */ 952 /* Enqueue the packet */
952 skb_queue_tail(&data->pending, my_skb); 953 skb_queue_tail(&data->pending, my_skb);
@@ -955,6 +956,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
955 return; 956 return;
956 957
957nla_put_failure: 958nla_put_failure:
959 nlmsg_free(skb);
960err_free_txskb:
958 printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__); 961 printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
959 ieee80211_free_txskb(hw, my_skb); 962 ieee80211_free_txskb(hw, my_skb);
960 data->tx_failed++; 963 data->tx_failed++;
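
The fix distinguishes two failure points with stacked labels: an attribute-construction failure must free the partially built netlink skb before sharing the common cleanup, whereas a failed genlmsg_unicast() has already consumed that skb and jumps past the nlmsg_free(). In miniature (ATTR and val are placeholders):

        if (nla_put_u32(skb, ATTR, val))
                goto nla_put_failure;
        genlmsg_end(skb, msg_head);
        if (genlmsg_unicast(&init_net, skb, dst_portid))
                goto err_free_txskb;    /* netlink skb already consumed */
        return;

nla_put_failure:
        nlmsg_free(skb);                /* free the half-built message */
err_free_txskb:
        ieee80211_free_txskb(hw, my_skb);
        data->tx_failed++;
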
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 006b8bcb2e31..2b4ef256c6b9 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -243,14 +243,14 @@ config RT2X00_LIB
243 select AVERAGE 243 select AVERAGE
244 244
245config RT2X00_LIB_FIRMWARE 245config RT2X00_LIB_FIRMWARE
246 boolean 246 bool
247 select FW_LOADER 247 select FW_LOADER
248 248
249config RT2X00_LIB_CRYPTO 249config RT2X00_LIB_CRYPTO
250 boolean 250 bool
251 251
252config RT2X00_LIB_LEDS 252config RT2X00_LIB_LEDS
253 boolean 253 bool
254 default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n) 254 default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n)
255 255
256config RT2X00_LIB_DEBUGFS 256config RT2X00_LIB_DEBUGFS
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f7a31d2cb3f1..c4d68d768408 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -655,9 +655,15 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
655 unsigned long flags; 655 unsigned long flags;
656 656
657 do { 657 do {
658 int notify;
659
658 spin_lock_irqsave(&queue->response_lock, flags); 660 spin_lock_irqsave(&queue->response_lock, flags);
659 make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); 661 make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
662 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
660 spin_unlock_irqrestore(&queue->response_lock, flags); 663 spin_unlock_irqrestore(&queue->response_lock, flags);
664 if (notify)
665 notify_remote_via_irq(queue->tx_irq);
666
661 if (cons == end) 667 if (cons == end)
662 break; 668 break;
663 txp = RING_GET_REQUEST(&queue->tx, cons++); 669 txp = RING_GET_REQUEST(&queue->tx, cons++);
@@ -1649,17 +1655,28 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1649{ 1655{
1650 struct pending_tx_info *pending_tx_info; 1656 struct pending_tx_info *pending_tx_info;
1651 pending_ring_idx_t index; 1657 pending_ring_idx_t index;
1658 int notify;
1652 unsigned long flags; 1659 unsigned long flags;
1653 1660
1654 pending_tx_info = &queue->pending_tx_info[pending_idx]; 1661 pending_tx_info = &queue->pending_tx_info[pending_idx];
1662
1655 spin_lock_irqsave(&queue->response_lock, flags); 1663 spin_lock_irqsave(&queue->response_lock, flags);
1664
1656 make_tx_response(queue, &pending_tx_info->req, status); 1665 make_tx_response(queue, &pending_tx_info->req, status);
1657 index = pending_index(queue->pending_prod); 1666
 1667 /* Release the pending index before pushing the Tx response so
 1668 * it's available before a new Tx request is pushed by the
1669 * frontend.
1670 */
1671 index = pending_index(queue->pending_prod++);
1658 queue->pending_ring[index] = pending_idx; 1672 queue->pending_ring[index] = pending_idx;
1659 /* TX shouldn't use the index before we give it back here */ 1673
1660 mb(); 1674 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1661 queue->pending_prod++; 1675
1662 spin_unlock_irqrestore(&queue->response_lock, flags); 1676 spin_unlock_irqrestore(&queue->response_lock, flags);
1677
1678 if (notify)
1679 notify_remote_via_irq(queue->tx_irq);
1663} 1680}
1664 1681
1665 1682
@@ -1669,7 +1686,6 @@ static void make_tx_response(struct xenvif_queue *queue,
1669{ 1686{
1670 RING_IDX i = queue->tx.rsp_prod_pvt; 1687 RING_IDX i = queue->tx.rsp_prod_pvt;
1671 struct xen_netif_tx_response *resp; 1688 struct xen_netif_tx_response *resp;
1672 int notify;
1673 1689
1674 resp = RING_GET_RESPONSE(&queue->tx, i); 1690 resp = RING_GET_RESPONSE(&queue->tx, i);
1675 resp->id = txp->id; 1691 resp->id = txp->id;
@@ -1679,9 +1695,6 @@ static void make_tx_response(struct xenvif_queue *queue,
1679 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; 1695 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1680 1696
1681 queue->tx.rsp_prod_pvt = ++i; 1697 queue->tx.rsp_prod_pvt = ++i;
1682 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1683 if (notify)
1684 notify_remote_via_irq(queue->tx_irq);
1685} 1698}
1686 1699
1687static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, 1700static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
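
Both xen-netback hunks apply one locking rule: push responses and compute the notify decision inside the response_lock critical section, but fire the event channel only after dropping the lock, so notify_remote_via_irq() never runs under the spinlock. The resulting pattern:

        int notify;
        unsigned long flags;

        spin_lock_irqsave(&queue->response_lock, flags);
        make_tx_response(queue, txp, status);
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
        spin_unlock_irqrestore(&queue->response_lock, flags);

        if (notify)     /* interrupt the frontend outside the lock */
                notify_remote_via_irq(queue->tx_irq);

Centralizing the push also lets make_tx_response() itself drop its private notify logic, as the last hunk shows.
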
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index 110fece2ff53..62426d81a4d6 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -229,7 +229,6 @@ parse_failed:
229 resource_list_for_each_entry(window, resources) 229 resource_list_for_each_entry(window, resources)
230 kfree(window->res); 230 kfree(window->res);
231 pci_free_resource_list(resources); 231 pci_free_resource_list(resources);
232 kfree(bus_range);
233 return err; 232 return err;
234} 233}
235EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources); 234EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
diff --git a/drivers/pci/pcie/aer/Kconfig b/drivers/pci/pcie/aer/Kconfig
index 389440228c1d..7d1437b01fdd 100644
--- a/drivers/pci/pcie/aer/Kconfig
+++ b/drivers/pci/pcie/aer/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5config PCIEAER 5config PCIEAER
6 boolean "Root Port Advanced Error Reporting support" 6 bool "Root Port Advanced Error Reporting support"
7 depends on PCIEPORTBUS 7 depends on PCIEPORTBUS
8 select RAS 8 select RAS
9 default y 9 default y
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 638e797037da..97527614141b 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -735,6 +735,31 @@ config INTEL_IPS
735 functionality. If in doubt, say Y here; it will only load on 735 functionality. If in doubt, say Y here; it will only load on
736 supported platforms. 736 supported platforms.
737 737
738config INTEL_IMR
739 bool "Intel Isolated Memory Region support"
740 default n
741 depends on X86_INTEL_QUARK && IOSF_MBI
742 ---help---
743 This option provides a means to manipulate Isolated Memory Regions.
744 IMRs are a set of registers that define read and write access masks
745 to prohibit certain system agents from accessing memory with 1 KiB
746 granularity.
747
748 IMRs make it possible to control read/write access to an address
749 by hardware agents inside the SoC. Read and write masks can be
750 defined for:
751 - eSRAM flush
752 - Dirty CPU snoop (write only)
753 - RMU access
754 - PCI Virtual Channel 0/Virtual Channel 1
755 - SMM mode
756 - Non SMM mode
757
758 Quark contains a set of eight IMR registers and makes use of those
759 registers during its bootup process.
760
 761 If you are running on a Galileo/Quark, say Y here.
762
738config IBM_RTL 763config IBM_RTL
739 tristate "Device driver to enable PRTL support" 764 tristate "Device driver to enable PRTL support"
740 depends on X86 && PCI 765 depends on X86 && PCI
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index f71700e0d132..46b274693872 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -856,8 +856,8 @@ static void asus_backlight_exit(struct asus_laptop *asus)
856 * than count bytes. We set eof to 1 if we handle those 2 values. We return the 856 * than count bytes. We set eof to 1 if we handle those 2 values. We return the
857 * number of bytes written in page 857 * number of bytes written in page
858 */ 858 */
859static ssize_t show_infos(struct device *dev, 859static ssize_t infos_show(struct device *dev, struct device_attribute *attr,
860 struct device_attribute *attr, char *page) 860 char *page)
861{ 861{
862 struct asus_laptop *asus = dev_get_drvdata(dev); 862 struct asus_laptop *asus = dev_get_drvdata(dev);
863 int len = 0; 863 int len = 0;
@@ -926,6 +926,7 @@ static ssize_t show_infos(struct device *dev,
926 926
927 return len; 927 return len;
928} 928}
929static DEVICE_ATTR_RO(infos);
929 930
930static int parse_arg(const char *buf, unsigned long count, int *val) 931static int parse_arg(const char *buf, unsigned long count, int *val)
931{ 932{
@@ -957,15 +958,15 @@ static ssize_t sysfs_acpi_set(struct asus_laptop *asus,
957/* 958/*
958 * LEDD display 959 * LEDD display
959 */ 960 */
960static ssize_t show_ledd(struct device *dev, 961static ssize_t ledd_show(struct device *dev, struct device_attribute *attr,
961 struct device_attribute *attr, char *buf) 962 char *buf)
962{ 963{
963 struct asus_laptop *asus = dev_get_drvdata(dev); 964 struct asus_laptop *asus = dev_get_drvdata(dev);
964 965
965 return sprintf(buf, "0x%08x\n", asus->ledd_status); 966 return sprintf(buf, "0x%08x\n", asus->ledd_status);
966} 967}
967 968
968static ssize_t store_ledd(struct device *dev, struct device_attribute *attr, 969static ssize_t ledd_store(struct device *dev, struct device_attribute *attr,
969 const char *buf, size_t count) 970 const char *buf, size_t count)
970{ 971{
971 struct asus_laptop *asus = dev_get_drvdata(dev); 972 struct asus_laptop *asus = dev_get_drvdata(dev);
@@ -981,6 +982,7 @@ static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
981 } 982 }
982 return rv; 983 return rv;
983} 984}
985static DEVICE_ATTR_RW(ledd);
984 986
985/* 987/*
986 * Wireless 988 * Wireless
@@ -1014,21 +1016,22 @@ static int asus_wlan_set(struct asus_laptop *asus, int status)
1014 return 0; 1016 return 0;
1015} 1017}
1016 1018
1017static ssize_t show_wlan(struct device *dev, 1019static ssize_t wlan_show(struct device *dev, struct device_attribute *attr,
1018 struct device_attribute *attr, char *buf) 1020 char *buf)
1019{ 1021{
1020 struct asus_laptop *asus = dev_get_drvdata(dev); 1022 struct asus_laptop *asus = dev_get_drvdata(dev);
1021 1023
1022 return sprintf(buf, "%d\n", asus_wireless_status(asus, WL_RSTS)); 1024 return sprintf(buf, "%d\n", asus_wireless_status(asus, WL_RSTS));
1023} 1025}
1024 1026
1025static ssize_t store_wlan(struct device *dev, struct device_attribute *attr, 1027static ssize_t wlan_store(struct device *dev, struct device_attribute *attr,
1026 const char *buf, size_t count) 1028 const char *buf, size_t count)
1027{ 1029{
1028 struct asus_laptop *asus = dev_get_drvdata(dev); 1030 struct asus_laptop *asus = dev_get_drvdata(dev);
1029 1031
1030 return sysfs_acpi_set(asus, buf, count, METHOD_WLAN); 1032 return sysfs_acpi_set(asus, buf, count, METHOD_WLAN);
1031} 1033}
1034static DEVICE_ATTR_RW(wlan);
1032 1035
 1033/* 1036 /*
1034 * Bluetooth 1037 * Bluetooth
@@ -1042,15 +1045,15 @@ static int asus_bluetooth_set(struct asus_laptop *asus, int status)
1042 return 0; 1045 return 0;
1043} 1046}
1044 1047
1045static ssize_t show_bluetooth(struct device *dev, 1048static ssize_t bluetooth_show(struct device *dev, struct device_attribute *attr,
1046 struct device_attribute *attr, char *buf) 1049 char *buf)
1047{ 1050{
1048 struct asus_laptop *asus = dev_get_drvdata(dev); 1051 struct asus_laptop *asus = dev_get_drvdata(dev);
1049 1052
1050 return sprintf(buf, "%d\n", asus_wireless_status(asus, BT_RSTS)); 1053 return sprintf(buf, "%d\n", asus_wireless_status(asus, BT_RSTS));
1051} 1054}
1052 1055
1053static ssize_t store_bluetooth(struct device *dev, 1056static ssize_t bluetooth_store(struct device *dev,
1054 struct device_attribute *attr, const char *buf, 1057 struct device_attribute *attr, const char *buf,
1055 size_t count) 1058 size_t count)
1056{ 1059{
@@ -1058,6 +1061,7 @@ static ssize_t store_bluetooth(struct device *dev,
1058 1061
1059 return sysfs_acpi_set(asus, buf, count, METHOD_BLUETOOTH); 1062 return sysfs_acpi_set(asus, buf, count, METHOD_BLUETOOTH);
1060} 1063}
1064static DEVICE_ATTR_RW(bluetooth);
1061 1065
1062/* 1066/*
1063 * Wimax 1067 * Wimax
@@ -1071,22 +1075,22 @@ static int asus_wimax_set(struct asus_laptop *asus, int status)
1071 return 0; 1075 return 0;
1072} 1076}
1073 1077
1074static ssize_t show_wimax(struct device *dev, 1078static ssize_t wimax_show(struct device *dev, struct device_attribute *attr,
1075 struct device_attribute *attr, char *buf) 1079 char *buf)
1076{ 1080{
1077 struct asus_laptop *asus = dev_get_drvdata(dev); 1081 struct asus_laptop *asus = dev_get_drvdata(dev);
1078 1082
1079 return sprintf(buf, "%d\n", asus_wireless_status(asus, WM_RSTS)); 1083 return sprintf(buf, "%d\n", asus_wireless_status(asus, WM_RSTS));
1080} 1084}
1081 1085
1082static ssize_t store_wimax(struct device *dev, 1086static ssize_t wimax_store(struct device *dev, struct device_attribute *attr,
1083 struct device_attribute *attr, const char *buf, 1087 const char *buf, size_t count)
1084 size_t count)
1085{ 1088{
1086 struct asus_laptop *asus = dev_get_drvdata(dev); 1089 struct asus_laptop *asus = dev_get_drvdata(dev);
1087 1090
1088 return sysfs_acpi_set(asus, buf, count, METHOD_WIMAX); 1091 return sysfs_acpi_set(asus, buf, count, METHOD_WIMAX);
1089} 1092}
1093static DEVICE_ATTR_RW(wimax);
1090 1094
1091/* 1095/*
1092 * Wwan 1096 * Wwan
@@ -1100,22 +1104,22 @@ static int asus_wwan_set(struct asus_laptop *asus, int status)
1100 return 0; 1104 return 0;
1101} 1105}
1102 1106
1103static ssize_t show_wwan(struct device *dev, 1107static ssize_t wwan_show(struct device *dev, struct device_attribute *attr,
1104 struct device_attribute *attr, char *buf) 1108 char *buf)
1105{ 1109{
1106 struct asus_laptop *asus = dev_get_drvdata(dev); 1110 struct asus_laptop *asus = dev_get_drvdata(dev);
1107 1111
1108 return sprintf(buf, "%d\n", asus_wireless_status(asus, WW_RSTS)); 1112 return sprintf(buf, "%d\n", asus_wireless_status(asus, WW_RSTS));
1109} 1113}
1110 1114
1111static ssize_t store_wwan(struct device *dev, 1115static ssize_t wwan_store(struct device *dev, struct device_attribute *attr,
1112 struct device_attribute *attr, const char *buf, 1116 const char *buf, size_t count)
1113 size_t count)
1114{ 1117{
1115 struct asus_laptop *asus = dev_get_drvdata(dev); 1118 struct asus_laptop *asus = dev_get_drvdata(dev);
1116 1119
1117 return sysfs_acpi_set(asus, buf, count, METHOD_WWAN); 1120 return sysfs_acpi_set(asus, buf, count, METHOD_WWAN);
1118} 1121}
1122static DEVICE_ATTR_RW(wwan);
1119 1123
1120/* 1124/*
1121 * Display 1125 * Display
@@ -1135,8 +1139,8 @@ static void asus_set_display(struct asus_laptop *asus, int value)
1135 * displays hooked up simultaneously, so be warned. See the acpi4asus README 1139 * displays hooked up simultaneously, so be warned. See the acpi4asus README
1136 * for more info. 1140 * for more info.
1137 */ 1141 */
1138static ssize_t store_disp(struct device *dev, struct device_attribute *attr, 1142static ssize_t display_store(struct device *dev, struct device_attribute *attr,
1139 const char *buf, size_t count) 1143 const char *buf, size_t count)
1140{ 1144{
1141 struct asus_laptop *asus = dev_get_drvdata(dev); 1145 struct asus_laptop *asus = dev_get_drvdata(dev);
1142 int rv, value; 1146 int rv, value;
@@ -1146,6 +1150,7 @@ static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
1146 asus_set_display(asus, value); 1150 asus_set_display(asus, value);
1147 return rv; 1151 return rv;
1148} 1152}
1153static DEVICE_ATTR_WO(display);
1149 1154
1150/* 1155/*
1151 * Light Sens 1156 * Light Sens
@@ -1167,16 +1172,17 @@ static void asus_als_switch(struct asus_laptop *asus, int value)
1167 asus->light_switch = value; 1172 asus->light_switch = value;
1168} 1173}
1169 1174
1170static ssize_t show_lssw(struct device *dev, 1175static ssize_t ls_switch_show(struct device *dev, struct device_attribute *attr,
1171 struct device_attribute *attr, char *buf) 1176 char *buf)
1172{ 1177{
1173 struct asus_laptop *asus = dev_get_drvdata(dev); 1178 struct asus_laptop *asus = dev_get_drvdata(dev);
1174 1179
1175 return sprintf(buf, "%d\n", asus->light_switch); 1180 return sprintf(buf, "%d\n", asus->light_switch);
1176} 1181}
1177 1182
1178static ssize_t store_lssw(struct device *dev, struct device_attribute *attr, 1183static ssize_t ls_switch_store(struct device *dev,
1179 const char *buf, size_t count) 1184 struct device_attribute *attr, const char *buf,
1185 size_t count)
1180{ 1186{
1181 struct asus_laptop *asus = dev_get_drvdata(dev); 1187 struct asus_laptop *asus = dev_get_drvdata(dev);
1182 int rv, value; 1188 int rv, value;
@@ -1187,6 +1193,7 @@ static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
1187 1193
1188 return rv; 1194 return rv;
1189} 1195}
1196static DEVICE_ATTR_RW(ls_switch);
1190 1197
1191static void asus_als_level(struct asus_laptop *asus, int value) 1198static void asus_als_level(struct asus_laptop *asus, int value)
1192{ 1199{
@@ -1195,16 +1202,16 @@ static void asus_als_level(struct asus_laptop *asus, int value)
1195 asus->light_level = value; 1202 asus->light_level = value;
1196} 1203}
1197 1204
1198static ssize_t show_lslvl(struct device *dev, 1205static ssize_t ls_level_show(struct device *dev, struct device_attribute *attr,
1199 struct device_attribute *attr, char *buf) 1206 char *buf)
1200{ 1207{
1201 struct asus_laptop *asus = dev_get_drvdata(dev); 1208 struct asus_laptop *asus = dev_get_drvdata(dev);
1202 1209
1203 return sprintf(buf, "%d\n", asus->light_level); 1210 return sprintf(buf, "%d\n", asus->light_level);
1204} 1211}
1205 1212
1206static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr, 1213static ssize_t ls_level_store(struct device *dev, struct device_attribute *attr,
1207 const char *buf, size_t count) 1214 const char *buf, size_t count)
1208{ 1215{
1209 struct asus_laptop *asus = dev_get_drvdata(dev); 1216 struct asus_laptop *asus = dev_get_drvdata(dev);
1210 int rv, value; 1217 int rv, value;
@@ -1218,6 +1225,7 @@ static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
1218 1225
1219 return rv; 1226 return rv;
1220} 1227}
1228static DEVICE_ATTR_RW(ls_level);
1221 1229
1222static int pega_int_read(struct asus_laptop *asus, int arg, int *result) 1230static int pega_int_read(struct asus_laptop *asus, int arg, int *result)
1223{ 1231{
@@ -1234,8 +1242,8 @@ static int pega_int_read(struct asus_laptop *asus, int arg, int *result)
1234 return err; 1242 return err;
1235} 1243}
1236 1244
1237static ssize_t show_lsvalue(struct device *dev, 1245static ssize_t ls_value_show(struct device *dev, struct device_attribute *attr,
1238 struct device_attribute *attr, char *buf) 1246 char *buf)
1239{ 1247{
1240 struct asus_laptop *asus = dev_get_drvdata(dev); 1248 struct asus_laptop *asus = dev_get_drvdata(dev);
1241 int err, hi, lo; 1249 int err, hi, lo;
@@ -1247,6 +1255,7 @@ static ssize_t show_lsvalue(struct device *dev,
1247 return sprintf(buf, "%d\n", 10 * hi + lo); 1255 return sprintf(buf, "%d\n", 10 * hi + lo);
1248 return err; 1256 return err;
1249} 1257}
1258static DEVICE_ATTR_RO(ls_value);
1250 1259
1251/* 1260/*
1252 * GPS 1261 * GPS
@@ -1274,15 +1283,15 @@ static int asus_gps_switch(struct asus_laptop *asus, int status)
1274 return 0; 1283 return 0;
1275} 1284}
1276 1285
1277static ssize_t show_gps(struct device *dev, 1286static ssize_t gps_show(struct device *dev, struct device_attribute *attr,
1278 struct device_attribute *attr, char *buf) 1287 char *buf)
1279{ 1288{
1280 struct asus_laptop *asus = dev_get_drvdata(dev); 1289 struct asus_laptop *asus = dev_get_drvdata(dev);
1281 1290
1282 return sprintf(buf, "%d\n", asus_gps_status(asus)); 1291 return sprintf(buf, "%d\n", asus_gps_status(asus));
1283} 1292}
1284 1293
1285static ssize_t store_gps(struct device *dev, struct device_attribute *attr, 1294static ssize_t gps_store(struct device *dev, struct device_attribute *attr,
1286 const char *buf, size_t count) 1295 const char *buf, size_t count)
1287{ 1296{
1288 struct asus_laptop *asus = dev_get_drvdata(dev); 1297 struct asus_laptop *asus = dev_get_drvdata(dev);
@@ -1298,6 +1307,7 @@ static ssize_t store_gps(struct device *dev, struct device_attribute *attr,
1298 rfkill_set_sw_state(asus->gps.rfkill, !value); 1307 rfkill_set_sw_state(asus->gps.rfkill, !value);
1299 return rv; 1308 return rv;
1300} 1309}
1310static DEVICE_ATTR_RW(gps);
1301 1311
1302/* 1312/*
1303 * rfkill 1313 * rfkill
@@ -1569,19 +1579,6 @@ static void asus_acpi_notify(struct acpi_device *device, u32 event)
1569 asus_input_notify(asus, event); 1579 asus_input_notify(asus, event);
1570} 1580}
1571 1581
1572static DEVICE_ATTR(infos, S_IRUGO, show_infos, NULL);
1573static DEVICE_ATTR(wlan, S_IRUGO | S_IWUSR, show_wlan, store_wlan);
1574static DEVICE_ATTR(bluetooth, S_IRUGO | S_IWUSR,
1575 show_bluetooth, store_bluetooth);
1576static DEVICE_ATTR(wimax, S_IRUGO | S_IWUSR, show_wimax, store_wimax);
1577static DEVICE_ATTR(wwan, S_IRUGO | S_IWUSR, show_wwan, store_wwan);
1578static DEVICE_ATTR(display, S_IWUSR, NULL, store_disp);
1579static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd);
1580static DEVICE_ATTR(ls_value, S_IRUGO, show_lsvalue, NULL);
1581static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl);
1582static DEVICE_ATTR(ls_switch, S_IRUGO | S_IWUSR, show_lssw, store_lssw);
1583static DEVICE_ATTR(gps, S_IRUGO | S_IWUSR, show_gps, store_gps);
1584
1585static struct attribute *asus_attributes[] = { 1582static struct attribute *asus_attributes[] = {
1586 &dev_attr_infos.attr, 1583 &dev_attr_infos.attr,
1587 &dev_attr_wlan.attr, 1584 &dev_attr_wlan.attr,
@@ -1616,7 +1613,7 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
1616 else 1613 else
1617 goto normal; 1614 goto normal;
1618 1615
1619 return supported; 1616 return supported ? attr->mode : 0;
1620 } 1617 }
1621 1618
1622normal: 1619normal:
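
The asus-laptop hunks above migrate hand-rolled DEVICE_ATTR() declarations to the DEVICE_ATTR_RO/RW/WO() helpers, which derive everything from <name>_show()/<name>_store() callbacks, hence the show_lssw to ls_switch_show style renames. A minimal sketch of the convention, with a hypothetical "example" attribute and driver-data struct (not from the patch):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

struct example_data {
	int value;	/* hypothetical driver state */
};

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct example_data *data = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", data->value);
}

static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct example_data *data = dev_get_drvdata(dev);
	int rv, value;

	rv = kstrtoint(buf, 0, &value);
	if (rv)
		return rv;

	data->value = value;
	return count;
}
/* Expands to dev_attr_example with 0644 (RW) permissions. */
static DEVICE_ATTR_RW(example);

DEVICE_ATTR_RO() and DEVICE_ATTR_WO() work the same way from a lone <name>_show() or <name>_store(), which is why the write-only display attribute above only needed its store callback and a DEVICE_ATTR_WO(display) line.
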
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 70d355a9ae2c..55cf10bc7817 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -520,7 +520,7 @@ static acpi_status cmpc_get_accel(acpi_handle handle,
520{ 520{
521 union acpi_object param[2]; 521 union acpi_object param[2];
522 struct acpi_object_list input; 522 struct acpi_object_list input;
523 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, 0 }; 523 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
524 unsigned char *locs; 524 unsigned char *locs;
525 acpi_status status; 525 acpi_status status;
526 526
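
The classmate-laptop change is a type fix: acpi_buffer.pointer is a void *, so the ACPI_ALLOCATE_BUFFER initializer should pair it with NULL rather than 0 (sparse flags the integer used as a pointer). A short sketch of the allocate-and-free pattern this struct is used with; the handle and method name are illustrative:

#include <linux/acpi.h>
#include <linux/slab.h>

static acpi_status evaluate_and_free(acpi_handle handle)
{
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;

	/* ACPICA allocates output.pointer on success... */
	status = acpi_evaluate_object(handle, "ACCR", NULL, &output);
	if (ACPI_SUCCESS(status))
		kfree(output.pointer);	/* ...and the caller must free it */

	return status;
}
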
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 7c21c1c44dfa..2a9afa261c61 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -64,6 +64,7 @@
64#include <linux/acpi.h> 64#include <linux/acpi.h>
65#include <linux/dmi.h> 65#include <linux/dmi.h>
66#include <linux/backlight.h> 66#include <linux/backlight.h>
67#include <linux/fb.h>
67#include <linux/input.h> 68#include <linux/input.h>
68#include <linux/kfifo.h> 69#include <linux/kfifo.h>
69#include <linux/platform_device.h> 70#include <linux/platform_device.h>
@@ -398,7 +399,7 @@ static int bl_get_brightness(struct backlight_device *b)
398static int bl_update_status(struct backlight_device *b) 399static int bl_update_status(struct backlight_device *b)
399{ 400{
400 int ret; 401 int ret;
401 if (b->props.power == 4) 402 if (b->props.power == FB_BLANK_POWERDOWN)
402 ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x3); 403 ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x3);
403 else 404 else
404 ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x0); 405 ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x0);
@@ -1139,9 +1140,9 @@ static int __init fujitsu_init(void)
1139 1140
1140 if (!acpi_video_backlight_support()) { 1141 if (!acpi_video_backlight_support()) {
1141 if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3) 1142 if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3)
1142 fujitsu->bl_device->props.power = 4; 1143 fujitsu->bl_device->props.power = FB_BLANK_POWERDOWN;
1143 else 1144 else
1144 fujitsu->bl_device->props.power = 0; 1145 fujitsu->bl_device->props.power = FB_BLANK_UNBLANK;
1145 } 1146 }
1146 1147
1147 pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n"); 1148 pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n");
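
The fujitsu-laptop hunks swap the magic numbers 4 and 0 for the named FB_BLANK_* constants from <linux/fb.h> (hence the new include). The mapping, as a self-contained sketch:

#include <linux/backlight.h>
#include <linux/fb.h>

static void set_bl_power(struct backlight_device *b, bool on)
{
	/* FB_BLANK_UNBLANK (0) = display on; FB_BLANK_POWERDOWN (4) = off */
	b->props.power = on ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
}
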
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 66a4d3284aab..001b199a8c33 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * intel_scu_ipc.c: Driver for the Intel SCU IPC mechanism 2 * intel_scu_ipc.c: Driver for the Intel SCU IPC mechanism
3 * 3 *
4 * (C) Copyright 2008-2010 Intel Corporation 4 * (C) Copyright 2008-2010,2015 Intel Corporation
5 * Author: Sreedhara DS (sreedhara.ds@intel.com) 5 * Author: Sreedhara DS (sreedhara.ds@intel.com)
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
@@ -43,10 +43,9 @@
43/* 43/*
44 * IPC register summary 44 * IPC register summary
45 * 45 *
46 * IPC register blocks are memory mapped at fixed address of 0xFF11C000 46 * IPC register blocks are memory mapped at fixed address of PCI BAR 0.
47 * To read or write information to the SCU, driver writes to IPC-1 memory 47 * To read or write information to the SCU, driver writes to IPC-1 memory
48 * mapped registers (base address 0xFF11C000). The following is the IPC 48 * mapped registers. The following is the IPC mechanism
49 * mechanism
50 * 49 *
51 * 1. IA core cDMI interface claims this transaction and converts it to a 50 * 1. IA core cDMI interface claims this transaction and converts it to a
52 * Transaction Layer Packet (TLP) message which is sent across the cDMI. 51 * Transaction Layer Packet (TLP) message which is sent across the cDMI.
@@ -67,36 +66,28 @@
67#define PCI_DEVICE_ID_CLOVERVIEW 0x08ea 66#define PCI_DEVICE_ID_CLOVERVIEW 0x08ea
68#define PCI_DEVICE_ID_TANGIER 0x11a0 67#define PCI_DEVICE_ID_TANGIER 0x11a0
69 68
70/* intel scu ipc driver data*/ 69/* intel scu ipc driver data */
71struct intel_scu_ipc_pdata_t { 70struct intel_scu_ipc_pdata_t {
72 u32 ipc_base;
73 u32 i2c_base; 71 u32 i2c_base;
74 u32 ipc_len;
75 u32 i2c_len; 72 u32 i2c_len;
76 u8 irq_mode; 73 u8 irq_mode;
77}; 74};
78 75
79static struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = { 76static struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = {
80 .ipc_base = 0xff11c000,
81 .i2c_base = 0xff12b000, 77 .i2c_base = 0xff12b000,
82 .ipc_len = 0x100,
83 .i2c_len = 0x10, 78 .i2c_len = 0x10,
84 .irq_mode = 0, 79 .irq_mode = 0,
85}; 80};
86 81
87/* Penwell and Cloverview */ 82/* Penwell and Cloverview */
88static struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = { 83static struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = {
89 .ipc_base = 0xff11c000,
90 .i2c_base = 0xff12b000, 84 .i2c_base = 0xff12b000,
91 .ipc_len = 0x100,
92 .i2c_len = 0x10, 85 .i2c_len = 0x10,
93 .irq_mode = 1, 86 .irq_mode = 1,
94}; 87};
95 88
96static struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = { 89static struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = {
97 .ipc_base = 0xff009000,
98 .i2c_base = 0xff00d000, 90 .i2c_base = 0xff00d000,
99 .ipc_len = 0x100,
100 .i2c_len = 0x10, 91 .i2c_len = 0x10,
101 .irq_mode = 0, 92 .irq_mode = 0,
102}; 93};
@@ -114,8 +105,6 @@ struct intel_scu_ipc_dev {
114 105
115static struct intel_scu_ipc_dev ipcdev; /* Only one for now */ 106static struct intel_scu_ipc_dev ipcdev; /* Only one for now */
116 107
117static int platform; /* Platform type */
118
119/* 108/*
120 * IPC Read Buffer (Read Only): 109 * IPC Read Buffer (Read Only):
121 * 16 byte buffer for receiving data from SCU, if IPC command 110 * 16 byte buffer for receiving data from SCU, if IPC command
@@ -160,7 +149,6 @@ static inline void ipc_data_writel(u32 data, u32 offset) /* Write ipc data */
160 * Format: 149 * Format:
161 * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)| 150 * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
162 */ 151 */
163
164static inline u8 ipc_read_status(void) 152static inline u8 ipc_read_status(void)
165{ 153{
166 return __raw_readl(ipcdev.ipc_base + 0x04); 154 return __raw_readl(ipcdev.ipc_base + 0x04);
@@ -176,23 +164,24 @@ static inline u32 ipc_data_readl(u32 offset) /* Read ipc u32 data */
176 return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset); 164 return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
177} 165}
178 166
179static inline int busy_loop(void) /* Wait till scu status is busy */ 167/* Wait until the SCU is no longer busy */
168static inline int busy_loop(void)
180{ 169{
181 u32 status = 0; 170 u32 status = ipc_read_status();
182 u32 loop_count = 0; 171 u32 loop_count = 100000;
183 172
184 status = ipc_read_status(); 173 /* bail out if the SCU doesn't clear the busy bit after many retries */
185 while (status & 1) { 174 while ((status & BIT(0)) && --loop_count) {
186 udelay(1); /* SCU processing time is a few microseconds */ 175 udelay(1); /* SCU processing time is a few microseconds */
187 status = ipc_read_status(); 176 status = ipc_read_status();
188 loop_count++;
189 /* break if scu doesn't reset busy bit after huge retry */
190 if (loop_count > 100000) {
191 dev_err(&ipcdev.pdev->dev, "IPC timed out");
192 return -ETIMEDOUT;
193 }
194 } 177 }
195 if ((status >> 1) & 1) 178
179 if (status & BIT(0)) {
180 dev_err(&ipcdev.pdev->dev, "IPC timed out");
181 return -ETIMEDOUT;
182 }
183
184 if (status & BIT(1))
196 return -EIO; 185 return -EIO;
197 186
198 return 0; 187 return 0;
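
The busy_loop() rewrite bounds the poll at 100000 iterations (about 100 ms at 1 µs per retry) and reads the status bits through BIT() instead of shifts; the timeout check then happens once, after the loop. The resulting shape, as a generic sketch against the register layout documented above (bit 0 = busy, bit 1 = error):

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

static int poll_until_idle(void __iomem *status_reg)
{
	u32 status = readl(status_reg);
	u32 loop_count = 100000;

	/* Bail out if the firmware never clears the busy bit. */
	while ((status & BIT(0)) && --loop_count) {
		udelay(1);
		status = readl(status_reg);
	}

	if (status & BIT(0))
		return -ETIMEDOUT;	/* still busy after the full budget */
	if (status & BIT(1))
		return -EIO;		/* firmware reported an error */

	return 0;
}
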
@@ -210,14 +199,13 @@ static inline int ipc_wait_for_interrupt(void)
210 } 199 }
211 200
212 status = ipc_read_status(); 201 status = ipc_read_status();
213 202 if (status & BIT(1))
214 if ((status >> 1) & 1)
215 return -EIO; 203 return -EIO;
216 204
217 return 0; 205 return 0;
218} 206}
219 207
220int intel_scu_ipc_check_status(void) 208static int intel_scu_ipc_check_status(void)
221{ 209{
222 return ipcdev.irq_mode ? ipc_wait_for_interrupt() : busy_loop(); 210 return ipcdev.irq_mode ? ipc_wait_for_interrupt() : busy_loop();
223} 211}
@@ -248,18 +236,18 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
248 if (id == IPC_CMD_PCNTRL_R) { 236 if (id == IPC_CMD_PCNTRL_R) {
249 for (nc = 0, offset = 0; nc < count; nc++, offset += 4) 237 for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
250 ipc_data_writel(wbuf[nc], offset); 238 ipc_data_writel(wbuf[nc], offset);
251 ipc_command((count*2) << 16 | id << 12 | 0 << 8 | op); 239 ipc_command((count * 2) << 16 | id << 12 | 0 << 8 | op);
252 } else if (id == IPC_CMD_PCNTRL_W) { 240 } else if (id == IPC_CMD_PCNTRL_W) {
253 for (nc = 0; nc < count; nc++, offset += 1) 241 for (nc = 0; nc < count; nc++, offset += 1)
254 cbuf[offset] = data[nc]; 242 cbuf[offset] = data[nc];
255 for (nc = 0, offset = 0; nc < count; nc++, offset += 4) 243 for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
256 ipc_data_writel(wbuf[nc], offset); 244 ipc_data_writel(wbuf[nc], offset);
257 ipc_command((count*3) << 16 | id << 12 | 0 << 8 | op); 245 ipc_command((count * 3) << 16 | id << 12 | 0 << 8 | op);
258 } else if (id == IPC_CMD_PCNTRL_M) { 246 } else if (id == IPC_CMD_PCNTRL_M) {
259 cbuf[offset] = data[0]; 247 cbuf[offset] = data[0];
260 cbuf[offset + 1] = data[1]; 248 cbuf[offset + 1] = data[1];
261 ipc_data_writel(wbuf[0], 0); /* Write wbuff */ 249 ipc_data_writel(wbuf[0], 0); /* Write wbuff */
262 ipc_command(4 << 16 | id << 12 | 0 << 8 | op); 250 ipc_command(4 << 16 | id << 12 | 0 << 8 | op);
263 } 251 }
264 252
265 err = intel_scu_ipc_check_status(); 253 err = intel_scu_ipc_check_status();
@@ -301,7 +289,7 @@ EXPORT_SYMBOL(intel_scu_ipc_ioread8);
301 */ 289 */
302int intel_scu_ipc_ioread16(u16 addr, u16 *data) 290int intel_scu_ipc_ioread16(u16 addr, u16 *data)
303{ 291{
304 u16 x[2] = {addr, addr + 1 }; 292 u16 x[2] = {addr, addr + 1};
305 return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R); 293 return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
306} 294}
307EXPORT_SYMBOL(intel_scu_ipc_ioread16); 295EXPORT_SYMBOL(intel_scu_ipc_ioread16);
@@ -351,7 +339,7 @@ EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
351 */ 339 */
352int intel_scu_ipc_iowrite16(u16 addr, u16 data) 340int intel_scu_ipc_iowrite16(u16 addr, u16 data)
353{ 341{
354 u16 x[2] = {addr, addr + 1 }; 342 u16 x[2] = {addr, addr + 1};
355 return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W); 343 return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
356} 344}
357EXPORT_SYMBOL(intel_scu_ipc_iowrite16); 345EXPORT_SYMBOL(intel_scu_ipc_iowrite16);
@@ -412,7 +400,6 @@ int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
412} 400}
413EXPORT_SYMBOL(intel_scu_ipc_writev); 401EXPORT_SYMBOL(intel_scu_ipc_writev);
414 402
415
416/** 403/**
417 * intel_scu_ipc_update_register - r/m/w a register 404 * intel_scu_ipc_update_register - r/m/w a register
418 * @addr: register address 405 * @addr: register address
@@ -475,9 +462,8 @@ EXPORT_SYMBOL(intel_scu_ipc_simple_command);
475 * Issue a command to the SCU which involves data transfers. Do the 462 * Issue a command to the SCU which involves data transfers. Do the
476 * data copies under the lock but leave it for the caller to interpret 463 * data copies under the lock but leave it for the caller to interpret
477 */ 464 */
478
479int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen, 465int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
480 u32 *out, int outlen) 466 u32 *out, int outlen)
481{ 467{
482 int i, err; 468 int i, err;
483 469
@@ -503,7 +489,7 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
503} 489}
504EXPORT_SYMBOL(intel_scu_ipc_command); 490EXPORT_SYMBOL(intel_scu_ipc_command);
505 491
506/*I2C commands */ 492/* I2C commands */
507#define IPC_I2C_WRITE 1 /* I2C Write command */ 493#define IPC_I2C_WRITE 1 /* I2C Write command */
508#define IPC_I2C_READ 2 /* I2C Read command */ 494#define IPC_I2C_READ 2 /* I2C Read command */
509 495
@@ -577,7 +563,7 @@ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
577{ 563{
578 int err; 564 int err;
579 struct intel_scu_ipc_pdata_t *pdata; 565 struct intel_scu_ipc_pdata_t *pdata;
580 resource_size_t pci_resource; 566 resource_size_t base;
581 567
582 if (ipcdev.pdev) /* We support only one SCU */ 568 if (ipcdev.pdev) /* We support only one SCU */
583 return -EBUSY; 569 return -EBUSY;
@@ -595,8 +581,8 @@ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
595 if (err) 581 if (err)
596 return err; 582 return err;
597 583
598 pci_resource = pci_resource_start(dev, 0); 584 base = pci_resource_start(dev, 0);
599 if (!pci_resource) 585 if (!base)
600 return -ENOMEM; 586 return -ENOMEM;
601 587
602 init_completion(&ipcdev.cmd_complete); 588 init_completion(&ipcdev.cmd_complete);
@@ -604,7 +590,7 @@ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
604 if (request_irq(dev->irq, ioc, 0, "intel_scu_ipc", &ipcdev)) 590 if (request_irq(dev->irq, ioc, 0, "intel_scu_ipc", &ipcdev))
605 return -EBUSY; 591 return -EBUSY;
606 592
607 ipcdev.ipc_base = ioremap_nocache(pdata->ipc_base, pdata->ipc_len); 593 ipcdev.ipc_base = ioremap_nocache(base, pci_resource_len(dev, 0));
608 if (!ipcdev.ipc_base) 594 if (!ipcdev.ipc_base)
609 return -ENOMEM; 595 return -ENOMEM;
610 596
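
With ipc_base and ipc_len dropped from the platform data, the probe path now derives the mapping entirely from PCI BAR 0, which also removes the last users of the hardcoded 0xFF11C000-style addresses. A condensed sketch of that part of ipc_probe():

#include <linux/io.h>
#include <linux/pci.h>

static void __iomem *map_ipc_regs(struct pci_dev *pdev)
{
	resource_size_t base = pci_resource_start(pdev, 0);

	if (!base)
		return NULL;

	/* Map the whole BAR instead of a hardcoded 0x100-byte window. */
	return ioremap_nocache(base, pci_resource_len(pdev, 0));
}
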
@@ -666,9 +652,10 @@ static struct pci_driver ipc_driver = {
666 .remove = ipc_remove, 652 .remove = ipc_remove,
667}; 653};
668 654
669
670static int __init intel_scu_ipc_init(void) 655static int __init intel_scu_ipc_init(void)
671{ 656{
657 int platform; /* Platform type */
658
672 platform = intel_mid_identify_cpu(); 659 platform = intel_mid_identify_cpu();
673 if (platform == 0) 660 if (platform == 0)
674 return -ENODEV; 661 return -ENODEV;
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index ff765d8e1a09..9e701b2256f9 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -124,6 +124,10 @@ struct sabi_commands {
124 u16 get_wireless_status; 124 u16 get_wireless_status;
125 u16 set_wireless_status; 125 u16 set_wireless_status;
126 126
127 /* 0x80 is off, 0x81 is on */
128 u16 get_lid_handling;
129 u16 set_lid_handling;
130
127 /* 0x81 to read, (0x82 | level << 8) to set, 0xaabb to enable */ 131 /* 0x81 to read, (0x82 | level << 8) to set, 0xaabb to enable */
128 u16 kbd_backlight; 132 u16 kbd_backlight;
129 133
@@ -194,6 +198,9 @@ static const struct sabi_config sabi_configs[] = {
194 .get_wireless_status = 0xFFFF, 198 .get_wireless_status = 0xFFFF,
195 .set_wireless_status = 0xFFFF, 199 .set_wireless_status = 0xFFFF,
196 200
201 .get_lid_handling = 0xFFFF,
202 .set_lid_handling = 0xFFFF,
203
197 .kbd_backlight = 0xFFFF, 204 .kbd_backlight = 0xFFFF,
198 205
199 .set_linux = 0x0a, 206 .set_linux = 0x0a,
@@ -254,6 +261,9 @@ static const struct sabi_config sabi_configs[] = {
254 .get_wireless_status = 0x69, 261 .get_wireless_status = 0x69,
255 .set_wireless_status = 0x6a, 262 .set_wireless_status = 0x6a,
256 263
264 .get_lid_handling = 0x6d,
265 .set_lid_handling = 0x6e,
266
257 .kbd_backlight = 0x78, 267 .kbd_backlight = 0x78,
258 268
259 .set_linux = 0xff, 269 .set_linux = 0xff,
@@ -353,6 +363,8 @@ struct samsung_quirks {
353 bool broken_acpi_video; 363 bool broken_acpi_video;
354 bool four_kbd_backlight_levels; 364 bool four_kbd_backlight_levels;
355 bool enable_kbd_backlight; 365 bool enable_kbd_backlight;
366 bool use_native_backlight;
367 bool lid_handling;
356}; 368};
357 369
358static struct samsung_quirks samsung_unknown = {}; 370static struct samsung_quirks samsung_unknown = {};
@@ -361,11 +373,19 @@ static struct samsung_quirks samsung_broken_acpi_video = {
361 .broken_acpi_video = true, 373 .broken_acpi_video = true,
362}; 374};
363 375
376static struct samsung_quirks samsung_use_native_backlight = {
377 .use_native_backlight = true,
378};
379
364static struct samsung_quirks samsung_np740u3e = { 380static struct samsung_quirks samsung_np740u3e = {
365 .four_kbd_backlight_levels = true, 381 .four_kbd_backlight_levels = true,
366 .enable_kbd_backlight = true, 382 .enable_kbd_backlight = true,
367}; 383};
368 384
385static struct samsung_quirks samsung_lid_handling = {
386 .lid_handling = true,
387};
388
369static bool force; 389static bool force;
370module_param(force, bool, 0); 390module_param(force, bool, 0);
371MODULE_PARM_DESC(force, 391MODULE_PARM_DESC(force,
@@ -748,7 +768,7 @@ static ssize_t set_battery_life_extender(struct device *dev,
748 struct samsung_laptop *samsung = dev_get_drvdata(dev); 768 struct samsung_laptop *samsung = dev_get_drvdata(dev);
749 int ret, value; 769 int ret, value;
750 770
751 if (!count || sscanf(buf, "%i", &value) != 1) 771 if (!count || kstrtoint(buf, 0, &value) != 0)
752 return -EINVAL; 772 return -EINVAL;
753 773
754 ret = write_battery_life_extender(samsung, !!value); 774 ret = write_battery_life_extender(samsung, !!value);
@@ -817,7 +837,7 @@ static ssize_t set_usb_charge(struct device *dev,
817 struct samsung_laptop *samsung = dev_get_drvdata(dev); 837 struct samsung_laptop *samsung = dev_get_drvdata(dev);
818 int ret, value; 838 int ret, value;
819 839
820 if (!count || sscanf(buf, "%i", &value) != 1) 840 if (!count || kstrtoint(buf, 0, &value) != 0)
821 return -EINVAL; 841 return -EINVAL;
822 842
823 ret = write_usb_charge(samsung, !!value); 843 ret = write_usb_charge(samsung, !!value);
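
The sscanf() to kstrtoint() conversions above are not purely cosmetic: kstrtoint() rejects trailing garbage and out-of-range values, while sscanf(buf, "%i", ...) accepts any valid prefix and silently ignores the rest. A small sketch of the stricter parse:

#include <linux/errno.h>
#include <linux/kernel.h>

static int parse_flag(const char *buf)
{
	int value;

	/*
	 * Fails on "1abc" or on overflow, where sscanf("%i") would
	 * have returned 1 and left the junk unnoticed. A single
	 * trailing newline, as sysfs writes usually carry, is allowed.
	 */
	if (kstrtoint(buf, 0, &value))
		return -EINVAL;

	return !!value;
}
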
@@ -830,10 +850,76 @@ static ssize_t set_usb_charge(struct device *dev,
830static DEVICE_ATTR(usb_charge, S_IWUSR | S_IRUGO, 850static DEVICE_ATTR(usb_charge, S_IWUSR | S_IRUGO,
831 get_usb_charge, set_usb_charge); 851 get_usb_charge, set_usb_charge);
832 852
853static int read_lid_handling(struct samsung_laptop *samsung)
854{
855 const struct sabi_commands *commands = &samsung->config->commands;
856 struct sabi_data data;
857 int retval;
858
859 if (commands->get_lid_handling == 0xFFFF)
860 return -ENODEV;
861
862 memset(&data, 0, sizeof(data));
863 retval = sabi_command(samsung, commands->get_lid_handling,
864 &data, &data);
865
866 if (retval)
867 return retval;
868
869 return data.data[0] & 0x1;
870}
871
872static int write_lid_handling(struct samsung_laptop *samsung,
873 int enabled)
874{
875 const struct sabi_commands *commands = &samsung->config->commands;
876 struct sabi_data data;
877
878 memset(&data, 0, sizeof(data));
879 data.data[0] = 0x80 | enabled;
880 return sabi_command(samsung, commands->set_lid_handling,
881 &data, NULL);
882}
883
884static ssize_t get_lid_handling(struct device *dev,
885 struct device_attribute *attr,
886 char *buf)
887{
888 struct samsung_laptop *samsung = dev_get_drvdata(dev);
889 int ret;
890
891 ret = read_lid_handling(samsung);
892 if (ret < 0)
893 return ret;
894
895 return sprintf(buf, "%d\n", ret);
896}
897
898static ssize_t set_lid_handling(struct device *dev,
899 struct device_attribute *attr,
900 const char *buf, size_t count)
901{
902 struct samsung_laptop *samsung = dev_get_drvdata(dev);
903 int ret, value;
904
905 if (!count || kstrtoint(buf, 0, &value) != 0)
906 return -EINVAL;
907
908 ret = write_lid_handling(samsung, !!value);
909 if (ret < 0)
910 return ret;
911
912 return count;
913}
914
915static DEVICE_ATTR(lid_handling, S_IWUSR | S_IRUGO,
916 get_lid_handling, set_lid_handling);
917
833static struct attribute *platform_attributes[] = { 918static struct attribute *platform_attributes[] = {
834 &dev_attr_performance_level.attr, 919 &dev_attr_performance_level.attr,
835 &dev_attr_battery_life_extender.attr, 920 &dev_attr_battery_life_extender.attr,
836 &dev_attr_usb_charge.attr, 921 &dev_attr_usb_charge.attr,
922 &dev_attr_lid_handling.attr,
837 NULL 923 NULL
838}; 924};
839 925
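
Per the "0x80 is off, 0x81 is on" note in the sabi_commands hunk, the lid-handling payload carries the state in bit 0 on top of a 0x80 base: write_lid_handling() sends 0x80 | enabled and read_lid_handling() masks with 0x1. In isolation, as a sketch:

#include <linux/types.h>

static u8 lid_payload(bool enabled)
{
	return 0x80 | enabled;	/* 0x80 = off, 0x81 = on */
}

static bool lid_enabled(u8 payload)
{
	return payload & 0x1;	/* bit 0 carries the state */
}
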
@@ -956,6 +1042,22 @@ static int __init samsung_rfkill_init(struct samsung_laptop *samsung)
956 return 0; 1042 return 0;
957} 1043}
958 1044
1045static void samsung_lid_handling_exit(struct samsung_laptop *samsung)
1046{
1047 if (samsung->quirks->lid_handling)
1048 write_lid_handling(samsung, 0);
1049}
1050
1051static int __init samsung_lid_handling_init(struct samsung_laptop *samsung)
1052{
1053 int retval = 0;
1054
1055 if (samsung->quirks->lid_handling)
1056 retval = write_lid_handling(samsung, 1);
1057
1058 return retval;
1059}
1060
959static int kbd_backlight_enable(struct samsung_laptop *samsung) 1061static int kbd_backlight_enable(struct samsung_laptop *samsung)
960{ 1062{
961 const struct sabi_commands *commands = &samsung->config->commands; 1063 const struct sabi_commands *commands = &samsung->config->commands;
@@ -1111,7 +1213,7 @@ static int __init samsung_backlight_init(struct samsung_laptop *samsung)
1111} 1213}
1112 1214
1113static umode_t samsung_sysfs_is_visible(struct kobject *kobj, 1215static umode_t samsung_sysfs_is_visible(struct kobject *kobj,
1114 struct attribute *attr, int idx) 1216 struct attribute *attr, int idx)
1115{ 1217{
1116 struct device *dev = container_of(kobj, struct device, kobj); 1218 struct device *dev = container_of(kobj, struct device, kobj);
1117 struct platform_device *pdev = to_platform_device(dev); 1219 struct platform_device *pdev = to_platform_device(dev);
@@ -1124,6 +1226,8 @@ static umode_t samsung_sysfs_is_visible(struct kobject *kobj,
1124 ok = !!(read_battery_life_extender(samsung) >= 0); 1226 ok = !!(read_battery_life_extender(samsung) >= 0);
1125 if (attr == &dev_attr_usb_charge.attr) 1227 if (attr == &dev_attr_usb_charge.attr)
1126 ok = !!(read_usb_charge(samsung) >= 0); 1228 ok = !!(read_usb_charge(samsung) >= 0);
1229 if (attr == &dev_attr_lid_handling.attr)
1230 ok = !!(read_lid_handling(samsung) >= 0);
1127 1231
1128 return ok ? attr->mode : 0; 1232 return ok ? attr->mode : 0;
1129} 1233}
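
Both samsung_sysfs_is_visible() here and the asus_sysfs_is_visible() fix earlier return attr->mode rather than a bare truth value: the callback's umode_t result is the permission mask sysfs applies, so returning 1 would have produced a visible but unreadable, unwritable file. The canonical shape, sketched with hypothetical names (reusing dev_attr_example from the earlier sketch):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL
};

static umode_t example_is_visible(struct kobject *kobj,
				  struct attribute *attr, int idx)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	bool supported = dev_get_drvdata(dev) != NULL;	/* placeholder check */

	/* 0 hides the attribute; attr->mode keeps its declared permissions. */
	return supported ? attr->mode : 0;
}

static const struct attribute_group example_group = {
	.attrs = example_attrs,
	.is_visible = example_is_visible,
};
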
@@ -1357,7 +1461,7 @@ static int __init samsung_sabi_init(struct samsung_laptop *samsung)
1357 samsung_sabi_diag(samsung); 1461 samsung_sabi_diag(samsung);
1358 1462
1359 /* Try to find one of the signatures in memory to find the header */ 1463 /* Try to find one of the signatures in memory to find the header */
1360 for (i = 0; sabi_configs[i].test_string != 0; ++i) { 1464 for (i = 0; sabi_configs[i].test_string != NULL; ++i) {
1361 samsung->config = &sabi_configs[i]; 1465 samsung->config = &sabi_configs[i];
1362 loca = find_signature(samsung->f0000_segment, 1466 loca = find_signature(samsung->f0000_segment,
1363 samsung->config->test_string); 1467 samsung->config->test_string);
@@ -1436,6 +1540,9 @@ static int samsung_pm_notification(struct notifier_block *nb,
1436 samsung->quirks->enable_kbd_backlight) 1540 samsung->quirks->enable_kbd_backlight)
1437 kbd_backlight_enable(samsung); 1541 kbd_backlight_enable(samsung);
1438 1542
1543 if (val == PM_POST_HIBERNATION && samsung->quirks->lid_handling)
1544 write_lid_handling(samsung, 1);
1545
1439 return 0; 1546 return 0;
1440} 1547}
1441 1548
@@ -1507,7 +1614,7 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
1507 DMI_MATCH(DMI_PRODUCT_NAME, "N150P"), 1614 DMI_MATCH(DMI_PRODUCT_NAME, "N150P"),
1508 DMI_MATCH(DMI_BOARD_NAME, "N150P"), 1615 DMI_MATCH(DMI_BOARD_NAME, "N150P"),
1509 }, 1616 },
1510 .driver_data = &samsung_broken_acpi_video, 1617 .driver_data = &samsung_use_native_backlight,
1511 }, 1618 },
1512 { 1619 {
1513 .callback = samsung_dmi_matched, 1620 .callback = samsung_dmi_matched,
@@ -1517,7 +1624,7 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
1517 DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"), 1624 DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
1518 DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"), 1625 DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
1519 }, 1626 },
1520 .driver_data = &samsung_broken_acpi_video, 1627 .driver_data = &samsung_use_native_backlight,
1521 }, 1628 },
1522 { 1629 {
1523 .callback = samsung_dmi_matched, 1630 .callback = samsung_dmi_matched,
@@ -1557,7 +1664,7 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
1557 DMI_MATCH(DMI_PRODUCT_NAME, "N250P"), 1664 DMI_MATCH(DMI_PRODUCT_NAME, "N250P"),
1558 DMI_MATCH(DMI_BOARD_NAME, "N250P"), 1665 DMI_MATCH(DMI_BOARD_NAME, "N250P"),
1559 }, 1666 },
1560 .driver_data = &samsung_broken_acpi_video, 1667 .driver_data = &samsung_use_native_backlight,
1561 }, 1668 },
1562 { 1669 {
1563 .callback = samsung_dmi_matched, 1670 .callback = samsung_dmi_matched,
@@ -1578,6 +1685,15 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
1578 }, 1685 },
1579 .driver_data = &samsung_np740u3e, 1686 .driver_data = &samsung_np740u3e,
1580 }, 1687 },
1688 {
1689 .callback = samsung_dmi_matched,
1690 .ident = "300V3Z/300V4Z/300V5Z",
1691 .matches = {
1692 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
1693 DMI_MATCH(DMI_PRODUCT_NAME, "300V3Z/300V4Z/300V5Z"),
1694 },
1695 .driver_data = &samsung_lid_handling,
1696 },
1581 { }, 1697 { },
1582}; 1698};
1583MODULE_DEVICE_TABLE(dmi, samsung_dmi_table); 1699MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
@@ -1616,6 +1732,15 @@ static int __init samsung_init(void)
1616 pr_info("Disabling ACPI video driver\n"); 1732 pr_info("Disabling ACPI video driver\n");
1617 acpi_video_unregister(); 1733 acpi_video_unregister();
1618 } 1734 }
1735
1736 if (samsung->quirks->use_native_backlight) {
1737 pr_info("Using native backlight driver\n");
1738 /* Tell acpi-video to not handle the backlight */
1739 acpi_video_dmi_promote_vendor();
1740 acpi_video_unregister();
1741 /* And also do not handle it ourselves */
1742 samsung->handle_backlight = false;
1743 }
1619#endif 1744#endif
1620 1745
1621 ret = samsung_platform_init(samsung); 1746 ret = samsung_platform_init(samsung);
@@ -1648,6 +1773,10 @@ static int __init samsung_init(void)
1648 if (ret) 1773 if (ret)
1649 goto error_leds; 1774 goto error_leds;
1650 1775
1776 ret = samsung_lid_handling_init(samsung);
1777 if (ret)
1778 goto error_lid_handling;
1779
1651 ret = samsung_debugfs_init(samsung); 1780 ret = samsung_debugfs_init(samsung);
1652 if (ret) 1781 if (ret)
1653 goto error_debugfs; 1782 goto error_debugfs;
@@ -1659,6 +1788,8 @@ static int __init samsung_init(void)
1659 return ret; 1788 return ret;
1660 1789
1661error_debugfs: 1790error_debugfs:
1791 samsung_lid_handling_exit(samsung);
1792error_lid_handling:
1662 samsung_leds_exit(samsung); 1793 samsung_leds_exit(samsung);
1663error_leds: 1794error_leds:
1664 samsung_rfkill_exit(samsung); 1795 samsung_rfkill_exit(samsung);
@@ -1683,6 +1814,7 @@ static void __exit samsung_exit(void)
1683 unregister_pm_notifier(&samsung->pm_nb); 1814 unregister_pm_notifier(&samsung->pm_nb);
1684 1815
1685 samsung_debugfs_exit(samsung); 1816 samsung_debugfs_exit(samsung);
1817 samsung_lid_handling_exit(samsung);
1686 samsung_leds_exit(samsung); 1818 samsung_leds_exit(samsung);
1687 samsung_rfkill_exit(samsung); 1819 samsung_rfkill_exit(samsung);
1688 samsung_backlight_exit(samsung); 1820 samsung_backlight_exit(samsung);
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 6dd1c0e7dcd9..e51c1e753607 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1032,7 +1032,7 @@ struct sony_backlight_props {
1032 u8 offset; 1032 u8 offset;
1033 u8 maxlvl; 1033 u8 maxlvl;
1034}; 1034};
1035struct sony_backlight_props sony_bl_props; 1035static struct sony_backlight_props sony_bl_props;
1036 1036
1037static int sony_backlight_update_status(struct backlight_device *bd) 1037static int sony_backlight_update_status(struct backlight_device *bd)
1038{ 1038{
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index c3d11fabc46f..3b8ceee7c5cb 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -196,6 +196,7 @@ enum tpacpi_hkey_event_t {
196 /* Key-related user-interface events */ 196 /* Key-related user-interface events */
197 TP_HKEY_EV_KEY_NUMLOCK = 0x6000, /* NumLock key pressed */ 197 TP_HKEY_EV_KEY_NUMLOCK = 0x6000, /* NumLock key pressed */
198 TP_HKEY_EV_KEY_FN = 0x6005, /* Fn key pressed? E420 */ 198 TP_HKEY_EV_KEY_FN = 0x6005, /* Fn key pressed? E420 */
199 TP_HKEY_EV_KEY_FN_ESC = 0x6060, /* Fn+Esc key pressed X240 */
199 200
200 /* Thermal events */ 201 /* Thermal events */
201 TP_HKEY_EV_ALARM_BAT_HOT = 0x6011, /* battery too hot */ 202 TP_HKEY_EV_ALARM_BAT_HOT = 0x6011, /* battery too hot */
@@ -3456,7 +3457,7 @@ enum ADAPTIVE_KEY_MODE {
3456 LAYFLAT_MODE 3457 LAYFLAT_MODE
3457}; 3458};
3458 3459
3459const int adaptive_keyboard_modes[] = { 3460static const int adaptive_keyboard_modes[] = {
3460 HOME_MODE, 3461 HOME_MODE,
3461/* WEB_BROWSER_MODE = 2, 3462/* WEB_BROWSER_MODE = 2,
3462 WEB_CONFERENCE_MODE = 3, */ 3463 WEB_CONFERENCE_MODE = 3, */
@@ -3712,6 +3713,7 @@ static bool hotkey_notify_6xxx(const u32 hkey,
3712 3713
3713 case TP_HKEY_EV_KEY_NUMLOCK: 3714 case TP_HKEY_EV_KEY_NUMLOCK:
3714 case TP_HKEY_EV_KEY_FN: 3715 case TP_HKEY_EV_KEY_FN:
3716 case TP_HKEY_EV_KEY_FN_ESC:
3715 /* key press events, we just ignore them as long as the EC 3717 /* key press events, we just ignore them as long as the EC
3716 * is still reporting them in the normal keyboard stream */ 3718 * is still reporting them in the normal keyboard stream */
3717 *send_acpi_ev = false; 3719 *send_acpi_ev = false;
@@ -8883,17 +8885,31 @@ static bool __pure __init tpacpi_is_fw_digit(const char c)
8883 return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z'); 8885 return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z');
8884} 8886}
8885 8887
8886/* Most models: xxyTkkWW (#.##c); Ancient 570/600 and -SL lacks (#.##c) */
8887static bool __pure __init tpacpi_is_valid_fw_id(const char * const s, 8888static bool __pure __init tpacpi_is_valid_fw_id(const char * const s,
8888 const char t) 8889 const char t)
8889{ 8890{
8890 return s && strlen(s) >= 8 && 8891 /*
8892 * Most models: xxyTkkWW (#.##c)
8893 * Ancient 570/600 and -SL lacks (#.##c)
8894 */
8895 if (s && strlen(s) >= 8 &&
8891 tpacpi_is_fw_digit(s[0]) && 8896 tpacpi_is_fw_digit(s[0]) &&
8892 tpacpi_is_fw_digit(s[1]) && 8897 tpacpi_is_fw_digit(s[1]) &&
8893 s[2] == t && 8898 s[2] == t &&
8894 (s[3] == 'T' || s[3] == 'N') && 8899 (s[3] == 'T' || s[3] == 'N') &&
8895 tpacpi_is_fw_digit(s[4]) && 8900 tpacpi_is_fw_digit(s[4]) &&
8896 tpacpi_is_fw_digit(s[5]); 8901 tpacpi_is_fw_digit(s[5]))
8902 return true;
8903
8904 /* New models: xxxyTkkW (#.##c); T550 and some others */
8905 return s && strlen(s) >= 8 &&
8906 tpacpi_is_fw_digit(s[0]) &&
8907 tpacpi_is_fw_digit(s[1]) &&
8908 tpacpi_is_fw_digit(s[2]) &&
8909 s[3] == t &&
8910 (s[4] == 'T' || s[4] == 'N') &&
8911 tpacpi_is_fw_digit(s[5]) &&
8912 tpacpi_is_fw_digit(s[6]);
8897} 8913}
8898 8914
8899/* returns 0 - probe ok, or < 0 - probe error. 8915/* returns 0 - probe ok, or < 0 - probe error.
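
The extended tpacpi_is_valid_fw_id() accepts both ID shapes; note that tpacpi_is_fw_digit() treats 0-9 and A-Z alike as "digits". With illustrative firmware strings (real BIOS/EC IDs vary), the two branches match like this:

#include <linux/bug.h>
#include <linux/init.h>

/* Sketch only; this would live next to the validator inside the driver. */
static void __init fw_id_examples(void)
{
	/* Classic xxyTkkWW: type letter at s[2], 'T'/'N' at s[3]. */
	WARN_ON(!tpacpi_is_valid_fw_id("79ET51WW", 'E'));

	/* New xxxyTkkW (T550 and friends): type letter at s[3], 'T'/'N' at s[4]. */
	WARN_ON(!tpacpi_is_valid_fw_id("N10ET32W", 'E'));
}
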
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index fc34a71866ed..dbcb7a8915b8 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1,11 +1,10 @@
1/* 1/*
2 * toshiba_acpi.c - Toshiba Laptop ACPI Extras 2 * toshiba_acpi.c - Toshiba Laptop ACPI Extras
3 * 3 *
4 *
5 * Copyright (C) 2002-2004 John Belmonte 4 * Copyright (C) 2002-2004 John Belmonte
6 * Copyright (C) 2008 Philip Langdale 5 * Copyright (C) 2008 Philip Langdale
7 * Copyright (C) 2010 Pierre Ducroquet 6 * Copyright (C) 2010 Pierre Ducroquet
8 * Copyright (C) 2014 Azael Avalos 7 * Copyright (C) 2014-2015 Azael Avalos
9 * 8 *
10 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
@@ -17,10 +16,8 @@
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 17 * GNU General Public License for more details.
19 * 18 *
20 * You should have received a copy of the GNU General Public License 19 * The full GNU General Public License is included in this distribution in
21 * along with this program; if not, write to the Free Software 20 * the file called "COPYING".
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * 21 *
25 * The development page for this driver is located at 22 * The development page for this driver is located at
26 * http://memebeam.org/toys/ToshibaAcpiDriver. 23 * http://memebeam.org/toys/ToshibaAcpiDriver.
@@ -30,15 +27,11 @@
30 * engineering the Windows drivers 27 * engineering the Windows drivers
31 * Yasushi Nagato - changes for linux kernel 2.4 -> 2.5 28 * Yasushi Nagato - changes for linux kernel 2.4 -> 2.5
32 * Rob Miller - TV out and hotkeys help 29 * Rob Miller - TV out and hotkeys help
33 *
34 *
35 * TODO
36 *
37 */ 30 */
38 31
39#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
40 33
41#define TOSHIBA_ACPI_VERSION "0.20" 34#define TOSHIBA_ACPI_VERSION "0.21"
42#define PROC_INTERFACE_VERSION 1 35#define PROC_INTERFACE_VERSION 1
43 36
44#include <linux/kernel.h> 37#include <linux/kernel.h>
@@ -57,7 +50,7 @@
57#include <linux/i8042.h> 50#include <linux/i8042.h>
58#include <linux/acpi.h> 51#include <linux/acpi.h>
59#include <linux/dmi.h> 52#include <linux/dmi.h>
60#include <asm/uaccess.h> 53#include <linux/uaccess.h>
61 54
62MODULE_AUTHOR("John Belmonte"); 55MODULE_AUTHOR("John Belmonte");
63MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver"); 56MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver");
@@ -71,7 +64,8 @@ MODULE_LICENSE("GPL");
71/* Toshiba ACPI method paths */ 64/* Toshiba ACPI method paths */
72#define METHOD_VIDEO_OUT "\\_SB_.VALX.DSSX" 65#define METHOD_VIDEO_OUT "\\_SB_.VALX.DSSX"
73 66
74/* The Toshiba configuration interface is composed of the HCI and the SCI, 67/*
68 * The Toshiba configuration interface is composed of the HCI and the SCI,
75 * which are defined as follows: 69 * which are defined as follows:
76 * 70 *
77 * HCI is Toshiba's "Hardware Control Interface" which is supposed to 71 * HCI is Toshiba's "Hardware Control Interface" which is supposed to
@@ -108,6 +102,7 @@ MODULE_LICENSE("GPL");
108#define TOS_FIFO_EMPTY 0x8c00 102#define TOS_FIFO_EMPTY 0x8c00
109#define TOS_DATA_NOT_AVAILABLE 0x8d20 103#define TOS_DATA_NOT_AVAILABLE 0x8d20
110#define TOS_NOT_INITIALIZED 0x8d50 104#define TOS_NOT_INITIALIZED 0x8d50
105#define TOS_NOT_INSTALLED 0x8e00
111 106
112/* registers */ 107/* registers */
113#define HCI_FAN 0x0004 108#define HCI_FAN 0x0004
@@ -121,9 +116,14 @@ MODULE_LICENSE("GPL");
121#define HCI_KBD_ILLUMINATION 0x0095 116#define HCI_KBD_ILLUMINATION 0x0095
122#define HCI_ECO_MODE 0x0097 117#define HCI_ECO_MODE 0x0097
123#define HCI_ACCELEROMETER2 0x00a6 118#define HCI_ACCELEROMETER2 0x00a6
119#define SCI_PANEL_POWER_ON 0x010d
124#define SCI_ILLUMINATION 0x014e 120#define SCI_ILLUMINATION 0x014e
121#define SCI_USB_SLEEP_CHARGE 0x0150
125#define SCI_KBD_ILLUM_STATUS 0x015c 122#define SCI_KBD_ILLUM_STATUS 0x015c
123#define SCI_USB_SLEEP_MUSIC 0x015e
124#define SCI_USB_THREE 0x0169
126#define SCI_TOUCHPAD 0x050e 125#define SCI_TOUCHPAD 0x050e
126#define SCI_KBD_FUNCTION_KEYS 0x0522
127 127
128/* field definitions */ 128/* field definitions */
129#define HCI_ACCEL_MASK 0x7fff 129#define HCI_ACCEL_MASK 0x7fff
@@ -146,6 +146,15 @@ MODULE_LICENSE("GPL");
146#define SCI_KBD_MODE_ON 0x8 146#define SCI_KBD_MODE_ON 0x8
147#define SCI_KBD_MODE_OFF 0x10 147#define SCI_KBD_MODE_OFF 0x10
148#define SCI_KBD_TIME_MAX 0x3c001a 148#define SCI_KBD_TIME_MAX 0x3c001a
149#define SCI_USB_CHARGE_MODE_MASK 0xff
150#define SCI_USB_CHARGE_DISABLED 0x30000
151#define SCI_USB_CHARGE_ALTERNATE 0x30009
152#define SCI_USB_CHARGE_AUTO 0x30021
153#define SCI_USB_CHARGE_BAT_MASK 0x7
154#define SCI_USB_CHARGE_BAT_LVL_OFF 0x1
155#define SCI_USB_CHARGE_BAT_LVL_ON 0x4
156#define SCI_USB_CHARGE_BAT_LVL 0x0200
157#define SCI_USB_CHARGE_RAPID_DSP 0x0300
149 158
150struct toshiba_acpi_dev { 159struct toshiba_acpi_dev {
151 struct acpi_device *acpi_dev; 160 struct acpi_device *acpi_dev;
@@ -164,6 +173,7 @@ struct toshiba_acpi_dev {
164 int kbd_type; 173 int kbd_type;
165 int kbd_mode; 174 int kbd_mode;
166 int kbd_time; 175 int kbd_time;
176 int usbsc_bat_level;
167 177
168 unsigned int illumination_supported:1; 178 unsigned int illumination_supported:1;
169 unsigned int video_supported:1; 179 unsigned int video_supported:1;
@@ -177,6 +187,12 @@ struct toshiba_acpi_dev {
177 unsigned int touchpad_supported:1; 187 unsigned int touchpad_supported:1;
178 unsigned int eco_supported:1; 188 unsigned int eco_supported:1;
179 unsigned int accelerometer_supported:1; 189 unsigned int accelerometer_supported:1;
190 unsigned int usb_sleep_charge_supported:1;
191 unsigned int usb_rapid_charge_supported:1;
192 unsigned int usb_sleep_music_supported:1;
193 unsigned int kbd_function_keys_supported:1;
194 unsigned int panel_power_on_supported:1;
195 unsigned int usb_three_supported:1;
180 unsigned int sysfs_created:1; 196 unsigned int sysfs_created:1;
181 197
182 struct mutex mutex; 198 struct mutex mutex;
@@ -264,15 +280,17 @@ static const struct key_entry toshiba_acpi_alt_keymap[] = {
264 { KE_END, 0 }, 280 { KE_END, 0 },
265}; 281};
266 282
267/* utility 283/*
284 * Utility
268 */ 285 */
269 286
270static __inline__ void _set_bit(u32 * word, u32 mask, int value) 287static inline void _set_bit(u32 *word, u32 mask, int value)
271{ 288{
272 *word = (*word & ~mask) | (mask * value); 289 *word = (*word & ~mask) | (mask * value);
273} 290}
274 291
275/* acpi interface wrappers 292/*
293 * ACPI interface wrappers
276 */ 294 */
277 295
278static int write_acpi_int(const char *methodName, int val) 296static int write_acpi_int(const char *methodName, int val)
@@ -283,7 +301,8 @@ static int write_acpi_int(const char *methodName, int val)
283 return (status == AE_OK) ? 0 : -EIO; 301 return (status == AE_OK) ? 0 : -EIO;
284} 302}
285 303
286/* Perform a raw configuration call. Here we don't care about input or output 304/*
305 * Perform a raw configuration call. Here we don't care about input or output
287 * buffer format. 306 * buffer format.
288 */ 307 */
289static acpi_status tci_raw(struct toshiba_acpi_dev *dev, 308static acpi_status tci_raw(struct toshiba_acpi_dev *dev,
@@ -310,15 +329,15 @@ static acpi_status tci_raw(struct toshiba_acpi_dev *dev,
310 (char *)dev->method_hci, &params, 329 (char *)dev->method_hci, &params,
311 &results); 330 &results);
312 if ((status == AE_OK) && (out_objs->package.count <= TCI_WORDS)) { 331 if ((status == AE_OK) && (out_objs->package.count <= TCI_WORDS)) {
313 for (i = 0; i < out_objs->package.count; ++i) { 332 for (i = 0; i < out_objs->package.count; ++i)
314 out[i] = out_objs->package.elements[i].integer.value; 333 out[i] = out_objs->package.elements[i].integer.value;
315 }
316 } 334 }
317 335
318 return status; 336 return status;
319} 337}
320 338
321/* common hci tasks (get or set one or two value) 339/*
340 * Common hci tasks (get or set one or two value)
322 * 341 *
323 * In addition to the ACPI status, the HCI system returns a result which 342 * In addition to the ACPI status, the HCI system returns a result which
324 * may be useful (such as "not supported"). 343 * may be useful (such as "not supported").
@@ -338,6 +357,7 @@ static u32 hci_read1(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1)
338 u32 in[TCI_WORDS] = { HCI_GET, reg, 0, 0, 0, 0 }; 357 u32 in[TCI_WORDS] = { HCI_GET, reg, 0, 0, 0, 0 };
339 u32 out[TCI_WORDS]; 358 u32 out[TCI_WORDS];
340 acpi_status status = tci_raw(dev, in, out); 359 acpi_status status = tci_raw(dev, in, out);
360
341 if (ACPI_FAILURE(status)) 361 if (ACPI_FAILURE(status))
342 return TOS_FAILURE; 362 return TOS_FAILURE;
343 363
@@ -355,11 +375,13 @@ static u32 hci_write2(struct toshiba_acpi_dev *dev, u32 reg, u32 in1, u32 in2)
355 return ACPI_SUCCESS(status) ? out[0] : TOS_FAILURE; 375 return ACPI_SUCCESS(status) ? out[0] : TOS_FAILURE;
356} 376}
357 377
358static u32 hci_read2(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1, u32 *out2) 378static u32 hci_read2(struct toshiba_acpi_dev *dev,
379 u32 reg, u32 *out1, u32 *out2)
359{ 380{
360 u32 in[TCI_WORDS] = { HCI_GET, reg, *out1, *out2, 0, 0 }; 381 u32 in[TCI_WORDS] = { HCI_GET, reg, *out1, *out2, 0, 0 };
361 u32 out[TCI_WORDS]; 382 u32 out[TCI_WORDS];
362 acpi_status status = tci_raw(dev, in, out); 383 acpi_status status = tci_raw(dev, in, out);
384
363 if (ACPI_FAILURE(status)) 385 if (ACPI_FAILURE(status))
364 return TOS_FAILURE; 386 return TOS_FAILURE;
365 387
@@ -369,7 +391,8 @@ static u32 hci_read2(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1, u32 *out2
369 return out[0]; 391 return out[0];
370} 392}
371 393
372/* common sci tasks 394/*
395 * Common sci tasks
373 */ 396 */
374 397
375static int sci_open(struct toshiba_acpi_dev *dev) 398static int sci_open(struct toshiba_acpi_dev *dev)
@@ -389,6 +412,20 @@ static int sci_open(struct toshiba_acpi_dev *dev)
389 } else if (out[0] == TOS_ALREADY_OPEN) { 412 } else if (out[0] == TOS_ALREADY_OPEN) {
390 pr_info("Toshiba SCI already opened\n"); 413 pr_info("Toshiba SCI already opened\n");
391 return 1; 414 return 1;
415 } else if (out[0] == TOS_NOT_SUPPORTED) {
416 /*
417 * Some BIOSes do not have the SCI open/close functions
418 * implemented and return 0x8000 (Not Supported), failing to
419 * register some supported features.
420 *
421 * Simply return 1 if we hit those affected laptops to make the
422 * supported features work.
423 *
424 * In the case that some laptops really do not support the SCI,
425 * all the SCI dependent functions check for TOS_NOT_SUPPORTED,
426 * and thus will not register support for the queried feature.
427 */
428 return 1;
392 } else if (out[0] == TOS_NOT_PRESENT) { 429 } else if (out[0] == TOS_NOT_PRESENT) {
393 pr_info("Toshiba SCI is not present\n"); 430 pr_info("Toshiba SCI is not present\n");
394 } 431 }
@@ -421,6 +458,7 @@ static u32 sci_read(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1)
421 u32 in[TCI_WORDS] = { SCI_GET, reg, 0, 0, 0, 0 }; 458 u32 in[TCI_WORDS] = { SCI_GET, reg, 0, 0, 0, 0 };
422 u32 out[TCI_WORDS]; 459 u32 out[TCI_WORDS];
423 acpi_status status = tci_raw(dev, in, out); 460 acpi_status status = tci_raw(dev, in, out);
461
424 if (ACPI_FAILURE(status)) 462 if (ACPI_FAILURE(status))
425 return TOS_FAILURE; 463 return TOS_FAILURE;
426 464
@@ -529,10 +567,11 @@ static int toshiba_kbd_illum_available(struct toshiba_acpi_dev *dev)
529 return 0; 567 return 0;
530 } 568 }
531 569
532 /* Check for keyboard backlight timeout max value, 570 /*
571 * Check for keyboard backlight timeout max value,
533 * previous kbd backlight implementation set this to 572 * previous kbd backlight implementation set this to
534 * 0x3c0003, and now the new implementation set this 573 * 0x3c0003, and now the new implementation set this
535 * to 0x3c001a, use this to distinguish between them 574 * to 0x3c001a, use this to distinguish between them.
536 */ 575 */
537 if (out[3] == SCI_KBD_TIME_MAX) 576 if (out[3] == SCI_KBD_TIME_MAX)
538 dev->kbd_type = 2; 577 dev->kbd_type = 2;
@@ -667,19 +706,37 @@ static int toshiba_touchpad_get(struct toshiba_acpi_dev *dev, u32 *state)
667static int toshiba_eco_mode_available(struct toshiba_acpi_dev *dev) 706static int toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
668{ 707{
669 acpi_status status; 708 acpi_status status;
670 u32 in[TCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 1, 0, 0 }; 709 u32 in[TCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 0, 0, 0 };
671 u32 out[TCI_WORDS]; 710 u32 out[TCI_WORDS];
672 711
673 status = tci_raw(dev, in, out); 712 status = tci_raw(dev, in, out);
674 if (ACPI_FAILURE(status) || out[0] == TOS_INPUT_DATA_ERROR) { 713 if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
675 pr_info("ACPI call to get ECO led failed\n"); 714 pr_err("ACPI call to get ECO led failed\n");
676 return 0; 715 } else if (out[0] == TOS_NOT_INSTALLED) {
716 pr_info("ECO led not installed");
717 } else if (out[0] == TOS_INPUT_DATA_ERROR) {
718 /*
719 * If we receive 0x8300 (Input Data Error), it means that the
720 * LED device is present, but that we just screwed the input
721 * parameters.
722 *
723 * Let's query the status of the LED to see if we really have a
724 * success response, indicating the actual presence of the LED,
725 * bail out otherwise.
726 */
727 in[3] = 1;
728 status = tci_raw(dev, in, out);
729 if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE)
730 pr_err("ACPI call to get ECO led failed\n");
731 else if (out[0] == TOS_SUCCESS)
732 return 1;
677 } 733 }
678 734
679 return 1; 735 return 0;
680} 736}
681 737
682static enum led_brightness toshiba_eco_mode_get_status(struct led_classdev *cdev) 738static enum led_brightness
739toshiba_eco_mode_get_status(struct led_classdev *cdev)
683{ 740{
684 struct toshiba_acpi_dev *dev = container_of(cdev, 741 struct toshiba_acpi_dev *dev = container_of(cdev,
685 struct toshiba_acpi_dev, eco_led); 742 struct toshiba_acpi_dev, eco_led);
@@ -721,7 +778,8 @@ static int toshiba_accelerometer_supported(struct toshiba_acpi_dev *dev)
721 u32 out[TCI_WORDS]; 778 u32 out[TCI_WORDS];
722 acpi_status status; 779 acpi_status status;
723 780
724 /* Check if the accelerometer call exists, 781 /*
782 * Check if the accelerometer call exists,
725 * this call also serves as initialization 783 * this call also serves as initialization
726 */ 784 */
727 status = tci_raw(dev, in, out); 785 status = tci_raw(dev, in, out);
@@ -760,6 +818,337 @@ static int toshiba_accelerometer_get(struct toshiba_acpi_dev *dev,
760 return 0; 818 return 0;
761} 819}
762 820
821/* Sleep (Charge and Music) utilities support */
822static int toshiba_usb_sleep_charge_get(struct toshiba_acpi_dev *dev,
823 u32 *mode)
824{
825 u32 result;
826
827 if (!sci_open(dev))
828 return -EIO;
829
830 result = sci_read(dev, SCI_USB_SLEEP_CHARGE, mode);
831 sci_close(dev);
832 if (result == TOS_FAILURE) {
833 pr_err("ACPI call to get USB S&C mode failed\n");
834 return -EIO;
835 } else if (result == TOS_NOT_SUPPORTED) {
836 pr_info("USB Sleep and Charge not supported\n");
837 return -ENODEV;
838 } else if (result == TOS_INPUT_DATA_ERROR) {
839 return -EIO;
840 }
841
842 return 0;
843}
844
845static int toshiba_usb_sleep_charge_set(struct toshiba_acpi_dev *dev,
846 u32 mode)
847{
848 u32 result;
849
850 if (!sci_open(dev))
851 return -EIO;
852
853 result = sci_write(dev, SCI_USB_SLEEP_CHARGE, mode);
854 sci_close(dev);
855 if (result == TOS_FAILURE) {
856 pr_err("ACPI call to set USB S&C mode failed\n");
857 return -EIO;
858 } else if (result == TOS_NOT_SUPPORTED) {
859 pr_info("USB Sleep and Charge not supported\n");
860 return -ENODEV;
861 } else if (result == TOS_INPUT_DATA_ERROR) {
862 return -EIO;
863 }
864
865 return 0;
866}
867
868static int toshiba_sleep_functions_status_get(struct toshiba_acpi_dev *dev,
869 u32 *mode)
870{
871 u32 in[TCI_WORDS] = { SCI_GET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
872 u32 out[TCI_WORDS];
873 acpi_status status;
874
875 if (!sci_open(dev))
876 return -EIO;
877
878 in[5] = SCI_USB_CHARGE_BAT_LVL;
879 status = tci_raw(dev, in, out);
880 sci_close(dev);
881 if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
882 pr_err("ACPI call to get USB S&C battery level failed\n");
883 return -EIO;
884 } else if (out[0] == TOS_NOT_SUPPORTED) {
885 pr_info("USB Sleep and Charge not supported\n");
886 return -ENODEV;
887 } else if (out[0] == TOS_INPUT_DATA_ERROR) {
888 return -EIO;
889 }
890
891 *mode = out[2];
892
893 return 0;
894}
895
896static int toshiba_sleep_functions_status_set(struct toshiba_acpi_dev *dev,
897 u32 mode)
898{
899 u32 in[TCI_WORDS] = { SCI_SET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
900 u32 out[TCI_WORDS];
901 acpi_status status;
902
903 if (!sci_open(dev))
904 return -EIO;
905
906 in[2] = mode;
907 in[5] = SCI_USB_CHARGE_BAT_LVL;
908 status = tci_raw(dev, in, out);
909 sci_close(dev);
910 if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
911 pr_err("ACPI call to set USB S&C battery level failed\n");
912 return -EIO;
913 } else if (out[0] == TOS_NOT_SUPPORTED) {
914 pr_info("USB Sleep and Charge not supported\n");
915 return -ENODEV;
916 } else if (out[0] == TOS_INPUT_DATA_ERROR) {
917 return -EIO;
918 }
919
920 return 0;
921}
922
923static int toshiba_usb_rapid_charge_get(struct toshiba_acpi_dev *dev,
924 u32 *state)
925{
926 u32 in[TCI_WORDS] = { SCI_GET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
927 u32 out[TCI_WORDS];
928 acpi_status status;
929
930 if (!sci_open(dev))
931 return -EIO;
932
933 in[5] = SCI_USB_CHARGE_RAPID_DSP;
934 status = tci_raw(dev, in, out);
935 sci_close(dev);
936 if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
937 pr_err("ACPI call to get USB S&C battery level failed\n");
938 return -EIO;
939 } else if (out[0] == TOS_NOT_SUPPORTED ||
940 out[0] == TOS_INPUT_DATA_ERROR) {
941 pr_info("USB Sleep and Charge not supported\n");
942 return -ENODEV;
943 }
944
945 *state = out[2];
946
947 return 0;
948}
949
950static int toshiba_usb_rapid_charge_set(struct toshiba_acpi_dev *dev,
951 u32 state)
952{
953 u32 in[TCI_WORDS] = { SCI_SET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
954 u32 out[TCI_WORDS];
955 acpi_status status;
956
957 if (!sci_open(dev))
958 return -EIO;
959
960 in[2] = state;
961 in[5] = SCI_USB_CHARGE_RAPID_DSP;
962 status = tci_raw(dev, in, out);
963 sci_close(dev);
964 if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
965 pr_err("ACPI call to set USB S&C battery level failed\n");
966 return -EIO;
967 } else if (out[0] == TOS_NOT_SUPPORTED) {
968 pr_info("USB Sleep and Charge not supported\n");
969 return -ENODEV;
970 } else if (out[0] == TOS_INPUT_DATA_ERROR) {
971 return -EIO;
972 }
973
974 return 0;
975}
976
977static int toshiba_usb_sleep_music_get(struct toshiba_acpi_dev *dev, u32 *state)
978{
979 u32 result;
980
981 if (!sci_open(dev))
982 return -EIO;
983
984 result = sci_read(dev, SCI_USB_SLEEP_MUSIC, state);
985 sci_close(dev);
986 if (result == TOS_FAILURE) {
987 pr_err("ACPI call to get USB Sleep and Music failed\n");
988 return -EIO;
989 } else if (result == TOS_NOT_SUPPORTED) {
990 pr_info("USB Sleep and Music not supported\n");
991 return -ENODEV;
992 } else if (result == TOS_INPUT_DATA_ERROR) {
993 return -EIO;
994 }
995
996 return 0;
997}
998
999static int toshiba_usb_sleep_music_set(struct toshiba_acpi_dev *dev, u32 state)
1000{
1001 u32 result;
1002
1003 if (!sci_open(dev))
1004 return -EIO;
1005
1006 result = sci_write(dev, SCI_USB_SLEEP_MUSIC, state);
1007 sci_close(dev);
1008 if (result == TOS_FAILURE) {
1009 pr_err("ACPI call to set USB Sleep and Music failed\n");
1010 return -EIO;
1011 } else if (result == TOS_NOT_SUPPORTED) {
1012 pr_info("USB Sleep and Music not supported\n");
1013 return -ENODEV;
1014 } else if (result == TOS_INPUT_DATA_ERROR) {
1015 return -EIO;
1016 }
1017
1018 return 0;
1019}
1020
1021/* Keyboard function keys */
1022static int toshiba_function_keys_get(struct toshiba_acpi_dev *dev, u32 *mode)
1023{
1024 u32 result;
1025
1026 if (!sci_open(dev))
1027 return -EIO;
1028
1029 result = sci_read(dev, SCI_KBD_FUNCTION_KEYS, mode);
1030 sci_close(dev);
1031 if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
1032 pr_err("ACPI call to get KBD function keys failed\n");
1033 return -EIO;
1034 } else if (result == TOS_NOT_SUPPORTED) {
1035 pr_info("KBD function keys not supported\n");
1036 return -ENODEV;
1037 }
1038
1039 return 0;
1040}
1041
1042static int toshiba_function_keys_set(struct toshiba_acpi_dev *dev, u32 mode)
1043{
1044 u32 result;
1045
1046 if (!sci_open(dev))
1047 return -EIO;
1048
1049 result = sci_write(dev, SCI_KBD_FUNCTION_KEYS, mode);
1050 sci_close(dev);
1051 if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
1052 pr_err("ACPI call to set KBD function keys failed\n");
1053 return -EIO;
1054 } else if (result == TOS_NOT_SUPPORTED) {
1055 pr_info("KBD function keys not supported\n");
1056 return -ENODEV;
1057 }
1058
1059 return 0;
1060}
1061
1062/* Panel Power ON */
1063static int toshiba_panel_power_on_get(struct toshiba_acpi_dev *dev, u32 *state)
1064{
1065 u32 result;
1066
1067 if (!sci_open(dev))
1068 return -EIO;
1069
1070 result = sci_read(dev, SCI_PANEL_POWER_ON, state);
1071 sci_close(dev);
1072 if (result == TOS_FAILURE) {
1073 pr_err("ACPI call to get Panel Power ON failed\n");
1074 return -EIO;
1075 } else if (result == TOS_NOT_SUPPORTED) {
1076 pr_info("Panel Power ON not supported\n");
1077 return -ENODEV;
1078 } else if (result == TOS_INPUT_DATA_ERROR) {
1079 return -EIO;
1080 }
1081
1082 return 0;
1083}
1084
1085static int toshiba_panel_power_on_set(struct toshiba_acpi_dev *dev, u32 state)
1086{
1087 u32 result;
1088
1089 if (!sci_open(dev))
1090 return -EIO;
1091
1092 result = sci_write(dev, SCI_PANEL_POWER_ON, state);
1093 sci_close(dev);
1094 if (result == TOS_FAILURE) {
1095 pr_err("ACPI call to set Panel Power ON failed\n");
1096 return -EIO;
1097 } else if (result == TOS_NOT_SUPPORTED) {
1098 pr_info("Panel Power ON not supported\n");
1099 return -ENODEV;
1100 } else if (result == TOS_INPUT_DATA_ERROR) {
1101 return -EIO;
1102 }
1103
1104 return 0;
1105}
1106
1107/* USB Three */
1108static int toshiba_usb_three_get(struct toshiba_acpi_dev *dev, u32 *state)
1109{
1110 u32 result;
1111
1112 if (!sci_open(dev))
1113 return -EIO;
1114
1115 result = sci_read(dev, SCI_USB_THREE, state);
1116 sci_close(dev);
1117 if (result == TOS_FAILURE) {
1118 pr_err("ACPI call to get USB 3 failed\n");
1119 return -EIO;
1120 } else if (result == TOS_NOT_SUPPORTED) {
1121 pr_info("USB 3 not supported\n");
1122 return -ENODEV;
1123 } else if (result == TOS_INPUT_DATA_ERROR) {
1124 return -EIO;
1125 }
1126
1127 return 0;
1128}
1129
1130static int toshiba_usb_three_set(struct toshiba_acpi_dev *dev, u32 state)
1131{
1132 u32 result;
1133
1134 if (!sci_open(dev))
1135 return -EIO;
1136
1137 result = sci_write(dev, SCI_USB_THREE, state);
1138 sci_close(dev);
1139 if (result == TOS_FAILURE) {
1140 pr_err("ACPI call to set USB 3 failed\n");
1141 return -EIO;
1142 } else if (result == TOS_NOT_SUPPORTED) {
1143 pr_info("USB 3 not supported\n");
1144 return -ENODEV;
1145 } else if (result == TOS_INPUT_DATA_ERROR) {
1146 return -EIO;
1147 }
1148
1149 return 0;
1150}
1151
763/* Bluetooth rfkill handlers */ 1152/* Bluetooth rfkill handlers */
764 1153
765static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present) 1154static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present)
@@ -870,7 +1259,7 @@ static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable)
870 return hci_result == TOS_SUCCESS ? 0 : -EIO; 1259 return hci_result == TOS_SUCCESS ? 0 : -EIO;
871} 1260}
872 1261
873static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ; 1262static struct proc_dir_entry *toshiba_proc_dir /*= 0*/;
874 1263
875static int __get_lcd_brightness(struct toshiba_acpi_dev *dev) 1264static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
876{ 1265{
@@ -881,6 +1270,7 @@ static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
881 if (dev->tr_backlight_supported) { 1270 if (dev->tr_backlight_supported) {
882 bool enabled; 1271 bool enabled;
883 int ret = get_tr_backlight_status(dev, &enabled); 1272 int ret = get_tr_backlight_status(dev, &enabled);
1273
884 if (ret) 1274 if (ret)
885 return ret; 1275 return ret;
886 if (enabled) 1276 if (enabled)
@@ -898,6 +1288,7 @@ static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
898static int get_lcd_brightness(struct backlight_device *bd) 1288static int get_lcd_brightness(struct backlight_device *bd)
899{ 1289{
900 struct toshiba_acpi_dev *dev = bl_get_data(bd); 1290 struct toshiba_acpi_dev *dev = bl_get_data(bd);
1291
901 return __get_lcd_brightness(dev); 1292 return __get_lcd_brightness(dev);
902} 1293}
903 1294
@@ -934,6 +1325,7 @@ static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
934 if (dev->tr_backlight_supported) { 1325 if (dev->tr_backlight_supported) {
935 bool enable = !value; 1326 bool enable = !value;
936 int ret = set_tr_backlight_status(dev, enable); 1327 int ret = set_tr_backlight_status(dev, enable);
1328
937 if (ret) 1329 if (ret)
938 return ret; 1330 return ret;
939 if (value) 1331 if (value)
@@ -948,6 +1340,7 @@ static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
948static int set_lcd_status(struct backlight_device *bd) 1340static int set_lcd_status(struct backlight_device *bd)
949{ 1341{
950 struct toshiba_acpi_dev *dev = bl_get_data(bd); 1342 struct toshiba_acpi_dev *dev = bl_get_data(bd);
1343
951 return set_lcd_brightness(dev, bd->props.brightness); 1344 return set_lcd_brightness(dev, bd->props.brightness);
952} 1345}
953 1346
@@ -1005,6 +1398,7 @@ static int video_proc_show(struct seq_file *m, void *v)
1005 int is_lcd = (value & HCI_VIDEO_OUT_LCD) ? 1 : 0; 1398 int is_lcd = (value & HCI_VIDEO_OUT_LCD) ? 1 : 0;
1006 int is_crt = (value & HCI_VIDEO_OUT_CRT) ? 1 : 0; 1399 int is_crt = (value & HCI_VIDEO_OUT_CRT) ? 1 : 0;
1007 int is_tv = (value & HCI_VIDEO_OUT_TV) ? 1 : 0; 1400 int is_tv = (value & HCI_VIDEO_OUT_TV) ? 1 : 0;
1401
1008 seq_printf(m, "lcd_out: %d\n", is_lcd); 1402 seq_printf(m, "lcd_out: %d\n", is_lcd);
1009 seq_printf(m, "crt_out: %d\n", is_crt); 1403 seq_printf(m, "crt_out: %d\n", is_crt);
1010 seq_printf(m, "tv_out: %d\n", is_tv); 1404 seq_printf(m, "tv_out: %d\n", is_tv);
@@ -1042,9 +1436,9 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
1042 1436
1043 buffer = cmd; 1437 buffer = cmd;
1044 1438
1045 /* scan expression. Multiple expressions may be delimited with ; 1439 /*
1046 * 1440 * Scan expression. Multiple expressions may be delimited with ;
1047 * NOTE: to keep scanning simple, invalid fields are ignored 1441 * NOTE: To keep scanning simple, invalid fields are ignored.
1048 */ 1442 */
1049 while (remain) { 1443 while (remain) {
1050 if (sscanf(buffer, " lcd_out : %i", &value) == 1) 1444 if (sscanf(buffer, " lcd_out : %i", &value) == 1)
@@ -1053,12 +1447,11 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
1053 crt_out = value & 1; 1447 crt_out = value & 1;
1054 else if (sscanf(buffer, " tv_out : %i", &value) == 1) 1448 else if (sscanf(buffer, " tv_out : %i", &value) == 1)
1055 tv_out = value & 1; 1449 tv_out = value & 1;
1056 /* advance to one character past the next ; */ 1450 /* Advance to one character past the next ; */
1057 do { 1451 do {
1058 ++buffer; 1452 ++buffer;
1059 --remain; 1453 --remain;
1060 } 1454 } while (remain && *(buffer - 1) != ';');
1061 while (remain && *(buffer - 1) != ';');
1062 } 1455 }
1063 1456
1064 kfree(cmd); 1457 kfree(cmd);
@@ -1066,13 +1459,15 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
1066 ret = get_video_status(dev, &video_out); 1459 ret = get_video_status(dev, &video_out);
1067 if (!ret) { 1460 if (!ret) {
1068 unsigned int new_video_out = video_out; 1461 unsigned int new_video_out = video_out;
1462
1069 if (lcd_out != -1) 1463 if (lcd_out != -1)
1070 _set_bit(&new_video_out, HCI_VIDEO_OUT_LCD, lcd_out); 1464 _set_bit(&new_video_out, HCI_VIDEO_OUT_LCD, lcd_out);
1071 if (crt_out != -1) 1465 if (crt_out != -1)
1072 _set_bit(&new_video_out, HCI_VIDEO_OUT_CRT, crt_out); 1466 _set_bit(&new_video_out, HCI_VIDEO_OUT_CRT, crt_out);
1073 if (tv_out != -1) 1467 if (tv_out != -1)
1074 _set_bit(&new_video_out, HCI_VIDEO_OUT_TV, tv_out); 1468 _set_bit(&new_video_out, HCI_VIDEO_OUT_TV, tv_out);
1075 /* To avoid unnecessary video disruption, only write the new 1469 /*
1470 * To avoid unnecessary video disruption, only write the new
1076 * video setting if something changed. */ 1471 * video setting if something changed. */
1077 if (new_video_out != video_out) 1472 if (new_video_out != video_out)
1078 ret = write_acpi_int(METHOD_VIDEO_OUT, new_video_out); 1473 ret = write_acpi_int(METHOD_VIDEO_OUT, new_video_out);
@@ -1135,10 +1530,10 @@ static ssize_t fan_proc_write(struct file *file, const char __user *buf,
1135 if (sscanf(cmd, " force_on : %i", &value) == 1 && 1530 if (sscanf(cmd, " force_on : %i", &value) == 1 &&
1136 value >= 0 && value <= 1) { 1531 value >= 0 && value <= 1) {
1137 hci_result = hci_write1(dev, HCI_FAN, value); 1532 hci_result = hci_write1(dev, HCI_FAN, value);
1138 if (hci_result != TOS_SUCCESS) 1533 if (hci_result == TOS_SUCCESS)
1139 return -EIO;
1140 else
1141 dev->force_fan = value; 1534 dev->force_fan = value;
1535 else
1536 return -EIO;
1142 } else { 1537 } else {
1143 return -EINVAL; 1538 return -EINVAL;
1144 } 1539 }
@@ -1167,11 +1562,13 @@ static int keys_proc_show(struct seq_file *m, void *v)
1167 dev->key_event_valid = 1; 1562 dev->key_event_valid = 1;
1168 dev->last_key_event = value; 1563 dev->last_key_event = value;
1169 } else if (hci_result == TOS_FIFO_EMPTY) { 1564 } else if (hci_result == TOS_FIFO_EMPTY) {
1170 /* better luck next time */ 1565 /* Better luck next time */
1171 } else if (hci_result == TOS_NOT_SUPPORTED) { 1566 } else if (hci_result == TOS_NOT_SUPPORTED) {
1172 /* This is a workaround for an unresolved issue on 1567 /*
1568 * This is a workaround for an unresolved issue on
1173 * some machines where system events sporadically 1569 * some machines where system events sporadically
1174 * become disabled. */ 1570 * become disabled.
1571 */
1175 hci_result = hci_write1(dev, HCI_SYSTEM_EVENT, 1); 1572 hci_result = hci_write1(dev, HCI_SYSTEM_EVENT, 1);
1176 pr_notice("Re-enabled hotkeys\n"); 1573 pr_notice("Re-enabled hotkeys\n");
1177 } else { 1574 } else {
@@ -1203,11 +1600,10 @@ static ssize_t keys_proc_write(struct file *file, const char __user *buf,
1203 return -EFAULT; 1600 return -EFAULT;
1204 cmd[len] = '\0'; 1601 cmd[len] = '\0';
1205 1602
1206 if (sscanf(cmd, " hotkey_ready : %i", &value) == 1 && value == 0) { 1603 if (sscanf(cmd, " hotkey_ready : %i", &value) == 1 && value == 0)
1207 dev->key_event_valid = 0; 1604 dev->key_event_valid = 0;
1208 } else { 1605 else
1209 return -EINVAL; 1606 return -EINVAL;
1210 }
1211 1607
1212 return count; 1608 return count;
1213} 1609}
@@ -1241,7 +1637,8 @@ static const struct file_operations version_proc_fops = {
1241 .release = single_release, 1637 .release = single_release,
1242}; 1638};
1243 1639
1244/* proc and module init 1640/*
1641 * Proc and module init
1245 */ 1642 */
1246 1643
1247#define PROC_TOSHIBA "toshiba" 1644#define PROC_TOSHIBA "toshiba"
@@ -1286,66 +1683,56 @@ static const struct backlight_ops toshiba_backlight_data = {
1286/* 1683/*
1287 * Sysfs files 1684 * Sysfs files
1288 */ 1685 */
1289static ssize_t toshiba_kbd_bl_mode_store(struct device *dev, 1686static ssize_t version_show(struct device *dev,
1290 struct device_attribute *attr, 1687 struct device_attribute *attr, char *buf)
1291 const char *buf, size_t count); 1688{
1292static ssize_t toshiba_kbd_bl_mode_show(struct device *dev, 1689 return sprintf(buf, "%s\n", TOSHIBA_ACPI_VERSION);
1293 struct device_attribute *attr, 1690}
1294 char *buf); 1691static DEVICE_ATTR_RO(version);
1295static ssize_t toshiba_kbd_type_show(struct device *dev,
1296 struct device_attribute *attr,
1297 char *buf);
1298static ssize_t toshiba_available_kbd_modes_show(struct device *dev,
1299 struct device_attribute *attr,
1300 char *buf);
1301static ssize_t toshiba_kbd_bl_timeout_store(struct device *dev,
1302 struct device_attribute *attr,
1303 const char *buf, size_t count);
1304static ssize_t toshiba_kbd_bl_timeout_show(struct device *dev,
1305 struct device_attribute *attr,
1306 char *buf);
1307static ssize_t toshiba_touchpad_store(struct device *dev,
1308 struct device_attribute *attr,
1309 const char *buf, size_t count);
1310static ssize_t toshiba_touchpad_show(struct device *dev,
1311 struct device_attribute *attr,
1312 char *buf);
1313static ssize_t toshiba_position_show(struct device *dev,
1314 struct device_attribute *attr,
1315 char *buf);
1316
1317static DEVICE_ATTR(kbd_backlight_mode, S_IRUGO | S_IWUSR,
1318 toshiba_kbd_bl_mode_show, toshiba_kbd_bl_mode_store);
1319static DEVICE_ATTR(kbd_type, S_IRUGO, toshiba_kbd_type_show, NULL);
1320static DEVICE_ATTR(available_kbd_modes, S_IRUGO,
1321 toshiba_available_kbd_modes_show, NULL);
1322static DEVICE_ATTR(kbd_backlight_timeout, S_IRUGO | S_IWUSR,
1323 toshiba_kbd_bl_timeout_show, toshiba_kbd_bl_timeout_store);
1324static DEVICE_ATTR(touchpad, S_IRUGO | S_IWUSR,
1325 toshiba_touchpad_show, toshiba_touchpad_store);
1326static DEVICE_ATTR(position, S_IRUGO, toshiba_position_show, NULL);
1327 1692
1328static struct attribute *toshiba_attributes[] = { 1693static ssize_t fan_store(struct device *dev,
1329 &dev_attr_kbd_backlight_mode.attr, 1694 struct device_attribute *attr,
1330 &dev_attr_kbd_type.attr, 1695 const char *buf, size_t count)
1331 &dev_attr_available_kbd_modes.attr, 1696{
1332 &dev_attr_kbd_backlight_timeout.attr, 1697 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1333 &dev_attr_touchpad.attr, 1698 u32 result;
1334 &dev_attr_position.attr, 1699 int state;
1335 NULL, 1700 int ret;
1336};
1337 1701
1338static umode_t toshiba_sysfs_is_visible(struct kobject *, 1702 ret = kstrtoint(buf, 0, &state);
1339 struct attribute *, int); 1703 if (ret)
1704 return ret;
1340 1705
1341static struct attribute_group toshiba_attr_group = { 1706 if (state != 0 && state != 1)
1342 .is_visible = toshiba_sysfs_is_visible, 1707 return -EINVAL;
1343 .attrs = toshiba_attributes,
1344};
1345 1708
1346static ssize_t toshiba_kbd_bl_mode_store(struct device *dev, 1709 result = hci_write1(toshiba, HCI_FAN, state);
1347 struct device_attribute *attr, 1710 if (result == TOS_FAILURE)
1348 const char *buf, size_t count) 1711 return -EIO;
1712 else if (result == TOS_NOT_SUPPORTED)
1713 return -ENODEV;
1714
1715 return count;
1716}
1717
1718static ssize_t fan_show(struct device *dev,
1719 struct device_attribute *attr, char *buf)
1720{
1721 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1722 u32 value;
1723 int ret;
1724
1725 ret = get_fan_status(toshiba, &value);
1726 if (ret)
1727 return ret;
1728
1729 return sprintf(buf, "%d\n", value);
1730}
1731static DEVICE_ATTR_RW(fan);
1732
1733static ssize_t kbd_backlight_mode_store(struct device *dev,
1734 struct device_attribute *attr,
1735 const char *buf, size_t count)
1349{ 1736{
1350 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); 1737 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1351 int mode; 1738 int mode;
@@ -1369,7 +1756,8 @@ static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
1369 return -EINVAL; 1756 return -EINVAL;
1370 } 1757 }
1371 1758
1372 /* Set the Keyboard Backlight Mode where: 1759 /*
1760 * Set the Keyboard Backlight Mode where:
1373 * Auto - KBD backlight turns off automatically in given time 1761 * Auto - KBD backlight turns off automatically in given time
1374 * FN-Z - KBD backlight "toggles" when hotkey pressed 1762 * FN-Z - KBD backlight "toggles" when hotkey pressed
1375 * ON - KBD backlight is always on 1763 * ON - KBD backlight is always on
@@ -1400,9 +1788,9 @@ static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
1400 return count; 1788 return count;
1401} 1789}
1402 1790
1403static ssize_t toshiba_kbd_bl_mode_show(struct device *dev, 1791static ssize_t kbd_backlight_mode_show(struct device *dev,
1404 struct device_attribute *attr, 1792 struct device_attribute *attr,
1405 char *buf) 1793 char *buf)
1406{ 1794{
1407 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); 1795 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1408 u32 time; 1796 u32 time;
@@ -1412,19 +1800,20 @@ static ssize_t toshiba_kbd_bl_mode_show(struct device *dev,
1412 1800
1413 return sprintf(buf, "%i\n", time & SCI_KBD_MODE_MASK); 1801 return sprintf(buf, "%i\n", time & SCI_KBD_MODE_MASK);
1414} 1802}
1803static DEVICE_ATTR_RW(kbd_backlight_mode);
1415 1804
1416static ssize_t toshiba_kbd_type_show(struct device *dev, 1805static ssize_t kbd_type_show(struct device *dev,
1417 struct device_attribute *attr, 1806 struct device_attribute *attr, char *buf)
1418 char *buf)
1419{ 1807{
1420 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); 1808 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1421 1809
1422 return sprintf(buf, "%d\n", toshiba->kbd_type); 1810 return sprintf(buf, "%d\n", toshiba->kbd_type);
1423} 1811}
1812static DEVICE_ATTR_RO(kbd_type);
1424 1813
1425static ssize_t toshiba_available_kbd_modes_show(struct device *dev, 1814static ssize_t available_kbd_modes_show(struct device *dev,
1426 struct device_attribute *attr, 1815 struct device_attribute *attr,
1427 char *buf) 1816 char *buf)
1428{ 1817{
1429 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); 1818 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1430 1819
@@ -1435,10 +1824,11 @@ static ssize_t toshiba_available_kbd_modes_show(struct device *dev,
1435 return sprintf(buf, "%x %x %x\n", 1824 return sprintf(buf, "%x %x %x\n",
1436 SCI_KBD_MODE_AUTO, SCI_KBD_MODE_ON, SCI_KBD_MODE_OFF); 1825 SCI_KBD_MODE_AUTO, SCI_KBD_MODE_ON, SCI_KBD_MODE_OFF);
1437} 1826}
1827static DEVICE_ATTR_RO(available_kbd_modes);
1438 1828
1439static ssize_t toshiba_kbd_bl_timeout_store(struct device *dev, 1829static ssize_t kbd_backlight_timeout_store(struct device *dev,
1440 struct device_attribute *attr, 1830 struct device_attribute *attr,
1441 const char *buf, size_t count) 1831 const char *buf, size_t count)
1442{ 1832{
1443 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); 1833 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1444 int time; 1834 int time;
@@ -1479,9 +1869,9 @@ static ssize_t toshiba_kbd_bl_timeout_store(struct device *dev,
1479 return count; 1869 return count;
1480} 1870}
1481 1871
1482static ssize_t toshiba_kbd_bl_timeout_show(struct device *dev, 1872static ssize_t kbd_backlight_timeout_show(struct device *dev,
1483 struct device_attribute *attr, 1873 struct device_attribute *attr,
1484 char *buf) 1874 char *buf)
1485{ 1875{
1486 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); 1876 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1487 u32 time; 1877 u32 time;
@@ -1491,10 +1881,11 @@ static ssize_t toshiba_kbd_bl_timeout_show(struct device *dev,
1491 1881
1492 return sprintf(buf, "%i\n", time >> HCI_MISC_SHIFT); 1882 return sprintf(buf, "%i\n", time >> HCI_MISC_SHIFT);
1493} 1883}
1884static DEVICE_ATTR_RW(kbd_backlight_timeout);
1494 1885
1495static ssize_t toshiba_touchpad_store(struct device *dev, 1886static ssize_t touchpad_store(struct device *dev,
1496 struct device_attribute *attr, 1887 struct device_attribute *attr,
1497 const char *buf, size_t count) 1888 const char *buf, size_t count)
1498{ 1889{
1499 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); 1890 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1500 int state; 1891 int state;
@@ -1514,8 +1905,8 @@ static ssize_t toshiba_touchpad_store(struct device *dev,
1514 return count; 1905 return count;
1515} 1906}
1516 1907
1517static ssize_t toshiba_touchpad_show(struct device *dev, 1908static ssize_t touchpad_show(struct device *dev,
1518 struct device_attribute *attr, char *buf) 1909 struct device_attribute *attr, char *buf)
1519{ 1910{
1520 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); 1911 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1521 u32 state; 1912 u32 state;
@@ -1527,9 +1918,10 @@ static ssize_t toshiba_touchpad_show(struct device *dev,
1527 1918
1528 return sprintf(buf, "%i\n", state); 1919 return sprintf(buf, "%i\n", state);
1529} 1920}
1921static DEVICE_ATTR_RW(touchpad);
1530 1922
1531static ssize_t toshiba_position_show(struct device *dev, 1923static ssize_t position_show(struct device *dev,
1532 struct device_attribute *attr, char *buf) 1924 struct device_attribute *attr, char *buf)
1533{ 1925{
1534 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); 1926 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1535 u32 xyval, zval, tmp; 1927 u32 xyval, zval, tmp;
@@ -1548,6 +1940,336 @@ static ssize_t toshiba_position_show(struct device *dev,
1548 1940
1549 return sprintf(buf, "%d %d %d\n", x, y, z); 1941 return sprintf(buf, "%d %d %d\n", x, y, z);
1550} 1942}
1943static DEVICE_ATTR_RO(position);
1944
1945static ssize_t usb_sleep_charge_show(struct device *dev,
1946 struct device_attribute *attr, char *buf)
1947{
1948 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1949 u32 mode;
1950 int ret;
1951
1952 ret = toshiba_usb_sleep_charge_get(toshiba, &mode);
1953 if (ret < 0)
1954 return ret;
1955
1956 return sprintf(buf, "%x\n", mode & SCI_USB_CHARGE_MODE_MASK);
1957}
1958
1959static ssize_t usb_sleep_charge_store(struct device *dev,
1960 struct device_attribute *attr,
1961 const char *buf, size_t count)
1962{
1963 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
1964 u32 mode;
1965 int state;
1966 int ret;
1967
1968 ret = kstrtoint(buf, 0, &state);
1969 if (ret)
1970 return ret;
1971 /*
1972 * Check for supported values, where:
1973 * 0 - Disabled
1974 * 1 - Alternate (Non USB conformant devices that require more power)
1975 * 2 - Auto (USB conformant devices)
1976 */
1977 if (state != 0 && state != 1 && state != 2)
1978 return -EINVAL;
1979
1980 /* Set the USB charging mode to internal value */
1981 if (state == 0)
1982 mode = SCI_USB_CHARGE_DISABLED;
1983 else if (state == 1)
1984 mode = SCI_USB_CHARGE_ALTERNATE;
1985 else if (state == 2)
1986 mode = SCI_USB_CHARGE_AUTO;
1987
1988 ret = toshiba_usb_sleep_charge_set(toshiba, mode);
1989 if (ret)
1990 return ret;
1991
1992 return count;
1993}
1994static DEVICE_ATTR_RW(usb_sleep_charge);
1995
1996static ssize_t sleep_functions_on_battery_show(struct device *dev,
1997 struct device_attribute *attr,
1998 char *buf)
1999{
2000 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2001 u32 state;
2002 int bat_lvl;
2003 int status;
2004 int ret;
2005 int tmp;
2006
2007 ret = toshiba_sleep_functions_status_get(toshiba, &state);
2008 if (ret < 0)
2009 return ret;
2010
2011 /* Determine the status: 0x4 - Enabled | 0x1 - Disabled */
2012 tmp = state & SCI_USB_CHARGE_BAT_MASK;
2013 status = (tmp == 0x4) ? 1 : 0;
2014 /* Determine the battery level set */
2015 bat_lvl = state >> HCI_MISC_SHIFT;
2016
2017 return sprintf(buf, "%d %d\n", status, bat_lvl);
2018}
2019
2020static ssize_t sleep_functions_on_battery_store(struct device *dev,
2021 struct device_attribute *attr,
2022 const char *buf, size_t count)
2023{
2024 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2025 u32 status;
2026 int value;
2027 int ret;
2028 int tmp;
2029
2030 ret = kstrtoint(buf, 0, &value);
2031 if (ret)
2032 return ret;
2033
2034 /*
2035 * Set the status of the function:
2036 * 0 - Disabled
2037 * 1-100 - Enabled
2038 */
2039 if (value < 0 || value > 100)
2040 return -EINVAL;
2041
2042 if (value == 0) {
2043 tmp = toshiba->usbsc_bat_level << HCI_MISC_SHIFT;
2044 status = tmp | SCI_USB_CHARGE_BAT_LVL_OFF;
2045 } else {
2046 tmp = value << HCI_MISC_SHIFT;
2047 status = tmp | SCI_USB_CHARGE_BAT_LVL_ON;
2048 }
2049 ret = toshiba_sleep_functions_status_set(toshiba, status);
2050 if (ret < 0)
2051 return ret;
2052
2053 toshiba->usbsc_bat_level = status >> HCI_MISC_SHIFT;
2054
2055 return count;
2056}
2057static DEVICE_ATTR_RW(sleep_functions_on_battery);
2058
2059static ssize_t usb_rapid_charge_show(struct device *dev,
2060 struct device_attribute *attr, char *buf)
2061{
2062 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2063 u32 state;
2064 int ret;
2065
2066 ret = toshiba_usb_rapid_charge_get(toshiba, &state);
2067 if (ret < 0)
2068 return ret;
2069
2070 return sprintf(buf, "%d\n", state);
2071}
2072
2073static ssize_t usb_rapid_charge_store(struct device *dev,
2074 struct device_attribute *attr,
2075 const char *buf, size_t count)
2076{
2077 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2078 int state;
2079 int ret;
2080
2081 ret = kstrtoint(buf, 0, &state);
2082 if (ret)
2083 return ret;
2084 if (state != 0 && state != 1)
2085 return -EINVAL;
2086
2087 ret = toshiba_usb_rapid_charge_set(toshiba, state);
2088 if (ret)
2089 return ret;
2090
2091 return count;
2092}
2093static DEVICE_ATTR_RW(usb_rapid_charge);
2094
2095static ssize_t usb_sleep_music_show(struct device *dev,
2096 struct device_attribute *attr, char *buf)
2097{
2098 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2099 u32 state;
2100 int ret;
2101
2102 ret = toshiba_usb_sleep_music_get(toshiba, &state);
2103 if (ret < 0)
2104 return ret;
2105
2106 return sprintf(buf, "%d\n", state);
2107}
2108
2109static ssize_t usb_sleep_music_store(struct device *dev,
2110 struct device_attribute *attr,
2111 const char *buf, size_t count)
2112{
2113 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2114 int state;
2115 int ret;
2116
2117 ret = kstrtoint(buf, 0, &state);
2118 if (ret)
2119 return ret;
2120 if (state != 0 && state != 1)
2121 return -EINVAL;
2122
2123 ret = toshiba_usb_sleep_music_set(toshiba, state);
2124 if (ret)
2125 return ret;
2126
2127 return count;
2128}
2129static DEVICE_ATTR_RW(usb_sleep_music);
2130
2131static ssize_t kbd_function_keys_show(struct device *dev,
2132 struct device_attribute *attr, char *buf)
2133{
2134 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2134 u32 mode;
2136 int ret;
2137
2138 ret = toshiba_function_keys_get(toshiba, &mode);
2139 if (ret < 0)
2140 return ret;
2141
2142 return sprintf(buf, "%d\n", mode);
2143}
2144
2145static ssize_t kbd_function_keys_store(struct device *dev,
2146 struct device_attribute *attr,
2147 const char *buf, size_t count)
2148{
2149 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2150 int mode;
2151 int ret;
2152
2153 ret = kstrtoint(buf, 0, &mode);
2154 if (ret)
2155 return ret;
2156 /*
2157 * Check for the function keys mode where:
2158 * 0 - Normal operation (F{1-12} as usual and hotkeys via FN-F{1-12})
2159 * 1 - Special functions (Opposite of the above setting)
2160 */
2161 if (mode != 0 && mode != 1)
2162 return -EINVAL;
2163
2164 ret = toshiba_function_keys_set(toshiba, mode);
2165 if (ret)
2166 return ret;
2167
2168 pr_info("Reboot for changes to KBD Function Keys to take effect\n");
2169
2170 return count;
2171}
2172static DEVICE_ATTR_RW(kbd_function_keys);
2173
2174static ssize_t panel_power_on_show(struct device *dev,
2175 struct device_attribute *attr, char *buf)
2176{
2177 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2178 u32 state;
2179 int ret;
2180
2181 ret = toshiba_panel_power_on_get(toshiba, &state);
2182 if (ret < 0)
2183 return ret;
2184
2185 return sprintf(buf, "%d\n", state);
2186}
2187
2188static ssize_t panel_power_on_store(struct device *dev,
2189 struct device_attribute *attr,
2190 const char *buf, size_t count)
2191{
2192 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2193 int state;
2194 int ret;
2195
2196 ret = kstrtoint(buf, 0, &state);
2197 if (ret)
2198 return ret;
2199 if (state != 0 && state != 1)
2200 return -EINVAL;
2201
2202 ret = toshiba_panel_power_on_set(toshiba, state);
2203 if (ret)
2204 return ret;
2205
2206 pr_info("Reboot for changes to Panel Power ON to take effect\n");
2207
2208 return count;
2209}
2210static DEVICE_ATTR_RW(panel_power_on);
2211
2212static ssize_t usb_three_show(struct device *dev,
2213 struct device_attribute *attr, char *buf)
2214{
2215 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2216 u32 state;
2217 int ret;
2218
2219 ret = toshiba_usb_three_get(toshiba, &state);
2220 if (ret < 0)
2221 return ret;
2222
2223 return sprintf(buf, "%d\n", state);
2224}
2225
2226static ssize_t usb_three_store(struct device *dev,
2227 struct device_attribute *attr,
2228 const char *buf, size_t count)
2229{
2230 struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
2231 int state;
2232 int ret;
2233
2234 ret = kstrtoint(buf, 0, &state);
2235 if (ret)
2236 return ret;
2237 /*
2238 * Check for USB 3 mode where:
2239 * 0 - Disabled (Acts like a USB 2 port, saving power)
2240 * 1 - Enabled
2241 */
2242 if (state != 0 && state != 1)
2243 return -EINVAL;
2244
2245 ret = toshiba_usb_three_set(toshiba, state);
2246 if (ret)
2247 return ret;
2248
2249 pr_info("Reboot for changes to USB 3 to take effect\n");
2250
2251 return count;
2252}
2253static DEVICE_ATTR_RW(usb_three);
2254
2255static struct attribute *toshiba_attributes[] = {
2256 &dev_attr_version.attr,
2257 &dev_attr_fan.attr,
2258 &dev_attr_kbd_backlight_mode.attr,
2259 &dev_attr_kbd_type.attr,
2260 &dev_attr_available_kbd_modes.attr,
2261 &dev_attr_kbd_backlight_timeout.attr,
2262 &dev_attr_touchpad.attr,
2263 &dev_attr_position.attr,
2264 &dev_attr_usb_sleep_charge.attr,
2265 &dev_attr_sleep_functions_on_battery.attr,
2266 &dev_attr_usb_rapid_charge.attr,
2267 &dev_attr_usb_sleep_music.attr,
2268 &dev_attr_kbd_function_keys.attr,
2269 &dev_attr_panel_power_on.attr,
2270 &dev_attr_usb_three.attr,
2271 NULL,
2272};
1551 2273
1552static umode_t toshiba_sysfs_is_visible(struct kobject *kobj, 2274static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
1553 struct attribute *attr, int idx) 2275 struct attribute *attr, int idx)
@@ -1556,7 +2278,9 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
1556 struct toshiba_acpi_dev *drv = dev_get_drvdata(dev); 2278 struct toshiba_acpi_dev *drv = dev_get_drvdata(dev);
1557 bool exists = true; 2279 bool exists = true;
1558 2280
1559 if (attr == &dev_attr_kbd_backlight_mode.attr) 2281 if (attr == &dev_attr_fan.attr)
2282 exists = (drv->fan_supported) ? true : false;
2283 else if (attr == &dev_attr_kbd_backlight_mode.attr)
1560 exists = (drv->kbd_illum_supported) ? true : false; 2284 exists = (drv->kbd_illum_supported) ? true : false;
1561 else if (attr == &dev_attr_kbd_backlight_timeout.attr) 2285 else if (attr == &dev_attr_kbd_backlight_timeout.attr)
1562 exists = (drv->kbd_mode == SCI_KBD_MODE_AUTO) ? true : false; 2286 exists = (drv->kbd_mode == SCI_KBD_MODE_AUTO) ? true : false;
@@ -1564,10 +2288,29 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
1564 exists = (drv->touchpad_supported) ? true : false; 2288 exists = (drv->touchpad_supported) ? true : false;
1565 else if (attr == &dev_attr_position.attr) 2289 else if (attr == &dev_attr_position.attr)
1566 exists = (drv->accelerometer_supported) ? true : false; 2290 exists = (drv->accelerometer_supported) ? true : false;
2291 else if (attr == &dev_attr_usb_sleep_charge.attr)
2292 exists = (drv->usb_sleep_charge_supported) ? true : false;
2293 else if (attr == &dev_attr_sleep_functions_on_battery.attr)
2294 exists = (drv->usb_sleep_charge_supported) ? true : false;
2295 else if (attr == &dev_attr_usb_rapid_charge.attr)
2296 exists = (drv->usb_rapid_charge_supported) ? true : false;
2297 else if (attr == &dev_attr_usb_sleep_music.attr)
2298 exists = (drv->usb_sleep_music_supported) ? true : false;
2299 else if (attr == &dev_attr_kbd_function_keys.attr)
2300 exists = (drv->kbd_function_keys_supported) ? true : false;
2301 else if (attr == &dev_attr_panel_power_on.attr)
2302 exists = (drv->panel_power_on_supported) ? true : false;
2303 else if (attr == &dev_attr_usb_three.attr)
2304 exists = (drv->usb_three_supported) ? true : false;
1567 2305
1568 return exists ? attr->mode : 0; 2306 return exists ? attr->mode : 0;
1569} 2307}
1570 2308
2309static struct attribute_group toshiba_attr_group = {
2310 .is_visible = toshiba_sysfs_is_visible,
2311 .attrs = toshiba_attributes,
2312};
2313
1571/* 2314/*
1572 * Hotkeys 2315 * Hotkeys
1573 */ 2316 */
@@ -1644,7 +2387,7 @@ static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev,
1644 if (scancode == 0x100) 2387 if (scancode == 0x100)
1645 return; 2388 return;
1646 2389
1647 /* act on key press; ignore key release */ 2390 /* Act on key press; ignore key release */
1648 if (scancode & 0x80) 2391 if (scancode & 0x80)
1649 return; 2392 return;
1650 2393
@@ -1680,7 +2423,7 @@ static void toshiba_acpi_process_hotkeys(struct toshiba_acpi_dev *dev)
1680 hci_result = 2423 hci_result =
1681 hci_write1(dev, HCI_SYSTEM_EVENT, 1); 2424 hci_write1(dev, HCI_SYSTEM_EVENT, 1);
1682 pr_notice("Re-enabled hotkeys\n"); 2425 pr_notice("Re-enabled hotkeys\n");
1683 /* fall through */ 2426 /* Fall through */
1684 default: 2427 default:
1685 retries--; 2428 retries--;
1686 break; 2429 break;
@@ -1802,7 +2545,7 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
1802 props.type = BACKLIGHT_PLATFORM; 2545 props.type = BACKLIGHT_PLATFORM;
1803 props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1; 2546 props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
1804 2547
1805 /* adding an extra level and having 0 change to transflective mode */ 2548 /* Adding an extra level and having 0 change to transflective mode */
1806 if (dev->tr_backlight_supported) 2549 if (dev->tr_backlight_supported)
1807 props.max_brightness++; 2550 props.max_brightness++;
1808 2551
@@ -1973,6 +2716,24 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
1973 ret = toshiba_accelerometer_supported(dev); 2716 ret = toshiba_accelerometer_supported(dev);
1974 dev->accelerometer_supported = !ret; 2717 dev->accelerometer_supported = !ret;
1975 2718
2719 ret = toshiba_usb_sleep_charge_get(dev, &dummy);
2720 dev->usb_sleep_charge_supported = !ret;
2721
2722 ret = toshiba_usb_rapid_charge_get(dev, &dummy);
2723 dev->usb_rapid_charge_supported = !ret;
2724
2725 ret = toshiba_usb_sleep_music_get(dev, &dummy);
2726 dev->usb_sleep_music_supported = !ret;
2727
2728 ret = toshiba_function_keys_get(dev, &dummy);
2729 dev->kbd_function_keys_supported = !ret;
2730
2731 ret = toshiba_panel_power_on_get(dev, &dummy);
2732 dev->panel_power_on_supported = !ret;
2733
2734 ret = toshiba_usb_three_get(dev, &dummy);
2735 dev->usb_three_supported = !ret;
2736
1976 /* Determine whether or not BIOS supports fan and video interfaces */ 2737 /* Determine whether or not BIOS supports fan and video interfaces */
1977 2738
1978 ret = get_video_status(dev, &dummy); 2739 ret = get_video_status(dev, &dummy);
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index 782e82289571..f980ff7166e9 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -179,8 +179,9 @@ int pnp_check_port(struct pnp_dev *dev, struct resource *res)
179 /* check if the resource is already in use, skip if the 179 /* check if the resource is already in use, skip if the
180 * device is active because it itself may be in use */ 180 * device is active because it itself may be in use */
181 if (!dev->active) { 181 if (!dev->active) {
182 if (__check_region(&ioport_resource, *port, length(port, end))) 182 if (!request_region(*port, length(port, end), "pnp"))
183 return 0; 183 return 0;
184 release_region(*port, length(port, end));
184 } 185 }
185 186
186 /* check if the resource is reserved */ 187 /* check if the resource is reserved */
@@ -241,8 +242,9 @@ int pnp_check_mem(struct pnp_dev *dev, struct resource *res)
241 /* check if the resource is already in use, skip if the 242 /* check if the resource is already in use, skip if the
242 * device is active because it itself may be in use */ 243 * device is active because it itself may be in use */
243 if (!dev->active) { 244 if (!dev->active) {
244 if (check_mem_region(*addr, length(addr, end))) 245 if (!request_mem_region(*addr, length(addr, end), "pnp"))
245 return 0; 246 return 0;
247 release_mem_region(*addr, length(addr, end));
246 } 248 }
247 249
248 /* check if the resource is reserved */ 250 /* check if the resource is reserved */
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index a3ecf5809634..b1541f40fd8d 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -53,6 +53,7 @@ config PWM_ATMEL
53config PWM_ATMEL_HLCDC_PWM 53config PWM_ATMEL_HLCDC_PWM
54 tristate "Atmel HLCDC PWM support" 54 tristate "Atmel HLCDC PWM support"
55 depends on MFD_ATMEL_HLCDC 55 depends on MFD_ATMEL_HLCDC
56 depends on HAVE_CLK
56 help 57 help
57 Generic PWM framework driver for the PWM output of the HLCDC 58 Generic PWM framework driver for the PWM output of the HLCDC
58 (Atmel High-end LCD Controller). This PWM output is mainly used 59 (Atmel High-end LCD Controller). This PWM output is mainly used
@@ -130,6 +131,19 @@ config PWM_FSL_FTM
130 To compile this driver as a module, choose M here: the module 131 To compile this driver as a module, choose M here: the module
131 will be called pwm-fsl-ftm. 132 will be called pwm-fsl-ftm.
132 133
134config PWM_IMG
135 tristate "Imagination Technologies PWM driver"
136 depends on HAS_IOMEM
137 depends on MFD_SYSCON
138 depends on COMMON_CLK
139 depends on MIPS || COMPILE_TEST
140 help
141 Generic PWM framework driver for Imagination Technologies
142 PWM block which supports 4 channels.
143
144 To compile this driver as a module, choose M here: the module
145 will be called pwm-img
146
133config PWM_IMX 147config PWM_IMX
134 tristate "i.MX PWM support" 148 tristate "i.MX PWM support"
135 depends on ARCH_MXC 149 depends on ARCH_MXC
@@ -283,6 +297,16 @@ config PWM_STI
283 To compile this driver as a module, choose M here: the module 297 To compile this driver as a module, choose M here: the module
284 will be called pwm-sti. 298 will be called pwm-sti.
285 299
300config PWM_SUN4I
301 tristate "Allwinner PWM support"
302 depends on ARCH_SUNXI || COMPILE_TEST
303 depends on HAS_IOMEM && COMMON_CLK
304 help
305 Generic PWM framework driver for Allwinner SoCs.
306
307 To compile this driver as a module, choose M here: the module
308 will be called pwm-sun4i.
309
286config PWM_TEGRA 310config PWM_TEGRA
287 tristate "NVIDIA Tegra PWM support" 311 tristate "NVIDIA Tegra PWM support"
288 depends on ARCH_TEGRA 312 depends on ARCH_TEGRA
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 65259ac1e8de..ec50eb5b5a8f 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o
10obj-$(CONFIG_PWM_CLPS711X) += pwm-clps711x.o 10obj-$(CONFIG_PWM_CLPS711X) += pwm-clps711x.o
11obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o 11obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o
12obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o 12obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o
13obj-$(CONFIG_PWM_IMG) += pwm-img.o
13obj-$(CONFIG_PWM_IMX) += pwm-imx.o 14obj-$(CONFIG_PWM_IMX) += pwm-imx.o
14obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o 15obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o
15obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o 16obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o
@@ -26,6 +27,7 @@ obj-$(CONFIG_PWM_ROCKCHIP) += pwm-rockchip.o
26obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o 27obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o
27obj-$(CONFIG_PWM_SPEAR) += pwm-spear.o 28obj-$(CONFIG_PWM_SPEAR) += pwm-spear.o
28obj-$(CONFIG_PWM_STI) += pwm-sti.o 29obj-$(CONFIG_PWM_STI) += pwm-sti.o
30obj-$(CONFIG_PWM_SUN4I) += pwm-sun4i.o
29obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o 31obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o
30obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o 32obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o
31obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o 33obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 966497d10c6e..810aef3f4c3e 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -192,7 +192,7 @@ static void of_pwmchip_add(struct pwm_chip *chip)
192 192
193static void of_pwmchip_remove(struct pwm_chip *chip) 193static void of_pwmchip_remove(struct pwm_chip *chip)
194{ 194{
195 if (chip->dev && chip->dev->of_node) 195 if (chip->dev)
196 of_node_put(chip->dev->of_node); 196 of_node_put(chip->dev->of_node);
197} 197}
198 198
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index e7a785fadcdf..522f7075bb1a 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -64,6 +64,9 @@ static int atmel_hlcdc_pwm_config(struct pwm_chip *c,
64 64
65 if (!chip->errata || !chip->errata->slow_clk_erratum) { 65 if (!chip->errata || !chip->errata->slow_clk_erratum) {
66 clk_freq = clk_get_rate(new_clk); 66 clk_freq = clk_get_rate(new_clk);
67 if (!clk_freq)
68 return -EINVAL;
69
67 clk_period_ns = (u64)NSEC_PER_SEC * 256; 70 clk_period_ns = (u64)NSEC_PER_SEC * 256;
68 do_div(clk_period_ns, clk_freq); 71 do_div(clk_period_ns, clk_freq);
69 } 72 }
@@ -73,6 +76,9 @@ static int atmel_hlcdc_pwm_config(struct pwm_chip *c,
73 clk_period_ns > period_ns) { 76 clk_period_ns > period_ns) {
74 new_clk = hlcdc->sys_clk; 77 new_clk = hlcdc->sys_clk;
75 clk_freq = clk_get_rate(new_clk); 78 clk_freq = clk_get_rate(new_clk);
79 if (!clk_freq)
80 return -EINVAL;
81
76 clk_period_ns = (u64)NSEC_PER_SEC * 256; 82 clk_period_ns = (u64)NSEC_PER_SEC * 256;
77 do_div(clk_period_ns, clk_freq); 83 do_div(clk_period_ns, clk_freq);
78 } 84 }
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
new file mode 100644
index 000000000000..476171a768d6
--- /dev/null
+++ b/drivers/pwm/pwm-img.c
@@ -0,0 +1,249 @@
1/*
2 * Imagination Technologies Pulse Width Modulator driver
3 *
4 * Copyright (c) 2014-2015, Imagination Technologies
5 *
6 * Based on drivers/pwm/pwm-tegra.c, Copyright (c) 2010, NVIDIA Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License.
11 */
12
13#include <linux/clk.h>
14#include <linux/err.h>
15#include <linux/io.h>
16#include <linux/mfd/syscon.h>
17#include <linux/module.h>
18#include <linux/of.h>
19#include <linux/platform_device.h>
20#include <linux/pwm.h>
21#include <linux/regmap.h>
22#include <linux/slab.h>
23
24/* PWM registers */
25#define PWM_CTRL_CFG 0x0000
26#define PWM_CTRL_CFG_NO_SUB_DIV 0
27#define PWM_CTRL_CFG_SUB_DIV0 1
28#define PWM_CTRL_CFG_SUB_DIV1 2
29#define PWM_CTRL_CFG_SUB_DIV0_DIV1 3
30#define PWM_CTRL_CFG_DIV_SHIFT(ch) ((ch) * 2 + 4)
31#define PWM_CTRL_CFG_DIV_MASK 0x3
32
33#define PWM_CH_CFG(ch) (0x4 + (ch) * 4)
34#define PWM_CH_CFG_TMBASE_SHIFT 0
35#define PWM_CH_CFG_DUTY_SHIFT 16
36
37#define PERIP_PWM_PDM_CONTROL 0x0140
38#define PERIP_PWM_PDM_CONTROL_CH_MASK 0x1
39#define PERIP_PWM_PDM_CONTROL_CH_SHIFT(ch) ((ch) * 4)
40
41#define MAX_TMBASE_STEPS 65536
42
43struct img_pwm_chip {
44 struct device *dev;
45 struct pwm_chip chip;
46 struct clk *pwm_clk;
47 struct clk *sys_clk;
48 void __iomem *base;
49 struct regmap *periph_regs;
50};
51
52static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip)
53{
54 return container_of(chip, struct img_pwm_chip, chip);
55}
56
57static inline void img_pwm_writel(struct img_pwm_chip *chip,
58 u32 reg, u32 val)
59{
60 writel(val, chip->base + reg);
61}
62
63static inline u32 img_pwm_readl(struct img_pwm_chip *chip,
64 u32 reg)
65{
66 return readl(chip->base + reg);
67}
68
69static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
70 int duty_ns, int period_ns)
71{
72 u32 val, div, duty, timebase;
73 unsigned long mul, output_clk_hz, input_clk_hz;
74 struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
75
76 input_clk_hz = clk_get_rate(pwm_chip->pwm_clk);
77 output_clk_hz = DIV_ROUND_UP(NSEC_PER_SEC, period_ns);
78
79 mul = DIV_ROUND_UP(input_clk_hz, output_clk_hz);
80 if (mul <= MAX_TMBASE_STEPS) {
81 div = PWM_CTRL_CFG_NO_SUB_DIV;
82 timebase = DIV_ROUND_UP(mul, 1);
83 } else if (mul <= MAX_TMBASE_STEPS * 8) {
84 div = PWM_CTRL_CFG_SUB_DIV0;
85 timebase = DIV_ROUND_UP(mul, 8);
86 } else if (mul <= MAX_TMBASE_STEPS * 64) {
87 div = PWM_CTRL_CFG_SUB_DIV1;
88 timebase = DIV_ROUND_UP(mul, 64);
89 } else if (mul <= MAX_TMBASE_STEPS * 512) {
90 div = PWM_CTRL_CFG_SUB_DIV0_DIV1;
91 timebase = DIV_ROUND_UP(mul, 512);
92 } else {
93 dev_err(chip->dev,
94 "failed to configure timebase steps/divider value\n");
95 return -EINVAL;
96 }
97
98 duty = DIV_ROUND_UP(timebase * duty_ns, period_ns);
99
100 val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
101 val &= ~(PWM_CTRL_CFG_DIV_MASK << PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm));
102 val |= (div & PWM_CTRL_CFG_DIV_MASK) <<
103 PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm);
104 img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
105
106 val = (duty << PWM_CH_CFG_DUTY_SHIFT) |
107 (timebase << PWM_CH_CFG_TMBASE_SHIFT);
108 img_pwm_writel(pwm_chip, PWM_CH_CFG(pwm->hwpwm), val);
109
110 return 0;
111}
112
113static int img_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
114{
115 u32 val;
116 struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
117
118 val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
119 val |= BIT(pwm->hwpwm);
120 img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
121
122 regmap_update_bits(pwm_chip->periph_regs, PERIP_PWM_PDM_CONTROL,
123 PERIP_PWM_PDM_CONTROL_CH_MASK <<
124 PERIP_PWM_PDM_CONTROL_CH_SHIFT(pwm->hwpwm), 0);
125
126 return 0;
127}
128
129static void img_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
130{
131 u32 val;
132 struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
133
134 val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
135 val &= ~BIT(pwm->hwpwm);
136 img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
137}
138
139static const struct pwm_ops img_pwm_ops = {
140 .config = img_pwm_config,
141 .enable = img_pwm_enable,
142 .disable = img_pwm_disable,
143 .owner = THIS_MODULE,
144};
145
146static int img_pwm_probe(struct platform_device *pdev)
147{
148 int ret;
149 struct resource *res;
150 struct img_pwm_chip *pwm;
151
152 pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
153 if (!pwm)
154 return -ENOMEM;
155
156 pwm->dev = &pdev->dev;
157
158 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
159 pwm->base = devm_ioremap_resource(&pdev->dev, res);
160 if (IS_ERR(pwm->base))
161 return PTR_ERR(pwm->base);
162
163 pwm->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
164 "img,cr-periph");
165 if (IS_ERR(pwm->periph_regs))
166 return PTR_ERR(pwm->periph_regs);
167
168 pwm->sys_clk = devm_clk_get(&pdev->dev, "sys");
169 if (IS_ERR(pwm->sys_clk)) {
170 dev_err(&pdev->dev, "failed to get system clock\n");
171 return PTR_ERR(pwm->sys_clk);
172 }
173
174 pwm->pwm_clk = devm_clk_get(&pdev->dev, "pwm");
175 if (IS_ERR(pwm->pwm_clk)) {
176 dev_err(&pdev->dev, "failed to get pwm clock\n");
177 return PTR_ERR(pwm->pwm_clk);
178 }
179
180 ret = clk_prepare_enable(pwm->sys_clk);
181 if (ret < 0) {
182 dev_err(&pdev->dev, "could not prepare or enable sys clock\n");
183 return ret;
184 }
185
186 ret = clk_prepare_enable(pwm->pwm_clk);
187 if (ret < 0) {
188 dev_err(&pdev->dev, "could not prepare or enable pwm clock\n");
189 goto disable_sysclk;
190 }
191
192 pwm->chip.dev = &pdev->dev;
193 pwm->chip.ops = &img_pwm_ops;
194 pwm->chip.base = -1;
195 pwm->chip.npwm = 4;
196
197 ret = pwmchip_add(&pwm->chip);
198 if (ret < 0) {
199 dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret);
200 goto disable_pwmclk;
201 }
202
203 platform_set_drvdata(pdev, pwm);
204 return 0;
205
206disable_pwmclk:
207 clk_disable_unprepare(pwm->pwm_clk);
208disable_sysclk:
209 clk_disable_unprepare(pwm->sys_clk);
210 return ret;
211}
212
213static int img_pwm_remove(struct platform_device *pdev)
214{
215 struct img_pwm_chip *pwm_chip = platform_get_drvdata(pdev);
216 u32 val;
217 unsigned int i;
218
219 for (i = 0; i < pwm_chip->chip.npwm; i++) {
220 val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
221 val &= ~BIT(i);
222 img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
223 }
224
225 clk_disable_unprepare(pwm_chip->pwm_clk);
226 clk_disable_unprepare(pwm_chip->sys_clk);
227
228 return pwmchip_remove(&pwm_chip->chip);
229}
230
231static const struct of_device_id img_pwm_of_match[] = {
232 { .compatible = "img,pistachio-pwm", },
233 { }
234};
235MODULE_DEVICE_TABLE(of, img_pwm_of_match);
236
237static struct platform_driver img_pwm_driver = {
238 .driver = {
239 .name = "img-pwm",
240 .of_match_table = img_pwm_of_match,
241 },
242 .probe = img_pwm_probe,
243 .remove = img_pwm_remove,
244};
245module_platform_driver(img_pwm_driver);
246
247MODULE_AUTHOR("Sai Masarapu <Sai.Masarapu@imgtec.com>");
248MODULE_DESCRIPTION("Imagination Technologies PWM DAC driver");
249MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
index b95115cdaea7..92abbd56b9f7 100644
--- a/drivers/pwm/pwm-sti.c
+++ b/drivers/pwm/pwm-sti.c
@@ -57,6 +57,7 @@ struct sti_pwm_chip {
57 struct regmap_field *pwm_int_en; 57 struct regmap_field *pwm_int_en;
58 struct pwm_chip chip; 58 struct pwm_chip chip;
59 struct pwm_device *cur; 59 struct pwm_device *cur;
60 unsigned long configured;
60 unsigned int en_count; 61 unsigned int en_count;
61 struct mutex sti_pwm_lock; /* To sync between enable/disable calls */ 62 struct mutex sti_pwm_lock; /* To sync between enable/disable calls */
62 void __iomem *mmio; 63 void __iomem *mmio;
@@ -102,24 +103,6 @@ static int sti_pwm_get_prescale(struct sti_pwm_chip *pc, unsigned long period,
102 return 0; 103 return 0;
103} 104}
104 105
105/* Calculate the number of PWM devices configured with a period. */
106static unsigned int sti_pwm_count_configured(struct pwm_chip *chip)
107{
108 struct pwm_device *pwm;
109 unsigned int ncfg = 0;
110 unsigned int i;
111
112 for (i = 0; i < chip->npwm; i++) {
113 pwm = &chip->pwms[i];
114 if (test_bit(PWMF_REQUESTED, &pwm->flags)) {
115 if (pwm_get_period(pwm))
116 ncfg++;
117 }
118 }
119
120 return ncfg;
121}
122
123/* 106/*
124 * For STiH4xx PWM IP, the PWM period is fixed to 256 local clock cycles. 107 * For STiH4xx PWM IP, the PWM period is fixed to 256 local clock cycles.
125 * The only way to change the period (apart from changing the PWM input clock) 108 * The only way to change the period (apart from changing the PWM input clock)
@@ -141,7 +124,7 @@ static int sti_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
141 unsigned int ncfg; 124 unsigned int ncfg;
142 bool period_same = false; 125 bool period_same = false;
143 126
144 ncfg = sti_pwm_count_configured(chip); 127 ncfg = hweight_long(pc->configured);
145 if (ncfg) 128 if (ncfg)
146 period_same = (period_ns == pwm_get_period(cur)); 129 period_same = (period_ns == pwm_get_period(cur));
147 130
@@ -197,6 +180,7 @@ static int sti_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
197 180
198 ret = regmap_field_write(pc->pwm_int_en, 0); 181 ret = regmap_field_write(pc->pwm_int_en, 0);
199 182
183 set_bit(pwm->hwpwm, &pc->configured);
200 pc->cur = pwm; 184 pc->cur = pwm;
201 185
202 dev_dbg(dev, "prescale:%u, period:%i, duty:%i, pwmvalx:%u\n", 186 dev_dbg(dev, "prescale:%u, period:%i, duty:%i, pwmvalx:%u\n",
@@ -254,10 +238,18 @@ static void sti_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
254 mutex_unlock(&pc->sti_pwm_lock); 238 mutex_unlock(&pc->sti_pwm_lock);
255} 239}
256 240
241static void sti_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
242{
243 struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
244
245 clear_bit(pwm->hwpwm, &pc->configured);
246}
247
257static const struct pwm_ops sti_pwm_ops = { 248static const struct pwm_ops sti_pwm_ops = {
258 .config = sti_pwm_config, 249 .config = sti_pwm_config,
259 .enable = sti_pwm_enable, 250 .enable = sti_pwm_enable,
260 .disable = sti_pwm_disable, 251 .disable = sti_pwm_disable,
252 .free = sti_pwm_free,
261 .owner = THIS_MODULE, 253 .owner = THIS_MODULE,
262}; 254};
263 255
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
new file mode 100644
index 000000000000..cd9dde563018
--- /dev/null
+++ b/drivers/pwm/pwm-sun4i.c
@@ -0,0 +1,366 @@
1/*
2 * Driver for Allwinner sun4i Pulse Width Modulation Controller
3 *
4 * Copyright (C) 2014 Alexandre Belloni <alexandre.belloni@free-electrons.com>
5 *
6 * Licensed under GPLv2.
7 */
8
9#include <linux/bitops.h>
10#include <linux/clk.h>
11#include <linux/err.h>
12#include <linux/io.h>
13#include <linux/module.h>
14#include <linux/of.h>
15#include <linux/of_device.h>
16#include <linux/platform_device.h>
17#include <linux/pwm.h>
18#include <linux/slab.h>
19#include <linux/spinlock.h>
20#include <linux/time.h>
21
22#define PWM_CTRL_REG 0x0
23
24#define PWM_CH_PRD_BASE 0x4
25#define PWM_CH_PRD_OFFSET 0x4
26#define PWM_CH_PRD(ch) (PWM_CH_PRD_BASE + PWM_CH_PRD_OFFSET * (ch))
27
28#define PWMCH_OFFSET 15
29#define PWM_PRESCAL_MASK GENMASK(3, 0)
30#define PWM_PRESCAL_OFF 0
31#define PWM_EN BIT(4)
32#define PWM_ACT_STATE BIT(5)
33#define PWM_CLK_GATING BIT(6)
34#define PWM_MODE BIT(7)
35#define PWM_PULSE BIT(8)
36#define PWM_BYPASS BIT(9)
37
38#define PWM_RDY_BASE 28
39#define PWM_RDY_OFFSET 1
40#define PWM_RDY(ch) BIT(PWM_RDY_BASE + PWM_RDY_OFFSET * (ch))
41
42#define PWM_PRD(prd) (((prd) - 1) << 16)
43#define PWM_PRD_MASK GENMASK(15, 0)
44
45#define PWM_DTY_MASK GENMASK(15, 0)
46
47#define BIT_CH(bit, chan) ((bit) << ((chan) * PWMCH_OFFSET))
48
49static const u32 prescaler_table[] = {
50 120,
51 180,
52 240,
53 360,
54 480,
55 0,
56 0,
57 0,
58 12000,
59 24000,
60 36000,
61 48000,
62 72000,
63 0,
64 0,
65 0, /* Actually 1 but tested separately */
66};
67
68struct sun4i_pwm_data {
69 bool has_prescaler_bypass;
70 bool has_rdy;
71};
72
73struct sun4i_pwm_chip {
74 struct pwm_chip chip;
75 struct clk *clk;
76 void __iomem *base;
77 spinlock_t ctrl_lock;
78 const struct sun4i_pwm_data *data;
79};
80
81static inline struct sun4i_pwm_chip *to_sun4i_pwm_chip(struct pwm_chip *chip)
82{
83 return container_of(chip, struct sun4i_pwm_chip, chip);
84}
85
86static inline u32 sun4i_pwm_readl(struct sun4i_pwm_chip *chip,
87 unsigned long offset)
88{
89 return readl(chip->base + offset);
90}
91
92static inline void sun4i_pwm_writel(struct sun4i_pwm_chip *chip,
93 u32 val, unsigned long offset)
94{
95 writel(val, chip->base + offset);
96}
97
98static int sun4i_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
99 int duty_ns, int period_ns)
100{
101 struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
102 u32 prd, dty, val, clk_gate;
103 u64 clk_rate, div = 0;
104 unsigned int prescaler = 0;
105 int err;
106
107 clk_rate = clk_get_rate(sun4i_pwm->clk);
108
109 if (sun4i_pwm->data->has_prescaler_bypass) {
110 /* First, test without any prescaler when available */
111 prescaler = PWM_PRESCAL_MASK;
112 /*
113 * When not using any prescaler, the clock period in nanoseconds
114 * is not an integer so round it half up instead of
115 * truncating to get less surprising values.
116 */
117 div = clk_rate * period_ns + NSEC_PER_SEC / 2;
118 do_div(div, NSEC_PER_SEC);
119 if (div - 1 > PWM_PRD_MASK)
120 prescaler = 0;
121 }
122
123 if (prescaler == 0) {
124 /* Go up from the first divider */
125 for (prescaler = 0; prescaler < PWM_PRESCAL_MASK; prescaler++) {
126 if (!prescaler_table[prescaler])
127 continue;
128 div = clk_rate;
129 do_div(div, prescaler_table[prescaler]);
130 div = div * period_ns;
131 do_div(div, NSEC_PER_SEC);
132 if (div - 1 <= PWM_PRD_MASK)
133 break;
134 }
135
136 if (div - 1 > PWM_PRD_MASK) {
137 dev_err(chip->dev, "period exceeds the maximum value\n");
138 return -EINVAL;
139 }
140 }
141
142 prd = div;
143 div *= duty_ns;
144 do_div(div, period_ns);
145 dty = div;
146
147 err = clk_prepare_enable(sun4i_pwm->clk);
148 if (err) {
		dev_err(chip->dev, "failed to enable PWM clock\n");
		return err;
	}

	spin_lock(&sun4i_pwm->ctrl_lock);
	val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);

	if (sun4i_pwm->data->has_rdy && (val & PWM_RDY(pwm->hwpwm))) {
		spin_unlock(&sun4i_pwm->ctrl_lock);
		clk_disable_unprepare(sun4i_pwm->clk);
		return -EBUSY;
	}

	clk_gate = val & BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
	if (clk_gate) {
		val &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
		sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
	}

	val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
	val &= ~BIT_CH(PWM_PRESCAL_MASK, pwm->hwpwm);
	val |= BIT_CH(prescaler, pwm->hwpwm);
	sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);

	val = (dty & PWM_DTY_MASK) | PWM_PRD(prd);
	sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm));

	if (clk_gate) {
		val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
		val |= clk_gate;
		sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
	}

	spin_unlock(&sun4i_pwm->ctrl_lock);
	clk_disable_unprepare(sun4i_pwm->clk);

	return 0;
}

static int sun4i_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
				  enum pwm_polarity polarity)
{
	struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
	u32 val;
	int ret;

	ret = clk_prepare_enable(sun4i_pwm->clk);
	if (ret) {
		dev_err(chip->dev, "failed to enable PWM clock\n");
		return ret;
	}

	spin_lock(&sun4i_pwm->ctrl_lock);
	val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);

	if (polarity != PWM_POLARITY_NORMAL)
		val &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
	else
		val |= BIT_CH(PWM_ACT_STATE, pwm->hwpwm);

	sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);

	spin_unlock(&sun4i_pwm->ctrl_lock);
	clk_disable_unprepare(sun4i_pwm->clk);

	return 0;
}

static int sun4i_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
	struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
	u32 val;
	int ret;

	ret = clk_prepare_enable(sun4i_pwm->clk);
	if (ret) {
		dev_err(chip->dev, "failed to enable PWM clock\n");
		return ret;
	}

	spin_lock(&sun4i_pwm->ctrl_lock);
	val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
	val |= BIT_CH(PWM_EN, pwm->hwpwm);
	val |= BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
	sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
	spin_unlock(&sun4i_pwm->ctrl_lock);

	return 0;
}

static void sun4i_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
	struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
	u32 val;

	spin_lock(&sun4i_pwm->ctrl_lock);
	val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
	val &= ~BIT_CH(PWM_EN, pwm->hwpwm);
	val &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
	sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG);
	spin_unlock(&sun4i_pwm->ctrl_lock);

	clk_disable_unprepare(sun4i_pwm->clk);
}

static const struct pwm_ops sun4i_pwm_ops = {
	.config = sun4i_pwm_config,
	.set_polarity = sun4i_pwm_set_polarity,
	.enable = sun4i_pwm_enable,
	.disable = sun4i_pwm_disable,
	.owner = THIS_MODULE,
};

static const struct sun4i_pwm_data sun4i_pwm_data_a10 = {
	.has_prescaler_bypass = false,
	.has_rdy = false,
};

static const struct sun4i_pwm_data sun4i_pwm_data_a20 = {
	.has_prescaler_bypass = true,
	.has_rdy = true,
};

static const struct of_device_id sun4i_pwm_dt_ids[] = {
	{
		.compatible = "allwinner,sun4i-a10-pwm",
		.data = &sun4i_pwm_data_a10,
	}, {
		.compatible = "allwinner,sun7i-a20-pwm",
		.data = &sun4i_pwm_data_a20,
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(of, sun4i_pwm_dt_ids);

static int sun4i_pwm_probe(struct platform_device *pdev)
{
	struct sun4i_pwm_chip *pwm;
	struct resource *res;
	u32 val;
	int i, ret;
	const struct of_device_id *match;

	match = of_match_device(sun4i_pwm_dt_ids, &pdev->dev);

	pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
	if (!pwm)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pwm->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pwm->base))
		return PTR_ERR(pwm->base);

	pwm->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pwm->clk))
		return PTR_ERR(pwm->clk);

	pwm->chip.dev = &pdev->dev;
	pwm->chip.ops = &sun4i_pwm_ops;
	pwm->chip.base = -1;
	pwm->chip.npwm = 2;
	pwm->chip.can_sleep = true;
	pwm->chip.of_xlate = of_pwm_xlate_with_flags;
	pwm->chip.of_pwm_n_cells = 3;
	pwm->data = match->data;

	spin_lock_init(&pwm->ctrl_lock);

	ret = pwmchip_add(&pwm->chip);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
		return ret;
	}

	platform_set_drvdata(pdev, pwm);

	ret = clk_prepare_enable(pwm->clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PWM clock\n");
		goto clk_error;
	}

	val = sun4i_pwm_readl(pwm, PWM_CTRL_REG);
	for (i = 0; i < pwm->chip.npwm; i++)
		if (!(val & BIT_CH(PWM_ACT_STATE, i)))
			pwm->chip.pwms[i].polarity = PWM_POLARITY_INVERSED;
	clk_disable_unprepare(pwm->clk);

	return 0;

clk_error:
	pwmchip_remove(&pwm->chip);
	return ret;
}

static int sun4i_pwm_remove(struct platform_device *pdev)
{
	struct sun4i_pwm_chip *pwm = platform_get_drvdata(pdev);

	return pwmchip_remove(&pwm->chip);
}

static struct platform_driver sun4i_pwm_driver = {
	.driver = {
		.name = "sun4i-pwm",
		.of_match_table = sun4i_pwm_dt_ids,
	},
	.probe = sun4i_pwm_probe,
	.remove = sun4i_pwm_remove,
};
module_platform_driver(sun4i_pwm_driver);

MODULE_ALIAS("platform:sun4i-pwm");
MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@free-electrons.com>");
MODULE_DESCRIPTION("Allwinner sun4i PWM driver");
MODULE_LICENSE("GPL v2");
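
Once registered, the driver above is consumed through the generic PWM framework rather than called directly. A minimal sketch of a kernel client driving one of these channels with the era-appropriate API (the function name example_attach_pwm is hypothetical; error handling trimmed):

    /* Hedged sketch of a PWM consumer, not part of this patch. */
    #include <linux/pwm.h>

    static int example_attach_pwm(struct device *dev)
    {
    	struct pwm_device *pwm;
    	int ret;

    	/*
    	 * Looks up the "pwms" phandle in the consumer's DT node; because
    	 * the chip sets of_pwm_xlate_with_flags and of_pwm_n_cells = 3,
    	 * the specifier carries <&pwm channel period flags>.
    	 */
    	pwm = devm_pwm_get(dev, NULL);
    	if (IS_ERR(pwm))
    		return PTR_ERR(pwm);

    	ret = pwm_config(pwm, 500000, 1000000); /* 50% of a 1 ms period */
    	if (ret)
    		return ret;

    	return pwm_enable(pwm);
    }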
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 5b97cae5423a..cabd7d8e05cc 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -87,7 +87,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 	 * cycles at the PWM clock rate will take period_ns nanoseconds.
 	 */
 	rate = clk_get_rate(pc->clk) >> PWM_DUTY_WIDTH;
-	hz = 1000000000ul / period_ns;
+	hz = NSEC_PER_SEC / period_ns;
 
 	rate = (rate + (hz / 2)) / hz;
 
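
The replaced constant feeds a round-to-nearest division on the line below it: adding half the divisor before truncating division biases the result to the nearest integer. A standalone illustration (function and parameter names here are illustrative, not from the driver):

    #include <linux/time.h>	/* NSEC_PER_SEC */

    /* Round-to-nearest integer division of a clock rate by a period. */
    static inline unsigned long rate_for_period(unsigned long rate_hz,
    						unsigned int period_ns)
    {
    	unsigned long hz = NSEC_PER_SEC / period_ns;

    	return (rate_hz + hz / 2) / hz;
    }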
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index f64c5decb747..47295940a868 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -815,8 +815,7 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
 	return txd;
 }
 
-static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
-			     unsigned long arg)
+static int tsi721_terminate_all(struct dma_chan *dchan)
 {
 	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
 	struct tsi721_tx_desc *desc, *_d;
@@ -825,9 +824,6 @@ static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
 
 	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
 
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENOSYS;
-
 	spin_lock_bh(&bdma_chan->lock);
 
 	bdma_chan->active = false;
@@ -901,7 +897,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
 	mport->dma.device_tx_status = tsi721_tx_status;
 	mport->dma.device_issue_pending = tsi721_issue_pending;
 	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
-	mport->dma.device_control = tsi721_device_control;
+	mport->dma.device_terminate_all = tsi721_terminate_all;
 
 	err = dma_async_device_register(&mport->dma);
 	if (err)
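
This follows the dmaengine core's move away from the multiplexed device_control callback toward dedicated per-operation hooks. Client drivers are unaffected because the dmaengine_terminate_all() wrapper dispatches to whichever form the provider implements; a sketch of the caller side (example_stop_dma is illustrative):

    #include <linux/dmaengine.h>

    /* Illustrative only: tearing down a channel from a client driver. */
    static void example_stop_dma(struct dma_chan *chan)
    {
    	/*
    	 * With this patch the wrapper reaches tsi721_terminate_all()
    	 * instead of tsi721_device_control(chan, DMA_TERMINATE_ALL, 0).
    	 */
    	dmaengine_terminate_all(chan);
    }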
diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c
index e8647f7cf25e..00c5cc3d9546 100644
--- a/drivers/regulator/qcom_rpm-regulator.c
+++ b/drivers/regulator/qcom_rpm-regulator.c
@@ -205,6 +205,7 @@ static int rpm_reg_write(struct qcom_rpm_reg *vreg,
 	vreg->val[req->word] |= value << req->shift;
 
 	return qcom_rpm_write(vreg->rpm,
+			      QCOM_RPM_ACTIVE_STATE,
 			      vreg->resource,
 			      vreg->val,
 			      vreg->parts->request_len);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index cedb41c95dae..b5b5c3d485d6 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -65,7 +65,7 @@ config RTC_DEBUG
 comment "RTC interfaces"
 
 config RTC_INTF_SYSFS
-	boolean "/sys/class/rtc/rtcN (sysfs)"
+	bool "/sys/class/rtc/rtcN (sysfs)"
 	depends on SYSFS
 	default RTC_CLASS
 	help
@@ -75,7 +75,7 @@ config RTC_INTF_SYSFS
 	  If unsure, say Y.
 
 config RTC_INTF_PROC
-	boolean "/proc/driver/rtc (procfs for rtcN)"
+	bool "/proc/driver/rtc (procfs for rtcN)"
 	depends on PROC_FS
 	default RTC_CLASS
 	help
@@ -88,7 +88,7 @@ config RTC_INTF_PROC
 	  If unsure, say Y.
 
 config RTC_INTF_DEV
-	boolean "/dev/rtcN (character devices)"
+	bool "/dev/rtcN (character devices)"
 	default RTC_CLASS
 	help
 	  Say yes here if you want to use your RTCs using the /dev
@@ -466,7 +466,7 @@ config RTC_DRV_DM355EVM
 	  Supports the RTC firmware in the MSP430 on the DM355 EVM.
 
 config RTC_DRV_TWL92330
-	boolean "TI TWL92330/Menelaus"
+	bool "TI TWL92330/Menelaus"
 	depends on MENELAUS
 	help
 	  If you say yes here you get support for the RTC on the
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 8c3bfcb115b7..803869c7d7c2 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -399,21 +399,21 @@ ds1685_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 	 * of this RTC chip. We check for it anyways in case support is
 	 * added in the future.
 	 */
-	if (unlikely((seconds >= 0xc0) && (seconds <= 0xff)))
+	if (unlikely(seconds >= 0xc0))
 		alrm->time.tm_sec = -1;
 	else
 		alrm->time.tm_sec = ds1685_rtc_bcd2bin(rtc, seconds,
 						       RTC_SECS_BCD_MASK,
 						       RTC_SECS_BIN_MASK);
 
-	if (unlikely((minutes >= 0xc0) && (minutes <= 0xff)))
+	if (unlikely(minutes >= 0xc0))
 		alrm->time.tm_min = -1;
 	else
 		alrm->time.tm_min = ds1685_rtc_bcd2bin(rtc, minutes,
 						       RTC_MINS_BCD_MASK,
 						       RTC_MINS_BIN_MASK);
 
-	if (unlikely((hours >= 0xc0) && (hours <= 0xff)))
+	if (unlikely(hours >= 0xc0))
 		alrm->time.tm_hour = -1;
 	else
 		alrm->time.tm_hour = ds1685_rtc_bcd2bin(rtc, hours,
@@ -472,13 +472,13 @@ ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 	 * field, and we only support four fields. We put the support
 	 * here anyways for the future.
 	 */
-	if (unlikely((seconds >= 0xc0) && (seconds <= 0xff)))
+	if (unlikely(seconds >= 0xc0))
 		seconds = 0xff;
 
-	if (unlikely((minutes >= 0xc0) && (minutes <= 0xff)))
+	if (unlikely(minutes >= 0xc0))
 		minutes = 0xff;
 
-	if (unlikely((hours >= 0xc0) && (hours <= 0xff)))
+	if (unlikely(hours >= 0xc0))
 		hours = 0xff;
 
 	alrm->time.tm_mon = -1;
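
The dropped upper-bound halves of these conditions were vacuously true: seconds, minutes, and hours are 8-bit values here, so they can never exceed 0xff, and the compound test collapses to the lower bound alone. In isolation (helper name hypothetical):

    #include <linux/types.h>

    /*
     * For any u8, (v <= 0xff) is a tautology, so the old
     * "(v >= 0xc0) && (v <= 0xff)" reduces to "v >= 0xc0" --
     * exactly the form the patch keeps.
     */
    static inline bool in_dont_care_band(u8 v)
    {
    	return v >= 0xc0;
    }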
@@ -528,7 +528,6 @@ ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 /* ----------------------------------------------------------------------- */
 /* /dev/rtcX Interface functions */
 
-#ifdef CONFIG_RTC_INTF_DEV
 /**
  * ds1685_rtc_alarm_irq_enable - replaces ioctl() RTC_AIE on/off.
  * @dev: pointer to device structure.
@@ -557,7 +556,6 @@ ds1685_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 
 	return 0;
 }
-#endif
 /* ----------------------------------------------------------------------- */
 
 
@@ -1612,7 +1610,7 @@ ds1685_rtc_sysfs_time_regs_show(struct device *dev,
 		ds1685_rtc_sysfs_time_regs_lookup(attr->attr.name, false);
 
 	/* Make sure we actually matched something. */
-	if (!bcd_reg_info && !bin_reg_info)
+	if (!bcd_reg_info || !bin_reg_info)
 		return -EINVAL;
 
 	/* bcd_reg_info->reg == bin_reg_info->reg. */
@@ -1650,7 +1648,7 @@ ds1685_rtc_sysfs_time_regs_store(struct device *dev,
 		return -EINVAL;
 
 	/* Make sure we actually matched something. */
-	if (!bcd_reg_info && !bin_reg_info)
+	if (!bcd_reg_info || !bin_reg_info)
 		return -EINVAL;
 
 	/* Check for a valid range. */
diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c
index aa3e2c7cd83c..a6f5ee80fadc 100644
--- a/drivers/scsi/am53c974.c
+++ b/drivers/scsi/am53c974.c
@@ -178,12 +178,6 @@ static void pci_esp_dma_drain(struct esp *esp)
 			break;
 		cpu_relax();
 	}
-	if (resid > 1) {
-		/* FIFO not cleared */
-		shost_printk(KERN_INFO, esp->host,
-			     "FIFO not cleared, %d bytes left\n",
-			     resid);
-	}
 
 	/*
 	 * When there is a residual BCMPLT will never be set
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 96241b20fd2c..a7cc61837818 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -585,7 +585,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
585 "beiscsi_hba_alloc - iscsi_host_alloc failed\n"); 585 "beiscsi_hba_alloc - iscsi_host_alloc failed\n");
586 return NULL; 586 return NULL;
587 } 587 }
588 shost->dma_boundary = pcidev->dma_mask;
589 shost->max_id = BE2_MAX_SESSIONS; 588 shost->max_id = BE2_MAX_SESSIONS;
590 shost->max_channel = 0; 589 shost->max_channel = 0;
591 shost->max_cmd_len = BEISCSI_MAX_CMD_LEN; 590 shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 95d581c45413..a1cfbd3dda47 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -6831,10 +6831,8 @@ static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
 					    char *name)
 {
 	struct workqueue_struct *wq = NULL;
-	char wq_name[20];
 
-	snprintf(wq_name, sizeof(wq_name), "%s_%d_hpsa", name, h->ctlr);
-	wq = alloc_ordered_workqueue(wq_name, 0);
+	wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
 	if (!wq)
 		dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
 
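
alloc_ordered_workqueue() accepts a printf-style format plus varargs, so the on-stack name buffer (and its silent 20-byte truncation) is unnecessary; a minimal sketch of the pattern (example_make_wq is illustrative):

    #include <linux/workqueue.h>

    /* The workqueue core formats the name itself, with no fixed cap. */
    static struct workqueue_struct *example_make_wq(const char *name, int ctlr)
    {
    	return alloc_ordered_workqueue("%s_%d_hpsa", 0, name, ctlr);
    }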
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 73f9feecda72..99f43b7fc9ab 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1570,9 +1570,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
 	 * match the format by tcm_qla2xxx explict ConfigFS NodeACLs.
 	 */
 	memset(&port_name, 0, 36);
-	snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
-		fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
-		fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
+	snprintf(port_name, sizeof(port_name), "%8phC", fc_wwpn);
 	/*
 	 * Locate our struct se_node_acl either from an explict NodeACL created
 	 * via ConfigFS, or via running in TPG demo mode.
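
%*ph is the kernel printk extension for hex-dumping a small buffer, and the C suffix selects ':' separators, so "%8phC" produces the same xx:xx:... rendering as the eight hand-written %02x conversions. A standalone illustration (example_log_wwpn is illustrative):

    #include <linux/kernel.h>
    #include <linux/printk.h>

    static void example_log_wwpn(const u8 *fc_wwpn)
    {
    	/* Prints e.g. "21:00:00:24:ff:31:0c:ba" */
    	pr_info("wwpn: %8phC\n", fc_wwpn);
    }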
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0cbc1fb45f10..2270bd51f9c2 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -546,7 +546,7 @@ static ssize_t
 sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
 {
 	sg_io_hdr_t *hp = &srp->header;
-	int err = 0;
+	int err = 0, err2;
 	int len;
 
 	if (count < SZ_SG_IO_HDR) {
@@ -575,8 +575,8 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
 		goto err_out;
 	}
 err_out:
-	err = sg_finish_rem_req(srp);
-	return (0 == err) ? count : err;
+	err2 = sg_finish_rem_req(srp);
+	return err ? : err2 ? : count;
 }
 
 static ssize_t
@@ -1335,6 +1335,17 @@ sg_rq_end_io(struct request *rq, int uptodate)
 	}
 	/* Rely on write phase to clean out srp status values, so no "else" */
 
+	/*
+	 * Free the request as soon as it is complete so that its resources
+	 * can be reused without waiting for userspace to read() the
+	 * result. But keep the associated bio (if any) around until
+	 * blk_rq_unmap_user() can be called from user context.
+	 */
+	srp->rq = NULL;
+	if (rq->cmd != rq->__cmd)
+		kfree(rq->cmd);
+	__blk_put_request(rq->q, rq);
+
 	write_lock_irqsave(&sfp->rq_list_lock, iflags);
 	if (unlikely(srp->orphan)) {
 		if (sfp->keep_orphan)
@@ -1669,7 +1680,22 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
 		return -ENOMEM;
 	}
 
-	rq = blk_get_request(q, rw, GFP_ATOMIC);
+	/*
+	 * NOTE
+	 *
+	 * With scsi-mq enabled, there are a fixed number of preallocated
+	 * requests equal in number to shost->can_queue. If all of the
+	 * preallocated requests are already in use, then using GFP_ATOMIC with
+	 * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
+	 * will cause blk_get_request() to sleep until an active command
+	 * completes, freeing up a request. Neither option is ideal, but
+	 * GFP_KERNEL is the better choice to prevent userspace from getting an
+	 * unexpected EWOULDBLOCK.
+	 *
+	 * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
+	 * does not sleep except under memory pressure.
+	 */
+	rq = blk_get_request(q, rw, GFP_KERNEL);
 	if (IS_ERR(rq)) {
 		kfree(long_cmdp);
 		return PTR_ERR(rq);
@@ -1759,10 +1785,10 @@ sg_finish_rem_req(Sg_request *srp)
 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
 				      "sg_finish_rem_req: res_used=%d\n",
 				      (int) srp->res_used));
-	if (srp->rq) {
-		if (srp->bio)
-			ret = blk_rq_unmap_user(srp->bio);
+	if (srp->bio)
+		ret = blk_rq_unmap_user(srp->bio);
 
+	if (srp->rq) {
 		if (srp->rq->cmd != srp->rq->__cmd)
 			kfree(srp->rq->cmd);
 		blk_put_request(srp->rq);
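
Taken together, the two sg.c hunks above implement an early-release pattern: the scarce resource (the block-layer request and its tag) is returned from the completion handler, while teardown that needs user context (the bio unmap) is deferred, and the user-context path tolerates either ordering. In generic form, under the assumption that release_handle() and unmap_user_pages() stand in for the real block-layer calls (all names here are illustrative, not sg.c's):

    struct example_req {
    	void *handle;	/* scarce, releasable at completion time */
    	void *mapping;	/* needs user context to tear down */
    };

    /* Completion (IRQ) context: give the scarce handle back now. */
    static void example_complete(struct example_req *er)
    {
    	void *handle = er->handle;

    	er->handle = NULL;	/* mark as already released */
    	release_handle(handle);	/* tag becomes reusable at once */
    }

    /* Later, in user context: finish what IRQ context could not. */
    static void example_finish(struct example_req *er)
    {
    	if (er->mapping)
    		unmap_user_pages(er->mapping);
    	if (er->handle)		/* request that never completed */
    		release_handle(er->handle);
    }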
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index c52bb5dfaedb..f164f24a4a55 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -950,6 +950,12 @@ static int virtscsi_probe(struct virtio_device *vdev)
 	u32 num_queues;
 	struct scsi_host_template *hostt;
 
+	if (!vdev->config->get) {
+		dev_err(&vdev->dev, "%s failure: config access disabled\n",
+			__func__);
+		return -EINVAL;
+	}
+
 	/* We need to know how many queues before we allocate. */
 	num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
 
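
Probing now fails fast when the transport provides no config-space accessors (possible with a broken or hostile device), rather than dereferencing a NULL ->get later. The guard in isolation (example_probe_guard is illustrative; the ops-table shape follows this hunk):

    #include <linux/virtio.h>
    #include <linux/virtio_config.h>

    static int example_probe_guard(struct virtio_device *vdev)
    {
    	/*
    	 * Reject devices whose transport cannot read config space;
    	 * every later virtio_cread() would otherwise be undefined.
    	 */
    	if (!vdev->config->get)
    		return -EINVAL;
    	return 0;
    }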
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 7702664d7ed3..289ad016d925 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -870,6 +870,7 @@ fail_free_params:
 }
 
 static struct scsi_host_template wd719x_template = {
+	.module				= THIS_MODULE,
 	.name				= "Western Digital 719x",
 	.queuecommand			= wd719x_queuecommand,
 	.eh_abort_handler		= wd719x_abort,
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index f3ee439d6f0e..cd4c293f0dd0 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -81,7 +81,9 @@ static int __init sh_pm_runtime_init(void)
 	if (!of_machine_is_compatible("renesas,emev2") &&
 	    !of_machine_is_compatible("renesas,r7s72100") &&
 	    !of_machine_is_compatible("renesas,r8a73a4") &&
+#ifndef CONFIG_PM_GENERIC_DOMAINS_OF
 	    !of_machine_is_compatible("renesas,r8a7740") &&
+#endif
 	    !of_machine_is_compatible("renesas,r8a7778") &&
 	    !of_machine_is_compatible("renesas,r8a7779") &&
 	    !of_machine_is_compatible("renesas,r8a7790") &&
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 95ccedabba4f..ab8dfbef6f1b 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -29,7 +29,7 @@ menuconfig SPI
 if SPI
 
 config SPI_DEBUG
-	boolean "Debug support for SPI drivers"
+	bool "Debug support for SPI drivers"
 	depends on DEBUG_KERNEL
 	help
 	  Say "yes" to enable debug messaging (like dev_dbg and pr_debug),
@@ -40,8 +40,8 @@ config SPI_DEBUG
 #
 
 config SPI_MASTER
-#	boolean "SPI Master Support"
-	boolean
+#	bool "SPI Master Support"
+	bool
 	default SPI
 	help
 	  If your system has an master-capable SPI controller (which
diff --git a/drivers/staging/board/Kconfig b/drivers/staging/board/Kconfig
index 7eda0b8b7aab..0a89ad16371f 100644
--- a/drivers/staging/board/Kconfig
+++ b/drivers/staging/board/Kconfig
@@ -1,5 +1,5 @@
 config STAGING_BOARD
-	boolean "Staging Board Support"
+	bool "Staging Board Support"
 	depends on OF_ADDRESS
 	depends on BROKEN
 	help
diff --git a/drivers/staging/emxx_udc/Kconfig b/drivers/staging/emxx_udc/Kconfig
index 9bc6d3db86d9..cc3402020487 100644
--- a/drivers/staging/emxx_udc/Kconfig
+++ b/drivers/staging/emxx_udc/Kconfig
@@ -1,5 +1,5 @@
 config USB_EMXX
-	boolean "EMXX USB Function Device Controller"
+	bool "EMXX USB Function Device Controller"
 	depends on USB_GADGET && (ARCH_SHMOBILE || (ARM && COMPILE_TEST))
 	help
 	  The Emma Mobile series of SoCs from Renesas Electronics and
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index fa38be0982f9..24183028bd71 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -30,13 +30,13 @@ config IIO_SIMPLE_DUMMY
 if IIO_SIMPLE_DUMMY
 
 config IIO_SIMPLE_DUMMY_EVENTS
-	boolean "Event generation support"
+	bool "Event generation support"
 	select IIO_DUMMY_EVGEN
 	help
 	  Add some dummy events to the simple dummy driver.
 
 config IIO_SIMPLE_DUMMY_BUFFER
-	boolean "Buffered capture support"
+	bool "Buffered capture support"
 	select IIO_BUFFER
 	select IIO_KFIFO_BUF
 	help
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index 88614b71cf6d..ddf1fa9f67f8 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -270,7 +270,7 @@ void ll_invalidate_aliases(struct inode *inode)
 
 int ll_revalidate_it_finish(struct ptlrpc_request *request,
 			    struct lookup_intent *it,
-			    struct dentry *de)
+			    struct inode *inode)
 {
 	int rc = 0;
 
@@ -280,19 +280,17 @@ int ll_revalidate_it_finish(struct ptlrpc_request *request,
 	if (it_disposition(it, DISP_LOOKUP_NEG))
 		return -ENOENT;
 
-	rc = ll_prep_inode(&de->d_inode, request, NULL, it);
+	rc = ll_prep_inode(&inode, request, NULL, it);
 
 	return rc;
 }
 
-void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry)
+void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode)
 {
 	LASSERT(it != NULL);
-	LASSERT(dentry != NULL);
 
-	if (it->d.lustre.it_lock_mode && dentry->d_inode != NULL) {
-		struct inode *inode = dentry->d_inode;
-		struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
+	if (it->d.lustre.it_lock_mode && inode != NULL) {
+		struct ll_sb_info *sbi = ll_i2sbi(inode);
 
 		CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
 		       inode, inode->i_ino, inode->i_generation);
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index 7c7ef7ec908e..5ebee6ca0a10 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -2912,8 +2912,8 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
 	oit.it_op = IT_LOOKUP;
 
 	/* Call getattr by fid, so do not provide name at all. */
-	op_data = ll_prep_md_op_data(NULL, dentry->d_inode,
-				     dentry->d_inode, NULL, 0, 0,
+	op_data = ll_prep_md_op_data(NULL, inode,
+				     inode, NULL, 0, 0,
 				     LUSTRE_OPC_ANY, NULL);
 	if (IS_ERR(op_data))
 		return PTR_ERR(op_data);
@@ -2931,7 +2931,7 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
 		goto out;
 	}
 
-	rc = ll_revalidate_it_finish(req, &oit, dentry);
+	rc = ll_revalidate_it_finish(req, &oit, inode);
 	if (rc != 0) {
 		ll_intent_release(&oit);
 		goto out;
@@ -2944,7 +2944,7 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
 		if (!dentry->d_inode->i_nlink)
 			d_lustre_invalidate(dentry, 0);
 
-		ll_lookup_finish_locks(&oit, dentry);
+		ll_lookup_finish_locks(&oit, inode);
 	} else if (!ll_have_md_lock(dentry->d_inode, &ibits, LCK_MINMODE)) {
 		struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
 		u64 valid = OBD_MD_FLGETATTR;
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index d032c2b086cc..2af1d7286250 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -786,9 +786,9 @@ extern const struct dentry_operations ll_d_ops;
 void ll_intent_drop_lock(struct lookup_intent *);
 void ll_intent_release(struct lookup_intent *);
 void ll_invalidate_aliases(struct inode *);
-void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry);
+void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode);
 int ll_revalidate_it_finish(struct ptlrpc_request *request,
-			    struct lookup_intent *it, struct dentry *de);
+			    struct lookup_intent *it, struct inode *inode);
 
 /* llite/llite_lib.c */
 extern struct super_operations lustre_super_operations;
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 4f361b77c749..890ac190f5fa 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -481,6 +481,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
 	struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
 	struct dentry *save = dentry, *retval;
 	struct ptlrpc_request *req = NULL;
+	struct inode *inode;
 	struct md_op_data *op_data;
 	__u32 opc;
 	int rc;
@@ -539,12 +540,13 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
 		goto out;
 	}
 
-	if ((it->it_op & IT_OPEN) && dentry->d_inode &&
-	    !S_ISREG(dentry->d_inode->i_mode) &&
-	    !S_ISDIR(dentry->d_inode->i_mode)) {
-		ll_release_openhandle(dentry->d_inode, it);
+	inode = dentry->d_inode;
+	if ((it->it_op & IT_OPEN) && inode &&
+	    !S_ISREG(inode->i_mode) &&
+	    !S_ISDIR(inode->i_mode)) {
+		ll_release_openhandle(inode, it);
 	}
-	ll_lookup_finish_locks(it, dentry);
+	ll_lookup_finish_locks(it, inode);
 
 	if (dentry == save)
 		retval = NULL;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index aebde3289c50..50bad55a0c42 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -30,7 +30,7 @@
 #include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_tq.h"
@@ -45,7 +45,7 @@
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
 #include "iscsi_target_device.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
 
 #include <target/iscsi/iscsi_transport.h>
 
@@ -968,11 +968,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
 	conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
 	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
-		spin_lock_bh(&conn->sess->ttt_lock);
-		cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-		if (cmd->targ_xfer_tag == 0xFFFFFFFF)
-			cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-		spin_unlock_bh(&conn->sess->ttt_lock);
+		cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
 	} else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
 		cmd->targ_xfer_tag = 0xFFFFFFFF;
 	cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
@@ -1998,6 +1994,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
 	cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
 	cmd->data_direction = DMA_NONE;
+	cmd->text_in_ptr = NULL;
 
 	return 0;
 }
@@ -2011,9 +2008,13 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	int cmdsn_ret;
 
 	if (!text_in) {
-		pr_err("Unable to locate text_in buffer for sendtargets"
-			" discovery\n");
-		goto reject;
+		cmd->targ_xfer_tag = be32_to_cpu(hdr->ttt);
+		if (cmd->targ_xfer_tag == 0xFFFFFFFF) {
+			pr_err("Unable to locate text_in buffer for sendtargets"
+			       " discovery\n");
+			goto reject;
+		}
+		goto empty_sendtargets;
 	}
 	if (strncmp("SendTargets", text_in, 11) != 0) {
 		pr_err("Received Text Data that is not"
@@ -2040,6 +2041,7 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);
 
+empty_sendtargets:
 	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
 
 	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
@@ -3047,11 +3049,7 @@ static int iscsit_send_r2t(
 	int_to_scsilun(cmd->se_cmd.orig_fe_lun,
 			(struct scsi_lun *)&hdr->lun);
 	hdr->itt = cmd->init_task_tag;
-	spin_lock_bh(&conn->sess->ttt_lock);
-	r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-	if (r2t->targ_xfer_tag == 0xFFFFFFFF)
-		r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-	spin_unlock_bh(&conn->sess->ttt_lock);
+	r2t->targ_xfer_tag = session_get_next_ttt(conn->sess);
 	hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag);
 	hdr->statsn = cpu_to_be32(conn->stat_sn);
 	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
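
Both call sites replace the same open-coded sequence (take ttt_lock, post-increment targ_xfer_tag, skip the reserved 0xFFFFFFFF value) with one helper. Reconstructed from the removed lines, session_get_next_ttt() presumably looks like the following; its exact placement and signature are an assumption, only the semantics are taken from this diff:

    /* Assumed shape of the factored-out helper. */
    static inline u32 session_get_next_ttt(struct iscsi_session *sess)
    {
    	u32 ttt;

    	spin_lock_bh(&sess->ttt_lock);
    	ttt = sess->targ_xfer_tag++;
    	if (ttt == 0xFFFFFFFF)	/* reserved TTT, never hand out */
    		ttt = sess->targ_xfer_tag++;
    	spin_unlock_bh(&sess->ttt_lock);

    	return ttt;
    }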
@@ -3393,7 +3391,8 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np)
 
 static int
 iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
-				  enum iscsit_transport_type network_transport)
+				  enum iscsit_transport_type network_transport,
+				  int skip_bytes, bool *completed)
 {
 	char *payload = NULL;
 	struct iscsi_conn *conn = cmd->conn;
@@ -3405,7 +3404,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
 	unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
 	unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
 
-	buffer_len = max(conn->conn_ops->MaxRecvDataSegmentLength,
+	buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength,
 			 SENDTARGETS_BUF_LIMIT);
 
 	payload = kzalloc(buffer_len, GFP_KERNEL);
@@ -3484,9 +3483,16 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
 				end_of_buf = 1;
 				goto eob;
 			}
-			memcpy(payload + payload_len, buf, len);
-			payload_len += len;
-			target_name_printed = 1;
+
+			if (skip_bytes && len <= skip_bytes) {
+				skip_bytes -= len;
+			} else {
+				memcpy(payload + payload_len, buf, len);
+				payload_len += len;
+				target_name_printed = 1;
+				if (len > skip_bytes)
+					skip_bytes = 0;
+			}
 		}
 
 		len = sprintf(buf, "TargetAddress="
@@ -3502,15 +3508,24 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
 				end_of_buf = 1;
 				goto eob;
 			}
-			memcpy(payload + payload_len, buf, len);
-			payload_len += len;
+
+			if (skip_bytes && len <= skip_bytes) {
+				skip_bytes -= len;
+			} else {
+				memcpy(payload + payload_len, buf, len);
+				payload_len += len;
+				if (len > skip_bytes)
+					skip_bytes = 0;
+			}
 		}
 		spin_unlock(&tpg->tpg_np_lock);
 	}
 	spin_unlock(&tiqn->tiqn_tpg_lock);
 eob:
-	if (end_of_buf)
+	if (end_of_buf) {
+		*completed = false;
 		break;
+	}
 
 	if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
 		break;
@@ -3528,13 +3543,23 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 			  enum iscsit_transport_type network_transport)
 {
 	int text_length, padding;
+	bool completed = true;
 
-	text_length = iscsit_build_sendtargets_response(cmd, network_transport);
+	text_length = iscsit_build_sendtargets_response(cmd, network_transport,
+							cmd->read_data_done,
+							&completed);
 	if (text_length < 0)
 		return text_length;
 
+	if (completed) {
+		hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+	} else {
+		hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE;
+		cmd->read_data_done += text_length;
+		if (cmd->targ_xfer_tag == 0xFFFFFFFF)
+			cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
+	}
 	hdr->opcode = ISCSI_OP_TEXT_RSP;
-	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
 	padding = ((-text_length) & 3);
 	hton24(hdr->dlength, text_length);
 	hdr->itt = cmd->init_task_tag;
@@ -3543,21 +3568,25 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 	hdr->statsn = cpu_to_be32(cmd->stat_sn);
 
 	iscsit_increment_maxcmdsn(cmd, conn->sess);
+	/*
+	 * Reset maxcmdsn_inc in multi-part text payload exchanges to
+	 * correctly increment MaxCmdSN for each response answering a
+	 * non immediate text request with a valid CmdSN.
+	 */
+	cmd->maxcmdsn_inc = 0;
 	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
 	hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
 
-	pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x,"
-		" Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn,
-		text_length, conn->cid);
+	pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x,"
+		" Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag,
+		cmd->targ_xfer_tag, cmd->stat_sn, text_length, conn->cid,
+		!!(hdr->flags & ISCSI_FLAG_CMD_FINAL),
+		!!(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE));
 
 	return text_length + padding;
 }
 EXPORT_SYMBOL(iscsit_build_text_rsp);
 
-/*
- * FIXME: Add support for F_BIT and C_BIT when the length is longer than
- * MaxRecvDataSegmentLength.
- */
 static int iscsit_send_text_rsp(
 	struct iscsi_cmd *cmd,
 	struct iscsi_conn *conn)
@@ -4021,9 +4050,15 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
 		ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
 		break;
 	case ISCSI_OP_TEXT:
-		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
-		if (!cmd)
-			goto reject;
+		if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
+			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
+			if (!cmd)
+				goto reject;
+		} else {
+			cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+			if (!cmd)
+				goto reject;
+		}
 
 		ret = iscsit_handle_text_cmd(conn, cmd, buf);
 		break;
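
Combined, these iscsi_target.c hunks add multi-part SendTargets support, resolving the removed FIXME: when the full response no longer fits (note the max->min fix on the buffer sizing), the target sets the Continue bit instead of Final, assigns a TTT, and uses cmd->read_data_done to skip the bytes already delivered when the initiator re-issues the Text request with that TTT, which is why the RX path above now looks up the existing command by ITT instead of always allocating a new one. From the initiator's side the exchange is a loop, roughly as follows (a sketch of the RFC 3720 Text continuation exchange; send_text_request(), recv_text_response() and example_sendtargets() are illustrative stand-ins, not kernel APIs):

    static void example_sendtargets(u32 itt)
    {
    	u32 ttt = 0xFFFFFFFF;	/* reserved value: no continuation yet */
    	u8 flags;

    	do {
    		/*
    		 * Re-issue with the same ITT and whatever TTT the target
    		 * returned; the target resumes where it left off.
    		 */
    		send_text_request(itt, ttt);
    		recv_text_response(&flags, &ttt);
    	} while (flags & ISCSI_FLAG_TEXT_CONTINUE);
    }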
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index ab4915c0d933..47e249dccb5f 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -22,7 +22,7 @@
 #include <linux/err.h>
 #include <linux/scatterlist.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_nego.h"
 #include "iscsi_target_auth.h"
 
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 9059c1e0b26e..48384b675e62 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -28,7 +28,7 @@
 #include <target/configfs_macros.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_device.h"
 #include "iscsi_target_erl0.h"
@@ -36,7 +36,7 @@
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
 #include "iscsi_target_configfs.h"
 
 struct target_fabric_configfs *lio_target_fabric_configfs;
@@ -674,12 +674,9 @@ static ssize_t lio_target_nacl_show_info(
 	rb += sprintf(page+rb, "InitiatorAlias: %s\n",
 			sess->sess_ops->InitiatorAlias);
 
-	rb += sprintf(page+rb, "LIO Session ID: %u "
-		"ISID: 0x%02x %02x %02x %02x %02x %02x "
-		"TSIH: %hu ", sess->sid,
-		sess->isid[0], sess->isid[1], sess->isid[2],
-		sess->isid[3], sess->isid[4], sess->isid[5],
-		sess->tsih);
+	rb += sprintf(page+rb,
+		      "LIO Session ID: %u ISID: 0x%6ph TSIH: %hu ",
+		      sess->sid, sess->isid, sess->tsih);
 	rb += sprintf(page+rb, "SessionType: %s\n",
 		(sess->sess_ops->SessionType) ?
 		"Discovery" : "Normal");
@@ -1758,9 +1755,7 @@ static u32 lio_sess_get_initiator_sid(
 	/*
 	 * iSCSI Initiator Session Identifier from RFC-3720.
 	 */
-	return snprintf(buf, size, "%02x%02x%02x%02x%02x%02x",
-		sess->isid[0], sess->isid[1], sess->isid[2],
-		sess->isid[3], sess->isid[4], sess->isid[5]);
+	return snprintf(buf, size, "%6phN", sess->isid);
 }
 
 static int lio_queue_data_in(struct se_cmd *se_cmd)
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
deleted file mode 100644
index cbcff38ac9b7..000000000000
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ /dev/null
@@ -1,883 +0,0 @@
-#ifndef ISCSI_TARGET_CORE_H
-#define ISCSI_TARGET_CORE_H
-
-#include <linux/in.h>
-#include <linux/configfs.h>
-#include <net/sock.h>
-#include <net/tcp.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/iscsi_proto.h>
-#include <target/target_core_base.h>
-
-#define ISCSIT_VERSION	"v4.1.0"
-#define ISCSI_MAX_DATASN_MISSING_COUNT	16
-#define ISCSI_TX_THREAD_TCP_TIMEOUT	2
-#define ISCSI_RX_THREAD_TCP_TIMEOUT	2
-#define SECONDS_FOR_ASYNC_LOGOUT	10
-#define SECONDS_FOR_ASYNC_TEXT	10
-#define SECONDS_FOR_LOGOUT_COMP	15
-#define WHITE_SPACE	" \t\v\f\n\r"
-#define ISCSIT_MIN_TAGS	16
-#define ISCSIT_EXTRA_TAGS	8
-#define ISCSIT_TCP_BACKLOG	256
-
-/* struct iscsi_node_attrib sanity values */
-#define NA_DATAOUT_TIMEOUT	3
-#define NA_DATAOUT_TIMEOUT_MAX	60
-#define NA_DATAOUT_TIMEOUT_MIX	2
-#define NA_DATAOUT_TIMEOUT_RETRIES	5
-#define NA_DATAOUT_TIMEOUT_RETRIES_MAX	15
-#define NA_DATAOUT_TIMEOUT_RETRIES_MIN	1
-#define NA_NOPIN_TIMEOUT	15
-#define NA_NOPIN_TIMEOUT_MAX	60
-#define NA_NOPIN_TIMEOUT_MIN	3
-#define NA_NOPIN_RESPONSE_TIMEOUT	30
-#define NA_NOPIN_RESPONSE_TIMEOUT_MAX	60
-#define NA_NOPIN_RESPONSE_TIMEOUT_MIN	3
-#define NA_RANDOM_DATAIN_PDU_OFFSETS	0
-#define NA_RANDOM_DATAIN_SEQ_OFFSETS	0
-#define NA_RANDOM_R2T_OFFSETS	0
-
-/* struct iscsi_tpg_attrib sanity values */
-#define TA_AUTHENTICATION	1
-#define TA_LOGIN_TIMEOUT	15
-#define TA_LOGIN_TIMEOUT_MAX	30
-#define TA_LOGIN_TIMEOUT_MIN	5
-#define TA_NETIF_TIMEOUT	2
-#define TA_NETIF_TIMEOUT_MAX	15
-#define TA_NETIF_TIMEOUT_MIN	2
-#define TA_GENERATE_NODE_ACLS	0
-#define TA_DEFAULT_CMDSN_DEPTH	64
-#define TA_DEFAULT_CMDSN_DEPTH_MAX	512
-#define TA_DEFAULT_CMDSN_DEPTH_MIN	1
-#define TA_CACHE_DYNAMIC_ACLS	0
-/* Enabled by default in demo mode (generic_node_acls=1) */
-#define TA_DEMO_MODE_WRITE_PROTECT	1
-/* Disabled by default in production mode w/ explict ACLs */
-#define TA_PROD_MODE_WRITE_PROTECT	0
-#define TA_DEMO_MODE_DISCOVERY	1
-#define TA_DEFAULT_ERL	0
-#define TA_CACHE_CORE_NPS	0
-/* T10 protection information disabled by default */
-#define TA_DEFAULT_T10_PI	0
-
-#define ISCSI_IOV_DATA_BUFFER	5
-
-enum iscsit_transport_type {
-	ISCSI_TCP = 0,
-	ISCSI_SCTP_TCP = 1,
-	ISCSI_SCTP_UDP = 2,
-	ISCSI_IWARP_TCP = 3,
-	ISCSI_IWARP_SCTP = 4,
-	ISCSI_INFINIBAND = 5,
-};
-
-/* RFC-3720 7.1.4 Standard Connection State Diagram for a Target */
-enum target_conn_state_table {
-	TARG_CONN_STATE_FREE = 0x1,
-	TARG_CONN_STATE_XPT_UP = 0x3,
-	TARG_CONN_STATE_IN_LOGIN = 0x4,
-	TARG_CONN_STATE_LOGGED_IN = 0x5,
-	TARG_CONN_STATE_IN_LOGOUT = 0x6,
-	TARG_CONN_STATE_LOGOUT_REQUESTED = 0x7,
-	TARG_CONN_STATE_CLEANUP_WAIT = 0x8,
-};
-
-/* RFC-3720 7.3.2 Session State Diagram for a Target */
-enum target_sess_state_table {
-	TARG_SESS_STATE_FREE = 0x1,
-	TARG_SESS_STATE_ACTIVE = 0x2,
-	TARG_SESS_STATE_LOGGED_IN = 0x3,
-	TARG_SESS_STATE_FAILED = 0x4,
-	TARG_SESS_STATE_IN_CONTINUE = 0x5,
-};
-
-/* struct iscsi_data_count->type */
-enum data_count_type {
-	ISCSI_RX_DATA = 1,
-	ISCSI_TX_DATA = 2,
-};
-
-/* struct iscsi_datain_req->dr_complete */
-enum datain_req_comp_table {
-	DATAIN_COMPLETE_NORMAL = 1,
-	DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY = 2,
-	DATAIN_COMPLETE_CONNECTION_RECOVERY = 3,
-};
-
-/* struct iscsi_datain_req->recovery */
-enum datain_req_rec_table {
-	DATAIN_WITHIN_COMMAND_RECOVERY = 1,
-	DATAIN_CONNECTION_RECOVERY = 2,
-};
-
-/* struct iscsi_portal_group->state */
-enum tpg_state_table {
-	TPG_STATE_FREE = 0,
-	TPG_STATE_ACTIVE = 1,
-	TPG_STATE_INACTIVE = 2,
-	TPG_STATE_COLD_RESET = 3,
-};
-
-/* struct iscsi_tiqn->tiqn_state */
-enum tiqn_state_table {
-	TIQN_STATE_ACTIVE = 1,
-	TIQN_STATE_SHUTDOWN = 2,
-};
-
-/* struct iscsi_cmd->cmd_flags */
-enum cmd_flags_table {
-	ICF_GOT_LAST_DATAOUT = 0x00000001,
-	ICF_GOT_DATACK_SNACK = 0x00000002,
-	ICF_NON_IMMEDIATE_UNSOLICITED_DATA = 0x00000004,
-	ICF_SENT_LAST_R2T = 0x00000008,
-	ICF_WITHIN_COMMAND_RECOVERY = 0x00000010,
-	ICF_CONTIG_MEMORY = 0x00000020,
-	ICF_ATTACHED_TO_RQUEUE = 0x00000040,
-	ICF_OOO_CMDSN = 0x00000080,
-	ICF_SENDTARGETS_ALL = 0x00000100,
-	ICF_SENDTARGETS_SINGLE = 0x00000200,
-};
-
-/* struct iscsi_cmd->i_state */
-enum cmd_i_state_table {
-	ISTATE_NO_STATE = 0,
-	ISTATE_NEW_CMD = 1,
-	ISTATE_DEFERRED_CMD = 2,
-	ISTATE_UNSOLICITED_DATA = 3,
-	ISTATE_RECEIVE_DATAOUT = 4,
-	ISTATE_RECEIVE_DATAOUT_RECOVERY = 5,
-	ISTATE_RECEIVED_LAST_DATAOUT = 6,
-	ISTATE_WITHIN_DATAOUT_RECOVERY = 7,
-	ISTATE_IN_CONNECTION_RECOVERY = 8,
-	ISTATE_RECEIVED_TASKMGT = 9,
-	ISTATE_SEND_ASYNCMSG = 10,
-	ISTATE_SENT_ASYNCMSG = 11,
-	ISTATE_SEND_DATAIN = 12,
-	ISTATE_SEND_LAST_DATAIN = 13,
-	ISTATE_SENT_LAST_DATAIN = 14,
-	ISTATE_SEND_LOGOUTRSP = 15,
-	ISTATE_SENT_LOGOUTRSP = 16,
-	ISTATE_SEND_NOPIN = 17,
-	ISTATE_SENT_NOPIN = 18,
-	ISTATE_SEND_REJECT = 19,
-	ISTATE_SENT_REJECT = 20,
-	ISTATE_SEND_R2T = 21,
-	ISTATE_SENT_R2T = 22,
-	ISTATE_SEND_R2T_RECOVERY = 23,
-	ISTATE_SENT_R2T_RECOVERY = 24,
-	ISTATE_SEND_LAST_R2T = 25,
-	ISTATE_SENT_LAST_R2T = 26,
-	ISTATE_SEND_LAST_R2T_RECOVERY = 27,
-	ISTATE_SENT_LAST_R2T_RECOVERY = 28,
-	ISTATE_SEND_STATUS = 29,
-	ISTATE_SEND_STATUS_BROKEN_PC = 30,
-	ISTATE_SENT_STATUS = 31,
-	ISTATE_SEND_STATUS_RECOVERY = 32,
-	ISTATE_SENT_STATUS_RECOVERY = 33,
-	ISTATE_SEND_TASKMGTRSP = 34,
-	ISTATE_SENT_TASKMGTRSP = 35,
-	ISTATE_SEND_TEXTRSP = 36,
-	ISTATE_SENT_TEXTRSP = 37,
-	ISTATE_SEND_NOPIN_WANT_RESPONSE = 38,
-	ISTATE_SENT_NOPIN_WANT_RESPONSE = 39,
-	ISTATE_SEND_NOPIN_NO_RESPONSE = 40,
-	ISTATE_REMOVE = 41,
-	ISTATE_FREE = 42,
-};
-
-/* Used for iscsi_recover_cmdsn() return values */
-enum recover_cmdsn_ret_table {
-	CMDSN_ERROR_CANNOT_RECOVER = -1,
-	CMDSN_NORMAL_OPERATION = 0,
-	CMDSN_LOWER_THAN_EXP = 1,
-	CMDSN_HIGHER_THAN_EXP = 2,
-	CMDSN_MAXCMDSN_OVERRUN = 3,
-};
-
-/* Used for iscsi_handle_immediate_data() return values */
-enum immedate_data_ret_table {
-	IMMEDIATE_DATA_CANNOT_RECOVER = -1,
-	IMMEDIATE_DATA_NORMAL_OPERATION = 0,
-	IMMEDIATE_DATA_ERL1_CRC_FAILURE = 1,
-};
-
-/* Used for iscsi_decide_dataout_action() return values */
-enum dataout_action_ret_table {
-	DATAOUT_CANNOT_RECOVER = -1,
-	DATAOUT_NORMAL = 0,
-	DATAOUT_SEND_R2T = 1,
-	DATAOUT_SEND_TO_TRANSPORT = 2,
-	DATAOUT_WITHIN_COMMAND_RECOVERY = 3,
-};
-
-/* Used for struct iscsi_node_auth->naf_flags */
-enum naf_flags_table {
-	NAF_USERID_SET = 0x01,
-	NAF_PASSWORD_SET = 0x02,
-	NAF_USERID_IN_SET = 0x04,
-	NAF_PASSWORD_IN_SET = 0x08,
-};
-
-/* Used by various struct timer_list to manage iSCSI specific state */
-enum iscsi_timer_flags_table {
-	ISCSI_TF_RUNNING = 0x01,
-	ISCSI_TF_STOP = 0x02,
-	ISCSI_TF_EXPIRED = 0x04,
-};
-
-/* Used for struct iscsi_np->np_flags */
-enum np_flags_table {
-	NPF_IP_NETWORK = 0x00,
-};
-
-/* Used for struct iscsi_np->np_thread_state */
-enum np_thread_state_table {
-	ISCSI_NP_THREAD_ACTIVE = 1,
-	ISCSI_NP_THREAD_INACTIVE = 2,
-	ISCSI_NP_THREAD_RESET = 3,
-	ISCSI_NP_THREAD_SHUTDOWN = 4,
-	ISCSI_NP_THREAD_EXIT = 5,
-};
-
-struct iscsi_conn_ops {
-	u8 HeaderDigest; /* [0,1] == [None,CRC32C] */
-	u8 DataDigest; /* [0,1] == [None,CRC32C] */
-	u32 MaxRecvDataSegmentLength; /* [512..2**24-1] */
-	u32 MaxXmitDataSegmentLength; /* [512..2**24-1] */
-	u8 OFMarker; /* [0,1] == [No,Yes] */
-	u8 IFMarker; /* [0,1] == [No,Yes] */
-	u32 OFMarkInt; /* [1..65535] */
-	u32 IFMarkInt; /* [1..65535] */
-	/*
-	 * iSER specific connection parameters
-	 */
-	u32 InitiatorRecvDataSegmentLength; /* [512..2**24-1] */
-	u32 TargetRecvDataSegmentLength; /* [512..2**24-1] */
-};
-
-struct iscsi_sess_ops {
-	char InitiatorName[224];
-	char InitiatorAlias[256];
-	char TargetName[224];
-	char TargetAlias[256];
-	char TargetAddress[256];
-	u16 TargetPortalGroupTag; /* [0..65535] */
-	u16 MaxConnections; /* [1..65535] */
-	u8 InitialR2T; /* [0,1] == [No,Yes] */
-	u8 ImmediateData; /* [0,1] == [No,Yes] */
-	u32 MaxBurstLength; /* [512..2**24-1] */
-	u32 FirstBurstLength; /* [512..2**24-1] */
-	u16 DefaultTime2Wait; /* [0..3600] */
-	u16 DefaultTime2Retain; /* [0..3600] */
-	u16 MaxOutstandingR2T; /* [1..65535] */
-	u8 DataPDUInOrder; /* [0,1] == [No,Yes] */
-	u8 DataSequenceInOrder; /* [0,1] == [No,Yes] */
-	u8 ErrorRecoveryLevel; /* [0..2] */
-	u8 SessionType; /* [0,1] == [Normal,Discovery]*/
-	/*
-	 * iSER specific session parameters
-	 */
-	u8 RDMAExtensions; /* [0,1] == [No,Yes] */
-};
-
-struct iscsi_queue_req {
-	int state;
-	struct iscsi_cmd *cmd;
-	struct list_head qr_list;
-};
-
-struct iscsi_data_count {
-	int data_length;
-	int sync_and_steering;
-	enum data_count_type type;
-	u32 iov_count;
-	u32 ss_iov_count;
-	u32 ss_marker_count;
-	struct kvec *iov;
-};
-
-struct iscsi_param_list {
-	bool iser;
-	struct list_head param_list;
-	struct list_head extra_response_list;
-};
-
-struct iscsi_datain_req {
-	enum datain_req_comp_table dr_complete;
-	int generate_recovery_values;
-	enum datain_req_rec_table recovery;
-	u32 begrun;
-	u32 runlength;
-	u32 data_length;
-	u32 data_offset;
-	u32 data_sn;
-	u32 next_burst_len;
-	u32 read_data_done;
-	u32 seq_send_order;
-	struct list_head cmd_datain_node;
-} ____cacheline_aligned;
-
-struct iscsi_ooo_cmdsn {
-	u16 cid;
-	u32 batch_count;
-	u32 cmdsn;
-	u32 exp_cmdsn;
-	struct iscsi_cmd *cmd;
-	struct list_head ooo_list;
-} ____cacheline_aligned;
-
-struct iscsi_datain {
-	u8 flags;
-	u32 data_sn;
-	u32 length;
-	u32 offset;
-} ____cacheline_aligned;
-
-struct iscsi_r2t {
-	int seq_complete;
-	int recovery_r2t;
-	int sent_r2t;
-	u32 r2t_sn;
-	u32 offset;
-	u32 targ_xfer_tag;
-	u32 xfer_len;
-	struct list_head r2t_list;
-} ____cacheline_aligned;
-
-struct iscsi_cmd {
-	enum iscsi_timer_flags_table dataout_timer_flags;
-	/* DataOUT timeout retries */
-	u8 dataout_timeout_retries;
-	/* Within command recovery count */
-	u8 error_recovery_count;
-	/* iSCSI dependent state for out or order CmdSNs */
-	enum cmd_i_state_table deferred_i_state;
-	/* iSCSI dependent state */
-	enum cmd_i_state_table i_state;
-	/* Command is an immediate command (ISCSI_OP_IMMEDIATE set) */
-	u8 immediate_cmd;
-	/* Immediate data present */
-	u8 immediate_data;
-	/* iSCSI Opcode */
-	u8 iscsi_opcode;
-	/* iSCSI Response Code */
-	u8 iscsi_response;
-	/* Logout reason when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
-	u8 logout_reason;
-	/* Logout response code when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
-	u8 logout_response;
-	/* MaxCmdSN has been incremented */
-	u8 maxcmdsn_inc;
-	/* Immediate Unsolicited Dataout */
-	u8 unsolicited_data;
-	/* Reject reason code */
-	u8 reject_reason;
-	/* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
-	u16 logout_cid;
-	/* Command flags */
-	enum cmd_flags_table cmd_flags;
-	/* Initiator Task Tag assigned from Initiator */
-	itt_t init_task_tag;
-	/* Target Transfer Tag assigned from Target */
-	u32 targ_xfer_tag;
-	/* CmdSN assigned from Initiator */
-	u32 cmd_sn;
-	/* ExpStatSN assigned from Initiator */
-	u32 exp_stat_sn;
-	/* StatSN assigned to this ITT */
-	u32 stat_sn;
-	/* DataSN Counter */
-	u32 data_sn;
-	/* R2TSN Counter */
-	u32 r2t_sn;
-	/* Last DataSN acknowledged via DataAck SNACK */
-	u32 acked_data_sn;
-	/* Used for echoing NOPOUT ping data */
-	u32 buf_ptr_size;
-	/* Used to store DataDigest */
-	u32 data_crc;
-	/* Counter for MaxOutstandingR2T */
-	u32 outstanding_r2ts;
-	/* Next R2T Offset when DataSequenceInOrder=Yes */
-	u32 r2t_offset;
-	/* Iovec current and orig count for iscsi_cmd->iov_data */
-	u32 iov_data_count;
-	u32 orig_iov_data_count;
-	/* Number of miscellaneous iovecs used for IP stack calls */
-	u32 iov_misc_count;
-	/* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */
-	u32 pdu_count;
-	/* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */
-	u32 pdu_send_order;
-	/* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
-	u32 pdu_start;
-	/* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
-	u32 seq_send_order;
-	/* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
-	u32 seq_count;
-	/* Current struct iscsi_seq in struct iscsi_cmd->seq_list */
-	u32 seq_no;
-	/* Lowest offset in current DataOUT sequence */
-	u32 seq_start_offset;
-	/* Highest offset in current DataOUT sequence */
-	u32 seq_end_offset;
-	/* Total size in bytes received so far of READ data */
-	u32 read_data_done;
-	/* Total size in bytes received so far of WRITE data */
-	u32 write_data_done;
-	/* Counter for FirstBurstLength key */
-	u32 first_burst_len;
-	/* Counter for MaxBurstLength key */
-	u32 next_burst_len;
-	/* Transfer size used for IP stack calls */
-	u32 tx_size;
-	/* Buffer used for various purposes */
-	void *buf_ptr;
-	/* Used by SendTargets=[iqn.,eui.] discovery */
-	void *text_in_ptr;
-	/* See include/linux/dma-mapping.h */
-	enum dma_data_direction data_direction;
-	/* iSCSI PDU Header + CRC */
-	unsigned char pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN];
-	/* Number of times struct iscsi_cmd is present in immediate queue */
-	atomic_t immed_queue_count;
-	atomic_t response_queue_count;
-	spinlock_t datain_lock;
-	spinlock_t dataout_timeout_lock;
-	/* spinlock for protecting struct iscsi_cmd->i_state */
-	spinlock_t istate_lock;
-	/* spinlock for adding within command recovery entries */
-	spinlock_t error_lock;
-	/* spinlock for adding R2Ts */
-	spinlock_t r2t_lock;
-	/* DataIN List */
-	struct list_head datain_list;
-	/* R2T List */
-	struct list_head cmd_r2t_list;
-	/* Timer for DataOUT */
-	struct timer_list dataout_timer;
-	/* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
-	struct kvec *iov_data;
-	/* Iovecs for miscellaneous purposes */
-#define ISCSI_MISC_IOVECS 5
-	struct kvec iov_misc[ISCSI_MISC_IOVECS];
-	/* Array of struct iscsi_pdu used for DataPDUInOrder=No */
-	struct iscsi_pdu *pdu_list;
-	/* Current struct iscsi_pdu used for DataPDUInOrder=No */
-	struct iscsi_pdu *pdu_ptr;
-	/* Array of struct iscsi_seq used for DataSequenceInOrder=No */
-	struct iscsi_seq *seq_list;
-	/* Current struct iscsi_seq used for DataSequenceInOrder=No */
-	struct iscsi_seq *seq_ptr;
-	/* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */
-	struct iscsi_tmr_req *tmr_req;
-	/* Connection this command is alligient to */
-	struct iscsi_conn *conn;
-	/* Pointer to connection recovery entry */
-	struct iscsi_conn_recovery *cr;
-	/* Session the command is part of, used for connection recovery */
-	struct iscsi_session *sess;
-	/* list_head for connection list */
-	struct list_head i_conn_node;
-	/* The TCM I/O descriptor that is accessed via container_of() */
-	struct se_cmd se_cmd;
-	/* Sense buffer that will be mapped into outgoing status */
-#define ISCSI_SENSE_BUFFER_LEN (TRANSPORT_SENSE_BUFFER + 2)
-	unsigned char sense_buffer[ISCSI_SENSE_BUFFER_LEN];
-
-	u32 padding;
-	u8 pad_bytes[4];
-
-	struct scatterlist *first_data_sg;
-	u32 first_data_sg_off;
-	u32 kmapped_nents;
-	sense_reason_t sense_reason;
-} ____cacheline_aligned;
-
-struct iscsi_tmr_req {
-	bool task_reassign:1;
-	u32 exp_data_sn;
-	struct iscsi_cmd *ref_cmd;
-	struct iscsi_conn_recovery *conn_recovery;
-	struct se_tmr_req *se_tmr_req;
-};
-
-struct iscsi_conn {
-	wait_queue_head_t queues_wq;
-	/* Authentication Successful for this connection */
-	u8 auth_complete;
-	/* State connection is currently in */
-	u8 conn_state;
-	u8 conn_logout_reason;
-	u8 network_transport;
-	enum iscsi_timer_flags_table nopin_timer_flags;
-	enum iscsi_timer_flags_table nopin_response_timer_flags;
-	/* Used to know what thread encountered a transport failure */
-	u8 which_thread;
-	/* connection id assigned by the Initiator */
-	u16 cid;
-	/* Remote TCP Port */
-	u16 login_port;
-	u16 local_port;
-	int net_size;
-	int login_family;
-	u32 auth_id;
-	u32 conn_flags;
-	/* Used for iscsi_tx_login_rsp() */
-	itt_t login_itt;
-	u32 exp_statsn;
-	/* Per connection status sequence number */
-	u32 stat_sn;
-	/* IFMarkInt's Current Value */
533 u32 if_marker;
534 /* OFMarkInt's Current Value */
535 u32 of_marker;
536 /* Used for calculating OFMarker offset to next PDU */
537 u32 of_marker_offset;
538#define IPV6_ADDRESS_SPACE 48
539 unsigned char login_ip[IPV6_ADDRESS_SPACE];
540 unsigned char local_ip[IPV6_ADDRESS_SPACE];
541 int conn_usage_count;
542 int conn_waiting_on_uc;
543 atomic_t check_immediate_queue;
544 atomic_t conn_logout_remove;
545 atomic_t connection_exit;
546 atomic_t connection_recovery;
547 atomic_t connection_reinstatement;
548 atomic_t connection_wait_rcfr;
549 atomic_t sleep_on_conn_wait_comp;
550 atomic_t transport_failed;
551 struct completion conn_post_wait_comp;
552 struct completion conn_wait_comp;
553 struct completion conn_wait_rcfr_comp;
554 struct completion conn_waiting_on_uc_comp;
555 struct completion conn_logout_comp;
556 struct completion tx_half_close_comp;
557 struct completion rx_half_close_comp;
558 /* socket used by this connection */
559 struct socket *sock;
560 void (*orig_data_ready)(struct sock *);
561 void (*orig_state_change)(struct sock *);
562#define LOGIN_FLAGS_READ_ACTIVE 1
563#define LOGIN_FLAGS_CLOSED 2
564#define LOGIN_FLAGS_READY 4
565 unsigned long login_flags;
566 struct delayed_work login_work;
567 struct delayed_work login_cleanup_work;
568 struct iscsi_login *login;
569 struct timer_list nopin_timer;
570 struct timer_list nopin_response_timer;
571 struct timer_list transport_timer;
572 struct task_struct *login_kworker;
573 /* Spinlock used for add/deleting cmd's from conn_cmd_list */
574 spinlock_t cmd_lock;
575 spinlock_t conn_usage_lock;
576 spinlock_t immed_queue_lock;
577 spinlock_t nopin_timer_lock;
578 spinlock_t response_queue_lock;
579 spinlock_t state_lock;
580 /* libcrypto RX and TX contexts for crc32c */
581 struct hash_desc conn_rx_hash;
582 struct hash_desc conn_tx_hash;
583 /* Used for scheduling TX and RX connection kthreads */
584 cpumask_var_t conn_cpumask;
585 unsigned int conn_rx_reset_cpumask:1;
586 unsigned int conn_tx_reset_cpumask:1;
587 /* list_head of struct iscsi_cmd for this connection */
588 struct list_head conn_cmd_list;
589 struct list_head immed_queue_list;
590 struct list_head response_queue_list;
591 struct iscsi_conn_ops *conn_ops;
592 struct iscsi_login *conn_login;
593 struct iscsit_transport *conn_transport;
594 struct iscsi_param_list *param_list;
595 /* Used for per connection auth state machine */
596 void *auth_protocol;
597 void *context;
598 struct iscsi_login_thread_s *login_thread;
599 struct iscsi_portal_group *tpg;
600 struct iscsi_tpg_np *tpg_np;
601 /* Pointer to parent session */
602 struct iscsi_session *sess;
603 /* Pointer to thread_set in use for this conn's threads */
604 struct iscsi_thread_set *thread_set;
605 /* list_head for session connection list */
606 struct list_head conn_list;
607} ____cacheline_aligned;
608
609struct iscsi_conn_recovery {
610 u16 cid;
611 u32 cmd_count;
612 u32 maxrecvdatasegmentlength;
613 u32 maxxmitdatasegmentlength;
614 int ready_for_reallegiance;
615 struct list_head conn_recovery_cmd_list;
616 spinlock_t conn_recovery_cmd_lock;
617 struct timer_list time2retain_timer;
618 struct iscsi_session *sess;
619 struct list_head cr_list;
620} ____cacheline_aligned;
621
622struct iscsi_session {
623 u8 initiator_vendor;
624 u8 isid[6];
625 enum iscsi_timer_flags_table time2retain_timer_flags;
626 u8 version_active;
627 u16 cid_called;
628 u16 conn_recovery_count;
629 u16 tsih;
630 /* state session is currently in */
631 u32 session_state;
632 /* session wide counter: initiator assigned task tag */
633 itt_t init_task_tag;
634 /* session wide counter: target assigned task tag */
635 u32 targ_xfer_tag;
636 u32 cmdsn_window;
637
638 /* protects cmdsn values */
639 struct mutex cmdsn_mutex;
640 /* session wide counter: expected command sequence number */
641 u32 exp_cmd_sn;
642 /* session wide counter: maximum allowed command sequence number */
643 u32 max_cmd_sn;
644 struct list_head sess_ooo_cmdsn_list;
645
646 /* LIO specific session ID */
647 u32 sid;
648 char auth_type[8];
649 /* unique within the target */
650 int session_index;
651 /* Used for session reference counting */
652 int session_usage_count;
653 int session_waiting_on_uc;
654 atomic_long_t cmd_pdus;
655 atomic_long_t rsp_pdus;
656 atomic_long_t tx_data_octets;
657 atomic_long_t rx_data_octets;
658 atomic_long_t conn_digest_errors;
659 atomic_long_t conn_timeout_errors;
660 u64 creation_time;
661 /* Number of active connections */
662 atomic_t nconn;
663 atomic_t session_continuation;
664 atomic_t session_fall_back_to_erl0;
665 atomic_t session_logout;
666 atomic_t session_reinstatement;
667 atomic_t session_stop_active;
668 atomic_t sleep_on_sess_wait_comp;
669 /* connection list */
670 struct list_head sess_conn_list;
671 struct list_head cr_active_list;
672 struct list_head cr_inactive_list;
673 spinlock_t conn_lock;
674 spinlock_t cr_a_lock;
675 spinlock_t cr_i_lock;
676 spinlock_t session_usage_lock;
677 spinlock_t ttt_lock;
678 struct completion async_msg_comp;
679 struct completion reinstatement_comp;
680 struct completion session_wait_comp;
681 struct completion session_waiting_on_uc_comp;
682 struct timer_list time2retain_timer;
683 struct iscsi_sess_ops *sess_ops;
684 struct se_session *se_sess;
685 struct iscsi_portal_group *tpg;
686} ____cacheline_aligned;
687
688struct iscsi_login {
689 u8 auth_complete;
690 u8 checked_for_existing;
691 u8 current_stage;
692 u8 leading_connection;
693 u8 first_request;
694 u8 version_min;
695 u8 version_max;
696 u8 login_complete;
697 u8 login_failed;
698 bool zero_tsih;
699 char isid[6];
700 u32 cmd_sn;
701 itt_t init_task_tag;
702 u32 initial_exp_statsn;
703 u32 rsp_length;
704 u16 cid;
705 u16 tsih;
706 char req[ISCSI_HDR_LEN];
707 char rsp[ISCSI_HDR_LEN];
708 char *req_buf;
709 char *rsp_buf;
710 struct iscsi_conn *conn;
711 struct iscsi_np *np;
712} ____cacheline_aligned;
713
714struct iscsi_node_attrib {
715 u32 dataout_timeout;
716 u32 dataout_timeout_retries;
717 u32 default_erl;
718 u32 nopin_timeout;
719 u32 nopin_response_timeout;
720 u32 random_datain_pdu_offsets;
721 u32 random_datain_seq_offsets;
722 u32 random_r2t_offsets;
723 u32 tmr_cold_reset;
724 u32 tmr_warm_reset;
725 struct iscsi_node_acl *nacl;
726};
727
728struct se_dev_entry_s;
729
730struct iscsi_node_auth {
731 enum naf_flags_table naf_flags;
732 int authenticate_target;
733 /* Used for iscsit_global->discovery_auth,
734 * set to zero (auth disabled) by default */
735 int enforce_discovery_auth;
736#define MAX_USER_LEN 256
737#define MAX_PASS_LEN 256
738 char userid[MAX_USER_LEN];
739 char password[MAX_PASS_LEN];
740 char userid_mutual[MAX_USER_LEN];
741 char password_mutual[MAX_PASS_LEN];
742};
743
744#include "iscsi_target_stat.h"
745
746struct iscsi_node_stat_grps {
747 struct config_group iscsi_sess_stats_group;
748 struct config_group iscsi_conn_stats_group;
749};
750
751struct iscsi_node_acl {
752 struct iscsi_node_attrib node_attrib;
753 struct iscsi_node_auth node_auth;
754 struct iscsi_node_stat_grps node_stat_grps;
755 struct se_node_acl se_node_acl;
756};
757
758struct iscsi_tpg_attrib {
759 u32 authentication;
760 u32 login_timeout;
761 u32 netif_timeout;
762 u32 generate_node_acls;
763 u32 cache_dynamic_acls;
764 u32 default_cmdsn_depth;
765 u32 demo_mode_write_protect;
766 u32 prod_mode_write_protect;
767 u32 demo_mode_discovery;
768 u32 default_erl;
769 u8 t10_pi;
770 struct iscsi_portal_group *tpg;
771};
772
773struct iscsi_np {
774 int np_network_transport;
775 int np_ip_proto;
776 int np_sock_type;
777 enum np_thread_state_table np_thread_state;
778 bool enabled;
779 enum iscsi_timer_flags_table np_login_timer_flags;
780 u32 np_exports;
781 enum np_flags_table np_flags;
782 unsigned char np_ip[IPV6_ADDRESS_SPACE];
783 u16 np_port;
784 spinlock_t np_thread_lock;
785 struct completion np_restart_comp;
786 struct socket *np_socket;
787 struct __kernel_sockaddr_storage np_sockaddr;
788 struct task_struct *np_thread;
789 struct timer_list np_login_timer;
790 void *np_context;
791 struct iscsit_transport *np_transport;
792 struct list_head np_list;
793} ____cacheline_aligned;
794
795struct iscsi_tpg_np {
796 struct iscsi_np *tpg_np;
797 struct iscsi_portal_group *tpg;
798 struct iscsi_tpg_np *tpg_np_parent;
799 struct list_head tpg_np_list;
800 struct list_head tpg_np_child_list;
801 struct list_head tpg_np_parent_list;
802 struct se_tpg_np se_tpg_np;
803 spinlock_t tpg_np_parent_lock;
804 struct completion tpg_np_comp;
805 struct kref tpg_np_kref;
806};
807
808struct iscsi_portal_group {
809 unsigned char tpg_chap_id;
810 /* TPG State */
811 enum tpg_state_table tpg_state;
812 /* Target Portal Group Tag */
813 u16 tpgt;
814 /* Id assigned to target sessions */
815 u16 ntsih;
816 /* Number of active sessions */
817 u32 nsessions;
818 /* Number of Network Portals available for this TPG */
819 u32 num_tpg_nps;
820 /* Per TPG LIO specific session ID. */
821 u32 sid;
822 /* Spinlock for adding/removing Network Portals */
823 spinlock_t tpg_np_lock;
824 spinlock_t tpg_state_lock;
825 struct se_portal_group tpg_se_tpg;
826 struct mutex tpg_access_lock;
827 struct semaphore np_login_sem;
828 struct iscsi_tpg_attrib tpg_attrib;
829 struct iscsi_node_auth tpg_demo_auth;
830 /* Pointer to default list of iSCSI parameters for TPG */
831 struct iscsi_param_list *param_list;
832 struct iscsi_tiqn *tpg_tiqn;
833 struct list_head tpg_gnp_list;
834 struct list_head tpg_list;
835} ____cacheline_aligned;
836
837struct iscsi_wwn_stat_grps {
838 struct config_group iscsi_stat_group;
839 struct config_group iscsi_instance_group;
840 struct config_group iscsi_sess_err_group;
841 struct config_group iscsi_tgt_attr_group;
842 struct config_group iscsi_login_stats_group;
843 struct config_group iscsi_logout_stats_group;
844};
845
846struct iscsi_tiqn {
847#define ISCSI_IQN_LEN 224
848 unsigned char tiqn[ISCSI_IQN_LEN];
849 enum tiqn_state_table tiqn_state;
850 int tiqn_access_count;
851 u32 tiqn_active_tpgs;
852 u32 tiqn_ntpgs;
853 u32 tiqn_num_tpg_nps;
854 u32 tiqn_nsessions;
855 struct list_head tiqn_list;
856 struct list_head tiqn_tpg_list;
857 spinlock_t tiqn_state_lock;
858 spinlock_t tiqn_tpg_lock;
859 struct se_wwn tiqn_wwn;
860 struct iscsi_wwn_stat_grps tiqn_stat_grps;
861 int tiqn_index;
862 struct iscsi_sess_err_stats sess_err_stats;
863 struct iscsi_login_stats login_stats;
864 struct iscsi_logout_stats logout_stats;
865} ____cacheline_aligned;
866
867struct iscsit_global {
868 /* In core shutdown */
869 u32 in_shutdown;
870 u32 active_ts;
871 /* Unique identifier used for the authentication daemon */
872 u32 auth_id;
873 u32 inactive_ts;
874 /* Thread Set bitmap count */
875 int ts_bitmap_count;
876 /* Thread Set bitmap pointer */
877 unsigned long *ts_bitmap;
878 /* Used for iSCSI discovery session authentication */
879 struct iscsi_node_acl discovery_acl;
880 struct iscsi_portal_group *discovery_tpg;
881};
882
883#endif /* ISCSI_TARGET_CORE_H */
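Editor's note on the header above: struct iscsi_cmd embeds its generic TCM descriptor (se_cmd) by value, and the comment says the fabric command is reached from it via container_of(). A minimal sketch of that recovery pattern, with an illustrative helper name rather than a symbol taken verbatim from this tree:

/*
 * Sketch only: recover the fabric command from the embedded TCM
 * descriptor. container_of() subtracts offsetof(struct iscsi_cmd,
 * se_cmd) from the se_cmd pointer to reach the enclosing structure.
 */
static inline struct iscsi_cmd *iscsit_cmd_from_se_cmd(struct se_cmd *se_cmd)
{
	return container_of(se_cmd, struct iscsi_cmd, se_cmd);
}

Embedding by value avoids a second allocation per command and makes the conversion a compile-time offset subtraction rather than a pointer chase.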
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
index e93d5a7a3f81..fb3b52b124ac 100644
--- a/drivers/target/iscsi/iscsi_target_datain_values.c
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -18,7 +18,7 @@
 
 #include <scsi/iscsi_proto.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_erl1.h"
 #include "iscsi_target_util.h"
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index 7087c736daa5..34c3cd1b05ce 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -21,7 +21,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_device.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index a0ae5fc0ad75..1c197bad6132 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -21,7 +21,8 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
+#include <target/iscsi/iscsi_transport.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_tq.h"
 #include "iscsi_target_erl0.h"
@@ -939,7 +940,8 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
 
 	if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
 		spin_unlock_bh(&conn->state_lock);
-		iscsit_close_connection(conn);
+		if (conn->conn_transport->transport_type == ISCSI_TCP)
+			iscsit_close_connection(conn);
 		return;
 	}
 
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index cda4d80cfaef..2e561deb30a2 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -22,7 +22,7 @@
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_datain_values.h"
 #include "iscsi_target_device.h"
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 4ca8fd2a70db..e24f1c7c5862 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -21,7 +21,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_datain_values.h"
 #include "iscsi_target_util.h"
 #include "iscsi_target_erl0.h"
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 713c0c1877ab..153fb66ac1b8 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -24,14 +24,14 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
+#include <target/iscsi/iscsi_target_stat.h>
 #include "iscsi_target_tq.h"
 #include "iscsi_target_device.h"
 #include "iscsi_target_nego.h"
 #include "iscsi_target_erl0.h"
 #include "iscsi_target_erl2.h"
 #include "iscsi_target_login.h"
-#include "iscsi_target_stat.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 62a095f36bf2..8c02fa34716f 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -22,7 +22,7 @@
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_login.h"
 #include "iscsi_target_nego.h"
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
index 16454a922e2b..208cca8a363c 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -18,7 +18,7 @@
 
 #include <target/target_core_base.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_device.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 18c29260b4a2..d4f9e9645697 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -18,7 +18,7 @@
 
 #include <linux/slab.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_util.h"
 #include "iscsi_target_parameters.h"
 
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
index ca41b583f2f6..e446a09c886b 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -20,7 +20,7 @@
 #include <linux/slab.h>
 #include <linux/random.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_util.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_seq_pdu_list.h"
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index 103395510307..5e1349a3b143 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -23,12 +23,12 @@
 #include <target/target_core_base.h>
 #include <target/configfs_macros.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_device.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
 
 #ifndef INITIAL_JIFFIES
 #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
diff --git a/drivers/target/iscsi/iscsi_target_stat.h b/drivers/target/iscsi/iscsi_target_stat.h
deleted file mode 100644
index 3ff76b4faad3..000000000000
--- a/drivers/target/iscsi/iscsi_target_stat.h
+++ /dev/null
@@ -1,64 +0,0 @@
-#ifndef ISCSI_TARGET_STAT_H
-#define ISCSI_TARGET_STAT_H
-
-/*
- * For struct iscsi_tiqn->tiqn_wwn default groups
- */
-extern struct config_item_type iscsi_stat_instance_cit;
-extern struct config_item_type iscsi_stat_sess_err_cit;
-extern struct config_item_type iscsi_stat_tgt_attr_cit;
-extern struct config_item_type iscsi_stat_login_cit;
-extern struct config_item_type iscsi_stat_logout_cit;
-
-/*
- * For struct iscsi_session->se_sess default groups
- */
-extern struct config_item_type iscsi_stat_sess_cit;
-
-/* iSCSI session error types */
-#define ISCSI_SESS_ERR_UNKNOWN 0
-#define ISCSI_SESS_ERR_DIGEST 1
-#define ISCSI_SESS_ERR_CXN_TIMEOUT 2
-#define ISCSI_SESS_ERR_PDU_FORMAT 3
-
-/* iSCSI session error stats */
-struct iscsi_sess_err_stats {
-	spinlock_t lock;
-	u32 digest_errors;
-	u32 cxn_timeout_errors;
-	u32 pdu_format_errors;
-	u32 last_sess_failure_type;
-	char last_sess_fail_rem_name[224];
-} ____cacheline_aligned;
-
-/* iSCSI login failure types (sub oids) */
-#define ISCSI_LOGIN_FAIL_OTHER 2
-#define ISCSI_LOGIN_FAIL_REDIRECT 3
-#define ISCSI_LOGIN_FAIL_AUTHORIZE 4
-#define ISCSI_LOGIN_FAIL_AUTHENTICATE 5
-#define ISCSI_LOGIN_FAIL_NEGOTIATE 6
-
-/* iSCSI login stats */
-struct iscsi_login_stats {
-	spinlock_t lock;
-	u32 accepts;
-	u32 other_fails;
-	u32 redirects;
-	u32 authorize_fails;
-	u32 authenticate_fails;
-	u32 negotiate_fails; /* used for notifications */
-	u64 last_fail_time; /* time stamp (jiffies) */
-	u32 last_fail_type;
-	int last_intr_fail_ip_family;
-	unsigned char last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE];
-	char last_intr_fail_name[224];
-} ____cacheline_aligned;
-
-/* iSCSI logout stats */
-struct iscsi_logout_stats {
-	spinlock_t lock;
-	u32 normal_logouts;
-	u32 abnormal_logouts;
-} ____cacheline_aligned;
-
-#endif /*** ISCSI_TARGET_STAT_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index 78404b1cc0bf..b0224a77e26d 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -23,7 +23,7 @@
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_datain_values.h"
 #include "iscsi_target_device.h"
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 9053a3c0c6e5..bdd127c0e3ae 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -20,7 +20,7 @@
 #include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_erl0.h"
 #include "iscsi_target_login.h"
 #include "iscsi_target_nodeattrib.h"
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
index 601e9cc61e98..26aa50996473 100644
--- a/drivers/target/iscsi/iscsi_target_tq.c
+++ b/drivers/target/iscsi/iscsi_target_tq.c
@@ -20,40 +20,26 @@
 #include <linux/list.h>
 #include <linux/bitmap.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_tq.h"
 #include "iscsi_target.h"
 
-static LIST_HEAD(active_ts_list);
 static LIST_HEAD(inactive_ts_list);
-static DEFINE_SPINLOCK(active_ts_lock);
 static DEFINE_SPINLOCK(inactive_ts_lock);
 static DEFINE_SPINLOCK(ts_bitmap_lock);
 
-static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts)
-{
-	spin_lock(&active_ts_lock);
-	list_add_tail(&ts->ts_list, &active_ts_list);
-	iscsit_global->active_ts++;
-	spin_unlock(&active_ts_lock);
-}
-
 static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
 {
+	if (!list_empty(&ts->ts_list)) {
+		WARN_ON(1);
+		return;
+	}
 	spin_lock(&inactive_ts_lock);
 	list_add_tail(&ts->ts_list, &inactive_ts_list);
 	iscsit_global->inactive_ts++;
 	spin_unlock(&inactive_ts_lock);
 }
 
-static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts)
-{
-	spin_lock(&active_ts_lock);
-	list_del(&ts->ts_list);
-	iscsit_global->active_ts--;
-	spin_unlock(&active_ts_lock);
-}
-
 static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
 {
 	struct iscsi_thread_set *ts;
@@ -66,7 +52,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
 
 	ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
 
-	list_del(&ts->ts_list);
+	list_del_init(&ts->ts_list);
 	iscsit_global->inactive_ts--;
 	spin_unlock(&inactive_ts_lock);
 
@@ -204,8 +190,6 @@ static void iscsi_deallocate_extra_thread_sets(void)
 
 void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
 {
-	iscsi_add_ts_to_active_list(ts);
-
 	spin_lock_bh(&ts->ts_state_lock);
 	conn->thread_set = ts;
 	ts->conn = conn;
@@ -397,7 +381,6 @@ struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
 
 	if (ts->delay_inactive && (--ts->thread_count == 0)) {
 		spin_unlock_bh(&ts->ts_state_lock);
-		iscsi_del_ts_from_active_list(ts);
 
 		if (!iscsit_global->in_shutdown)
 			iscsi_deallocate_extra_thread_sets();
@@ -452,7 +435,6 @@ struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
 
 	if (ts->delay_inactive && (--ts->thread_count == 0)) {
 		spin_unlock_bh(&ts->ts_state_lock);
-		iscsi_del_ts_from_active_list(ts);
 
 		if (!iscsit_global->in_shutdown)
 			iscsi_deallocate_extra_thread_sets();
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index bcd88ec99793..390df8ed72b2 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -25,7 +25,7 @@
 #include <target/target_core_configfs.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_datain_values.h"
@@ -390,6 +390,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt(
 			init_task_tag, conn->cid);
 	return NULL;
 }
+EXPORT_SYMBOL(iscsit_find_cmd_from_itt);
 
 struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
 	struct iscsi_conn *conn,
@@ -939,13 +940,8 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
 	state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
 				ISTATE_SEND_NOPIN_NO_RESPONSE;
 	cmd->init_task_tag = RESERVED_ITT;
-	spin_lock_bh(&conn->sess->ttt_lock);
-	cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
-			0xFFFFFFFF;
-	if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF))
-		cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-	spin_unlock_bh(&conn->sess->ttt_lock);
+	cmd->targ_xfer_tag = (want_response) ?
+		session_get_next_ttt(conn->sess) : 0xFFFFFFFF;
 
 	spin_lock_bh(&conn->cmd_lock);
 	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);
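The nopin hunk above swaps the open-coded, ttt_lock-protected tag bump for a call to session_get_next_ttt(). The helper itself is not part of this diff; a plausible shape, assuming it keeps the same bottom-half locking and skips the reserved 0xFFFFFFFF value (which the caller uses to mean "no response wanted"), would be:

/*
 * Assumed shape of the helper called above: allocate the next Target
 * Transfer Tag under ttt_lock, skipping 0xFFFFFFFF, which is reserved
 * for NopIns that do not want a response.
 */
static inline u32 session_get_next_ttt(struct iscsi_session *session)
{
	u32 ttt;

	spin_lock_bh(&session->ttt_lock);
	ttt = session->targ_xfer_tag++;
	if (ttt == 0xFFFFFFFF)
		ttt = session->targ_xfer_tag++;
	spin_unlock_bh(&session->ttt_lock);

	return ttt;
}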
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index a68508c4fec8..1ab754a671ff 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -16,7 +16,6 @@ extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
 extern int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			unsigned char * ,__be32 cmdsn);
 extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
-extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
 extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
			itt_t, u32);
 extern struct iscsi_cmd *iscsit_find_cmd_from_ttt(struct iscsi_conn *, u32);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index d836de200a03..44620fb6bd45 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -494,6 +494,11 @@ fd_execute_write_same(struct se_cmd *cmd)
 		target_complete_cmd(cmd, SAM_STAT_GOOD);
 		return 0;
 	}
+	if (cmd->prot_op) {
+		pr_err("WRITE_SAME: Protection information with FILEIO"
+		       " backends not supported\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
 	sg = &cmd->t_data_sg[0];
 
 	if (cmd->t_data_nents > 1 ||
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 78346b850968..d4a4b0fb444a 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -464,6 +464,11 @@ iblock_execute_write_same(struct se_cmd *cmd)
 	sector_t block_lba = cmd->t_task_lba;
 	sector_t sectors = sbc_get_write_same_sectors(cmd);
 
+	if (cmd->prot_op) {
+		pr_err("WRITE_SAME: Protection information with IBLOCK"
+		       " backends not supported\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
 	sg = &cmd->t_data_sg[0];
 
 	if (cmd->t_data_nents > 1 ||
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 283cf786ef98..2de6fb8cee8d 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1874,8 +1874,8 @@ static int core_scsi3_update_aptpl_buf(
 	}
 
 	if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
-		pr_err("Unable to update renaming"
-			" APTPL metadata\n");
+		pr_err("Unable to update renaming APTPL metadata,"
+		       " reallocating larger buffer\n");
 		ret = -EMSGSIZE;
 		goto out;
 	}
@@ -1892,8 +1892,8 @@ static int core_scsi3_update_aptpl_buf(
 		lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
 
 	if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
-		pr_err("Unable to update renaming"
-			" APTPL metadata\n");
+		pr_err("Unable to update renaming APTPL metadata,"
+		       " reallocating larger buffer\n");
 		ret = -EMSGSIZE;
 		goto out;
 	}
@@ -1956,7 +1956,7 @@ static int __core_scsi3_write_aptpl_to_file(
 static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl)
 {
 	unsigned char *buf;
-	int rc;
+	int rc, len = PR_APTPL_BUF_LEN;
 
 	if (!aptpl) {
 		char *null_buf = "No Registrations or Reservations\n";
@@ -1970,25 +1970,26 @@ static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, b
 
 		return 0;
 	}
-
-	buf = kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL);
+retry:
+	buf = vzalloc(len);
 	if (!buf)
 		return TCM_OUT_OF_RESOURCES;
 
-	rc = core_scsi3_update_aptpl_buf(dev, buf, PR_APTPL_BUF_LEN);
+	rc = core_scsi3_update_aptpl_buf(dev, buf, len);
 	if (rc < 0) {
-		kfree(buf);
-		return TCM_OUT_OF_RESOURCES;
+		vfree(buf);
+		len *= 2;
+		goto retry;
 	}
 
 	rc = __core_scsi3_write_aptpl_to_file(dev, buf);
 	if (rc != 0) {
 		pr_err("SPC-3 PR: Could not update APTPL\n");
-		kfree(buf);
+		vfree(buf);
 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 	}
 	dev->t10_pr.pr_aptpl_active = 1;
-	kfree(buf);
+	vfree(buf);
 	pr_debug("SPC-3 PR: Set APTPL Bit Activated\n");
 	return 0;
 }
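The last hunk above turns a single fixed-size kzalloc() into a vzalloc() loop that doubles the buffer and retries whenever core_scsi3_update_aptpl_buf() fails with -EMSGSIZE. The same grow-and-retry shape, reduced to a self-contained userspace sketch (serialize_fn and its payload are illustrative stand-ins, not code from this tree):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical fill routine: fails with -1 while the buffer is too small. */
static int serialize_fn(char *buf, size_t len)
{
	const char payload[] = "initiator=iqn.1994-05.com.example:abc123\n";

	if (sizeof(payload) > len)
		return -1;		/* caller should grow and retry */
	memcpy(buf, payload, sizeof(payload));
	return 0;
}

int main(void)
{
	size_t len = 8;			/* deliberately too small at first */
	char *buf;

retry:
	buf = calloc(1, len);
	if (!buf)
		return 1;

	if (serialize_fn(buf, len) < 0) {
		free(buf);		/* same shape as the vfree() + retry above */
		len *= 2;
		goto retry;
	}

	printf("fit in %zu bytes: %s", len, buf);
	free(buf);
	return 0;
}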
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index cd4bed7b2757..9a2f9d3a6e70 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -37,6 +37,9 @@
 #include "target_core_alua.h"
 
 static sense_reason_t
+sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
+
+static sense_reason_t
 sbc_emulate_readcapacity(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
@@ -251,7 +254,10 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
 static sense_reason_t
 sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
 {
+	struct se_device *dev = cmd->se_dev;
+	sector_t end_lba = dev->transport->get_blocks(dev) + 1;
 	unsigned int sectors = sbc_get_write_same_sectors(cmd);
+	sense_reason_t ret;
 
 	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
 		pr_err("WRITE_SAME PBDATA and LBDATA"
@@ -264,6 +270,16 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
 			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
 		return TCM_INVALID_CDB_FIELD;
 	}
+	/*
+	 * Sanity check for LBA wrap and request past end of device.
+	 */
+	if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
+	    ((cmd->t_task_lba + sectors) > end_lba)) {
+		pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
+		       (unsigned long long)end_lba, cmd->t_task_lba, sectors);
+		return TCM_ADDRESS_OUT_OF_RANGE;
+	}
+
 	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
 	if (flags[0] & 0x10) {
 		pr_warn("WRITE SAME with ANCHOR not supported\n");
@@ -277,12 +293,21 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
 		if (!ops->execute_write_same_unmap)
 			return TCM_UNSUPPORTED_SCSI_OPCODE;
 
+		if (!dev->dev_attrib.emulate_tpws) {
+			pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device"
+			       " has emulate_tpws disabled\n");
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		}
 		cmd->execute_cmd = ops->execute_write_same_unmap;
 		return 0;
 	}
 	if (!ops->execute_write_same)
 		return TCM_UNSUPPORTED_SCSI_OPCODE;
 
+	ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
+	if (ret)
+		return ret;
+
 	cmd->execute_cmd = ops->execute_write_same;
 	return 0;
 }
@@ -614,14 +639,21 @@ sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
 	return 0;
 }
 
-static bool
+static sense_reason_t
 sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
 	       u32 sectors, bool is_write)
 {
 	u8 protect = cdb[1] >> 5;
 
-	if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
-		return true;
+	if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
+		if (protect && !dev->dev_attrib.pi_prot_type) {
+			pr_err("CDB contains protect bit, but device does not"
+			       " advertise PROTECT=1 feature bit\n");
+			return TCM_INVALID_CDB_FIELD;
+		}
+		if (cmd->prot_pto)
+			return TCM_NO_SENSE;
+	}
 
 	switch (dev->dev_attrib.pi_prot_type) {
 	case TARGET_DIF_TYPE3_PROT:
@@ -629,7 +661,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
 		break;
 	case TARGET_DIF_TYPE2_PROT:
 		if (protect)
-			return false;
+			return TCM_INVALID_CDB_FIELD;
 
 		cmd->reftag_seed = cmd->t_task_lba;
 		break;
@@ -638,12 +670,12 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
 		break;
 	case TARGET_DIF_TYPE0_PROT:
 	default:
-		return true;
+		return TCM_NO_SENSE;
 	}
 
 	if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
 				   is_write, cmd))
-		return false;
+		return TCM_INVALID_CDB_FIELD;
 
 	cmd->prot_type = dev->dev_attrib.pi_prot_type;
 	cmd->prot_length = dev->prot_length * sectors;
@@ -662,7 +694,30 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
 		 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
 		 cmd->prot_op, cmd->prot_checks);
 
-	return true;
+	return TCM_NO_SENSE;
+}
+
+static int
+sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
+{
+	if (cdb[1] & 0x10) {
+		if (!dev->dev_attrib.emulate_dpo) {
+			pr_err("Got CDB: 0x%02x with DPO bit set, but device"
+			       " does not advertise support for DPO\n", cdb[0]);
+			return -EINVAL;
+		}
+	}
+	if (cdb[1] & 0x8) {
+		if (!dev->dev_attrib.emulate_fua_write ||
+		    !dev->dev_attrib.emulate_write_cache) {
+			pr_err("Got CDB: 0x%02x with FUA bit set, but device"
+			       " does not advertise support for FUA write\n",
+			       cdb[0]);
+			return -EINVAL;
+		}
+		cmd->se_cmd_flags |= SCF_FUA;
+	}
+	return 0;
 }
 
 sense_reason_t
@@ -686,8 +741,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		sectors = transport_get_sectors_10(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
-			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
+
+		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+		if (ret)
+			return ret;
 
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 		cmd->execute_rw = ops->execute_rw;
@@ -697,8 +756,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		sectors = transport_get_sectors_12(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
-			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
+
+		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+		if (ret)
+			return ret;
 
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 		cmd->execute_rw = ops->execute_rw;
@@ -708,8 +771,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		sectors = transport_get_sectors_16(cdb);
 		cmd->t_task_lba = transport_lba_64(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
-			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
+
+		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+		if (ret)
+			return ret;
 
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 		cmd->execute_rw = ops->execute_rw;
@@ -727,11 +794,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		sectors = transport_get_sectors_10(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
-			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
+
+		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+		if (ret)
+			return ret;
 
-		if (cdb[1] & 0x8)
-			cmd->se_cmd_flags |= SCF_FUA;
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 		cmd->execute_rw = ops->execute_rw;
 		cmd->execute_cmd = sbc_execute_rw;
@@ -740,11 +809,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		sectors = transport_get_sectors_12(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
-			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
+
+		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+		if (ret)
+			return ret;
 
-		if (cdb[1] & 0x8)
-			cmd->se_cmd_flags |= SCF_FUA;
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 		cmd->execute_rw = ops->execute_rw;
 		cmd->execute_cmd = sbc_execute_rw;
@@ -753,11 +824,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		sectors = transport_get_sectors_16(cdb);
 		cmd->t_task_lba = transport_lba_64(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
-			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
+
+		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+		if (ret)
+			return ret;
 
-		if (cdb[1] & 0x8)
-			cmd->se_cmd_flags |= SCF_FUA;
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 		cmd->execute_rw = ops->execute_rw;
 		cmd->execute_cmd = sbc_execute_rw;
@@ -768,6 +841,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 			return TCM_INVALID_CDB_FIELD;
 		sectors = transport_get_sectors_10(cdb);
 
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
+
 		cmd->t_task_lba = transport_lba_32(cdb);
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 
@@ -777,8 +853,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		cmd->execute_rw = ops->execute_rw;
 		cmd->execute_cmd = sbc_execute_rw;
 		cmd->transport_complete_callback = &xdreadwrite_callback;
-		if (cdb[1] & 0x8)
-			cmd->se_cmd_flags |= SCF_FUA;
 		break;
 	case VARIABLE_LENGTH_CMD:
 	{
@@ -787,6 +861,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		case XDWRITEREAD_32:
 			sectors = transport_get_sectors_32(cdb);
 
+			if (sbc_check_dpofua(dev, cmd, cdb))
+				return TCM_INVALID_CDB_FIELD;
 			/*
 			 * Use WRITE_32 and READ_32 opcodes for the emulated
 			 * XDWRITE_READ_32 logic.
@@ -801,8 +877,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 			cmd->execute_rw = ops->execute_rw;
 			cmd->execute_cmd = sbc_execute_rw;
 			cmd->transport_complete_callback = &xdreadwrite_callback;
-			if (cdb[1] & 0x8)
-				cmd->se_cmd_flags |= SCF_FUA;
 			break;
 		case WRITE_SAME_32:
 			sectors = transport_get_sectors_32(cdb);
@@ -888,6 +962,11 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		if (!ops->execute_unmap)
 			return TCM_UNSUPPORTED_SCSI_OPCODE;
 
+		if (!dev->dev_attrib.emulate_tpu) {
+			pr_err("Got UNMAP, but backend device has"
+			       " emulate_tpu disabled\n");
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		}
 		size = get_unaligned_be16(&cdb[7]);
 		cmd->execute_cmd = ops->execute_unmap;
 		break;
@@ -955,7 +1034,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		unsigned long long end_lba;
 check_lba:
 		end_lba = dev->transport->get_blocks(dev) + 1;
-		if (cmd->t_task_lba + sectors > end_lba) {
+		if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
+		    ((cmd->t_task_lba + sectors) > end_lba)) {
 			pr_err("cmd exceeds last lba %llu "
 				"(lba %llu, sectors %u)\n",
 				end_lba, cmd->t_task_lba, sectors);
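The check_lba hunk above pairs the existing end-of-device comparison with (t_task_lba + sectors) < t_task_lba: on an unsigned 64-bit LBA the sum can wrap past zero, land below end_lba, and sneak through the old test. A self-contained illustration of why the extra comparison is needed:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the patched check: reject both wraparound and out-of-range. */
static int lba_in_range(uint64_t lba, uint32_t sectors, uint64_t end_lba)
{
	if (lba + sectors < lba)	/* unsigned overflow wrapped */
		return 0;
	if (lba + sectors > end_lba)
		return 0;
	return 1;
}

int main(void)
{
	uint64_t end_lba = 1000;

	/* UINT64_MAX + 16 wraps to 15; the old single test would pass it. */
	printf("%d\n", lba_in_range(UINT64_MAX, 16, end_lba));	/* prints 0 */
	printf("%d\n", lba_in_range(900, 50, end_lba));		/* prints 1 */
	printf("%d\n", lba_in_range(990, 50, end_lba));		/* prints 0 */
	return 0;
}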
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 4c71657da56a..460e93109473 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -647,7 +647,7 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
 	 * support the use of the WRITE SAME (16) command to unmap LBAs.
 	 */
 	if (dev->dev_attrib.emulate_tpws != 0)
-		buf[5] |= 0x40;
+		buf[5] |= 0x40 | 0x20;
 
 	return 0;
 }
diff --git a/drivers/thermal/int340x_thermal/Makefile b/drivers/thermal/int340x_thermal/Makefile
index d4413698a85f..ba77a34f659f 100644
--- a/drivers/thermal/int340x_thermal/Makefile
+++ b/drivers/thermal/int340x_thermal/Makefile
@@ -1,4 +1,5 @@
 obj-$(CONFIG_INT340X_THERMAL) += int3400_thermal.o
+obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal_zone.o
 obj-$(CONFIG_INT340X_THERMAL) += int3402_thermal.o
 obj-$(CONFIG_INT340X_THERMAL) += int3403_thermal.o
 obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_device.o
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
index 65a98a97df07..031018e7a65b 100644
--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
@@ -18,19 +18,15 @@
18 18
19enum int3400_thermal_uuid { 19enum int3400_thermal_uuid {
20 INT3400_THERMAL_PASSIVE_1, 20 INT3400_THERMAL_PASSIVE_1,
21 INT3400_THERMAL_PASSIVE_2,
22 INT3400_THERMAL_ACTIVE, 21 INT3400_THERMAL_ACTIVE,
23 INT3400_THERMAL_CRITICAL, 22 INT3400_THERMAL_CRITICAL,
24 INT3400_THERMAL_COOLING_MODE,
25 INT3400_THERMAL_MAXIMUM_UUID, 23 INT3400_THERMAL_MAXIMUM_UUID,
26}; 24};
27 25
28static u8 *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = { 26static u8 *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
29 "42A441D6-AE6A-462b-A84B-4A8CE79027D3", 27 "42A441D6-AE6A-462b-A84B-4A8CE79027D3",
30 "9E04115A-AE87-4D1C-9500-0F3E340BFE75",
31 "3A95C389-E4B8-4629-A526-C52C88626BAE", 28 "3A95C389-E4B8-4629-A526-C52C88626BAE",
32 "97C68AE7-15FA-499c-B8C9-5DA81D606E0A", 29 "97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
33 "16CAF1B7-DD38-40ed-B1C1-1B8A1913D531",
34}; 30};
35 31
36struct int3400_thermal_priv { 32struct int3400_thermal_priv {
@@ -266,13 +262,12 @@ static int int3400_thermal_probe(struct platform_device *pdev)
266 result = acpi_parse_art(priv->adev->handle, &priv->art_count, 262 result = acpi_parse_art(priv->adev->handle, &priv->art_count,
267 &priv->arts, true); 263 &priv->arts, true);
268 if (result) 264 if (result)
269 goto free_priv; 265 dev_dbg(&pdev->dev, "_ART table parsing error\n");
270
271 266
272 result = acpi_parse_trt(priv->adev->handle, &priv->trt_count, 267 result = acpi_parse_trt(priv->adev->handle, &priv->trt_count,
273 &priv->trts, true); 268 &priv->trts, true);
274 if (result) 269 if (result)
275 goto free_art; 270 dev_dbg(&pdev->dev, "_TRT table parsing error\n");
276 271
277 platform_set_drvdata(pdev, priv); 272 platform_set_drvdata(pdev, priv);
278 273
@@ -285,7 +280,7 @@ static int int3400_thermal_probe(struct platform_device *pdev)
285 &int3400_thermal_params, 0, 0); 280 &int3400_thermal_params, 0, 0);
286 if (IS_ERR(priv->thermal)) { 281 if (IS_ERR(priv->thermal)) {
287 result = PTR_ERR(priv->thermal); 282 result = PTR_ERR(priv->thermal);
288 goto free_trt; 283 goto free_art_trt;
289 } 284 }
290 285
291 priv->rel_misc_dev_res = acpi_thermal_rel_misc_device_add( 286 priv->rel_misc_dev_res = acpi_thermal_rel_misc_device_add(
@@ -299,9 +294,8 @@ static int int3400_thermal_probe(struct platform_device *pdev)
299 294
300free_zone: 295free_zone:
301 thermal_zone_device_unregister(priv->thermal); 296 thermal_zone_device_unregister(priv->thermal);
302free_trt: 297free_art_trt:
303 kfree(priv->trts); 298 kfree(priv->trts);
304free_art:
305 kfree(priv->arts); 299 kfree(priv->arts);
306free_priv: 300free_priv:
307 kfree(priv); 301 kfree(priv);
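Note: the probe rework above downgrades _ART/_TRT parse failures to dev_dbg() and folds the old free_trt/free_art labels into a single free_art_trt label. That is safe because kfree(NULL) is a no-op, so one unwind path can release both tables no matter which parse succeeded. A userspace sketch of the same pattern (free(NULL) carries the same guarantee):

#include <stdio.h>
#include <stdlib.h>

struct priv { int *arts; int *trts; };

static int probe(void)
{
        struct priv *p = calloc(1, sizeof(*p));

        if (!p)
                return -1;

        p->arts = malloc(16);   /* may be NULL: parsing is now non-fatal */
        p->trts = malloc(16);

        if (!p->trts)           /* stand-in for a later fatal failure */
                goto free_art_trt;

        printf("probe ok\n");
        free(p->trts);
        free(p->arts);
        free(p);
        return 0;

free_art_trt:
        free(p->trts);          /* no-op when the pointer is NULL */
        free(p->arts);
        free(p);
        return -1;
}

int main(void)
{
        return probe() ? 1 : 0;
}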
diff --git a/drivers/thermal/int340x_thermal/int3402_thermal.c b/drivers/thermal/int340x_thermal/int3402_thermal.c
index c5cbc3af3a05..69df3d960303 100644
--- a/drivers/thermal/int340x_thermal/int3402_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3402_thermal.c
@@ -14,152 +14,39 @@
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/acpi.h> 15#include <linux/acpi.h>
16#include <linux/thermal.h> 16#include <linux/thermal.h>
17#include "int340x_thermal_zone.h"
17 18
18#define ACPI_ACTIVE_COOLING_MAX_NR 10 19#define INT3402_PERF_CHANGED_EVENT 0x80
19 20#define INT3402_THERMAL_EVENT 0x90
20struct active_trip {
21 unsigned long temp;
22 int id;
23 bool valid;
24};
25 21
26struct int3402_thermal_data { 22struct int3402_thermal_data {
27 unsigned long *aux_trips;
28 int aux_trip_nr;
29 unsigned long psv_temp;
30 int psv_trip_id;
31 unsigned long crt_temp;
32 int crt_trip_id;
33 unsigned long hot_temp;
34 int hot_trip_id;
35 struct active_trip act_trips[ACPI_ACTIVE_COOLING_MAX_NR];
36 acpi_handle *handle; 23 acpi_handle *handle;
24 struct int34x_thermal_zone *int340x_zone;
37}; 25};
38 26
39static int int3402_thermal_get_zone_temp(struct thermal_zone_device *zone, 27static void int3402_notify(acpi_handle handle, u32 event, void *data)
40 unsigned long *temp)
41{
42 struct int3402_thermal_data *d = zone->devdata;
43 unsigned long long tmp;
44 acpi_status status;
45
46 status = acpi_evaluate_integer(d->handle, "_TMP", NULL, &tmp);
47 if (ACPI_FAILURE(status))
48 return -ENODEV;
49
50 /* _TMP returns the temperature in tenths of degrees Kelvin */
51 *temp = DECI_KELVIN_TO_MILLICELSIUS(tmp);
52
53 return 0;
54}
55
56static int int3402_thermal_get_trip_temp(struct thermal_zone_device *zone,
57 int trip, unsigned long *temp)
58{ 28{
59 struct int3402_thermal_data *d = zone->devdata; 29 struct int3402_thermal_data *priv = data;
60 int i; 30
61 31 if (!priv)
62 if (trip < d->aux_trip_nr) 32 return;
63 *temp = d->aux_trips[trip]; 33
64 else if (trip == d->crt_trip_id) 34 switch (event) {
65 *temp = d->crt_temp; 35 case INT3402_PERF_CHANGED_EVENT:
66 else if (trip == d->psv_trip_id) 36 break;
67 *temp = d->psv_temp; 37 case INT3402_THERMAL_EVENT:
68 else if (trip == d->hot_trip_id) 38 int340x_thermal_zone_device_update(priv->int340x_zone);
69 *temp = d->hot_temp; 39 break;
70 else { 40 default:
71 for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) { 41 break;
72 if (d->act_trips[i].valid &&
73 d->act_trips[i].id == trip) {
74 *temp = d->act_trips[i].temp;
75 break;
76 }
77 }
78 if (i == ACPI_ACTIVE_COOLING_MAX_NR)
79 return -EINVAL;
80 } 42 }
81 return 0;
82}
83
84static int int3402_thermal_get_trip_type(struct thermal_zone_device *zone,
85 int trip, enum thermal_trip_type *type)
86{
87 struct int3402_thermal_data *d = zone->devdata;
88 int i;
89
90 if (trip < d->aux_trip_nr)
91 *type = THERMAL_TRIP_PASSIVE;
92 else if (trip == d->crt_trip_id)
93 *type = THERMAL_TRIP_CRITICAL;
94 else if (trip == d->hot_trip_id)
95 *type = THERMAL_TRIP_HOT;
96 else if (trip == d->psv_trip_id)
97 *type = THERMAL_TRIP_PASSIVE;
98 else {
99 for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) {
100 if (d->act_trips[i].valid &&
101 d->act_trips[i].id == trip) {
102 *type = THERMAL_TRIP_ACTIVE;
103 break;
104 }
105 }
106 if (i == ACPI_ACTIVE_COOLING_MAX_NR)
107 return -EINVAL;
108 }
109 return 0;
110}
111
112static int int3402_thermal_set_trip_temp(struct thermal_zone_device *zone, int trip,
113 unsigned long temp)
114{
115 struct int3402_thermal_data *d = zone->devdata;
116 acpi_status status;
117 char name[10];
118
119 snprintf(name, sizeof(name), "PAT%d", trip);
120 status = acpi_execute_simple_method(d->handle, name,
121 MILLICELSIUS_TO_DECI_KELVIN(temp));
122 if (ACPI_FAILURE(status))
123 return -EIO;
124
125 d->aux_trips[trip] = temp;
126 return 0;
127}
128
129static struct thermal_zone_device_ops int3402_thermal_zone_ops = {
130 .get_temp = int3402_thermal_get_zone_temp,
131 .get_trip_temp = int3402_thermal_get_trip_temp,
132 .get_trip_type = int3402_thermal_get_trip_type,
133 .set_trip_temp = int3402_thermal_set_trip_temp,
134};
135
136static struct thermal_zone_params int3402_thermal_params = {
137 .governor_name = "user_space",
138 .no_hwmon = true,
139};
140
141static int int3402_thermal_get_temp(acpi_handle handle, char *name,
142 unsigned long *temp)
143{
144 unsigned long long r;
145 acpi_status status;
146
147 status = acpi_evaluate_integer(handle, name, NULL, &r);
148 if (ACPI_FAILURE(status))
149 return -EIO;
150
151 *temp = DECI_KELVIN_TO_MILLICELSIUS(r);
152 return 0;
153} 43}
154 44
155static int int3402_thermal_probe(struct platform_device *pdev) 45static int int3402_thermal_probe(struct platform_device *pdev)
156{ 46{
157 struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); 47 struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
158 struct int3402_thermal_data *d; 48 struct int3402_thermal_data *d;
159 struct thermal_zone_device *zone; 49 int ret;
160 acpi_status status;
161 unsigned long long trip_cnt;
162 int trip_mask = 0, i;
163 50
164 if (!acpi_has_method(adev->handle, "_TMP")) 51 if (!acpi_has_method(adev->handle, "_TMP"))
165 return -ENODEV; 52 return -ENODEV;
@@ -168,54 +55,33 @@ static int int3402_thermal_probe(struct platform_device *pdev)
168 if (!d) 55 if (!d)
169 return -ENOMEM; 56 return -ENOMEM;
170 57
171 status = acpi_evaluate_integer(adev->handle, "PATC", NULL, &trip_cnt); 58 d->int340x_zone = int340x_thermal_zone_add(adev, NULL);
172 if (ACPI_FAILURE(status)) 59 if (IS_ERR(d->int340x_zone))
173 trip_cnt = 0; 60 return PTR_ERR(d->int340x_zone);
174 else { 61
175 d->aux_trips = devm_kzalloc(&pdev->dev, 62 ret = acpi_install_notify_handler(adev->handle,
176 sizeof(*d->aux_trips) * trip_cnt, GFP_KERNEL); 63 ACPI_DEVICE_NOTIFY,
177 if (!d->aux_trips) 64 int3402_notify,
178 return -ENOMEM; 65 d);
179 trip_mask = trip_cnt - 1; 66 if (ret) {
180 d->handle = adev->handle; 67 int340x_thermal_zone_remove(d->int340x_zone);
181 d->aux_trip_nr = trip_cnt; 68 return ret;
182 }
183
184 d->crt_trip_id = -1;
185 if (!int3402_thermal_get_temp(adev->handle, "_CRT", &d->crt_temp))
186 d->crt_trip_id = trip_cnt++;
187 d->hot_trip_id = -1;
188 if (!int3402_thermal_get_temp(adev->handle, "_HOT", &d->hot_temp))
189 d->hot_trip_id = trip_cnt++;
190 d->psv_trip_id = -1;
191 if (!int3402_thermal_get_temp(adev->handle, "_PSV", &d->psv_temp))
192 d->psv_trip_id = trip_cnt++;
193 for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) {
194 char name[5] = { '_', 'A', 'C', '0' + i, '\0' };
195 if (int3402_thermal_get_temp(adev->handle, name,
196 &d->act_trips[i].temp))
197 break;
198 d->act_trips[i].id = trip_cnt++;
199 d->act_trips[i].valid = true;
200 } 69 }
201 70
202 zone = thermal_zone_device_register(acpi_device_bid(adev), trip_cnt, 71 d->handle = adev->handle;
203 trip_mask, d, 72 platform_set_drvdata(pdev, d);
204 &int3402_thermal_zone_ops,
205 &int3402_thermal_params,
206 0, 0);
207 if (IS_ERR(zone))
208 return PTR_ERR(zone);
209 platform_set_drvdata(pdev, zone);
210 73
211 return 0; 74 return 0;
212} 75}
213 76
214static int int3402_thermal_remove(struct platform_device *pdev) 77static int int3402_thermal_remove(struct platform_device *pdev)
215{ 78{
216 struct thermal_zone_device *zone = platform_get_drvdata(pdev); 79 struct int3402_thermal_data *d = platform_get_drvdata(pdev);
80
81 acpi_remove_notify_handler(d->handle,
82 ACPI_DEVICE_NOTIFY, int3402_notify);
83 int340x_thermal_zone_remove(d->int340x_zone);
217 84
218 thermal_zone_device_unregister(zone);
219 return 0; 85 return 0;
220} 86}
221 87
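Note: after this rewrite the int3402 driver owns only the event plumbing; trip and temperature handling live in the shared int340x zone code. Setup installs the ACPI notify handler after the zone exists, and teardown removes it before the zone goes away, so a callback can never observe a dead zone. A standalone sketch of that dispatch shape (the event codes are copied from the driver; everything else is a stand-in):

#include <stdio.h>

#define PERF_CHANGED_EVENT 0x80
#define THERMAL_EVENT      0x90

struct zone { const char *name; };

static void zone_update(struct zone *z)
{
        printf("update %s\n", z->name);
}

/* Mirrors int3402_notify(): ignore performance events, refresh the zone on
 * thermal events, drop everything else. */
static void notify(unsigned int event, void *data)
{
        struct zone *z = data;

        switch (event) {
        case PERF_CHANGED_EVENT:
                break;
        case THERMAL_EVENT:
                zone_update(z);
                break;
        default:
                break;
        }
}

int main(void)
{
        struct zone z = { "INT3402" };

        notify(THERMAL_EVENT, &z);      /* triggers a zone update */
        notify(PERF_CHANGED_EVENT, &z); /* intentionally ignored */
        return 0;
}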
diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c
index 0faf500d8a77..50a7a08e3a15 100644
--- a/drivers/thermal/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3403_thermal.c
@@ -19,6 +19,7 @@
19#include <linux/acpi.h> 19#include <linux/acpi.h>
20#include <linux/thermal.h> 20#include <linux/thermal.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include "int340x_thermal_zone.h"
22 23
23#define INT3403_TYPE_SENSOR 0x03 24#define INT3403_TYPE_SENSOR 0x03
24#define INT3403_TYPE_CHARGER 0x0B 25#define INT3403_TYPE_CHARGER 0x0B
@@ -26,18 +27,9 @@
26#define INT3403_PERF_CHANGED_EVENT 0x80 27#define INT3403_PERF_CHANGED_EVENT 0x80
27#define INT3403_THERMAL_EVENT 0x90 28#define INT3403_THERMAL_EVENT 0x90
28 29
29#define DECI_KELVIN_TO_MILLI_CELSIUS(t, off) (((t) - (off)) * 100) 30/* Preserved structure for future expandability */
30#define KELVIN_OFFSET 2732
31#define MILLI_CELSIUS_TO_DECI_KELVIN(t, off) (((t) / 100) + (off))
32
33struct int3403_sensor { 31struct int3403_sensor {
34 struct thermal_zone_device *tzone; 32 struct int34x_thermal_zone *int340x_zone;
35 unsigned long *thresholds;
36 unsigned long crit_temp;
37 int crit_trip_id;
38 unsigned long psv_temp;
39 int psv_trip_id;
40
41}; 33};
42 34
43struct int3403_performance_state { 35struct int3403_performance_state {
@@ -63,126 +55,6 @@ struct int3403_priv {
63 void *priv; 55 void *priv;
64}; 56};
65 57
66static int sys_get_curr_temp(struct thermal_zone_device *tzone,
67 unsigned long *temp)
68{
69 struct int3403_priv *priv = tzone->devdata;
70 struct acpi_device *device = priv->adev;
71 unsigned long long tmp;
72 acpi_status status;
73
74 status = acpi_evaluate_integer(device->handle, "_TMP", NULL, &tmp);
75 if (ACPI_FAILURE(status))
76 return -EIO;
77
78 *temp = DECI_KELVIN_TO_MILLI_CELSIUS(tmp, KELVIN_OFFSET);
79
80 return 0;
81}
82
83static int sys_get_trip_hyst(struct thermal_zone_device *tzone,
84 int trip, unsigned long *temp)
85{
86 struct int3403_priv *priv = tzone->devdata;
87 struct acpi_device *device = priv->adev;
88 unsigned long long hyst;
89 acpi_status status;
90
91 status = acpi_evaluate_integer(device->handle, "GTSH", NULL, &hyst);
92 if (ACPI_FAILURE(status))
93 return -EIO;
94
95 /*
96 * Thermal hysteresis represents a temperature difference.
97 * Kelvin and Celsius have same degree size. So the
98 * conversion here between tenths of degree Kelvin unit
99 * and Milli-Celsius unit is just to multiply 100.
100 */
101 *temp = hyst * 100;
102
103 return 0;
104}
105
106static int sys_get_trip_temp(struct thermal_zone_device *tzone,
107 int trip, unsigned long *temp)
108{
109 struct int3403_priv *priv = tzone->devdata;
110 struct int3403_sensor *obj = priv->priv;
111
112 if (priv->type != INT3403_TYPE_SENSOR || !obj)
113 return -EINVAL;
114
115 if (trip == obj->crit_trip_id)
116 *temp = obj->crit_temp;
117 else if (trip == obj->psv_trip_id)
118 *temp = obj->psv_temp;
119 else {
120 /*
121 * get_trip_temp is a mandatory callback but
122 * PATx method doesn't return any value, so return
123 * cached value, which was last set from user space
124 */
125 *temp = obj->thresholds[trip];
126 }
127
128 return 0;
129}
130
131static int sys_get_trip_type(struct thermal_zone_device *thermal,
132 int trip, enum thermal_trip_type *type)
133{
134 struct int3403_priv *priv = thermal->devdata;
135 struct int3403_sensor *obj = priv->priv;
136
137 /* Mandatory callback, may not mean much here */
138 if (trip == obj->crit_trip_id)
139 *type = THERMAL_TRIP_CRITICAL;
140 else
141 *type = THERMAL_TRIP_PASSIVE;
142
143 return 0;
144}
145
146int sys_set_trip_temp(struct thermal_zone_device *tzone, int trip,
147 unsigned long temp)
148{
149 struct int3403_priv *priv = tzone->devdata;
150 struct acpi_device *device = priv->adev;
151 struct int3403_sensor *obj = priv->priv;
152 acpi_status status;
153 char name[10];
154 int ret = 0;
155
156 snprintf(name, sizeof(name), "PAT%d", trip);
157 if (acpi_has_method(device->handle, name)) {
158 status = acpi_execute_simple_method(device->handle, name,
159 MILLI_CELSIUS_TO_DECI_KELVIN(temp,
160 KELVIN_OFFSET));
161 if (ACPI_FAILURE(status))
162 ret = -EIO;
163 else
164 obj->thresholds[trip] = temp;
165 } else {
166 ret = -EIO;
167 dev_err(&device->dev, "sys_set_trip_temp: method not found\n");
168 }
169
170 return ret;
171}
172
173static struct thermal_zone_device_ops tzone_ops = {
174 .get_temp = sys_get_curr_temp,
175 .get_trip_temp = sys_get_trip_temp,
176 .get_trip_type = sys_get_trip_type,
177 .set_trip_temp = sys_set_trip_temp,
178 .get_trip_hyst = sys_get_trip_hyst,
179};
180
181static struct thermal_zone_params int3403_thermal_params = {
182 .governor_name = "user_space",
183 .no_hwmon = true,
184};
185
186static void int3403_notify(acpi_handle handle, 58static void int3403_notify(acpi_handle handle,
187 u32 event, void *data) 59 u32 event, void *data)
188{ 60{
@@ -200,7 +72,7 @@ static void int3403_notify(acpi_handle handle,
200 case INT3403_PERF_CHANGED_EVENT: 72 case INT3403_PERF_CHANGED_EVENT:
201 break; 73 break;
202 case INT3403_THERMAL_EVENT: 74 case INT3403_THERMAL_EVENT:
203 thermal_zone_device_update(obj->tzone); 75 int340x_thermal_zone_device_update(obj->int340x_zone);
204 break; 76 break;
205 default: 77 default:
206 dev_err(&priv->pdev->dev, "Unsupported event [0x%x]\n", event); 78 dev_err(&priv->pdev->dev, "Unsupported event [0x%x]\n", event);
@@ -208,41 +80,10 @@ static void int3403_notify(acpi_handle handle,
208 } 80 }
209} 81}
210 82
211static int sys_get_trip_crt(struct acpi_device *device, unsigned long *temp)
212{
213 unsigned long long crt;
214 acpi_status status;
215
216 status = acpi_evaluate_integer(device->handle, "_CRT", NULL, &crt);
217 if (ACPI_FAILURE(status))
218 return -EIO;
219
220 *temp = DECI_KELVIN_TO_MILLI_CELSIUS(crt, KELVIN_OFFSET);
221
222 return 0;
223}
224
225static int sys_get_trip_psv(struct acpi_device *device, unsigned long *temp)
226{
227 unsigned long long psv;
228 acpi_status status;
229
230 status = acpi_evaluate_integer(device->handle, "_PSV", NULL, &psv);
231 if (ACPI_FAILURE(status))
232 return -EIO;
233
234 *temp = DECI_KELVIN_TO_MILLI_CELSIUS(psv, KELVIN_OFFSET);
235
236 return 0;
237}
238
239static int int3403_sensor_add(struct int3403_priv *priv) 83static int int3403_sensor_add(struct int3403_priv *priv)
240{ 84{
241 int result = 0; 85 int result = 0;
242 acpi_status status;
243 struct int3403_sensor *obj; 86 struct int3403_sensor *obj;
244 unsigned long long trip_cnt;
245 int trip_mask = 0;
246 87
247 obj = devm_kzalloc(&priv->pdev->dev, sizeof(*obj), GFP_KERNEL); 88 obj = devm_kzalloc(&priv->pdev->dev, sizeof(*obj), GFP_KERNEL);
248 if (!obj) 89 if (!obj)
@@ -250,39 +91,9 @@ static int int3403_sensor_add(struct int3403_priv *priv)
250 91
251 priv->priv = obj; 92 priv->priv = obj;
252 93
253 status = acpi_evaluate_integer(priv->adev->handle, "PATC", NULL, 94 obj->int340x_zone = int340x_thermal_zone_add(priv->adev, NULL);
254 &trip_cnt); 95 if (IS_ERR(obj->int340x_zone))
255 if (ACPI_FAILURE(status)) 96 return PTR_ERR(obj->int340x_zone);
256 trip_cnt = 0;
257
258 if (trip_cnt) {
259 /* We have to cache, thresholds can't be readback */
260 obj->thresholds = devm_kzalloc(&priv->pdev->dev,
261 sizeof(*obj->thresholds) * trip_cnt,
262 GFP_KERNEL);
263 if (!obj->thresholds) {
264 result = -ENOMEM;
265 goto err_free_obj;
266 }
267 trip_mask = BIT(trip_cnt) - 1;
268 }
269
270 obj->psv_trip_id = -1;
271 if (!sys_get_trip_psv(priv->adev, &obj->psv_temp))
272 obj->psv_trip_id = trip_cnt++;
273
274 obj->crit_trip_id = -1;
275 if (!sys_get_trip_crt(priv->adev, &obj->crit_temp))
276 obj->crit_trip_id = trip_cnt++;
277
278 obj->tzone = thermal_zone_device_register(acpi_device_bid(priv->adev),
279 trip_cnt, trip_mask, priv, &tzone_ops,
280 &int3403_thermal_params, 0, 0);
281 if (IS_ERR(obj->tzone)) {
282 result = PTR_ERR(obj->tzone);
283 obj->tzone = NULL;
284 goto err_free_obj;
285 }
286 97
287 result = acpi_install_notify_handler(priv->adev->handle, 98 result = acpi_install_notify_handler(priv->adev->handle,
288 ACPI_DEVICE_NOTIFY, int3403_notify, 99 ACPI_DEVICE_NOTIFY, int3403_notify,
@@ -293,7 +104,7 @@ static int int3403_sensor_add(struct int3403_priv *priv)
293 return 0; 104 return 0;
294 105
295 err_free_obj: 106 err_free_obj:
296 thermal_zone_device_unregister(obj->tzone); 107 int340x_thermal_zone_remove(obj->int340x_zone);
297 return result; 108 return result;
298} 109}
299 110
@@ -303,7 +114,8 @@ static int int3403_sensor_remove(struct int3403_priv *priv)
303 114
304 acpi_remove_notify_handler(priv->adev->handle, 115 acpi_remove_notify_handler(priv->adev->handle,
305 ACPI_DEVICE_NOTIFY, int3403_notify); 116 ACPI_DEVICE_NOTIFY, int3403_notify);
306 thermal_zone_device_unregister(obj->tzone); 117 int340x_thermal_zone_remove(obj->int340x_zone);
118
307 return 0; 119 return 0;
308} 120}
309 121
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
new file mode 100644
index 000000000000..f88b08877025
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
@@ -0,0 +1,276 @@
1/*
2 * int340x_thermal_zone.c
3 * Copyright (c) 2015, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 */
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/acpi.h>
19#include <linux/thermal.h>
20#include "int340x_thermal_zone.h"
21
22static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
23 unsigned long *temp)
24{
25 struct int34x_thermal_zone *d = zone->devdata;
26 unsigned long long tmp;
27 acpi_status status;
28
29 if (d->override_ops && d->override_ops->get_temp)
30 return d->override_ops->get_temp(zone, temp);
31
32 status = acpi_evaluate_integer(d->adev->handle, "_TMP", NULL, &tmp);
33 if (ACPI_FAILURE(status))
34 return -EIO;
35
36 if (d->lpat_table) {
37 int conv_temp;
38
39 conv_temp = acpi_lpat_raw_to_temp(d->lpat_table, (int)tmp);
40 if (conv_temp < 0)
41 return conv_temp;
42
43 *temp = (unsigned long)conv_temp * 10;
44 } else
45 /* _TMP returns the temperature in tenths of degrees Kelvin */
46 *temp = DECI_KELVIN_TO_MILLICELSIUS(tmp);
47
48 return 0;
49}
50
51static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
52 int trip, unsigned long *temp)
53{
54 struct int34x_thermal_zone *d = zone->devdata;
55 int i;
56
57 if (d->override_ops && d->override_ops->get_trip_temp)
58 return d->override_ops->get_trip_temp(zone, trip, temp);
59
60 if (trip < d->aux_trip_nr)
61 *temp = d->aux_trips[trip];
62 else if (trip == d->crt_trip_id)
63 *temp = d->crt_temp;
64 else if (trip == d->psv_trip_id)
65 *temp = d->psv_temp;
66 else if (trip == d->hot_trip_id)
67 *temp = d->hot_temp;
68 else {
69 for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
70 if (d->act_trips[i].valid &&
71 d->act_trips[i].id == trip) {
72 *temp = d->act_trips[i].temp;
73 break;
74 }
75 }
76 if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
77 return -EINVAL;
78 }
79
80 return 0;
81}
82
83static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
84 int trip,
85 enum thermal_trip_type *type)
86{
87 struct int34x_thermal_zone *d = zone->devdata;
88 int i;
89
90 if (d->override_ops && d->override_ops->get_trip_type)
91 return d->override_ops->get_trip_type(zone, trip, type);
92
93 if (trip < d->aux_trip_nr)
94 *type = THERMAL_TRIP_PASSIVE;
95 else if (trip == d->crt_trip_id)
96 *type = THERMAL_TRIP_CRITICAL;
97 else if (trip == d->hot_trip_id)
98 *type = THERMAL_TRIP_HOT;
99 else if (trip == d->psv_trip_id)
100 *type = THERMAL_TRIP_PASSIVE;
101 else {
102 for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
103 if (d->act_trips[i].valid &&
104 d->act_trips[i].id == trip) {
105 *type = THERMAL_TRIP_ACTIVE;
106 break;
107 }
108 }
109 if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
110 return -EINVAL;
111 }
112
113 return 0;
114}
115
116static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
117 int trip, unsigned long temp)
118{
119 struct int34x_thermal_zone *d = zone->devdata;
120 acpi_status status;
121 char name[10];
122
123 if (d->override_ops && d->override_ops->set_trip_temp)
124 return d->override_ops->set_trip_temp(zone, trip, temp);
125
126 snprintf(name, sizeof(name), "PAT%d", trip);
127 status = acpi_execute_simple_method(d->adev->handle, name,
128 MILLICELSIUS_TO_DECI_KELVIN(temp));
129 if (ACPI_FAILURE(status))
130 return -EIO;
131
132 d->aux_trips[trip] = temp;
133
134 return 0;
135}
136
137
138static int int340x_thermal_get_trip_hyst(struct thermal_zone_device *zone,
139 int trip, unsigned long *temp)
140{
141 struct int34x_thermal_zone *d = zone->devdata;
142 acpi_status status;
143 unsigned long long hyst;
144
145 if (d->override_ops && d->override_ops->get_trip_hyst)
146 return d->override_ops->get_trip_hyst(zone, trip, temp);
147
148 status = acpi_evaluate_integer(d->adev->handle, "GTSH", NULL, &hyst);
149 if (ACPI_FAILURE(status))
150 return -EIO;
151
152 *temp = hyst * 100;
153
154 return 0;
155}
156
157static struct thermal_zone_device_ops int340x_thermal_zone_ops = {
158 .get_temp = int340x_thermal_get_zone_temp,
159 .get_trip_temp = int340x_thermal_get_trip_temp,
160 .get_trip_type = int340x_thermal_get_trip_type,
161 .set_trip_temp = int340x_thermal_set_trip_temp,
162 .get_trip_hyst = int340x_thermal_get_trip_hyst,
163};
164
165static int int340x_thermal_get_trip_config(acpi_handle handle, char *name,
166 unsigned long *temp)
167{
168 unsigned long long r;
169 acpi_status status;
170
171 status = acpi_evaluate_integer(handle, name, NULL, &r);
172 if (ACPI_FAILURE(status))
173 return -EIO;
174
175 *temp = DECI_KELVIN_TO_MILLICELSIUS(r);
176
177 return 0;
178}
179
180static struct thermal_zone_params int340x_thermal_params = {
181 .governor_name = "user_space",
182 .no_hwmon = true,
183};
184
185struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
186 struct thermal_zone_device_ops *override_ops)
187{
188 struct int34x_thermal_zone *int34x_thermal_zone;
189 acpi_status status;
190 unsigned long long trip_cnt;
191 int trip_mask = 0, i;
192 int ret;
193
194 int34x_thermal_zone = kzalloc(sizeof(*int34x_thermal_zone),
195 GFP_KERNEL);
196 if (!int34x_thermal_zone)
197 return ERR_PTR(-ENOMEM);
198
199 int34x_thermal_zone->adev = adev;
200 int34x_thermal_zone->override_ops = override_ops;
201
202 status = acpi_evaluate_integer(adev->handle, "PATC", NULL, &trip_cnt);
203 if (ACPI_FAILURE(status))
204 trip_cnt = 0;
205 else {
206 int34x_thermal_zone->aux_trips = kzalloc(
207 sizeof(*int34x_thermal_zone->aux_trips) *
208 trip_cnt, GFP_KERNEL);
209 if (!int34x_thermal_zone->aux_trips) {
210 ret = -ENOMEM;
211 goto free_mem;
212 }
213 trip_mask = BIT(trip_cnt) - 1;
214 int34x_thermal_zone->aux_trip_nr = trip_cnt;
215 }
216
217 int34x_thermal_zone->crt_trip_id = -1;
218 if (!int340x_thermal_get_trip_config(adev->handle, "_CRT",
219 &int34x_thermal_zone->crt_temp))
220 int34x_thermal_zone->crt_trip_id = trip_cnt++;
221 int34x_thermal_zone->hot_trip_id = -1;
222 if (!int340x_thermal_get_trip_config(adev->handle, "_HOT",
223 &int34x_thermal_zone->hot_temp))
224 int34x_thermal_zone->hot_trip_id = trip_cnt++;
225 int34x_thermal_zone->psv_trip_id = -1;
226 if (!int340x_thermal_get_trip_config(adev->handle, "_PSV",
227 &int34x_thermal_zone->psv_temp))
228 int34x_thermal_zone->psv_trip_id = trip_cnt++;
229 for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
230 char name[5] = { '_', 'A', 'C', '0' + i, '\0' };
231
232 if (int340x_thermal_get_trip_config(adev->handle, name,
233 &int34x_thermal_zone->act_trips[i].temp))
234 break;
235
236 int34x_thermal_zone->act_trips[i].id = trip_cnt++;
237 int34x_thermal_zone->act_trips[i].valid = true;
238 }
239 int34x_thermal_zone->lpat_table = acpi_lpat_get_conversion_table(
240 adev->handle);
241
242 int34x_thermal_zone->zone = thermal_zone_device_register(
243 acpi_device_bid(adev),
244 trip_cnt,
245 trip_mask, int34x_thermal_zone,
246 &int340x_thermal_zone_ops,
247 &int340x_thermal_params,
248 0, 0);
249 if (IS_ERR(int34x_thermal_zone->zone)) {
250 ret = PTR_ERR(int34x_thermal_zone->zone);
251 goto free_lpat;
252 }
253
254 return int34x_thermal_zone;
255
256free_lpat:
257 acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
258free_mem:
259 kfree(int34x_thermal_zone);
260 return ERR_PTR(ret);
261}
262EXPORT_SYMBOL_GPL(int340x_thermal_zone_add);
263
264void int340x_thermal_zone_remove(struct int34x_thermal_zone
265 *int34x_thermal_zone)
266{
267 thermal_zone_device_unregister(int34x_thermal_zone->zone);
268 acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
269 kfree(int34x_thermal_zone);
270}
271EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
272
273MODULE_AUTHOR("Aaron Lu <aaron.lu@intel.com>");
274MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
275MODULE_DESCRIPTION("Intel INT340x common thermal zone handler");
276MODULE_LICENSE("GPL v2");
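Note: int340x_thermal_zone_add() lays trip IDs out in a fixed order: the PATC aux trips take indices [0, patc), then _CRT, _HOT, _PSV and any _ACx methods that evaluate successfully are appended, and the writable-trip mask covers the aux trips only (BIT(patc) - 1). A standalone sketch of that bookkeeping under assumed ACPI results (_HOT absent here; the names are illustrative, not kernel API):

#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
        int patc = 2;                   /* assumed result of evaluating PATC */
        int trip_cnt = patc;
        unsigned long trip_mask = BIT(patc) - 1; /* only aux trips settable */

        int crt_id = trip_cnt++;        /* _CRT evaluated successfully */
        int psv_id = trip_cnt++;        /* _PSV evaluated successfully */
        int ac0_id = trip_cnt++;        /* _AC0 evaluated successfully */

        printf("aux 0..%d crt=%d psv=%d ac0=%d mask=0x%lx total=%d\n",
               patc - 1, crt_id, psv_id, ac0_id, trip_mask, trip_cnt);
        return 0;
}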
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h
new file mode 100644
index 000000000000..9f38ab72c4bf
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h
@@ -0,0 +1,68 @@
1/*
2 * int340x_thermal_zone.h
3 * Copyright (c) 2015, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 */
15
16#ifndef __INT340X_THERMAL_ZONE_H__
17#define __INT340X_THERMAL_ZONE_H__
18
19#include <acpi/acpi_lpat.h>
20
21#define INT340X_THERMAL_MAX_ACT_TRIP_COUNT 10
22
23struct active_trip {
24 unsigned long temp;
25 int id;
26 bool valid;
27};
28
29struct int34x_thermal_zone {
30 struct acpi_device *adev;
31 struct active_trip act_trips[INT340X_THERMAL_MAX_ACT_TRIP_COUNT];
32 unsigned long *aux_trips;
33 int aux_trip_nr;
34 unsigned long psv_temp;
35 int psv_trip_id;
36 unsigned long crt_temp;
37 int crt_trip_id;
38 unsigned long hot_temp;
39 int hot_trip_id;
40 struct thermal_zone_device *zone;
41 struct thermal_zone_device_ops *override_ops;
42 void *priv_data;
43 struct acpi_lpat_conversion_table *lpat_table;
44};
45
46struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *,
47 struct thermal_zone_device_ops *override_ops);
48void int340x_thermal_zone_remove(struct int34x_thermal_zone *);
49
50static inline void int340x_thermal_zone_set_priv_data(
51 struct int34x_thermal_zone *tzone, void *priv_data)
52{
53 tzone->priv_data = priv_data;
54}
55
56static inline void *int340x_thermal_zone_get_priv_data(
57 struct int34x_thermal_zone *tzone)
58{
59 return tzone->priv_data;
60}
61
62static inline void int340x_thermal_zone_device_update(
63 struct int34x_thermal_zone *tzone)
64{
65 thermal_zone_device_update(tzone->zone);
66}
67
68#endif
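Note: the zone code behind this header leans on the tenths-of-degree-Kelvin convention ACPI uses for _TMP and PATx. A worked standalone example of the round trip, with the conversion macros redefined locally using the usual 273.2 K offset so the sketch compiles on its own:

#include <stdio.h>

#define DECI_KELVIN_TO_MILLICELSIUS(t) (((long)(t) - 2732) * 100)
#define MILLICELSIUS_TO_DECI_KELVIN(t) (((long)(t) / 100) + 2732)

int main(void)
{
        long tmp = 3182;        /* e.g. a _TMP result: 318.2 K */

        printf("%ld mC\n", DECI_KELVIN_TO_MILLICELSIUS(tmp));   /* 45000 */
        printf("%ld dK\n", MILLICELSIUS_TO_DECI_KELVIN(45000)); /* 3182 */
        return 0;
}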
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
index 0fe5dbbea968..5e8d8e91ea6d 100644
--- a/drivers/thermal/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -18,6 +18,8 @@
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/acpi.h> 20#include <linux/acpi.h>
21#include <linux/thermal.h>
22#include "int340x_thermal_zone.h"
21 23
22/* Broadwell-U/HSB thermal reporting device */ 24/* Broadwell-U/HSB thermal reporting device */
23#define PCI_DEVICE_ID_PROC_BDW_THERMAL 0x1603 25#define PCI_DEVICE_ID_PROC_BDW_THERMAL 0x1603
@@ -39,6 +41,7 @@ struct proc_thermal_device {
39 struct device *dev; 41 struct device *dev;
40 struct acpi_device *adev; 42 struct acpi_device *adev;
41 struct power_config power_limits[2]; 43 struct power_config power_limits[2];
44 struct int34x_thermal_zone *int340x_zone;
42}; 45};
43 46
44enum proc_thermal_emum_mode_type { 47enum proc_thermal_emum_mode_type {
@@ -117,6 +120,72 @@ static struct attribute_group power_limit_attribute_group = {
117 .name = "power_limits" 120 .name = "power_limits"
118}; 121};
119 122
123static int stored_tjmax; /* since it is fixed, we can have local storage */
124
125static int get_tjmax(void)
126{
127 u32 eax, edx;
128 u32 val;
129 int err;
130
131 err = rdmsr_safe(MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
132 if (err)
133 return err;
134
135 val = (eax >> 16) & 0xff;
136 if (val)
137 return val;
138
139 return -EINVAL;
140}
141
142static int read_temp_msr(unsigned long *temp)
143{
144 int cpu;
145 u32 eax, edx;
146 int err;
147 unsigned long curr_temp_off = 0;
148
149 *temp = 0;
150
151 for_each_online_cpu(cpu) {
152 err = rdmsr_safe_on_cpu(cpu, MSR_IA32_THERM_STATUS, &eax,
153 &edx);
154 if (err)
155 goto err_ret;
156 else {
157 if (eax & 0x80000000) {
158 curr_temp_off = (eax >> 16) & 0x7f;
159 if (!*temp || curr_temp_off < *temp)
160 *temp = curr_temp_off;
161 } else {
162 err = -EINVAL;
163 goto err_ret;
164 }
165 }
166 }
167
168 return 0;
169err_ret:
170 return err;
171}
172
173static int proc_thermal_get_zone_temp(struct thermal_zone_device *zone,
174 unsigned long *temp)
175{
176 int ret;
177
178 ret = read_temp_msr(temp);
179 if (!ret)
180 *temp = (stored_tjmax - *temp) * 1000;
181
182 return ret;
183}
184
185static struct thermal_zone_device_ops proc_thermal_local_ops = {
186 .get_temp = proc_thermal_get_zone_temp,
187};
188
120static int proc_thermal_add(struct device *dev, 189static int proc_thermal_add(struct device *dev,
121 struct proc_thermal_device **priv) 190 struct proc_thermal_device **priv)
122{ 191{
@@ -126,6 +195,8 @@ static int proc_thermal_add(struct device *dev,
126 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 195 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
127 union acpi_object *elements, *ppcc; 196 union acpi_object *elements, *ppcc;
128 union acpi_object *p; 197 union acpi_object *p;
198 unsigned long long tmp;
199 struct thermal_zone_device_ops *ops = NULL;
129 int i; 200 int i;
130 int ret; 201 int ret;
131 202
@@ -178,6 +249,24 @@ static int proc_thermal_add(struct device *dev,
178 249
179 ret = sysfs_create_group(&dev->kobj, 250 ret = sysfs_create_group(&dev->kobj,
180 &power_limit_attribute_group); 251 &power_limit_attribute_group);
252 if (ret)
253 goto free_buffer;
254
255 status = acpi_evaluate_integer(adev->handle, "_TMP", NULL, &tmp);
256 if (ACPI_FAILURE(status)) {
257 /* there is no _TMP method, add local method */
258 stored_tjmax = get_tjmax();
259 if (stored_tjmax > 0)
260 ops = &proc_thermal_local_ops;
261 }
262
263 proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops);
264 if (IS_ERR(proc_priv->int340x_zone)) {
265 sysfs_remove_group(&proc_priv->dev->kobj,
266 &power_limit_attribute_group);
267 ret = PTR_ERR(proc_priv->int340x_zone);
268 } else
269 ret = 0;
181 270
182free_buffer: 271free_buffer:
183 kfree(buf.pointer); 272 kfree(buf.pointer);
@@ -185,8 +274,9 @@ free_buffer:
185 return ret; 274 return ret;
186} 275}
187 276
188void proc_thermal_remove(struct proc_thermal_device *proc_priv) 277static void proc_thermal_remove(struct proc_thermal_device *proc_priv)
189{ 278{
279 int340x_thermal_zone_remove(proc_priv->int340x_zone);
190 sysfs_remove_group(&proc_priv->dev->kobj, 280 sysfs_remove_group(&proc_priv->dev->kobj,
191 &power_limit_attribute_group); 281 &power_limit_attribute_group);
192} 282}
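Note: when the device has no _TMP method, the new fallback above reads the temperature from MSRs: TjMax comes from IA32_TEMPERATURE_TARGET (bits 23:16 as the code extracts them), IA32_THERM_STATUS supplies a per-core offset below TjMax (bits 22:16, trusted only when bit 31 is set), and the smallest offset across CPUs -- the hottest core -- wins. A standalone sketch of just the arithmetic, with made-up register values:

#include <stdio.h>

int main(void)
{
        unsigned int tjmax = 100;       /* degrees C, from the target MSR */
        /* reading-valid bit plus a digital readout of 40 C below TjMax */
        unsigned int eax = 0x80000000u | (40u << 16);

        if (eax & 0x80000000u) {
                unsigned int off = (eax >> 16) & 0x7f;

                printf("%u mC\n", (tjmax - off) * 1000); /* 60000 mC */
        }
        return 0;
}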
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 6ceebd659dd4..12623bc02f46 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -688,6 +688,7 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = {
688 { X86_VENDOR_INTEL, 6, 0x45}, 688 { X86_VENDOR_INTEL, 6, 0x45},
689 { X86_VENDOR_INTEL, 6, 0x46}, 689 { X86_VENDOR_INTEL, 6, 0x46},
690 { X86_VENDOR_INTEL, 6, 0x4c}, 690 { X86_VENDOR_INTEL, 6, 0x4c},
691 { X86_VENDOR_INTEL, 6, 0x4d},
691 { X86_VENDOR_INTEL, 6, 0x56}, 692 { X86_VENDOR_INTEL, 6, 0x56},
692 {} 693 {}
693}; 694};
diff --git a/drivers/thermal/intel_soc_dts_thermal.c b/drivers/thermal/intel_soc_dts_thermal.c
index 5580f5b24eb9..9013505e43b7 100644
--- a/drivers/thermal/intel_soc_dts_thermal.c
+++ b/drivers/thermal/intel_soc_dts_thermal.c
@@ -309,10 +309,13 @@ static int soc_dts_enable(int id)
309 return ret; 309 return ret;
310} 310}
311 311
312static struct soc_sensor_entry *alloc_soc_dts(int id, u32 tj_max) 312static struct soc_sensor_entry *alloc_soc_dts(int id, u32 tj_max,
313 bool notification_support)
313{ 314{
314 struct soc_sensor_entry *aux_entry; 315 struct soc_sensor_entry *aux_entry;
315 char name[10]; 316 char name[10];
317 int trip_count = 0;
318 int trip_mask = 0;
316 int err; 319 int err;
317 320
318 aux_entry = kzalloc(sizeof(*aux_entry), GFP_KERNEL); 321 aux_entry = kzalloc(sizeof(*aux_entry), GFP_KERNEL);
@@ -332,11 +335,16 @@ static struct soc_sensor_entry *alloc_soc_dts(int id, u32 tj_max)
332 aux_entry->tj_max = tj_max; 335 aux_entry->tj_max = tj_max;
333 aux_entry->temp_mask = 0x00FF << (id * 8); 336 aux_entry->temp_mask = 0x00FF << (id * 8);
334 aux_entry->temp_shift = id * 8; 337 aux_entry->temp_shift = id * 8;
338 if (notification_support) {
339 trip_count = SOC_MAX_DTS_TRIPS;
340 trip_mask = 0x02;
341 }
335 snprintf(name, sizeof(name), "soc_dts%d", id); 342 snprintf(name, sizeof(name), "soc_dts%d", id);
336 aux_entry->tzone = thermal_zone_device_register(name, 343 aux_entry->tzone = thermal_zone_device_register(name,
337 SOC_MAX_DTS_TRIPS, 344 trip_count,
338 0x02, 345 trip_mask,
339 aux_entry, &tzone_ops, NULL, 0, 0); 346 aux_entry, &tzone_ops,
347 NULL, 0, 0);
340 if (IS_ERR(aux_entry->tzone)) { 348 if (IS_ERR(aux_entry->tzone)) {
341 err = PTR_ERR(aux_entry->tzone); 349 err = PTR_ERR(aux_entry->tzone);
342 goto err_ret; 350 goto err_ret;
@@ -402,6 +410,7 @@ static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
402 410
403static const struct x86_cpu_id soc_thermal_ids[] = { 411static const struct x86_cpu_id soc_thermal_ids[] = {
404 { X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x37, 0, BYT_SOC_DTS_APIC_IRQ}, 412 { X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x37, 0, BYT_SOC_DTS_APIC_IRQ},
413 { X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x4c, 0, 0},
405 {} 414 {}
406}; 415};
407MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids); 416MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids);
@@ -420,8 +429,11 @@ static int __init intel_soc_thermal_init(void)
420 if (get_tj_max(&tj_max)) 429 if (get_tj_max(&tj_max))
421 return -EINVAL; 430 return -EINVAL;
422 431
432 soc_dts_thres_irq = (int)match_cpu->driver_data;
433
423 for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) { 434 for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
424 soc_dts[i] = alloc_soc_dts(i, tj_max); 435 soc_dts[i] = alloc_soc_dts(i, tj_max,
436 soc_dts_thres_irq ? true : false);
425 if (IS_ERR(soc_dts[i])) { 437 if (IS_ERR(soc_dts[i])) {
426 err = PTR_ERR(soc_dts[i]); 438 err = PTR_ERR(soc_dts[i]);
427 goto err_free; 439 goto err_free;
@@ -430,15 +442,15 @@ static int __init intel_soc_thermal_init(void)
430 442
431 spin_lock_init(&intr_notify_lock); 443 spin_lock_init(&intr_notify_lock);
432 444
433 soc_dts_thres_irq = (int)match_cpu->driver_data; 445 if (soc_dts_thres_irq) {
434 446 err = request_threaded_irq(soc_dts_thres_irq, NULL,
435 err = request_threaded_irq(soc_dts_thres_irq, NULL, 447 soc_irq_thread_fn,
436 soc_irq_thread_fn, 448 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
437 IRQF_TRIGGER_RISING | IRQF_ONESHOT, 449 "soc_dts", soc_dts);
438 "soc_dts", soc_dts); 450 if (err) {
439 if (err) { 451 pr_err("request_threaded_irq ret %d\n", err);
440 pr_err("request_threaded_irq ret %d\n", err); 452 goto err_free;
441 goto err_free; 453 }
442 } 454 }
443 455
444 for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) { 456 for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
@@ -451,7 +463,8 @@ static int __init intel_soc_thermal_init(void)
451 463
452err_trip_temp: 464err_trip_temp:
453 i = SOC_MAX_DTS_SENSORS; 465 i = SOC_MAX_DTS_SENSORS;
454 free_irq(soc_dts_thres_irq, soc_dts); 466 if (soc_dts_thres_irq)
467 free_irq(soc_dts_thres_irq, soc_dts);
455err_free: 468err_free:
456 while (--i >= 0) 469 while (--i >= 0)
457 free_soc_dts(soc_dts[i]); 470 free_soc_dts(soc_dts[i]);
@@ -466,7 +479,8 @@ static void __exit intel_soc_thermal_exit(void)
466 for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) 479 for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i)
467 update_trip_temp(soc_dts[i], 0, 0); 480 update_trip_temp(soc_dts[i], 0, 0);
468 481
469 free_irq(soc_dts_thres_irq, soc_dts); 482 if (soc_dts_thres_irq)
483 free_irq(soc_dts_thres_irq, soc_dts);
470 484
471 for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) 485 for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i)
472 free_soc_dts(soc_dts[i]); 486 free_soc_dts(soc_dts[i]);
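Note: the soc_dts changes make the threshold interrupt optional: driver_data in the CPU match table doubles as the IRQ number, and a value of zero (the new 0x4c entry) means polling only, so the request and every free must carry the same guard. A trivial sketch of that symmetry, with stubs standing in for the kernel IRQ calls:

#include <stdio.h>

static int request_irq_stub(int irq)
{
        printf("request irq %d\n", irq);
        return 0;
}

static void free_irq_stub(int irq)
{
        printf("free irq %d\n", irq);
}

static int setup(int irq)
{
        if (irq)                /* only wire a handler when one exists */
                return request_irq_stub(irq);
        return 0;               /* polling-only part: nothing to do */
}

static void teardown(int irq)
{
        if (irq)                /* mirror the guard on every unwind path */
                free_irq_stub(irq);
}

int main(void)
{
        setup(0);               /* e.g. the new 0x4c entry: no IRQ */
        setup(86);              /* e.g. an APIC-routed threshold IRQ */
        teardown(86);
        return 0;
}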
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index d717f3dab6f1..668fb1bdea9e 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -497,6 +497,9 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data,
497 if (sensor_specs.np == sensor_np && id == sensor_id) { 497 if (sensor_specs.np == sensor_np && id == sensor_id) {
498 tzd = thermal_zone_of_add_sensor(child, sensor_np, 498 tzd = thermal_zone_of_add_sensor(child, sensor_np,
499 data, ops); 499 data, ops);
500 if (!IS_ERR(tzd))
501 tzd->ops->set_mode(tzd, THERMAL_DEVICE_ENABLED);
502
500 of_node_put(sensor_specs.np); 503 of_node_put(sensor_specs.np);
501 of_node_put(child); 504 of_node_put(child);
502 goto exit; 505 goto exit;
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 2580a4872f90..fe4e767018c4 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -387,21 +387,9 @@ static int rcar_thermal_probe(struct platform_device *pdev)
387 387
388 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 388 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
389 if (irq) { 389 if (irq) {
390 int ret;
391
392 /* 390 /*
393 * platform has IRQ support. 391 * platform has IRQ support.
394 * Then, driver uses common registers 392 * Then, driver uses common registers
395 */
396
397 ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
398 dev_name(dev), common);
399 if (ret) {
400 dev_err(dev, "irq request failed\n ");
401 return ret;
402 }
403
404 /*
405 * rcar_has_irq_support() will be enabled 393 * rcar_has_irq_support() will be enabled
406 */ 394 */
407 res = platform_get_resource(pdev, IORESOURCE_MEM, mres++); 395 res = platform_get_resource(pdev, IORESOURCE_MEM, mres++);
@@ -456,8 +444,16 @@ static int rcar_thermal_probe(struct platform_device *pdev)
456 } 444 }
457 445
458 /* enable temperature comparison */ 446 /* enable temperature comparison */
459 if (irq) 447 if (irq) {
448 ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
449 dev_name(dev), common);
450 if (ret) {
 451 dev_err(dev, "irq request failed\n");
452 goto error_unregister;
453 }
454
460 rcar_thermal_common_write(common, ENR, enr_bits); 455 rcar_thermal_common_write(common, ENR, enr_bits);
456 }
461 457
462 platform_set_drvdata(pdev, common); 458 platform_set_drvdata(pdev, common);
463 459
@@ -467,9 +463,9 @@ static int rcar_thermal_probe(struct platform_device *pdev)
467 463
468error_unregister: 464error_unregister:
469 rcar_thermal_for_each_priv(priv, common) { 465 rcar_thermal_for_each_priv(priv, common) {
470 thermal_zone_device_unregister(priv->zone);
471 if (rcar_has_irq_support(priv)) 466 if (rcar_has_irq_support(priv))
472 rcar_thermal_irq_disable(priv); 467 rcar_thermal_irq_disable(priv);
468 thermal_zone_device_unregister(priv->zone);
473 } 469 }
474 470
475 pm_runtime_put(dev); 471 pm_runtime_put(dev);
@@ -485,9 +481,9 @@ static int rcar_thermal_remove(struct platform_device *pdev)
485 struct rcar_thermal_priv *priv; 481 struct rcar_thermal_priv *priv;
486 482
487 rcar_thermal_for_each_priv(priv, common) { 483 rcar_thermal_for_each_priv(priv, common) {
488 thermal_zone_device_unregister(priv->zone);
489 if (rcar_has_irq_support(priv)) 484 if (rcar_has_irq_support(priv))
490 rcar_thermal_irq_disable(priv); 485 rcar_thermal_irq_disable(priv);
486 thermal_zone_device_unregister(priv->zone);
491 } 487 }
492 488
493 pm_runtime_put(dev); 489 pm_runtime_put(dev);
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 9c6ce548e363..3aa46ac7cdbc 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -193,19 +193,20 @@ static u32 rk_tsadcv2_temp_to_code(long temp)
193 193
194static long rk_tsadcv2_code_to_temp(u32 code) 194static long rk_tsadcv2_code_to_temp(u32 code)
195{ 195{
196 int high, low, mid; 196 unsigned int low = 0;
197 197 unsigned int high = ARRAY_SIZE(v2_code_table) - 1;
198 low = 0; 198 unsigned int mid = (low + high) / 2;
199 high = ARRAY_SIZE(v2_code_table) - 1; 199 unsigned int num;
200 mid = (high + low) / 2; 200 unsigned long denom;
201 201
202 if (code > v2_code_table[low].code || code < v2_code_table[high].code) 202 /* Invalid code, return -EAGAIN */
203 return 125000; /* No code available, return max temperature */ 203 if (code > TSADCV2_DATA_MASK)
204 return -EAGAIN;
204 205
205 while (low <= high) { 206 while (low <= high && mid) {
206 if (code >= v2_code_table[mid].code && code < 207 if (code >= v2_code_table[mid].code &&
207 v2_code_table[mid - 1].code) 208 code < v2_code_table[mid - 1].code)
208 return v2_code_table[mid].temp; 209 break;
209 else if (code < v2_code_table[mid].code) 210 else if (code < v2_code_table[mid].code)
210 low = mid + 1; 211 low = mid + 1;
211 else 212 else
@@ -213,7 +214,16 @@ static long rk_tsadcv2_code_to_temp(u32 code)
213 mid = (low + high) / 2; 214 mid = (low + high) / 2;
214 } 215 }
215 216
216 return 125000; 217 /*
218 * The 5C granularity provided by the table is too much. Let's
219 * assume that the relationship between sensor readings and
220 * temperature between 2 table entries is linear and interpolate
 221 * to produce a less granular result.
222 */
223 num = v2_code_table[mid].temp - v2_code_table[mid - 1].temp;
224 num *= v2_code_table[mid - 1].code - code;
225 denom = v2_code_table[mid - 1].code - v2_code_table[mid].code;
226 return v2_code_table[mid - 1].temp + (num / denom);
217} 227}
218 228
219/** 229/**
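Note: the rockchip conversion above stops returning a fixed 125000 on lookup misses and instead interpolates linearly between the two bracketing table entries (codes fall as temperature rises, in 5 C steps). A standalone rework of the same arithmetic with made-up neighbouring entries:

#include <stdio.h>

struct tsadc_entry {
        unsigned int code;
        long temp;              /* millicelsius */
};

/* hypothetical neighbours out of a descending-code table */
static const struct tsadc_entry lo = { 3400, 60000 };  /* tbl[mid - 1] */
static const struct tsadc_entry hi = { 3380, 65000 };  /* tbl[mid] */

int main(void)
{
        unsigned int code = 3390;               /* raw reading in between */
        unsigned int num = hi.temp - lo.temp;   /* 5000 mC span */
        unsigned long denom = lo.code - hi.code;        /* 20 codes span */

        num *= lo.code - code;                  /* 10 codes into the span */
        printf("%ld mC\n", lo.temp + (long)(num / denom)); /* 62500 mC */
        return 0;
}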
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
index c43306ecc0ab..c8e35c1a43dc 100644
--- a/drivers/thermal/samsung/Kconfig
+++ b/drivers/thermal/samsung/Kconfig
@@ -7,12 +7,3 @@ config EXYNOS_THERMAL
7 the TMU, reports temperature and handles cooling action if defined. 7 the TMU, reports temperature and handles cooling action if defined.
8 This driver uses the Exynos core thermal APIs and TMU configuration 8 This driver uses the Exynos core thermal APIs and TMU configuration
9 data from the supported SoCs. 9 data from the supported SoCs.
10
11config EXYNOS_THERMAL_CORE
12 bool "Core thermal framework support for EXYNOS SOCs"
13 depends on EXYNOS_THERMAL
14 help
15 If you say yes here you get support for EXYNOS TMU
16 (Thermal Management Unit) common registration/unregistration
17 functions to the core thermal layer and also to use the generic
18 CPU cooling APIs.
diff --git a/drivers/thermal/samsung/Makefile b/drivers/thermal/samsung/Makefile
index c09d83095dc2..1e47d0d89ce0 100644
--- a/drivers/thermal/samsung/Makefile
+++ b/drivers/thermal/samsung/Makefile
@@ -3,5 +3,3 @@
3# 3#
4obj-$(CONFIG_EXYNOS_THERMAL) += exynos_thermal.o 4obj-$(CONFIG_EXYNOS_THERMAL) += exynos_thermal.o
5exynos_thermal-y := exynos_tmu.o 5exynos_thermal-y := exynos_tmu.o
6exynos_thermal-y += exynos_tmu_data.o
7exynos_thermal-$(CONFIG_EXYNOS_THERMAL_CORE) += exynos_thermal_common.o
diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c
deleted file mode 100644
index 6dc3815cc73f..000000000000
--- a/drivers/thermal/samsung/exynos_thermal_common.c
+++ /dev/null
@@ -1,427 +0,0 @@
1/*
2 * exynos_thermal_common.c - Samsung EXYNOS common thermal file
3 *
4 * Copyright (C) 2013 Samsung Electronics
5 * Amit Daniel Kachhap <amit.daniel@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/cpu_cooling.h>
24#include <linux/err.h>
25#include <linux/slab.h>
26#include <linux/thermal.h>
27
28#include "exynos_thermal_common.h"
29
30struct exynos_thermal_zone {
31 enum thermal_device_mode mode;
32 struct thermal_zone_device *therm_dev;
33 struct thermal_cooling_device *cool_dev[MAX_COOLING_DEVICE];
34 unsigned int cool_dev_size;
35 struct platform_device *exynos4_dev;
36 struct thermal_sensor_conf *sensor_conf;
37 bool bind;
38};
39
40/* Get mode callback functions for thermal zone */
41static int exynos_get_mode(struct thermal_zone_device *thermal,
42 enum thermal_device_mode *mode)
43{
44 struct exynos_thermal_zone *th_zone = thermal->devdata;
45 if (th_zone)
46 *mode = th_zone->mode;
47 return 0;
48}
49
50/* Set mode callback functions for thermal zone */
51static int exynos_set_mode(struct thermal_zone_device *thermal,
52 enum thermal_device_mode mode)
53{
54 struct exynos_thermal_zone *th_zone = thermal->devdata;
55 if (!th_zone) {
56 dev_err(&thermal->device,
57 "thermal zone not registered\n");
58 return 0;
59 }
60
61 mutex_lock(&thermal->lock);
62
63 if (mode == THERMAL_DEVICE_ENABLED &&
64 !th_zone->sensor_conf->trip_data.trigger_falling)
65 thermal->polling_delay = IDLE_INTERVAL;
66 else
67 thermal->polling_delay = 0;
68
69 mutex_unlock(&thermal->lock);
70
71 th_zone->mode = mode;
72 thermal_zone_device_update(thermal);
73 dev_dbg(th_zone->sensor_conf->dev,
74 "thermal polling set for duration=%d msec\n",
75 thermal->polling_delay);
76 return 0;
77}
78
79
80/* Get trip type callback functions for thermal zone */
81static int exynos_get_trip_type(struct thermal_zone_device *thermal, int trip,
82 enum thermal_trip_type *type)
83{
84 struct exynos_thermal_zone *th_zone = thermal->devdata;
85 int max_trip = th_zone->sensor_conf->trip_data.trip_count;
86 int trip_type;
87
88 if (trip < 0 || trip >= max_trip)
89 return -EINVAL;
90
91 trip_type = th_zone->sensor_conf->trip_data.trip_type[trip];
92
93 if (trip_type == SW_TRIP)
94 *type = THERMAL_TRIP_CRITICAL;
95 else if (trip_type == THROTTLE_ACTIVE)
96 *type = THERMAL_TRIP_ACTIVE;
97 else if (trip_type == THROTTLE_PASSIVE)
98 *type = THERMAL_TRIP_PASSIVE;
99 else
100 return -EINVAL;
101
102 return 0;
103}
104
105/* Get trip temperature callback functions for thermal zone */
106static int exynos_get_trip_temp(struct thermal_zone_device *thermal, int trip,
107 unsigned long *temp)
108{
109 struct exynos_thermal_zone *th_zone = thermal->devdata;
110 int max_trip = th_zone->sensor_conf->trip_data.trip_count;
111
112 if (trip < 0 || trip >= max_trip)
113 return -EINVAL;
114
115 *temp = th_zone->sensor_conf->trip_data.trip_val[trip];
116 /* convert the temperature into millicelsius */
117 *temp = *temp * MCELSIUS;
118
119 return 0;
120}
121
122/* Get critical temperature callback functions for thermal zone */
123static int exynos_get_crit_temp(struct thermal_zone_device *thermal,
124 unsigned long *temp)
125{
126 struct exynos_thermal_zone *th_zone = thermal->devdata;
127 int max_trip = th_zone->sensor_conf->trip_data.trip_count;
128 /* Get the temp of highest trip*/
129 return exynos_get_trip_temp(thermal, max_trip - 1, temp);
130}
131
132/* Bind callback functions for thermal zone */
133static int exynos_bind(struct thermal_zone_device *thermal,
134 struct thermal_cooling_device *cdev)
135{
136 int ret = 0, i, tab_size, level;
137 struct freq_clip_table *tab_ptr, *clip_data;
138 struct exynos_thermal_zone *th_zone = thermal->devdata;
139 struct thermal_sensor_conf *data = th_zone->sensor_conf;
140
141 tab_ptr = (struct freq_clip_table *)data->cooling_data.freq_data;
142 tab_size = data->cooling_data.freq_clip_count;
143
144 if (tab_ptr == NULL || tab_size == 0)
145 return 0;
146
147 /* find the cooling device registered*/
148 for (i = 0; i < th_zone->cool_dev_size; i++)
149 if (cdev == th_zone->cool_dev[i])
150 break;
151
152 /* No matching cooling device */
153 if (i == th_zone->cool_dev_size)
154 return 0;
155
156 /* Bind the thermal zone to the cpufreq cooling device */
157 for (i = 0; i < tab_size; i++) {
158 clip_data = (struct freq_clip_table *)&(tab_ptr[i]);
159 level = cpufreq_cooling_get_level(0, clip_data->freq_clip_max);
160 if (level == THERMAL_CSTATE_INVALID)
161 return 0;
162 switch (GET_ZONE(i)) {
163 case MONITOR_ZONE:
164 case WARN_ZONE:
165 if (thermal_zone_bind_cooling_device(thermal, i, cdev,
166 level, 0)) {
167 dev_err(data->dev,
168 "error unbinding cdev inst=%d\n", i);
169 ret = -EINVAL;
170 }
171 th_zone->bind = true;
172 break;
173 default:
174 ret = -EINVAL;
175 }
176 }
177
178 return ret;
179}
180
181/* Unbind callback functions for thermal zone */
182static int exynos_unbind(struct thermal_zone_device *thermal,
183 struct thermal_cooling_device *cdev)
184{
185 int ret = 0, i, tab_size;
186 struct exynos_thermal_zone *th_zone = thermal->devdata;
187 struct thermal_sensor_conf *data = th_zone->sensor_conf;
188
189 if (th_zone->bind == false)
190 return 0;
191
192 tab_size = data->cooling_data.freq_clip_count;
193
194 if (tab_size == 0)
195 return 0;
196
197 /* find the cooling device registered*/
198 for (i = 0; i < th_zone->cool_dev_size; i++)
199 if (cdev == th_zone->cool_dev[i])
200 break;
201
202 /* No matching cooling device */
203 if (i == th_zone->cool_dev_size)
204 return 0;
205
206 /* Bind the thermal zone to the cpufreq cooling device */
207 for (i = 0; i < tab_size; i++) {
208 switch (GET_ZONE(i)) {
209 case MONITOR_ZONE:
210 case WARN_ZONE:
211 if (thermal_zone_unbind_cooling_device(thermal, i,
212 cdev)) {
213 dev_err(data->dev,
214 "error unbinding cdev inst=%d\n", i);
215 ret = -EINVAL;
216 }
217 th_zone->bind = false;
218 break;
219 default:
220 ret = -EINVAL;
221 }
222 }
223 return ret;
224}
225
226/* Get temperature callback functions for thermal zone */
227static int exynos_get_temp(struct thermal_zone_device *thermal,
228 unsigned long *temp)
229{
230 struct exynos_thermal_zone *th_zone = thermal->devdata;
231 void *data;
232
233 if (!th_zone->sensor_conf) {
234 dev_err(&thermal->device,
235 "Temperature sensor not initialised\n");
236 return -EINVAL;
237 }
238 data = th_zone->sensor_conf->driver_data;
239 *temp = th_zone->sensor_conf->read_temperature(data);
240 /* convert the temperature into millicelsius */
241 *temp = *temp * MCELSIUS;
242 return 0;
243}
244
245/* Get temperature callback functions for thermal zone */
246static int exynos_set_emul_temp(struct thermal_zone_device *thermal,
247 unsigned long temp)
248{
249 void *data;
250 int ret = -EINVAL;
251 struct exynos_thermal_zone *th_zone = thermal->devdata;
252
253 if (!th_zone->sensor_conf) {
254 dev_err(&thermal->device,
255 "Temperature sensor not initialised\n");
256 return -EINVAL;
257 }
258 data = th_zone->sensor_conf->driver_data;
259 if (th_zone->sensor_conf->write_emul_temp)
260 ret = th_zone->sensor_conf->write_emul_temp(data, temp);
261 return ret;
262}
263
264/* Get the temperature trend */
265static int exynos_get_trend(struct thermal_zone_device *thermal,
266 int trip, enum thermal_trend *trend)
267{
268 int ret;
269 unsigned long trip_temp;
270
271 ret = exynos_get_trip_temp(thermal, trip, &trip_temp);
272 if (ret < 0)
273 return ret;
274
275 if (thermal->temperature >= trip_temp)
276 *trend = THERMAL_TREND_RAISE_FULL;
277 else
278 *trend = THERMAL_TREND_DROP_FULL;
279
280 return 0;
281}
282/* Operation callback functions for thermal zone */
283static struct thermal_zone_device_ops exynos_dev_ops = {
284 .bind = exynos_bind,
285 .unbind = exynos_unbind,
286 .get_temp = exynos_get_temp,
287 .set_emul_temp = exynos_set_emul_temp,
288 .get_trend = exynos_get_trend,
289 .get_mode = exynos_get_mode,
290 .set_mode = exynos_set_mode,
291 .get_trip_type = exynos_get_trip_type,
292 .get_trip_temp = exynos_get_trip_temp,
293 .get_crit_temp = exynos_get_crit_temp,
294};
295
296/*
297 * This function may be called from interrupt based temperature sensor
298 * when threshold is changed.
299 */
300void exynos_report_trigger(struct thermal_sensor_conf *conf)
301{
302 unsigned int i;
303 char data[10];
304 char *envp[] = { data, NULL };
305 struct exynos_thermal_zone *th_zone;
306
307 if (!conf || !conf->pzone_data) {
308 pr_err("Invalid temperature sensor configuration data\n");
309 return;
310 }
311
312 th_zone = conf->pzone_data;
313
314 if (th_zone->bind == false) {
315 for (i = 0; i < th_zone->cool_dev_size; i++) {
316 if (!th_zone->cool_dev[i])
317 continue;
318 exynos_bind(th_zone->therm_dev,
319 th_zone->cool_dev[i]);
320 }
321 }
322
323 thermal_zone_device_update(th_zone->therm_dev);
324
325 mutex_lock(&th_zone->therm_dev->lock);
326 /* Find the level for which trip happened */
327 for (i = 0; i < th_zone->sensor_conf->trip_data.trip_count; i++) {
328 if (th_zone->therm_dev->last_temperature <
329 th_zone->sensor_conf->trip_data.trip_val[i] * MCELSIUS)
330 break;
331 }
332
333 if (th_zone->mode == THERMAL_DEVICE_ENABLED &&
334 !th_zone->sensor_conf->trip_data.trigger_falling) {
335 if (i > 0)
336 th_zone->therm_dev->polling_delay = ACTIVE_INTERVAL;
337 else
338 th_zone->therm_dev->polling_delay = IDLE_INTERVAL;
339 }
340
341 snprintf(data, sizeof(data), "%u", i);
342 kobject_uevent_env(&th_zone->therm_dev->device.kobj, KOBJ_CHANGE, envp);
343 mutex_unlock(&th_zone->therm_dev->lock);
344}
345
346/* Register with the in-kernel thermal management */
347int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
348{
349 int ret;
350 struct exynos_thermal_zone *th_zone;
351
352 if (!sensor_conf || !sensor_conf->read_temperature) {
353 pr_err("Temperature sensor not initialised\n");
354 return -EINVAL;
355 }
356
357 th_zone = devm_kzalloc(sensor_conf->dev,
358 sizeof(struct exynos_thermal_zone), GFP_KERNEL);
359 if (!th_zone)
360 return -ENOMEM;
361
362 th_zone->sensor_conf = sensor_conf;
363 /*
364 * TODO: 1) Handle multiple cooling devices in a thermal zone
365 * 2) Add a flag/name in cooling info to map to specific
366 * sensor
367 */
368 if (sensor_conf->cooling_data.freq_clip_count > 0) {
369 th_zone->cool_dev[th_zone->cool_dev_size] =
370 cpufreq_cooling_register(cpu_present_mask);
371 if (IS_ERR(th_zone->cool_dev[th_zone->cool_dev_size])) {
372 ret = PTR_ERR(th_zone->cool_dev[th_zone->cool_dev_size]);
373 if (ret != -EPROBE_DEFER)
374 dev_err(sensor_conf->dev,
375 "Failed to register cpufreq cooling device: %d\n",
376 ret);
377 goto err_unregister;
378 }
379 th_zone->cool_dev_size++;
380 }
381
382 th_zone->therm_dev = thermal_zone_device_register(
383 sensor_conf->name, sensor_conf->trip_data.trip_count,
384 0, th_zone, &exynos_dev_ops, NULL, 0,
385 sensor_conf->trip_data.trigger_falling ? 0 :
386 IDLE_INTERVAL);
387
388 if (IS_ERR(th_zone->therm_dev)) {
389 dev_err(sensor_conf->dev,
390 "Failed to register thermal zone device\n");
391 ret = PTR_ERR(th_zone->therm_dev);
392 goto err_unregister;
393 }
394 th_zone->mode = THERMAL_DEVICE_ENABLED;
395 sensor_conf->pzone_data = th_zone;
396
397 dev_info(sensor_conf->dev,
398 "Exynos: Thermal zone(%s) registered\n", sensor_conf->name);
399
400 return 0;
401
402err_unregister:
403 exynos_unregister_thermal(sensor_conf);
404 return ret;
405}
406
407/* Un-Register with the in-kernel thermal management */
408void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf)
409{
410 int i;
411 struct exynos_thermal_zone *th_zone;
412
413 if (!sensor_conf || !sensor_conf->pzone_data) {
414 pr_err("Invalid temperature sensor configuration data\n");
415 return;
416 }
417
418 th_zone = sensor_conf->pzone_data;
419
420 thermal_zone_device_unregister(th_zone->therm_dev);
421
422 for (i = 0; i < th_zone->cool_dev_size; ++i)
423 cpufreq_cooling_unregister(th_zone->cool_dev[i]);
424
425 dev_info(sensor_conf->dev,
426 "Exynos: Kernel Thermal management unregistered\n");
427}
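Taken together, exynos_register_thermal() and exynos_unregister_thermal() formed the entire contract between a TMU driver and this glue layer. A condensed sketch of a caller follows; it is not a standalone program, my_read_temp/my_probe are hypothetical names, and the real (pre-conversion) version of this handshake appears in the exynos_tmu.c probe hunk further down:

/* Sketch only: hypothetical user of the (now removed) common layer. */
static int my_read_temp(void *drv_data)
{
        return 55;      /* degrees Celsius, illustrative */
}

static int my_probe(struct platform_device *pdev)
{
        struct thermal_sensor_conf *conf;

        conf = devm_kzalloc(&pdev->dev, sizeof(*conf), GFP_KERNEL);
        if (!conf)
                return -ENOMEM;

        snprintf(conf->name, SENSOR_NAME_LEN, "therm_zone%d", 0);
        conf->read_temperature = my_read_temp;
        conf->trip_data.trip_count = 1;
        conf->trip_data.trip_val[0] = 70;       /* degrees Celsius */
        conf->dev = &pdev->dev;

        /* Creates the zone and binds cooling devices;
         * exynos_unregister_thermal(conf) undoes it on remove. */
        return exynos_register_thermal(conf);
}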
diff --git a/drivers/thermal/samsung/exynos_thermal_common.h b/drivers/thermal/samsung/exynos_thermal_common.h
deleted file mode 100644
index cd4471925cdd..000000000000
--- a/drivers/thermal/samsung/exynos_thermal_common.h
+++ /dev/null
@@ -1,106 +0,0 @@
1/*
2 * exynos_thermal_common.h - Samsung EXYNOS common header file
3 *
4 * Copyright (C) 2013 Samsung Electronics
5 * Amit Daniel Kachhap <amit.daniel@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef _EXYNOS_THERMAL_COMMON_H
24#define _EXYNOS_THERMAL_COMMON_H
25
26/* In-kernel thermal framework related macros & definitions */
27#define SENSOR_NAME_LEN 16
28#define MAX_TRIP_COUNT 8
29#define MAX_COOLING_DEVICE 4
30
31#define ACTIVE_INTERVAL 500
32#define IDLE_INTERVAL 10000
33#define MCELSIUS 1000
34
35/* CPU Zone information */
36#define PANIC_ZONE 4
37#define WARN_ZONE 3
38#define MONITOR_ZONE 2
39#define SAFE_ZONE 1
40
41#define GET_ZONE(trip) (trip + 2)
42#define GET_TRIP(zone) (zone - 2)
43
44enum trigger_type {
45 THROTTLE_ACTIVE = 1,
46 THROTTLE_PASSIVE,
47 SW_TRIP,
48 HW_TRIP,
49};
50
51/**
52 * struct freq_clip_table
53 * @freq_clip_max: maximum frequency allowed for this cooling state.
54 * @temp_level: Temperature level at which the temperature clipping will
55 * happen.
56 * @mask_val: cpumask of the allowed CPUs where the clipping will take place.
57 *
58 * This structure is required to be filled and passed to the
59 * cpufreq_cooling_register function.
60 */
61struct freq_clip_table {
62 unsigned int freq_clip_max;
63 unsigned int temp_level;
64 const struct cpumask *mask_val;
65};
66
67struct thermal_trip_point_conf {
68 int trip_val[MAX_TRIP_COUNT];
69 int trip_type[MAX_TRIP_COUNT];
70 int trip_count;
71 unsigned char trigger_falling;
72};
73
74struct thermal_cooling_conf {
75 struct freq_clip_table freq_data[MAX_TRIP_COUNT];
76 int freq_clip_count;
77};
78
79struct thermal_sensor_conf {
80 char name[SENSOR_NAME_LEN];
81 int (*read_temperature)(void *data);
82 int (*write_emul_temp)(void *drv_data, unsigned long temp);
83 struct thermal_trip_point_conf trip_data;
84 struct thermal_cooling_conf cooling_data;
85 void *driver_data;
86 void *pzone_data;
87 struct device *dev;
88};
89
90/* Functions used by the exynos-based thermal sensor driver */
91#ifdef CONFIG_EXYNOS_THERMAL_CORE
92void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf);
93int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf);
94void exynos_report_trigger(struct thermal_sensor_conf *sensor_conf);
95#else
96static inline void
97exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf) { return; }
98
99static inline int
100exynos_register_thermal(struct thermal_sensor_conf *sensor_conf) { return 0; }
101
102static inline void
103exynos_report_trigger(struct thermal_sensor_conf *sensor_conf) { return; }
104
105#endif /* CONFIG_EXYNOS_THERMAL_CORE */
106#endif /* _EXYNOS_THERMAL_COMMON_H */
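The zone/trip macros above encode a fixed offset of two between trip indices and CPU zone numbers. A tiny runnable check, using the constants exactly as defined in this header:

#include <assert.h>

#define PANIC_ZONE      4
#define WARN_ZONE       3
#define MONITOR_ZONE    2
#define SAFE_ZONE       1

#define GET_ZONE(trip)  (trip + 2)
#define GET_TRIP(zone)  (zone - 2)

int main(void)
{
        assert(GET_ZONE(0) == MONITOR_ZONE);    /* first trip -> monitor */
        assert(GET_ZONE(2) == PANIC_ZONE);      /* third trip -> panic */
        assert(GET_TRIP(WARN_ZONE) == 1);       /* and back again */
        return 0;
}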
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index d2f1e62a4232..1fc54ab911d2 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -1,6 +1,10 @@
1/* 1/*
2 * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit) 2 * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
3 * 3 *
4 * Copyright (C) 2014 Samsung Electronics
5 * Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
6 * Lukasz Majewski <l.majewski@samsung.com>
7 *
4 * Copyright (C) 2011 Samsung Electronics 8 * Copyright (C) 2011 Samsung Electronics
5 * Donggeun Kim <dg77.kim@samsung.com> 9 * Donggeun Kim <dg77.kim@samsung.com>
6 * Amit Daniel Kachhap <amit.kachhap@linaro.org> 10 * Amit Daniel Kachhap <amit.kachhap@linaro.org>
@@ -31,8 +35,8 @@
31#include <linux/platform_device.h> 35#include <linux/platform_device.h>
32#include <linux/regulator/consumer.h> 36#include <linux/regulator/consumer.h>
33 37
34#include "exynos_thermal_common.h"
35#include "exynos_tmu.h" 38#include "exynos_tmu.h"
39#include "../thermal_core.h"
36 40
37/* Exynos generic registers */ 41/* Exynos generic registers */
38#define EXYNOS_TMU_REG_TRIMINFO 0x0 42#define EXYNOS_TMU_REG_TRIMINFO 0x0
@@ -115,6 +119,27 @@
115#define EXYNOS5440_TMU_TH_RISE4_SHIFT 24 119#define EXYNOS5440_TMU_TH_RISE4_SHIFT 24
116#define EXYNOS5440_EFUSE_SWAP_OFFSET 8 120#define EXYNOS5440_EFUSE_SWAP_OFFSET 8
117 121
122/* Exynos7 specific registers */
123#define EXYNOS7_THD_TEMP_RISE7_6 0x50
124#define EXYNOS7_THD_TEMP_FALL7_6 0x60
125#define EXYNOS7_TMU_REG_INTEN 0x110
126#define EXYNOS7_TMU_REG_INTPEND 0x118
127#define EXYNOS7_TMU_REG_EMUL_CON 0x160
128
129#define EXYNOS7_TMU_TEMP_MASK 0x1ff
130#define EXYNOS7_PD_DET_EN_SHIFT 23
131#define EXYNOS7_TMU_INTEN_RISE0_SHIFT 0
132#define EXYNOS7_TMU_INTEN_RISE1_SHIFT 1
133#define EXYNOS7_TMU_INTEN_RISE2_SHIFT 2
134#define EXYNOS7_TMU_INTEN_RISE3_SHIFT 3
135#define EXYNOS7_TMU_INTEN_RISE4_SHIFT 4
136#define EXYNOS7_TMU_INTEN_RISE5_SHIFT 5
137#define EXYNOS7_TMU_INTEN_RISE6_SHIFT 6
138#define EXYNOS7_TMU_INTEN_RISE7_SHIFT 7
139#define EXYNOS7_EMUL_DATA_SHIFT 7
140#define EXYNOS7_EMUL_DATA_MASK 0x1ff
141
142#define MCELSIUS 1000
118/** 143/**
119 * struct exynos_tmu_data : A structure to hold the private data of the TMU 144 * struct exynos_tmu_data : A structure to hold the private data of the TMU
120 driver 145 driver
@@ -128,6 +153,7 @@
128 * @lock: lock to implement synchronization. 153 * @lock: lock to implement synchronization.
129 * @clk: pointer to the clock structure. 154 * @clk: pointer to the clock structure.
130 * @clk_sec: pointer to the clock structure for accessing the base_second. 155 * @clk_sec: pointer to the clock structure for accessing the base_second.
156 * @sclk: pointer to the clock structure for accessing the tmu special clk.
131 * @temp_error1: fused value of the first point trim. 157 * @temp_error1: fused value of the first point trim.
132 * @temp_error2: fused value of the second point trim. 158 * @temp_error2: fused value of the second point trim.
133 * @regulator: pointer to the TMU regulator structure. 159 * @regulator: pointer to the TMU regulator structure.
@@ -147,10 +173,11 @@ struct exynos_tmu_data {
147 enum soc_type soc; 173 enum soc_type soc;
148 struct work_struct irq_work; 174 struct work_struct irq_work;
149 struct mutex lock; 175 struct mutex lock;
150 struct clk *clk, *clk_sec; 176 struct clk *clk, *clk_sec, *sclk;
151 u8 temp_error1, temp_error2; 177 u16 temp_error1, temp_error2;
152 struct regulator *regulator; 178 struct regulator *regulator;
153 struct thermal_sensor_conf *reg_conf; 179 struct thermal_zone_device *tzd;
180
154 int (*tmu_initialize)(struct platform_device *pdev); 181 int (*tmu_initialize)(struct platform_device *pdev);
155 void (*tmu_control)(struct platform_device *pdev, bool on); 182 void (*tmu_control)(struct platform_device *pdev, bool on);
156 int (*tmu_read)(struct exynos_tmu_data *data); 183 int (*tmu_read)(struct exynos_tmu_data *data);
@@ -159,6 +186,33 @@ struct exynos_tmu_data {
159 void (*tmu_clear_irqs)(struct exynos_tmu_data *data); 186 void (*tmu_clear_irqs)(struct exynos_tmu_data *data);
160}; 187};
161 188
189static void exynos_report_trigger(struct exynos_tmu_data *p)
190{
191 char data[10], *envp[] = { data, NULL };
192 struct thermal_zone_device *tz = p->tzd;
193 unsigned long temp;
194 unsigned int i;
195
196 if (!tz) {
197 pr_err("No thermal zone device defined\n");
198 return;
199 }
200
201 thermal_zone_device_update(tz);
202
203 mutex_lock(&tz->lock);
204 /* Find the level for which trip happened */
205 for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
206 tz->ops->get_trip_temp(tz, i, &temp);
207 if (tz->last_temperature < temp)
208 break;
209 }
210
211 snprintf(data, sizeof(data), "%u", i);
212 kobject_uevent_env(&tz->device.kobj, KOBJ_CHANGE, envp);
213 mutex_unlock(&tz->lock);
214}
215
162/* 216/*
163 * TMU treats temperature as a mapped temperature code. 217 * TMU treats temperature as a mapped temperature code.
164 * The temperature is converted differently depending on the calibration type. 218 * The temperature is converted differently depending on the calibration type.
@@ -190,7 +244,7 @@ static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
190 * Calculate a temperature value from a temperature code. 244 * Calculate a temperature value from a temperature code.
191 * The unit of the temperature is degree Celsius. 245 * The unit of the temperature is degree Celsius.
192 */ 246 */
193static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code) 247static int code_to_temp(struct exynos_tmu_data *data, u16 temp_code)
194{ 248{
195 struct exynos_tmu_platform_data *pdata = data->pdata; 249 struct exynos_tmu_platform_data *pdata = data->pdata;
196 int temp; 250 int temp;
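The u8-to-u16 widening above exists because Exynos7 reports 9-bit temperature codes. For readers unfamiliar with the trimming math, here is a runnable sketch of the two-point calibration case; the interpolation follows the driver's surrounding definitions, while the concrete trim codes are made up (real values come from eFuses and vary per chip):

#include <stdio.h>

#define TEMP_ERROR1     55      /* illustrative code fused at 25 degC */
#define TEMP_ERROR2     85      /* illustrative code fused at 85 degC */
#define FIRST_POINT     25      /* first_point_trim */
#define SECOND_POINT    85      /* second_point_trim */

/* Two-point trimming: linear interpolation between the two
 * fused calibration points. */
static int code_to_temp(int temp_code)
{
        return (temp_code - TEMP_ERROR1) * (SECOND_POINT - FIRST_POINT) /
                (TEMP_ERROR2 - TEMP_ERROR1) + FIRST_POINT;
}

int main(void)
{
        printf("%d\n", code_to_temp(70));       /* midway code -> 55 degC */
        return 0;
}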
@@ -234,14 +288,25 @@ static void sanitize_temp_error(struct exynos_tmu_data *data, u32 trim_info)
234 288
235static u32 get_th_reg(struct exynos_tmu_data *data, u32 threshold, bool falling) 289static u32 get_th_reg(struct exynos_tmu_data *data, u32 threshold, bool falling)
236{ 290{
237 struct exynos_tmu_platform_data *pdata = data->pdata; 291 struct thermal_zone_device *tz = data->tzd;
292 const struct thermal_trip * const trips =
293 of_thermal_get_trip_points(tz);
294 unsigned long temp;
238 int i; 295 int i;
239 296
240 for (i = 0; i < pdata->non_hw_trigger_levels; i++) { 297 if (!trips) {
241 u8 temp = pdata->trigger_levels[i]; 298 pr_err("%s: Cannot get trip points from of-thermal.c!\n",
299 __func__);
300 return 0;
301 }
302
303 for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
304 if (trips[i].type == THERMAL_TRIP_CRITICAL)
305 continue;
242 306
307 temp = trips[i].temperature / MCELSIUS;
243 if (falling) 308 if (falling)
244 temp -= pdata->threshold_falling; 309 temp -= (trips[i].hysteresis / MCELSIUS);
245 else 310 else
246 threshold &= ~(0xff << 8 * i); 311 threshold &= ~(0xff << 8 * i);
247 312
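get_th_reg() packs one 8-bit temperature code per trip into a 32-bit threshold word, byte i for trip i, skipping CRITICAL trips (those are programmed separately as the hardware trip). A standalone sketch of just the packing, with made-up codes:

#include <stdio.h>

static unsigned int pack_threshold(const unsigned char *codes, int ntrips)
{
        unsigned int threshold = 0;
        int i;

        for (i = 0; i < ntrips && i < 4; i++) {
                threshold &= ~(0xffu << (8 * i));       /* clear byte i */
                threshold |= (unsigned int)codes[i] << (8 * i);
        }
        return threshold;
}

int main(void)
{
        unsigned char codes[] = { 0x50, 0x69, 0x78 };   /* illustrative */

        printf("0x%08x\n", pack_threshold(codes, 3));   /* 0x00786950 */
        return 0;
}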
@@ -305,9 +370,19 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
305static int exynos4210_tmu_initialize(struct platform_device *pdev) 370static int exynos4210_tmu_initialize(struct platform_device *pdev)
306{ 371{
307 struct exynos_tmu_data *data = platform_get_drvdata(pdev); 372 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
308 struct exynos_tmu_platform_data *pdata = data->pdata; 373 struct thermal_zone_device *tz = data->tzd;
309 unsigned int status; 374 const struct thermal_trip * const trips =
375 of_thermal_get_trip_points(tz);
310 int ret = 0, threshold_code, i; 376 int ret = 0, threshold_code, i;
377 unsigned long reference, temp;
378 unsigned int status;
379
380 if (!trips) {
381 pr_err("%s: Cannot get trip points from of-thermal.c!\n",
382 __func__);
383 ret = -ENODEV;
384 goto out;
385 }
311 386
312 status = readb(data->base + EXYNOS_TMU_REG_STATUS); 387 status = readb(data->base + EXYNOS_TMU_REG_STATUS);
313 if (!status) { 388 if (!status) {
@@ -318,12 +393,19 @@ static int exynos4210_tmu_initialize(struct platform_device *pdev)
318 sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO)); 393 sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO));
319 394
320 /* Write temperature code for threshold */ 395 /* Write temperature code for threshold */
321 threshold_code = temp_to_code(data, pdata->threshold); 396 reference = trips[0].temperature / MCELSIUS;
397 threshold_code = temp_to_code(data, reference);
398 if (threshold_code < 0) {
399 ret = threshold_code;
400 goto out;
401 }
322 writeb(threshold_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP); 402 writeb(threshold_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
323 403
324 for (i = 0; i < pdata->non_hw_trigger_levels; i++) 404 for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
325 writeb(pdata->trigger_levels[i], data->base + 405 temp = trips[i].temperature / MCELSIUS;
406 writeb(temp - reference, data->base +
326 EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4); 407 EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
408 }
327 409
328 data->tmu_clear_irqs(data); 410 data->tmu_clear_irqs(data);
329out: 411out:
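On Exynos4210 the per-level trigger registers hold offsets from the level-0 reference rather than absolute codes, which is why the loop above writes temp - reference. A quick arithmetic check with illustrative trip temperatures:

#include <stdio.h>

int main(void)
{
        int trips[] = { 70, 95, 110 };  /* degrees Celsius, illustrative */
        int reference = trips[0];
        int i;

        for (i = 0; i < 3; i++)         /* prints 0, 25 and 40 */
                printf("TRIG_LEVEL%d = %d\n", i, trips[i] - reference);
        return 0;
}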
@@ -333,9 +415,11 @@ out:
333static int exynos4412_tmu_initialize(struct platform_device *pdev) 415static int exynos4412_tmu_initialize(struct platform_device *pdev)
334{ 416{
335 struct exynos_tmu_data *data = platform_get_drvdata(pdev); 417 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
336 struct exynos_tmu_platform_data *pdata = data->pdata; 418 const struct thermal_trip * const trips =
419 of_thermal_get_trip_points(data->tzd);
337 unsigned int status, trim_info, con, ctrl, rising_threshold; 420 unsigned int status, trim_info, con, ctrl, rising_threshold;
338 int ret = 0, threshold_code, i; 421 int ret = 0, threshold_code, i;
422 unsigned long crit_temp = 0;
339 423
340 status = readb(data->base + EXYNOS_TMU_REG_STATUS); 424 status = readb(data->base + EXYNOS_TMU_REG_STATUS);
341 if (!status) { 425 if (!status) {
@@ -373,17 +457,29 @@ static int exynos4412_tmu_initialize(struct platform_device *pdev)
373 data->tmu_clear_irqs(data); 457 data->tmu_clear_irqs(data);
374 458
375 /* if last threshold limit is also present */ 459 /* if last threshold limit is also present */
376 i = pdata->max_trigger_level - 1; 460 for (i = 0; i < of_thermal_get_ntrips(data->tzd); i++) {
377 if (pdata->trigger_levels[i] && pdata->trigger_type[i] == HW_TRIP) { 461 if (trips[i].type == THERMAL_TRIP_CRITICAL) {
378 threshold_code = temp_to_code(data, pdata->trigger_levels[i]); 462 crit_temp = trips[i].temperature;
379 /* 1-4 level to be assigned in th0 reg */ 463 break;
380 rising_threshold &= ~(0xff << 8 * i); 464 }
381 rising_threshold |= threshold_code << 8 * i; 465 }
382 writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE); 466
383 con = readl(data->base + EXYNOS_TMU_REG_CONTROL); 467 if (i == of_thermal_get_ntrips(data->tzd)) {
384 con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT); 468 pr_err("%s: No CRITICAL trip point defined at of-thermal.c!\n",
385 writel(con, data->base + EXYNOS_TMU_REG_CONTROL); 469 __func__);
470 ret = -EINVAL;
471 goto out;
386 } 472 }
473
474 threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
475 /* 1-4 level to be assigned in th0 reg */
476 rising_threshold &= ~(0xff << 8 * i);
477 rising_threshold |= threshold_code << 8 * i;
478 writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
479 con = readl(data->base + EXYNOS_TMU_REG_CONTROL);
480 con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
481 writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
482
387out: 483out:
388 return ret; 484 return ret;
389} 485}
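The rewritten block above replaces the old convention of treating the last trigger level as the hardware trip: the shutdown threshold is now whichever of-thermal trip carries type THERMAL_TRIP_CRITICAL, and its absence is an error. A reduced, runnable sketch of that search, with illustrative trip types:

#include <stdio.h>

enum trip_type { ACTIVE, PASSIVE, HOT, CRITICAL };

int main(void)
{
        enum trip_type types[] = { ACTIVE, ACTIVE, PASSIVE, CRITICAL };
        int temps[] = { 70, 95, 110, 120 };     /* degrees Celsius */
        int i, ntrips = 4, crit_temp = 0;

        for (i = 0; i < ntrips; i++) {
                if (types[i] == CRITICAL) {
                        crit_temp = temps[i];
                        break;
                }
        }
        if (i == ntrips)
                printf("no CRITICAL trip defined\n");
        else    /* prints: HW trip at 120 degC (level 3) */
                printf("HW trip at %d degC (level %d)\n", crit_temp, i);
        return 0;
}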
@@ -391,9 +487,9 @@ out:
391static int exynos5440_tmu_initialize(struct platform_device *pdev) 487static int exynos5440_tmu_initialize(struct platform_device *pdev)
392{ 488{
393 struct exynos_tmu_data *data = platform_get_drvdata(pdev); 489 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
394 struct exynos_tmu_platform_data *pdata = data->pdata;
395 unsigned int trim_info = 0, con, rising_threshold; 490 unsigned int trim_info = 0, con, rising_threshold;
396 int ret = 0, threshold_code, i; 491 int ret = 0, threshold_code;
492 unsigned long crit_temp = 0;
397 493
398 /* 494 /*
399 * For exynos5440 soc triminfo value is swapped between TMU0 and 495 * For exynos5440 soc triminfo value is swapped between TMU0 and
@@ -422,9 +518,8 @@ static int exynos5440_tmu_initialize(struct platform_device *pdev)
422 data->tmu_clear_irqs(data); 518 data->tmu_clear_irqs(data);
423 519
424 /* if last threshold limit is also present */ 520 /* if last threshold limit is also present */
425 i = pdata->max_trigger_level - 1; 521 if (!data->tzd->ops->get_crit_temp(data->tzd, &crit_temp)) {
426 if (pdata->trigger_levels[i] && pdata->trigger_type[i] == HW_TRIP) { 522 threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
427 threshold_code = temp_to_code(data, pdata->trigger_levels[i]);
428 /* 5th level to be assigned in th2 reg */ 523 /* 5th level to be assigned in th2 reg */
429 rising_threshold = 524 rising_threshold =
430 threshold_code << EXYNOS5440_TMU_TH_RISE4_SHIFT; 525 threshold_code << EXYNOS5440_TMU_TH_RISE4_SHIFT;
@@ -439,10 +534,88 @@ static int exynos5440_tmu_initialize(struct platform_device *pdev)
439 return ret; 534 return ret;
440} 535}
441 536
442static void exynos4210_tmu_control(struct platform_device *pdev, bool on) 537static int exynos7_tmu_initialize(struct platform_device *pdev)
443{ 538{
444 struct exynos_tmu_data *data = platform_get_drvdata(pdev); 539 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
540 struct thermal_zone_device *tz = data->tzd;
445 struct exynos_tmu_platform_data *pdata = data->pdata; 541 struct exynos_tmu_platform_data *pdata = data->pdata;
542 unsigned int status, trim_info;
543 unsigned int rising_threshold = 0, falling_threshold = 0;
544 int ret = 0, threshold_code, i;
545 unsigned long temp, temp_hist;
546 unsigned int reg_off, bit_off;
547
548 status = readb(data->base + EXYNOS_TMU_REG_STATUS);
549 if (!status) {
550 ret = -EBUSY;
551 goto out;
552 }
553
554 trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
555
556 data->temp_error1 = trim_info & EXYNOS7_TMU_TEMP_MASK;
557 if (!data->temp_error1 ||
558 (pdata->min_efuse_value > data->temp_error1) ||
559 (data->temp_error1 > pdata->max_efuse_value))
560 data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;
561
562 /* Write temperature code for rising and falling threshold */
563 for (i = (of_thermal_get_ntrips(tz) - 1); i >= 0; i--) {
564 /*
565 * On exynos7 there are 4 rising and 4 falling threshold
566 * registers (0x50-0x5c and 0x60-0x6c respectively). Each
567 * register holds the value of two threshold levels (at bit
568 * offsets 0 and 16). Based on the fact that there are at most
569 * eight possible trigger levels, calculate the register and
570 * bit offsets where the threshold levels are to be written.
571 *
572 * e.g. EXYNOS7_THD_TEMP_RISE7_6 (0x50)
573 * [24:16] - Threshold level 7
574 * [8:0] - Threshold level 6
575 * e.g. EXYNOS7_THD_TEMP_RISE5_4 (0x54)
576 * [24:16] - Threshold level 5
577 * [8:0] - Threshold level 4
578 *
579 * and similarly for falling thresholds.
580 *
581 * Based on the above, calculate the register and bit offsets
582 * for rising/falling threshold levels and populate them.
583 */
584 reg_off = ((7 - i) / 2) * 4;
585 bit_off = ((8 - i) % 2);
586
587 tz->ops->get_trip_temp(tz, i, &temp);
588 temp /= MCELSIUS;
589
590 tz->ops->get_trip_hyst(tz, i, &temp_hist);
591 temp_hist = temp - (temp_hist / MCELSIUS);
592
593 /* Set 9-bit temperature code for rising threshold levels */
594 threshold_code = temp_to_code(data, temp);
595 rising_threshold = readl(data->base +
596 EXYNOS7_THD_TEMP_RISE7_6 + reg_off);
597 rising_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
598 rising_threshold |= threshold_code << (16 * bit_off);
599 writel(rising_threshold,
600 data->base + EXYNOS7_THD_TEMP_RISE7_6 + reg_off);
601
602 /* Set 9-bit temperature code for falling threshold levels */
603 threshold_code = temp_to_code(data, temp_hist);
604 falling_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
605 falling_threshold |= threshold_code << (16 * bit_off);
606 writel(falling_threshold,
607 data->base + EXYNOS7_THD_TEMP_FALL7_6 + reg_off);
608 }
609
610 data->tmu_clear_irqs(data);
611out:
612 return ret;
613}
614
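The reg_off/bit_off derivation documented in the long comment above is easy to sanity-check in isolation. This runnable sketch reproduces it for all eight levels and prints which register and bit range each one lands in:

#include <stdio.h>

#define EXYNOS7_THD_TEMP_RISE7_6        0x50

int main(void)
{
        int i;

        for (i = 7; i >= 0; i--) {
                unsigned int reg_off = ((7 - i) / 2) * 4;
                unsigned int bit_off = (8 - i) % 2;

                /* level 7 -> reg 0x50, bits [24:16];
                 * level 0 -> reg 0x5c, bits [8:0] */
                printf("level %d -> reg 0x%02x, bits [%u:%u]\n",
                       i, EXYNOS7_THD_TEMP_RISE7_6 + reg_off,
                       16 * bit_off + 8, 16 * bit_off);
        }
        return 0;
}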
615static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
616{
617 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
618 struct thermal_zone_device *tz = data->tzd;
446 unsigned int con, interrupt_en; 619 unsigned int con, interrupt_en;
447 620
448 con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL)); 621 con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
@@ -450,10 +623,15 @@ static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
450 if (on) { 623 if (on) {
451 con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT); 624 con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
452 interrupt_en = 625 interrupt_en =
453 pdata->trigger_enable[3] << EXYNOS_TMU_INTEN_RISE3_SHIFT | 626 (of_thermal_is_trip_valid(tz, 3)
454 pdata->trigger_enable[2] << EXYNOS_TMU_INTEN_RISE2_SHIFT | 627 << EXYNOS_TMU_INTEN_RISE3_SHIFT) |
455 pdata->trigger_enable[1] << EXYNOS_TMU_INTEN_RISE1_SHIFT | 628 (of_thermal_is_trip_valid(tz, 2)
456 pdata->trigger_enable[0] << EXYNOS_TMU_INTEN_RISE0_SHIFT; 629 << EXYNOS_TMU_INTEN_RISE2_SHIFT) |
630 (of_thermal_is_trip_valid(tz, 1)
631 << EXYNOS_TMU_INTEN_RISE1_SHIFT) |
632 (of_thermal_is_trip_valid(tz, 0)
633 << EXYNOS_TMU_INTEN_RISE0_SHIFT);
634
457 if (data->soc != SOC_ARCH_EXYNOS4210) 635 if (data->soc != SOC_ARCH_EXYNOS4210)
458 interrupt_en |= 636 interrupt_en |=
459 interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT; 637 interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
@@ -468,7 +646,7 @@ static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
468static void exynos5440_tmu_control(struct platform_device *pdev, bool on) 646static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
469{ 647{
470 struct exynos_tmu_data *data = platform_get_drvdata(pdev); 648 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
471 struct exynos_tmu_platform_data *pdata = data->pdata; 649 struct thermal_zone_device *tz = data->tzd;
472 unsigned int con, interrupt_en; 650 unsigned int con, interrupt_en;
473 651
474 con = get_con_reg(data, readl(data->base + EXYNOS5440_TMU_S0_7_CTRL)); 652 con = get_con_reg(data, readl(data->base + EXYNOS5440_TMU_S0_7_CTRL));
@@ -476,11 +654,16 @@ static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
476 if (on) { 654 if (on) {
477 con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT); 655 con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
478 interrupt_en = 656 interrupt_en =
479 pdata->trigger_enable[3] << EXYNOS5440_TMU_INTEN_RISE3_SHIFT | 657 (of_thermal_is_trip_valid(tz, 3)
480 pdata->trigger_enable[2] << EXYNOS5440_TMU_INTEN_RISE2_SHIFT | 658 << EXYNOS5440_TMU_INTEN_RISE3_SHIFT) |
481 pdata->trigger_enable[1] << EXYNOS5440_TMU_INTEN_RISE1_SHIFT | 659 (of_thermal_is_trip_valid(tz, 2)
482 pdata->trigger_enable[0] << EXYNOS5440_TMU_INTEN_RISE0_SHIFT; 660 << EXYNOS5440_TMU_INTEN_RISE2_SHIFT) |
483 interrupt_en |= interrupt_en << EXYNOS5440_TMU_INTEN_FALL0_SHIFT; 661 (of_thermal_is_trip_valid(tz, 1)
662 << EXYNOS5440_TMU_INTEN_RISE1_SHIFT) |
663 (of_thermal_is_trip_valid(tz, 0)
664 << EXYNOS5440_TMU_INTEN_RISE0_SHIFT);
665 interrupt_en |=
666 interrupt_en << EXYNOS5440_TMU_INTEN_FALL0_SHIFT;
484 } else { 667 } else {
485 con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT); 668 con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
486 interrupt_en = 0; /* Disable all interrupts */ 669 interrupt_en = 0; /* Disable all interrupts */
@@ -489,19 +672,62 @@ static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
489 writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL); 672 writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
490} 673}
491 674
492static int exynos_tmu_read(struct exynos_tmu_data *data) 675static void exynos7_tmu_control(struct platform_device *pdev, bool on)
493{ 676{
494 int ret; 677 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
678 struct thermal_zone_device *tz = data->tzd;
679 unsigned int con, interrupt_en;
680
681 con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
682
683 if (on) {
684 con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
685 interrupt_en =
686 (of_thermal_is_trip_valid(tz, 7)
687 << EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
688 (of_thermal_is_trip_valid(tz, 6)
689 << EXYNOS7_TMU_INTEN_RISE6_SHIFT) |
690 (of_thermal_is_trip_valid(tz, 5)
691 << EXYNOS7_TMU_INTEN_RISE5_SHIFT) |
692 (of_thermal_is_trip_valid(tz, 4)
693 << EXYNOS7_TMU_INTEN_RISE4_SHIFT) |
694 (of_thermal_is_trip_valid(tz, 3)
695 << EXYNOS7_TMU_INTEN_RISE3_SHIFT) |
696 (of_thermal_is_trip_valid(tz, 2)
697 << EXYNOS7_TMU_INTEN_RISE2_SHIFT) |
698 (of_thermal_is_trip_valid(tz, 1)
699 << EXYNOS7_TMU_INTEN_RISE1_SHIFT) |
700 (of_thermal_is_trip_valid(tz, 0)
701 << EXYNOS7_TMU_INTEN_RISE0_SHIFT);
702
703 interrupt_en |=
704 interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
705 } else {
706 con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
707 interrupt_en = 0; /* Disable all interrupts */
708 }
709 con |= 1 << EXYNOS7_PD_DET_EN_SHIFT;
710
711 writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN);
712 writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
713}
714
715static int exynos_get_temp(void *p, long *temp)
716{
717 struct exynos_tmu_data *data = p;
718
719 if (!data || !data->tmu_read)
720 return -EINVAL;
495 721
496 mutex_lock(&data->lock); 722 mutex_lock(&data->lock);
497 clk_enable(data->clk); 723 clk_enable(data->clk);
498 ret = data->tmu_read(data); 724
499 if (ret >= 0) 725 *temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS;
500 ret = code_to_temp(data, ret); 726
501 clk_disable(data->clk); 727 clk_disable(data->clk);
502 mutex_unlock(&data->lock); 728 mutex_unlock(&data->lock);
503 729
504 return ret; 730 return 0;
505} 731}
506 732
507#ifdef CONFIG_THERMAL_EMULATION 733#ifdef CONFIG_THERMAL_EMULATION
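Note the unit change in exynos_get_temp() above: the old exynos_tmu_read() returned plain degrees Celsius and let callers scale, while the of-thermal .get_temp contract requires *temp in millicelsius, hence the multiplication by MCELSIUS. A trivial check of the scaling:

#include <stdio.h>

#define MCELSIUS 1000

int main(void)
{
        int code_temp = 55;     /* degrees Celsius from code_to_temp() */
        long temp = (long)code_temp * MCELSIUS;

        printf("%ld\n", temp);  /* prints 55000 */
        return 0;
}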
@@ -515,9 +741,19 @@ static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val,
515 val &= ~(EXYNOS_EMUL_TIME_MASK << EXYNOS_EMUL_TIME_SHIFT); 741 val &= ~(EXYNOS_EMUL_TIME_MASK << EXYNOS_EMUL_TIME_SHIFT);
516 val |= (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT); 742 val |= (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT);
517 } 743 }
518 val &= ~(EXYNOS_EMUL_DATA_MASK << EXYNOS_EMUL_DATA_SHIFT); 744 if (data->soc == SOC_ARCH_EXYNOS7) {
519 val |= (temp_to_code(data, temp) << EXYNOS_EMUL_DATA_SHIFT) | 745 val &= ~(EXYNOS7_EMUL_DATA_MASK <<
520 EXYNOS_EMUL_ENABLE; 746 EXYNOS7_EMUL_DATA_SHIFT);
747 val |= (temp_to_code(data, temp) <<
748 EXYNOS7_EMUL_DATA_SHIFT) |
749 EXYNOS_EMUL_ENABLE;
750 } else {
751 val &= ~(EXYNOS_EMUL_DATA_MASK <<
752 EXYNOS_EMUL_DATA_SHIFT);
753 val |= (temp_to_code(data, temp) <<
754 EXYNOS_EMUL_DATA_SHIFT) |
755 EXYNOS_EMUL_ENABLE;
756 }
521 } else { 757 } else {
522 val &= ~EXYNOS_EMUL_ENABLE; 758 val &= ~EXYNOS_EMUL_ENABLE;
523 } 759 }
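The Exynos7 branch above reuses the usual clear-then-set pattern, only with the wider 9-bit field at bit 7 defined earlier in this patch (earlier SoCs use a narrower 8-bit field at a different offset). A standalone sketch of the masking with the Exynos7 constants:

#include <stdio.h>

#define EXYNOS7_EMUL_DATA_SHIFT 7
#define EXYNOS7_EMUL_DATA_MASK  0x1ff

int main(void)
{
        unsigned int val = 0xffffffff;
        unsigned int code = 0x1a5;      /* illustrative 9-bit temp code */

        val &= ~(EXYNOS7_EMUL_DATA_MASK << EXYNOS7_EMUL_DATA_SHIFT);
        val |= code << EXYNOS7_EMUL_DATA_SHIFT;

        printf("0x%08x\n", val);        /* prints 0xffffd2ff */
        return 0;
}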
@@ -533,6 +769,8 @@ static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
533 769
534 if (data->soc == SOC_ARCH_EXYNOS5260) 770 if (data->soc == SOC_ARCH_EXYNOS5260)
535 emul_con = EXYNOS5260_EMUL_CON; 771 emul_con = EXYNOS5260_EMUL_CON;
772 else if (data->soc == SOC_ARCH_EXYNOS7)
773 emul_con = EXYNOS7_TMU_REG_EMUL_CON;
536 else 774 else
537 emul_con = EXYNOS_EMUL_CON; 775 emul_con = EXYNOS_EMUL_CON;
538 776
@@ -576,7 +814,7 @@ out:
576#define exynos5440_tmu_set_emulation NULL 814#define exynos5440_tmu_set_emulation NULL
577static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp) 815static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
578 { return -EINVAL; } 816 { return -EINVAL; }
579#endif/*CONFIG_THERMAL_EMULATION*/ 817#endif /* CONFIG_THERMAL_EMULATION */
580 818
581static int exynos4210_tmu_read(struct exynos_tmu_data *data) 819static int exynos4210_tmu_read(struct exynos_tmu_data *data)
582{ 820{
@@ -596,6 +834,12 @@ static int exynos5440_tmu_read(struct exynos_tmu_data *data)
596 return readb(data->base + EXYNOS5440_TMU_S0_7_TEMP); 834 return readb(data->base + EXYNOS5440_TMU_S0_7_TEMP);
597} 835}
598 836
837static int exynos7_tmu_read(struct exynos_tmu_data *data)
838{
839 return readw(data->base + EXYNOS_TMU_REG_CURRENT_TEMP) &
840 EXYNOS7_TMU_TEMP_MASK;
841}
842
599static void exynos_tmu_work(struct work_struct *work) 843static void exynos_tmu_work(struct work_struct *work)
600{ 844{
601 struct exynos_tmu_data *data = container_of(work, 845 struct exynos_tmu_data *data = container_of(work,
@@ -613,7 +857,7 @@ static void exynos_tmu_work(struct work_struct *work)
613 if (!IS_ERR(data->clk_sec)) 857 if (!IS_ERR(data->clk_sec))
614 clk_disable(data->clk_sec); 858 clk_disable(data->clk_sec);
615 859
616 exynos_report_trigger(data->reg_conf); 860 exynos_report_trigger(data);
617 mutex_lock(&data->lock); 861 mutex_lock(&data->lock);
618 clk_enable(data->clk); 862 clk_enable(data->clk);
619 863
@@ -634,6 +878,9 @@ static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data)
634 if (data->soc == SOC_ARCH_EXYNOS5260) { 878 if (data->soc == SOC_ARCH_EXYNOS5260) {
635 tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT; 879 tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT;
636 tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR; 880 tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR;
881 } else if (data->soc == SOC_ARCH_EXYNOS7) {
882 tmu_intstat = EXYNOS7_TMU_REG_INTPEND;
883 tmu_intclear = EXYNOS7_TMU_REG_INTPEND;
637 } else { 884 } else {
638 tmu_intstat = EXYNOS_TMU_REG_INTSTAT; 885 tmu_intstat = EXYNOS_TMU_REG_INTSTAT;
639 tmu_intclear = EXYNOS_TMU_REG_INTCLEAR; 886 tmu_intclear = EXYNOS_TMU_REG_INTCLEAR;
@@ -671,57 +918,78 @@ static irqreturn_t exynos_tmu_irq(int irq, void *id)
671} 918}
672 919
673static const struct of_device_id exynos_tmu_match[] = { 920static const struct of_device_id exynos_tmu_match[] = {
674 { 921 { .compatible = "samsung,exynos3250-tmu", },
675 .compatible = "samsung,exynos3250-tmu", 922 { .compatible = "samsung,exynos4210-tmu", },
676 .data = &exynos3250_default_tmu_data, 923 { .compatible = "samsung,exynos4412-tmu", },
677 }, 924 { .compatible = "samsung,exynos5250-tmu", },
678 { 925 { .compatible = "samsung,exynos5260-tmu", },
679 .compatible = "samsung,exynos4210-tmu", 926 { .compatible = "samsung,exynos5420-tmu", },
680 .data = &exynos4210_default_tmu_data, 927 { .compatible = "samsung,exynos5420-tmu-ext-triminfo", },
681 }, 928 { .compatible = "samsung,exynos5440-tmu", },
682 { 929 { .compatible = "samsung,exynos7-tmu", },
683 .compatible = "samsung,exynos4412-tmu", 930 { /* sentinel */ },
684 .data = &exynos4412_default_tmu_data,
685 },
686 {
687 .compatible = "samsung,exynos5250-tmu",
688 .data = &exynos5250_default_tmu_data,
689 },
690 {
691 .compatible = "samsung,exynos5260-tmu",
692 .data = &exynos5260_default_tmu_data,
693 },
694 {
695 .compatible = "samsung,exynos5420-tmu",
696 .data = &exynos5420_default_tmu_data,
697 },
698 {
699 .compatible = "samsung,exynos5420-tmu-ext-triminfo",
700 .data = &exynos5420_default_tmu_data,
701 },
702 {
703 .compatible = "samsung,exynos5440-tmu",
704 .data = &exynos5440_default_tmu_data,
705 },
706 {},
707}; 931};
708MODULE_DEVICE_TABLE(of, exynos_tmu_match); 932MODULE_DEVICE_TABLE(of, exynos_tmu_match);
709 933
710static inline struct exynos_tmu_platform_data *exynos_get_driver_data( 934static int exynos_of_get_soc_type(struct device_node *np)
711 struct platform_device *pdev, int id) 935{
936 if (of_device_is_compatible(np, "samsung,exynos3250-tmu"))
937 return SOC_ARCH_EXYNOS3250;
938 else if (of_device_is_compatible(np, "samsung,exynos4210-tmu"))
939 return SOC_ARCH_EXYNOS4210;
940 else if (of_device_is_compatible(np, "samsung,exynos4412-tmu"))
941 return SOC_ARCH_EXYNOS4412;
942 else if (of_device_is_compatible(np, "samsung,exynos5250-tmu"))
943 return SOC_ARCH_EXYNOS5250;
944 else if (of_device_is_compatible(np, "samsung,exynos5260-tmu"))
945 return SOC_ARCH_EXYNOS5260;
946 else if (of_device_is_compatible(np, "samsung,exynos5420-tmu"))
947 return SOC_ARCH_EXYNOS5420;
948 else if (of_device_is_compatible(np,
949 "samsung,exynos5420-tmu-ext-triminfo"))
950 return SOC_ARCH_EXYNOS5420_TRIMINFO;
951 else if (of_device_is_compatible(np, "samsung,exynos5440-tmu"))
952 return SOC_ARCH_EXYNOS5440;
953 else if (of_device_is_compatible(np, "samsung,exynos7-tmu"))
954 return SOC_ARCH_EXYNOS7;
955
956 return -EINVAL;
957}
958
959static int exynos_of_sensor_conf(struct device_node *np,
960 struct exynos_tmu_platform_data *pdata)
712{ 961{
713 struct exynos_tmu_init_data *data_table; 962 u32 value;
714 struct exynos_tmu_platform_data *tmu_data; 963 int ret;
715 const struct of_device_id *match;
716 964
717 match = of_match_node(exynos_tmu_match, pdev->dev.of_node); 965 of_node_get(np);
718 if (!match) 966
719 return NULL; 967 ret = of_property_read_u32(np, "samsung,tmu_gain", &value);
720 data_table = (struct exynos_tmu_init_data *) match->data; 968 pdata->gain = (u8)value;
721 if (!data_table || id >= data_table->tmu_count) 969 of_property_read_u32(np, "samsung,tmu_reference_voltage", &value);
722 return NULL; 970 pdata->reference_voltage = (u8)value;
723 tmu_data = data_table->tmu_data; 971 of_property_read_u32(np, "samsung,tmu_noise_cancel_mode", &value);
724 return (struct exynos_tmu_platform_data *) (tmu_data + id); 972 pdata->noise_cancel_mode = (u8)value;
973
974 of_property_read_u32(np, "samsung,tmu_efuse_value",
975 &pdata->efuse_value);
976 of_property_read_u32(np, "samsung,tmu_min_efuse_value",
977 &pdata->min_efuse_value);
978 of_property_read_u32(np, "samsung,tmu_max_efuse_value",
979 &pdata->max_efuse_value);
980
981 of_property_read_u32(np, "samsung,tmu_first_point_trim", &value);
982 pdata->first_point_trim = (u8)value;
983 of_property_read_u32(np, "samsung,tmu_second_point_trim", &value);
984 pdata->second_point_trim = (u8)value;
985 of_property_read_u32(np, "samsung,tmu_default_temp_offset", &value);
986 pdata->default_temp_offset = (u8)value;
987
988 of_property_read_u32(np, "samsung,tmu_cal_type", &pdata->cal_type);
989 of_property_read_u32(np, "samsung,tmu_cal_mode", &pdata->cal_mode);
990
991 of_node_put(np);
992 return 0;
725} 993}
726 994
727static int exynos_map_dt_data(struct platform_device *pdev) 995static int exynos_map_dt_data(struct platform_device *pdev)
@@ -771,14 +1039,15 @@ static int exynos_map_dt_data(struct platform_device *pdev)
771 return -EADDRNOTAVAIL; 1039 return -EADDRNOTAVAIL;
772 } 1040 }
773 1041
774 pdata = exynos_get_driver_data(pdev, data->id); 1042 pdata = devm_kzalloc(&pdev->dev,
775 if (!pdata) { 1043 sizeof(struct exynos_tmu_platform_data),
776 dev_err(&pdev->dev, "No platform init data supplied.\n"); 1044 GFP_KERNEL);
777 return -ENODEV; 1045 if (!pdata)
778 } 1046 return -ENOMEM;
779 1047
1048 exynos_of_sensor_conf(pdev->dev.of_node, pdata);
780 data->pdata = pdata; 1049 data->pdata = pdata;
781 data->soc = pdata->type; 1050 data->soc = exynos_of_get_soc_type(pdev->dev.of_node);
782 1051
783 switch (data->soc) { 1052 switch (data->soc) {
784 case SOC_ARCH_EXYNOS4210: 1053 case SOC_ARCH_EXYNOS4210:
@@ -806,6 +1075,13 @@ static int exynos_map_dt_data(struct platform_device *pdev)
806 data->tmu_set_emulation = exynos5440_tmu_set_emulation; 1075 data->tmu_set_emulation = exynos5440_tmu_set_emulation;
807 data->tmu_clear_irqs = exynos5440_tmu_clear_irqs; 1076 data->tmu_clear_irqs = exynos5440_tmu_clear_irqs;
808 break; 1077 break;
1078 case SOC_ARCH_EXYNOS7:
1079 data->tmu_initialize = exynos7_tmu_initialize;
1080 data->tmu_control = exynos7_tmu_control;
1081 data->tmu_read = exynos7_tmu_read;
1082 data->tmu_set_emulation = exynos4412_tmu_set_emulation;
1083 data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
1084 break;
809 default: 1085 default:
810 dev_err(&pdev->dev, "Platform not supported\n"); 1086 dev_err(&pdev->dev, "Platform not supported\n");
811 return -EINVAL; 1087 return -EINVAL;
@@ -834,12 +1110,16 @@ static int exynos_map_dt_data(struct platform_device *pdev)
834 return 0; 1110 return 0;
835} 1111}
836 1112
1113static struct thermal_zone_of_device_ops exynos_sensor_ops = {
1114 .get_temp = exynos_get_temp,
1115 .set_emul_temp = exynos_tmu_set_emulation,
1116};
1117
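exynos_sensor_ops is now the driver's whole surface toward of-thermal. A condensed sketch of the handshake as used below in probe/remove; this is kernel-context pseudocode rather than a standalone program, and my_get_temp/my_ops are hypothetical names:

/* Sketch only: the of-thermal sensor contract in miniature. */
static int my_get_temp(void *p, long *temp)
{
        *temp = 55000;  /* millicelsius, illustrative */
        return 0;
}

static struct thermal_zone_of_device_ops my_ops = {
        .get_temp = my_get_temp,
};

static int my_probe(struct platform_device *pdev)
{
        struct thermal_zone_device *tzd;

        tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, NULL, &my_ops);
        if (IS_ERR(tzd))
                return PTR_ERR(tzd);
        /* ... and on remove:
         * thermal_zone_of_sensor_unregister(&pdev->dev, tzd); */
        return 0;
}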
837static int exynos_tmu_probe(struct platform_device *pdev) 1118static int exynos_tmu_probe(struct platform_device *pdev)
838{ 1119{
839 struct exynos_tmu_data *data;
840 struct exynos_tmu_platform_data *pdata; 1120 struct exynos_tmu_platform_data *pdata;
841 struct thermal_sensor_conf *sensor_conf; 1121 struct exynos_tmu_data *data;
842 int ret, i; 1122 int ret;
843 1123
844 data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data), 1124 data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
845 GFP_KERNEL); 1125 GFP_KERNEL);
@@ -849,9 +1129,15 @@ static int exynos_tmu_probe(struct platform_device *pdev)
849 platform_set_drvdata(pdev, data); 1129 platform_set_drvdata(pdev, data);
850 mutex_init(&data->lock); 1130 mutex_init(&data->lock);
851 1131
1132 data->tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, data,
1133 &exynos_sensor_ops);
1134 if (IS_ERR(data->tzd)) {
1135 pr_err("thermal: tz: %p ERROR\n", data->tzd);
1136 return PTR_ERR(data->tzd);
1137 }
852 ret = exynos_map_dt_data(pdev); 1138 ret = exynos_map_dt_data(pdev);
853 if (ret) 1139 if (ret)
854 return ret; 1140 goto err_sensor;
855 1141
856 pdata = data->pdata; 1142 pdata = data->pdata;
857 1143
@@ -860,20 +1146,22 @@ static int exynos_tmu_probe(struct platform_device *pdev)
860 data->clk = devm_clk_get(&pdev->dev, "tmu_apbif"); 1146 data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
861 if (IS_ERR(data->clk)) { 1147 if (IS_ERR(data->clk)) {
862 dev_err(&pdev->dev, "Failed to get clock\n"); 1148 dev_err(&pdev->dev, "Failed to get clock\n");
863 return PTR_ERR(data->clk); 1149 ret = PTR_ERR(data->clk);
1150 goto err_sensor;
864 } 1151 }
865 1152
866 data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif"); 1153 data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
867 if (IS_ERR(data->clk_sec)) { 1154 if (IS_ERR(data->clk_sec)) {
868 if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) { 1155 if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
869 dev_err(&pdev->dev, "Failed to get triminfo clock\n"); 1156 dev_err(&pdev->dev, "Failed to get triminfo clock\n");
870 return PTR_ERR(data->clk_sec); 1157 ret = PTR_ERR(data->clk_sec);
1158 goto err_sensor;
871 } 1159 }
872 } else { 1160 } else {
873 ret = clk_prepare(data->clk_sec); 1161 ret = clk_prepare(data->clk_sec);
874 if (ret) { 1162 if (ret) {
875 dev_err(&pdev->dev, "Failed to get clock\n"); 1163 dev_err(&pdev->dev, "Failed to get clock\n");
876 return ret; 1164 goto err_sensor;
877 } 1165 }
878 } 1166 }
879 1167
@@ -883,82 +1171,57 @@ static int exynos_tmu_probe(struct platform_device *pdev)
883 goto err_clk_sec; 1171 goto err_clk_sec;
884 } 1172 }
885 1173
886 ret = exynos_tmu_initialize(pdev); 1174 if (data->soc == SOC_ARCH_EXYNOS7) {
887 if (ret) { 1175 data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk");
888 dev_err(&pdev->dev, "Failed to initialize TMU\n"); 1176 if (IS_ERR(data->sclk)) {
889 goto err_clk; 1177 dev_err(&pdev->dev, "Failed to get sclk\n");
1178 goto err_clk;
1179 } else {
1180 ret = clk_prepare_enable(data->sclk);
1181 if (ret) {
1182 dev_err(&pdev->dev, "Failed to enable sclk\n");
1183 goto err_clk;
1184 }
1185 }
890 } 1186 }
891 1187
892 exynos_tmu_control(pdev, true); 1188 ret = exynos_tmu_initialize(pdev);
893
894 /* Allocate a structure to register with the exynos core thermal */
895 sensor_conf = devm_kzalloc(&pdev->dev,
896 sizeof(struct thermal_sensor_conf), GFP_KERNEL);
897 if (!sensor_conf) {
898 ret = -ENOMEM;
899 goto err_clk;
900 }
901 sprintf(sensor_conf->name, "therm_zone%d", data->id);
902 sensor_conf->read_temperature = (int (*)(void *))exynos_tmu_read;
903 sensor_conf->write_emul_temp =
904 (int (*)(void *, unsigned long))exynos_tmu_set_emulation;
905 sensor_conf->driver_data = data;
906 sensor_conf->trip_data.trip_count = pdata->trigger_enable[0] +
907 pdata->trigger_enable[1] + pdata->trigger_enable[2]+
908 pdata->trigger_enable[3];
909
910 for (i = 0; i < sensor_conf->trip_data.trip_count; i++) {
911 sensor_conf->trip_data.trip_val[i] =
912 pdata->threshold + pdata->trigger_levels[i];
913 sensor_conf->trip_data.trip_type[i] =
914 pdata->trigger_type[i];
915 }
916
917 sensor_conf->trip_data.trigger_falling = pdata->threshold_falling;
918
919 sensor_conf->cooling_data.freq_clip_count = pdata->freq_tab_count;
920 for (i = 0; i < pdata->freq_tab_count; i++) {
921 sensor_conf->cooling_data.freq_data[i].freq_clip_max =
922 pdata->freq_tab[i].freq_clip_max;
923 sensor_conf->cooling_data.freq_data[i].temp_level =
924 pdata->freq_tab[i].temp_level;
925 }
926 sensor_conf->dev = &pdev->dev;
927 /* Register the sensor with thermal management interface */
928 ret = exynos_register_thermal(sensor_conf);
929 if (ret) { 1189 if (ret) {
930 if (ret != -EPROBE_DEFER) 1190 dev_err(&pdev->dev, "Failed to initialize TMU\n");
931 dev_err(&pdev->dev, 1191 goto err_sclk;
932 "Failed to register thermal interface: %d\n",
933 ret);
934 goto err_clk;
935 } 1192 }
936 data->reg_conf = sensor_conf;
937 1193
938 ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq, 1194 ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
939 IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data); 1195 IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
940 if (ret) { 1196 if (ret) {
941 dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq); 1197 dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
942 goto err_clk; 1198 goto err_sclk;
943 } 1199 }
944 1200
1201 exynos_tmu_control(pdev, true);
945 return 0; 1202 return 0;
1203err_sclk:
1204 clk_disable_unprepare(data->sclk);
946err_clk: 1205err_clk:
947 clk_unprepare(data->clk); 1206 clk_unprepare(data->clk);
948err_clk_sec: 1207err_clk_sec:
949 if (!IS_ERR(data->clk_sec)) 1208 if (!IS_ERR(data->clk_sec))
950 clk_unprepare(data->clk_sec); 1209 clk_unprepare(data->clk_sec);
1210err_sensor:
1211 thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
1212
951 return ret; 1213 return ret;
952} 1214}
953 1215
954static int exynos_tmu_remove(struct platform_device *pdev) 1216static int exynos_tmu_remove(struct platform_device *pdev)
955{ 1217{
956 struct exynos_tmu_data *data = platform_get_drvdata(pdev); 1218 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
1219 struct thermal_zone_device *tzd = data->tzd;
957 1220
958 exynos_unregister_thermal(data->reg_conf); 1221 thermal_zone_of_sensor_unregister(&pdev->dev, tzd);
959
960 exynos_tmu_control(pdev, false); 1222 exynos_tmu_control(pdev, false);
961 1223
1224 clk_disable_unprepare(data->sclk);
962 clk_unprepare(data->clk); 1225 clk_unprepare(data->clk);
963 if (!IS_ERR(data->clk_sec)) 1226 if (!IS_ERR(data->clk_sec))
964 clk_unprepare(data->clk_sec); 1227 clk_unprepare(data->clk_sec);
diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h
index da3009bff6c4..4d71ec6c9aa0 100644
--- a/drivers/thermal/samsung/exynos_tmu.h
+++ b/drivers/thermal/samsung/exynos_tmu.h
@@ -23,16 +23,7 @@
23#ifndef _EXYNOS_TMU_H 23#ifndef _EXYNOS_TMU_H
24#define _EXYNOS_TMU_H 24#define _EXYNOS_TMU_H
25#include <linux/cpu_cooling.h> 25#include <linux/cpu_cooling.h>
26 26#include <dt-bindings/thermal/thermal_exynos.h>
27#include "exynos_thermal_common.h"
28
29enum calibration_type {
30 TYPE_ONE_POINT_TRIMMING,
31 TYPE_ONE_POINT_TRIMMING_25,
32 TYPE_ONE_POINT_TRIMMING_85,
33 TYPE_TWO_POINT_TRIMMING,
34 TYPE_NONE,
35};
36 27
37enum soc_type { 28enum soc_type {
38 SOC_ARCH_EXYNOS3250 = 1, 29 SOC_ARCH_EXYNOS3250 = 1,
@@ -43,38 +34,11 @@ enum soc_type {
43 SOC_ARCH_EXYNOS5420, 34 SOC_ARCH_EXYNOS5420,
44 SOC_ARCH_EXYNOS5420_TRIMINFO, 35 SOC_ARCH_EXYNOS5420_TRIMINFO,
45 SOC_ARCH_EXYNOS5440, 36 SOC_ARCH_EXYNOS5440,
37 SOC_ARCH_EXYNOS7,
46}; 38};
47 39
48/** 40/**
49 * struct exynos_tmu_platform_data 41 * struct exynos_tmu_platform_data
50 * @threshold: basic temperature for generating interrupt
51 * 25 <= threshold <= 125 [unit: degree Celsius]
52 * @threshold_falling: differntial value for setting threshold
53 * of temperature falling interrupt.
54 * @trigger_levels: array for each interrupt levels
55 * [unit: degree Celsius]
56 * 0: temperature for trigger_level0 interrupt
57 * condition for trigger_level0 interrupt:
58 * current temperature > threshold + trigger_levels[0]
59 * 1: temperature for trigger_level1 interrupt
60 * condition for trigger_level1 interrupt:
61 * current temperature > threshold + trigger_levels[1]
62 * 2: temperature for trigger_level2 interrupt
63 * condition for trigger_level2 interrupt:
64 * current temperature > threshold + trigger_levels[2]
65 * 3: temperature for trigger_level3 interrupt
66 * condition for trigger_level3 interrupt:
67 * current temperature > threshold + trigger_levels[3]
68 * @trigger_type: defines the type of trigger. Possible values are,
69 * THROTTLE_ACTIVE trigger type
70 * THROTTLE_PASSIVE trigger type
71 * SW_TRIP trigger type
72 * HW_TRIP
73 * @trigger_enable[]: array to denote which trigger levels are enabled.
74 * 1 = enable trigger_level[] interrupt,
75 * 0 = disable trigger_level[] interrupt
76 * @max_trigger_level: max trigger level supported by the TMU
77 * @non_hw_trigger_levels: number of defined non-hardware trigger levels
78 * @gain: gain of amplifier in the positive-TC generator block 42 * @gain: gain of amplifier in the positive-TC generator block
79 * 0 < gain <= 15 43 * 0 < gain <= 15
80 * @reference_voltage: reference voltage of amplifier 44 * @reference_voltage: reference voltage of amplifier
@@ -86,24 +50,12 @@ enum soc_type {
86 * @efuse_value: platform defined fuse value 50 * @efuse_value: platform defined fuse value
87 * @min_efuse_value: minimum valid trimming data 51 * @min_efuse_value: minimum valid trimming data
88 * @max_efuse_value: maximum valid trimming data 52 * @max_efuse_value: maximum valid trimming data
89 * @first_point_trim: temp value of the first point trimming
90 * @second_point_trim: temp value of the second point trimming
91 * @default_temp_offset: default temperature offset in case of no trimming 53 * @default_temp_offset: default temperature offset in case of no trimming
92 * @cal_type: calibration type for temperature 54 * @cal_type: calibration type for temperature
93 * @freq_clip_table: Table representing frequency reduction percentage.
94 * @freq_tab_count: Count of the above table as frequency reduction may
95 * applicable to only some of the trigger levels.
96 * 55 *
97 * This structure is required for configuration of exynos_tmu driver. 56 * This structure is required for configuration of exynos_tmu driver.
98 */ 57 */
99struct exynos_tmu_platform_data { 58struct exynos_tmu_platform_data {
100 u8 threshold;
101 u8 threshold_falling;
102 u8 trigger_levels[MAX_TRIP_COUNT];
103 enum trigger_type trigger_type[MAX_TRIP_COUNT];
104 bool trigger_enable[MAX_TRIP_COUNT];
105 u8 max_trigger_level;
106 u8 non_hw_trigger_levels;
107 u8 gain; 59 u8 gain;
108 u8 reference_voltage; 60 u8 reference_voltage;
109 u8 noise_cancel_mode; 61 u8 noise_cancel_mode;
@@ -115,30 +67,9 @@ struct exynos_tmu_platform_data {
115 u8 second_point_trim; 67 u8 second_point_trim;
116 u8 default_temp_offset; 68 u8 default_temp_offset;
117 69
118 enum calibration_type cal_type;
119 enum soc_type type; 70 enum soc_type type;
120 struct freq_clip_table freq_tab[4]; 71 u32 cal_type;
121 unsigned int freq_tab_count; 72 u32 cal_mode;
122};
123
124/**
125 * struct exynos_tmu_init_data
126 * @tmu_count: number of TMU instances.
127 * @tmu_data: platform data of all TMU instances.
128 * This structure is required to store data for multi-instance exynos tmu
129 * driver.
130 */
131struct exynos_tmu_init_data {
132 int tmu_count;
133 struct exynos_tmu_platform_data tmu_data[];
134}; 73};
135 74
136extern struct exynos_tmu_init_data const exynos3250_default_tmu_data;
137extern struct exynos_tmu_init_data const exynos4210_default_tmu_data;
138extern struct exynos_tmu_init_data const exynos4412_default_tmu_data;
139extern struct exynos_tmu_init_data const exynos5250_default_tmu_data;
140extern struct exynos_tmu_init_data const exynos5260_default_tmu_data;
141extern struct exynos_tmu_init_data const exynos5420_default_tmu_data;
142extern struct exynos_tmu_init_data const exynos5440_default_tmu_data;
143
144#endif /* _EXYNOS_TMU_H */ 75#endif /* _EXYNOS_TMU_H */
diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c
deleted file mode 100644
index b23910069f68..000000000000
--- a/drivers/thermal/samsung/exynos_tmu_data.c
+++ /dev/null
@@ -1,264 +0,0 @@
1/*
2 * exynos_tmu_data.c - Samsung EXYNOS tmu data file
3 *
4 * Copyright (C) 2013 Samsung Electronics
5 * Amit Daniel Kachhap <amit.daniel@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include "exynos_thermal_common.h"
24#include "exynos_tmu.h"
25
26struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
27 .tmu_data = {
28 {
29 .threshold = 80,
30 .trigger_levels[0] = 5,
31 .trigger_levels[1] = 20,
32 .trigger_levels[2] = 30,
33 .trigger_enable[0] = true,
34 .trigger_enable[1] = true,
35 .trigger_enable[2] = true,
36 .trigger_enable[3] = false,
37 .trigger_type[0] = THROTTLE_ACTIVE,
38 .trigger_type[1] = THROTTLE_ACTIVE,
39 .trigger_type[2] = SW_TRIP,
40 .max_trigger_level = 4,
41 .non_hw_trigger_levels = 3,
42 .gain = 15,
43 .reference_voltage = 7,
44 .cal_type = TYPE_ONE_POINT_TRIMMING,
45 .min_efuse_value = 40,
46 .max_efuse_value = 100,
47 .first_point_trim = 25,
48 .second_point_trim = 85,
49 .default_temp_offset = 50,
50 .freq_tab[0] = {
51 .freq_clip_max = 800 * 1000,
52 .temp_level = 85,
53 },
54 .freq_tab[1] = {
55 .freq_clip_max = 200 * 1000,
56 .temp_level = 100,
57 },
58 .freq_tab_count = 2,
59 .type = SOC_ARCH_EXYNOS4210,
60 },
61 },
62 .tmu_count = 1,
63};
64
65#define EXYNOS3250_TMU_DATA \
66 .threshold_falling = 10, \
67 .trigger_levels[0] = 70, \
68 .trigger_levels[1] = 95, \
69 .trigger_levels[2] = 110, \
70 .trigger_levels[3] = 120, \
71 .trigger_enable[0] = true, \
72 .trigger_enable[1] = true, \
73 .trigger_enable[2] = true, \
74 .trigger_enable[3] = false, \
75 .trigger_type[0] = THROTTLE_ACTIVE, \
76 .trigger_type[1] = THROTTLE_ACTIVE, \
77 .trigger_type[2] = SW_TRIP, \
78 .trigger_type[3] = HW_TRIP, \
79 .max_trigger_level = 4, \
80 .non_hw_trigger_levels = 3, \
81 .gain = 8, \
82 .reference_voltage = 16, \
83 .noise_cancel_mode = 4, \
84 .cal_type = TYPE_TWO_POINT_TRIMMING, \
85 .efuse_value = 55, \
86 .min_efuse_value = 40, \
87 .max_efuse_value = 100, \
88 .first_point_trim = 25, \
89 .second_point_trim = 85, \
90 .default_temp_offset = 50, \
91 .freq_tab[0] = { \
92 .freq_clip_max = 800 * 1000, \
93 .temp_level = 70, \
94 }, \
95 .freq_tab[1] = { \
96 .freq_clip_max = 400 * 1000, \
97 .temp_level = 95, \
98 }, \
99 .freq_tab_count = 2
100
101struct exynos_tmu_init_data const exynos3250_default_tmu_data = {
102 .tmu_data = {
103 {
104 EXYNOS3250_TMU_DATA,
105 .type = SOC_ARCH_EXYNOS3250,
106 },
107 },
108 .tmu_count = 1,
109};
110
111#define EXYNOS4412_TMU_DATA \
112 .threshold_falling = 10, \
113 .trigger_levels[0] = 70, \
114 .trigger_levels[1] = 95, \
115 .trigger_levels[2] = 110, \
116 .trigger_levels[3] = 120, \
117 .trigger_enable[0] = true, \
118 .trigger_enable[1] = true, \
119 .trigger_enable[2] = true, \
120 .trigger_enable[3] = false, \
121 .trigger_type[0] = THROTTLE_ACTIVE, \
122 .trigger_type[1] = THROTTLE_ACTIVE, \
123 .trigger_type[2] = SW_TRIP, \
124 .trigger_type[3] = HW_TRIP, \
125 .max_trigger_level = 4, \
126 .non_hw_trigger_levels = 3, \
127 .gain = 8, \
128 .reference_voltage = 16, \
129 .noise_cancel_mode = 4, \
130 .cal_type = TYPE_ONE_POINT_TRIMMING, \
131 .efuse_value = 55, \
132 .min_efuse_value = 40, \
133 .max_efuse_value = 100, \
134 .first_point_trim = 25, \
135 .second_point_trim = 85, \
136 .default_temp_offset = 50, \
137 .freq_tab[0] = { \
138 .freq_clip_max = 1400 * 1000, \
139 .temp_level = 70, \
140 }, \
141 .freq_tab[1] = { \
142 .freq_clip_max = 400 * 1000, \
143 .temp_level = 95, \
144 }, \
145 .freq_tab_count = 2
146
147struct exynos_tmu_init_data const exynos4412_default_tmu_data = {
148 .tmu_data = {
149 {
150 EXYNOS4412_TMU_DATA,
151 .type = SOC_ARCH_EXYNOS4412,
152 },
153 },
154 .tmu_count = 1,
155};
156
157struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
158 .tmu_data = {
159 {
160 EXYNOS4412_TMU_DATA,
161 .type = SOC_ARCH_EXYNOS5250,
162 },
163 },
164 .tmu_count = 1,
165};
166
167#define __EXYNOS5260_TMU_DATA \
168 .threshold_falling = 10, \
169 .trigger_levels[0] = 85, \
170 .trigger_levels[1] = 103, \
171 .trigger_levels[2] = 110, \
172 .trigger_levels[3] = 120, \
173 .trigger_enable[0] = true, \
174 .trigger_enable[1] = true, \
175 .trigger_enable[2] = true, \
176 .trigger_enable[3] = false, \
177 .trigger_type[0] = THROTTLE_ACTIVE, \
178 .trigger_type[1] = THROTTLE_ACTIVE, \
179 .trigger_type[2] = SW_TRIP, \
180 .trigger_type[3] = HW_TRIP, \
181 .max_trigger_level = 4, \
182 .non_hw_trigger_levels = 3, \
183 .gain = 8, \
184 .reference_voltage = 16, \
185 .noise_cancel_mode = 4, \
186 .cal_type = TYPE_ONE_POINT_TRIMMING, \
187 .efuse_value = 55, \
188 .min_efuse_value = 40, \
189 .max_efuse_value = 100, \
190 .first_point_trim = 25, \
191 .second_point_trim = 85, \
192 .default_temp_offset = 50, \
193 .freq_tab[0] = { \
194 .freq_clip_max = 800 * 1000, \
195 .temp_level = 85, \
196 }, \
197 .freq_tab[1] = { \
198 .freq_clip_max = 200 * 1000, \
199 .temp_level = 103, \
200 }, \
201 .freq_tab_count = 2, \
202
203#define EXYNOS5260_TMU_DATA \
204 __EXYNOS5260_TMU_DATA \
205 .type = SOC_ARCH_EXYNOS5260
206
207struct exynos_tmu_init_data const exynos5260_default_tmu_data = {
208 .tmu_data = {
209 { EXYNOS5260_TMU_DATA },
210 { EXYNOS5260_TMU_DATA },
211 { EXYNOS5260_TMU_DATA },
212 { EXYNOS5260_TMU_DATA },
213 { EXYNOS5260_TMU_DATA },
214 },
215 .tmu_count = 5,
216};
217
218#define EXYNOS5420_TMU_DATA \
219 __EXYNOS5260_TMU_DATA \
220 .type = SOC_ARCH_EXYNOS5420
221
222#define EXYNOS5420_TMU_DATA_SHARED \
223 __EXYNOS5260_TMU_DATA \
224 .type = SOC_ARCH_EXYNOS5420_TRIMINFO
225
226struct exynos_tmu_init_data const exynos5420_default_tmu_data = {
227 .tmu_data = {
228 { EXYNOS5420_TMU_DATA },
229 { EXYNOS5420_TMU_DATA },
230 { EXYNOS5420_TMU_DATA_SHARED },
231 { EXYNOS5420_TMU_DATA_SHARED },
232 { EXYNOS5420_TMU_DATA_SHARED },
233 },
234 .tmu_count = 5,
235};
236
237#define EXYNOS5440_TMU_DATA \
238 .trigger_levels[0] = 100, \
239 .trigger_levels[4] = 105, \
240 .trigger_enable[0] = 1, \
241 .trigger_type[0] = SW_TRIP, \
242 .trigger_type[4] = HW_TRIP, \
243 .max_trigger_level = 5, \
244 .non_hw_trigger_levels = 1, \
245 .gain = 5, \
246 .reference_voltage = 16, \
247 .noise_cancel_mode = 4, \
248 .cal_type = TYPE_ONE_POINT_TRIMMING, \
249 .efuse_value = 0x5b2d, \
250 .min_efuse_value = 16, \
251 .max_efuse_value = 76, \
252 .first_point_trim = 25, \
253 .second_point_trim = 70, \
254 .default_temp_offset = 25, \
255 .type = SOC_ARCH_EXYNOS5440
256
257struct exynos_tmu_init_data const exynos5440_default_tmu_data = {
258 .tmu_data = {
259 { EXYNOS5440_TMU_DATA },
260 { EXYNOS5440_TMU_DATA },
261 { EXYNOS5440_TMU_DATA },
262 },
263 .tmu_count = 3,
264};
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index fdd1f523a1ed..5a0f12d08e8b 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -45,7 +45,7 @@
45 * c. if the trend is THERMAL_TREND_RAISE_FULL, do nothing 45 * c. if the trend is THERMAL_TREND_RAISE_FULL, do nothing
46 * d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit, 46 * d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit,
47 * if the cooling state already equals lower limit, 47 * if the cooling state already equals lower limit,
48 * deactive the thermal instance 48 * deactivate the thermal instance
49 */ 49 */
50static unsigned long get_target_state(struct thermal_instance *instance, 50static unsigned long get_target_state(struct thermal_instance *instance,
51 enum thermal_trend trend, bool throttle) 51 enum thermal_trend trend, bool throttle)
@@ -169,7 +169,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
169} 169}
170 170
171/** 171/**
172 * step_wise_throttle - throttles devices asscciated with the given zone 172 * step_wise_throttle - throttles devices associated with the given zone
173 * @tz - thermal_zone_device 173 * @tz - thermal_zone_device
174 * @trip - the trip point 174 * @trip - the trip point
175 * @trip_type - type of the trip point 175 * @trip_type - type of the trip point
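
The comment fixed above summarizes the step_wise policy: nudge the cooling state one step along the temperature trend, saturate at the instance limits, and deactivate once a dropping zone reaches its lower limit. A simplified model of that selection, with hypothetical names (the kernel's get_target_state() weighs more inputs than this sketch):

enum trend { TREND_RAISING, TREND_DROPPING, TREND_RAISE_FULL, TREND_DROP_FULL };

/* Simplified next-state choice for one thermal instance. */
unsigned long next_state(unsigned long cur, unsigned long lower,
			 unsigned long upper, enum trend t, int throttle)
{
	switch (t) {
	case TREND_RAISING:
		return (throttle && cur < upper) ? cur + 1 : cur;
	case TREND_DROPPING:
		return (cur > lower) ? cur - 1 : cur;
	case TREND_RAISE_FULL:
		return cur;	/* item c above: do nothing */
	case TREND_DROP_FULL:
		return lower;	/* item d: lower limit; the caller
				 * deactivates if already there */
	}
	return cur;
}
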
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index 634b6ce0e63a..62a5d449c388 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -1402,7 +1402,7 @@ int ti_bandgap_remove(struct platform_device *pdev)
1402 return 0; 1402 return 0;
1403} 1403}
1404 1404
1405#ifdef CONFIG_PM 1405#ifdef CONFIG_PM_SLEEP
1406static int ti_bandgap_save_ctxt(struct ti_bandgap *bgp) 1406static int ti_bandgap_save_ctxt(struct ti_bandgap *bgp)
1407{ 1407{
1408 int i; 1408 int i;
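
The guard change above matters because CONFIG_PM can be enabled (for runtime PM alone) while CONFIG_PM_SLEEP is not, leaving suspend/resume helpers guarded by the broader symbol defined but never referenced. The usual pairing, sketched kernel-style for a hypothetical foo driver:

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	/* save context only when system sleep is actually configured */
	return 0;
}

static int foo_resume(struct device *dev)
{
	return 0;
}
#endif

/* SET_SYSTEM_SLEEP_PM_OPS() inside SIMPLE_DEV_PM_OPS() expands to
 * nothing unless CONFIG_PM_SLEEP, so the guard above must match it or
 * the callbacks become defined-but-unused under PM=y, PM_SLEEP=n. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
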
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 3fb054a10f6a..a38c1756442a 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -429,7 +429,7 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id)
429 429
430 data = ti_bandgap_get_sensor_data(bgp, id); 430 data = ti_bandgap_get_sensor_data(bgp, id);
431 431
432 if (data && data->cool_dev) 432 if (data)
433 cpufreq_cooling_unregister(data->cool_dev); 433 cpufreq_cooling_unregister(data->cool_dev);
434 434
435 return 0; 435 return 0;
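
The simplification above only holds if cpufreq_cooling_unregister() itself tolerates a NULL cooling device, which is the common kernel convention for unregister/free helpers; otherwise dropping the cool_dev test would trade a redundant check for a crash. The idiom, with hypothetical names:

struct cooling_dev;	/* opaque handle, stands in for the real type */

/* NULL-tolerant teardown: the callee owns the NULL check, so every
 * caller may pass whatever it happens to hold. */
void cooling_unregister(struct cooling_dev *cdev)
{
	if (!cdev)
		return;
	/* ... actual release work ... */
}

struct sensor_data {
	struct cooling_dev *cool_dev;
};

void sensor_teardown(struct sensor_data *data)
{
	if (data)
		cooling_unregister(data->cool_dev);
}
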
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 5d916c7a216b..d2501f01cd03 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -489,7 +489,7 @@ config SERIAL_MFD_HSU
489 select SERIAL_CORE 489 select SERIAL_CORE
490 490
491config SERIAL_MFD_HSU_CONSOLE 491config SERIAL_MFD_HSU_CONSOLE
492	boolean "Medfield HSU serial console support" 492	bool "Medfield HSU serial console support"
493 depends on SERIAL_MFD_HSU=y 493 depends on SERIAL_MFD_HSU=y
494 select SERIAL_CORE_CONSOLE 494 select SERIAL_CORE_CONSOLE
495 495
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 96539038c03a..b454d05be583 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -45,7 +45,7 @@ menuconfig USB_GADGET
45if USB_GADGET 45if USB_GADGET
46 46
47config USB_GADGET_DEBUG 47config USB_GADGET_DEBUG
48 boolean "Debugging messages (DEVELOPMENT)" 48 bool "Debugging messages (DEVELOPMENT)"
49 depends on DEBUG_KERNEL 49 depends on DEBUG_KERNEL
50 help 50 help
51 Many controller and gadget drivers will print some debugging 51 Many controller and gadget drivers will print some debugging
@@ -73,7 +73,7 @@ config USB_GADGET_VERBOSE
73 production build. 73 production build.
74 74
75config USB_GADGET_DEBUG_FILES 75config USB_GADGET_DEBUG_FILES
76 boolean "Debugging information files (DEVELOPMENT)" 76 bool "Debugging information files (DEVELOPMENT)"
77 depends on PROC_FS 77 depends on PROC_FS
78 help 78 help
79 Some of the drivers in the "gadget" framework can expose 79 Some of the drivers in the "gadget" framework can expose
@@ -84,7 +84,7 @@ config USB_GADGET_DEBUG_FILES
84 here. If in doubt, or to conserve kernel memory, say "N". 84 here. If in doubt, or to conserve kernel memory, say "N".
85 85
86config USB_GADGET_DEBUG_FS 86config USB_GADGET_DEBUG_FS
87 boolean "Debugging information files in debugfs (DEVELOPMENT)" 87 bool "Debugging information files in debugfs (DEVELOPMENT)"
88 depends on DEBUG_FS 88 depends on DEBUG_FS
89 help 89 help
90 Some of the drivers in the "gadget" framework can expose 90 Some of the drivers in the "gadget" framework can expose
@@ -230,7 +230,7 @@ config USB_CONFIGFS
230 For more information see Documentation/usb/gadget_configfs.txt. 230 For more information see Documentation/usb/gadget_configfs.txt.
231 231
232config USB_CONFIGFS_SERIAL 232config USB_CONFIGFS_SERIAL
233 boolean "Generic serial bulk in/out" 233 bool "Generic serial bulk in/out"
234 depends on USB_CONFIGFS 234 depends on USB_CONFIGFS
235 depends on TTY 235 depends on TTY
236 select USB_U_SERIAL 236 select USB_U_SERIAL
@@ -239,7 +239,7 @@ config USB_CONFIGFS_SERIAL
239 The function talks to the Linux-USB generic serial driver. 239 The function talks to the Linux-USB generic serial driver.
240 240
241config USB_CONFIGFS_ACM 241config USB_CONFIGFS_ACM
242 boolean "Abstract Control Model (CDC ACM)" 242 bool "Abstract Control Model (CDC ACM)"
243 depends on USB_CONFIGFS 243 depends on USB_CONFIGFS
244 depends on TTY 244 depends on TTY
245 select USB_U_SERIAL 245 select USB_U_SERIAL
@@ -249,7 +249,7 @@ config USB_CONFIGFS_ACM
249 MS-Windows hosts or with the Linux-USB "cdc-acm" driver. 249 MS-Windows hosts or with the Linux-USB "cdc-acm" driver.
250 250
251config USB_CONFIGFS_OBEX 251config USB_CONFIGFS_OBEX
252 boolean "Object Exchange Model (CDC OBEX)" 252 bool "Object Exchange Model (CDC OBEX)"
253 depends on USB_CONFIGFS 253 depends on USB_CONFIGFS
254 depends on TTY 254 depends on TTY
255 select USB_U_SERIAL 255 select USB_U_SERIAL
@@ -259,7 +259,7 @@ config USB_CONFIGFS_OBEX
259 since the kernel itself doesn't implement the OBEX protocol. 259 since the kernel itself doesn't implement the OBEX protocol.
260 260
261config USB_CONFIGFS_NCM 261config USB_CONFIGFS_NCM
262 boolean "Network Control Model (CDC NCM)" 262 bool "Network Control Model (CDC NCM)"
263 depends on USB_CONFIGFS 263 depends on USB_CONFIGFS
264 depends on NET 264 depends on NET
265 select USB_U_ETHER 265 select USB_U_ETHER
@@ -270,7 +270,7 @@ config USB_CONFIGFS_NCM
270 different alignment possibilities. 270 different alignment possibilities.
271 271
272config USB_CONFIGFS_ECM 272config USB_CONFIGFS_ECM
273 boolean "Ethernet Control Model (CDC ECM)" 273 bool "Ethernet Control Model (CDC ECM)"
274 depends on USB_CONFIGFS 274 depends on USB_CONFIGFS
275 depends on NET 275 depends on NET
276 select USB_U_ETHER 276 select USB_U_ETHER
@@ -282,7 +282,7 @@ config USB_CONFIGFS_ECM
282 supported by firmware for smart network devices. 282 supported by firmware for smart network devices.
283 283
284config USB_CONFIGFS_ECM_SUBSET 284config USB_CONFIGFS_ECM_SUBSET
285 boolean "Ethernet Control Model (CDC ECM) subset" 285 bool "Ethernet Control Model (CDC ECM) subset"
286 depends on USB_CONFIGFS 286 depends on USB_CONFIGFS
287 depends on NET 287 depends on NET
288 select USB_U_ETHER 288 select USB_U_ETHER
@@ -323,7 +323,7 @@ config USB_CONFIGFS_EEM
323 the host is the same (a usbX device), so the differences are minimal. 323 the host is the same (a usbX device), so the differences are minimal.
324 324
325config USB_CONFIGFS_PHONET 325config USB_CONFIGFS_PHONET
326 boolean "Phonet protocol" 326 bool "Phonet protocol"
327 depends on USB_CONFIGFS 327 depends on USB_CONFIGFS
328 depends on NET 328 depends on NET
329 depends on PHONET 329 depends on PHONET
@@ -333,7 +333,7 @@ config USB_CONFIGFS_PHONET
333 The Phonet protocol implementation for USB device. 333 The Phonet protocol implementation for USB device.
334 334
335config USB_CONFIGFS_MASS_STORAGE 335config USB_CONFIGFS_MASS_STORAGE
336 boolean "Mass storage" 336 bool "Mass storage"
337 depends on USB_CONFIGFS 337 depends on USB_CONFIGFS
338 depends on BLOCK 338 depends on BLOCK
339 select USB_F_MASS_STORAGE 339 select USB_F_MASS_STORAGE
@@ -344,7 +344,7 @@ config USB_CONFIGFS_MASS_STORAGE
344 specified as a module parameter or sysfs option. 344 specified as a module parameter or sysfs option.
345 345
346config USB_CONFIGFS_F_LB_SS 346config USB_CONFIGFS_F_LB_SS
347 boolean "Loopback and sourcesink function (for testing)" 347 bool "Loopback and sourcesink function (for testing)"
348 depends on USB_CONFIGFS 348 depends on USB_CONFIGFS
349 select USB_F_SS_LB 349 select USB_F_SS_LB
350 help 350 help
@@ -357,7 +357,7 @@ config USB_CONFIGFS_F_LB_SS
357 and its driver through a basic set of functional tests. 357 and its driver through a basic set of functional tests.
358 358
359config USB_CONFIGFS_F_FS 359config USB_CONFIGFS_F_FS
360 boolean "Function filesystem (FunctionFS)" 360 bool "Function filesystem (FunctionFS)"
361 depends on USB_CONFIGFS 361 depends on USB_CONFIGFS
362 select USB_F_FS 362 select USB_F_FS
363 help 363 help
@@ -369,7 +369,7 @@ config USB_CONFIGFS_F_FS
369 mass storage) and other are implemented in user space. 369 mass storage) and other are implemented in user space.
370 370
371config USB_CONFIGFS_F_UAC1 371config USB_CONFIGFS_F_UAC1
372 boolean "Audio Class 1.0" 372 bool "Audio Class 1.0"
373 depends on USB_CONFIGFS 373 depends on USB_CONFIGFS
374 depends on SND 374 depends on SND
375 select USB_LIBCOMPOSITE 375 select USB_LIBCOMPOSITE
@@ -382,7 +382,7 @@ config USB_CONFIGFS_F_UAC1
382 on the device. 382 on the device.
383 383
384config USB_CONFIGFS_F_UAC2 384config USB_CONFIGFS_F_UAC2
385 boolean "Audio Class 2.0" 385 bool "Audio Class 2.0"
386 depends on USB_CONFIGFS 386 depends on USB_CONFIGFS
387 depends on SND 387 depends on SND
388 select USB_LIBCOMPOSITE 388 select USB_LIBCOMPOSITE
@@ -400,7 +400,7 @@ config USB_CONFIGFS_F_UAC2
400 wants as audio data to the USB Host. 400 wants as audio data to the USB Host.
401 401
402config USB_CONFIGFS_F_MIDI 402config USB_CONFIGFS_F_MIDI
403 boolean "MIDI function" 403 bool "MIDI function"
404 depends on USB_CONFIGFS 404 depends on USB_CONFIGFS
405 depends on SND 405 depends on SND
406 select USB_LIBCOMPOSITE 406 select USB_LIBCOMPOSITE
@@ -414,7 +414,7 @@ config USB_CONFIGFS_F_MIDI
414 ALSA's aconnect utility etc. 414 ALSA's aconnect utility etc.
415 415
416config USB_CONFIGFS_F_HID 416config USB_CONFIGFS_F_HID
417 boolean "HID function" 417 bool "HID function"
418 depends on USB_CONFIGFS 418 depends on USB_CONFIGFS
419 select USB_F_HID 419 select USB_F_HID
420 help 420 help
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index fd48ef3af4eb..113c87e22117 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -40,7 +40,7 @@ config USB_ZERO
40 dynamically linked module called "g_zero". 40 dynamically linked module called "g_zero".
41 41
42config USB_ZERO_HNPTEST 42config USB_ZERO_HNPTEST
43 boolean "HNP Test Device" 43 bool "HNP Test Device"
44 depends on USB_ZERO && USB_OTG 44 depends on USB_ZERO && USB_OTG
45 help 45 help
46 You can configure this device to enumerate using the device 46 You can configure this device to enumerate using the device
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 366e551aeff0..9a3a6b00391a 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -199,7 +199,7 @@ config USB_S3C2410
199 S3C2440 processors. 199 S3C2440 processors.
200 200
201config USB_S3C2410_DEBUG 201config USB_S3C2410_DEBUG
202 boolean "S3C2410 udc debug messages" 202 bool "S3C2410 udc debug messages"
203 depends on USB_S3C2410 203 depends on USB_S3C2410
204 204
205config USB_S3C_HSUDC 205config USB_S3C_HSUDC
@@ -288,7 +288,7 @@ config USB_NET2272
288 gadget drivers to also be dynamically linked. 288 gadget drivers to also be dynamically linked.
289 289
290config USB_NET2272_DMA 290config USB_NET2272_DMA
291 boolean "Support external DMA controller" 291 bool "Support external DMA controller"
292 depends on USB_NET2272 && HAS_DMA 292 depends on USB_NET2272 && HAS_DMA
293 help 293 help
294 The NET2272 part can optionally support an external DMA 294 The NET2272 part can optionally support an external DMA
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index c6d0c8e745b9..52d3d58252e1 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -119,7 +119,7 @@ config TAHVO_USB
119 119
120config TAHVO_USB_HOST_BY_DEFAULT 120config TAHVO_USB_HOST_BY_DEFAULT
121 depends on TAHVO_USB 121 depends on TAHVO_USB
122 boolean "Device in USB host mode by default" 122 bool "Device in USB host mode by default"
123 help 123 help
124 Say Y here, if you want the device to enter USB host mode 124 Say Y here, if you want the device to enter USB host mode
125 by default on bootup. 125 by default on bootup.
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 7cc0122a18ce..f8a186381ae8 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -239,9 +239,12 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
239 239
240 return (flags & PCI_MSIX_FLAGS_QSIZE) + 1; 240 return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
241 } 241 }
242 } else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) 242 } else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
243 if (pci_is_pcie(vdev->pdev)) 243 if (pci_is_pcie(vdev->pdev))
244 return 1; 244 return 1;
245 } else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
246 return 1;
247 }
245 248
246 return 0; 249 return 0;
247} 250}
@@ -464,6 +467,7 @@ static long vfio_pci_ioctl(void *device_data,
464 467
465 switch (info.index) { 468 switch (info.index) {
466 case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX: 469 case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
470 case VFIO_PCI_REQ_IRQ_INDEX:
467 break; 471 break;
468 case VFIO_PCI_ERR_IRQ_INDEX: 472 case VFIO_PCI_ERR_IRQ_INDEX:
469 if (pci_is_pcie(vdev->pdev)) 473 if (pci_is_pcie(vdev->pdev))
@@ -828,6 +832,20 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
828 req_len, vma->vm_page_prot); 832 req_len, vma->vm_page_prot);
829} 833}
830 834
835static void vfio_pci_request(void *device_data, unsigned int count)
836{
837 struct vfio_pci_device *vdev = device_data;
838
839 mutex_lock(&vdev->igate);
840
841 if (vdev->req_trigger) {
842 dev_dbg(&vdev->pdev->dev, "Requesting device from user\n");
843 eventfd_signal(vdev->req_trigger, 1);
844 }
845
846 mutex_unlock(&vdev->igate);
847}
848
831static const struct vfio_device_ops vfio_pci_ops = { 849static const struct vfio_device_ops vfio_pci_ops = {
832 .name = "vfio-pci", 850 .name = "vfio-pci",
833 .open = vfio_pci_open, 851 .open = vfio_pci_open,
@@ -836,6 +854,7 @@ static const struct vfio_device_ops vfio_pci_ops = {
836 .read = vfio_pci_read, 854 .read = vfio_pci_read,
837 .write = vfio_pci_write, 855 .write = vfio_pci_write,
838 .mmap = vfio_pci_mmap, 856 .mmap = vfio_pci_mmap,
857 .request = vfio_pci_request,
839}; 858};
840 859
841static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 860static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
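
The new .request callback above fires req_trigger, an eventfd that userspace registers on the (equally new) VFIO_PCI_REQ_IRQ_INDEX via the VFIO_DEVICE_SET_IRQS ioctl. A userspace sketch of that registration, assuming a kernel and uapi headers that carry this series and an already-open VFIO device fd:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/eventfd.h>
#include <linux/vfio.h>

/* Register an eventfd on VFIO_PCI_REQ_IRQ_INDEX so the kernel's new
 * .request callback has something to signal; returns the eventfd. */
static int register_req_eventfd(int device_fd)
{
	struct vfio_irq_set *set;
	size_t sz = sizeof(*set) + sizeof(int32_t);
	int efd = eventfd(0, 0);

	if (efd < 0)
		return -1;

	set = calloc(1, sz);
	if (!set) {
		close(efd);
		return -1;
	}

	set->argsz = sz;
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = VFIO_PCI_REQ_IRQ_INDEX;
	set->start = 0;
	set->count = 1;		/* the kernel insists on start == 0, count == 1 */
	memcpy(set->data, &efd, sizeof(int32_t));

	if (ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set)) {
		free(set);
		close(efd);
		return -1;
	}

	free(set);
	return efd;
}
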
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index e8d695b3f54e..f88bfdf5b6a0 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -763,46 +763,70 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
763 return 0; 763 return 0;
764} 764}
765 765
766static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev, 766static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
767 unsigned index, unsigned start, 767 uint32_t flags, void *data)
768 unsigned count, uint32_t flags, void *data)
769{ 768{
770 int32_t fd = *(int32_t *)data; 769 int32_t fd = *(int32_t *)data;
771 770
772 if ((index != VFIO_PCI_ERR_IRQ_INDEX) || 771 if (!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
773 !(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
774 return -EINVAL; 772 return -EINVAL;
775 773
776 /* DATA_NONE/DATA_BOOL enables loopback testing */ 774 /* DATA_NONE/DATA_BOOL enables loopback testing */
777 if (flags & VFIO_IRQ_SET_DATA_NONE) { 775 if (flags & VFIO_IRQ_SET_DATA_NONE) {
778 if (vdev->err_trigger) 776 if (*ctx)
779 eventfd_signal(vdev->err_trigger, 1); 777 eventfd_signal(*ctx, 1);
780 return 0; 778 return 0;
781 } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { 779 } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
782 uint8_t trigger = *(uint8_t *)data; 780 uint8_t trigger = *(uint8_t *)data;
783 if (trigger && vdev->err_trigger) 781 if (trigger && *ctx)
784 eventfd_signal(vdev->err_trigger, 1); 782 eventfd_signal(*ctx, 1);
785 return 0; 783 return 0;
786 } 784 }
787 785
788 /* Handle SET_DATA_EVENTFD */ 786 /* Handle SET_DATA_EVENTFD */
789 if (fd == -1) { 787 if (fd == -1) {
790 if (vdev->err_trigger) 788 if (*ctx)
791 eventfd_ctx_put(vdev->err_trigger); 789 eventfd_ctx_put(*ctx);
792 vdev->err_trigger = NULL; 790 *ctx = NULL;
793 return 0; 791 return 0;
794 } else if (fd >= 0) { 792 } else if (fd >= 0) {
795 struct eventfd_ctx *efdctx; 793 struct eventfd_ctx *efdctx;
796 efdctx = eventfd_ctx_fdget(fd); 794 efdctx = eventfd_ctx_fdget(fd);
797 if (IS_ERR(efdctx)) 795 if (IS_ERR(efdctx))
798 return PTR_ERR(efdctx); 796 return PTR_ERR(efdctx);
799 if (vdev->err_trigger) 797 if (*ctx)
800 eventfd_ctx_put(vdev->err_trigger); 798 eventfd_ctx_put(*ctx);
801 vdev->err_trigger = efdctx; 799 *ctx = efdctx;
802 return 0; 800 return 0;
803 } else 801 } else
804 return -EINVAL; 802 return -EINVAL;
805} 803}
804
805static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
806 unsigned index, unsigned start,
807 unsigned count, uint32_t flags, void *data)
808{
809 if (index != VFIO_PCI_ERR_IRQ_INDEX)
810 return -EINVAL;
811
812 /*
813 * We should sanitize start & count, but that wasn't caught
814 * originally, so this IRQ index must forever ignore them :-(
815 */
816
817 return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, flags, data);
818}
819
820static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
821 unsigned index, unsigned start,
822 unsigned count, uint32_t flags, void *data)
823{
824 if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count != 1)
825 return -EINVAL;
826
827 return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, flags, data);
828}
829
806int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags, 830int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
807 unsigned index, unsigned start, unsigned count, 831 unsigned index, unsigned start, unsigned count,
808 void *data) 832 void *data)
@@ -844,6 +868,12 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
844 func = vfio_pci_set_err_trigger; 868 func = vfio_pci_set_err_trigger;
845 break; 869 break;
846 } 870 }
871 case VFIO_PCI_REQ_IRQ_INDEX:
872 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
873 case VFIO_IRQ_SET_ACTION_TRIGGER:
874 func = vfio_pci_set_req_trigger;
875 break;
876 }
847 } 877 }
848 878
849 if (!func) 879 if (!func)
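
As the refactored trigger helper shows, DATA_NONE and DATA_BOOL do not change the registered eventfd; they fire it, which gives userspace a loopback self-test for either index. A short sketch reusing the headers from the registration example above:

/* Fire the previously registered req_trigger from userspace,
 * exercising the DATA_NONE loopback path in the trigger helper. */
static int loopback_fire_req(int device_fd)
{
	struct vfio_irq_set set = {
		.argsz = sizeof(set),
		.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
		.index = VFIO_PCI_REQ_IRQ_INDEX,
		.start = 0,
		.count = 1,
	};

	return ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &set);
}
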
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 671c17a6e6d0..c9f9b323f152 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -58,6 +58,7 @@ struct vfio_pci_device {
58 struct pci_saved_state *pci_saved_state; 58 struct pci_saved_state *pci_saved_state;
59 int refcnt; 59 int refcnt;
60 struct eventfd_ctx *err_trigger; 60 struct eventfd_ctx *err_trigger;
61 struct eventfd_ctx *req_trigger;
61}; 62};
62 63
63#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) 64#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index f018d8d0f975..4cde85501444 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -63,6 +63,11 @@ struct vfio_container {
63 void *iommu_data; 63 void *iommu_data;
64}; 64};
65 65
66struct vfio_unbound_dev {
67 struct device *dev;
68 struct list_head unbound_next;
69};
70
66struct vfio_group { 71struct vfio_group {
67 struct kref kref; 72 struct kref kref;
68 int minor; 73 int minor;
@@ -75,6 +80,8 @@ struct vfio_group {
75 struct notifier_block nb; 80 struct notifier_block nb;
76 struct list_head vfio_next; 81 struct list_head vfio_next;
77 struct list_head container_next; 82 struct list_head container_next;
83 struct list_head unbound_list;
84 struct mutex unbound_lock;
78 atomic_t opened; 85 atomic_t opened;
79}; 86};
80 87
@@ -204,6 +211,8 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
204 kref_init(&group->kref); 211 kref_init(&group->kref);
205 INIT_LIST_HEAD(&group->device_list); 212 INIT_LIST_HEAD(&group->device_list);
206 mutex_init(&group->device_lock); 213 mutex_init(&group->device_lock);
214 INIT_LIST_HEAD(&group->unbound_list);
215 mutex_init(&group->unbound_lock);
207 atomic_set(&group->container_users, 0); 216 atomic_set(&group->container_users, 0);
208 atomic_set(&group->opened, 0); 217 atomic_set(&group->opened, 0);
209 group->iommu_group = iommu_group; 218 group->iommu_group = iommu_group;
@@ -264,13 +273,22 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
264static void vfio_group_release(struct kref *kref) 273static void vfio_group_release(struct kref *kref)
265{ 274{
266 struct vfio_group *group = container_of(kref, struct vfio_group, kref); 275 struct vfio_group *group = container_of(kref, struct vfio_group, kref);
276 struct vfio_unbound_dev *unbound, *tmp;
277 struct iommu_group *iommu_group = group->iommu_group;
267 278
268 WARN_ON(!list_empty(&group->device_list)); 279 WARN_ON(!list_empty(&group->device_list));
269 280
281 list_for_each_entry_safe(unbound, tmp,
282 &group->unbound_list, unbound_next) {
283 list_del(&unbound->unbound_next);
284 kfree(unbound);
285 }
286
270 device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor)); 287 device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
271 list_del(&group->vfio_next); 288 list_del(&group->vfio_next);
272 vfio_free_group_minor(group->minor); 289 vfio_free_group_minor(group->minor);
273 vfio_group_unlock_and_free(group); 290 vfio_group_unlock_and_free(group);
291 iommu_group_put(iommu_group);
274} 292}
275 293
276static void vfio_group_put(struct vfio_group *group) 294static void vfio_group_put(struct vfio_group *group)
@@ -440,17 +458,36 @@ static bool vfio_whitelisted_driver(struct device_driver *drv)
440} 458}
441 459
442/* 460/*
443 * A vfio group is viable for use by userspace if all devices are either 461 * A vfio group is viable for use by userspace if all devices are in
444 * driver-less or bound to a vfio or whitelisted driver. We test the 462 * one of the following states:
445 * latter by the existence of a struct vfio_device matching the dev. 463 * - driver-less
464 * - bound to a vfio driver
465 * - bound to a whitelisted driver
466 *
467 * We use two methods to determine whether a device is bound to a vfio
468 * driver. The first is to test whether the device exists in the vfio
469 * group. The second is to test if the device exists on the group
470 * unbound_list, indicating it's in the middle of transitioning from
471 * a vfio driver to driver-less.
446 */ 472 */
447static int vfio_dev_viable(struct device *dev, void *data) 473static int vfio_dev_viable(struct device *dev, void *data)
448{ 474{
449 struct vfio_group *group = data; 475 struct vfio_group *group = data;
450 struct vfio_device *device; 476 struct vfio_device *device;
451 struct device_driver *drv = ACCESS_ONCE(dev->driver); 477 struct device_driver *drv = ACCESS_ONCE(dev->driver);
478 struct vfio_unbound_dev *unbound;
479 int ret = -EINVAL;
452 480
453 if (!drv || vfio_whitelisted_driver(drv)) 481 mutex_lock(&group->unbound_lock);
482 list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
483 if (dev == unbound->dev) {
484 ret = 0;
485 break;
486 }
487 }
488 mutex_unlock(&group->unbound_lock);
489
490 if (!ret || !drv || vfio_whitelisted_driver(drv))
454 return 0; 491 return 0;
455 492
456 device = vfio_group_get_device(group, dev); 493 device = vfio_group_get_device(group, dev);
@@ -459,7 +496,7 @@ static int vfio_dev_viable(struct device *dev, void *data)
459 return 0; 496 return 0;
460 } 497 }
461 498
462 return -EINVAL; 499 return ret;
463} 500}
464 501
465/** 502/**
@@ -501,6 +538,7 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
501{ 538{
502 struct vfio_group *group = container_of(nb, struct vfio_group, nb); 539 struct vfio_group *group = container_of(nb, struct vfio_group, nb);
503 struct device *dev = data; 540 struct device *dev = data;
541 struct vfio_unbound_dev *unbound;
504 542
505 /* 543 /*
506 * Need to go through a group_lock lookup to get a reference or we 544 * Need to go through a group_lock lookup to get a reference or we
@@ -550,6 +588,17 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
550 * stop the system to maintain isolation. At a minimum, we'd 588 * stop the system to maintain isolation. At a minimum, we'd
551 * want a toggle to disable driver auto probe for this device. 589 * want a toggle to disable driver auto probe for this device.
552 */ 590 */
591
592 mutex_lock(&group->unbound_lock);
593 list_for_each_entry(unbound,
594 &group->unbound_list, unbound_next) {
595 if (dev == unbound->dev) {
596 list_del(&unbound->unbound_next);
597 kfree(unbound);
598 break;
599 }
600 }
601 mutex_unlock(&group->unbound_lock);
553 break; 602 break;
554 } 603 }
555 604
@@ -578,6 +627,12 @@ int vfio_add_group_dev(struct device *dev,
578 iommu_group_put(iommu_group); 627 iommu_group_put(iommu_group);
579 return PTR_ERR(group); 628 return PTR_ERR(group);
580 } 629 }
630 } else {
631 /*
632 * A found vfio_group already holds a reference to the
633 * iommu_group. A created vfio_group keeps the reference.
634 */
635 iommu_group_put(iommu_group);
581 } 636 }
582 637
583 device = vfio_group_get_device(group, dev); 638 device = vfio_group_get_device(group, dev);
@@ -586,21 +641,19 @@ int vfio_add_group_dev(struct device *dev,
586 dev_name(dev), iommu_group_id(iommu_group)); 641 dev_name(dev), iommu_group_id(iommu_group));
587 vfio_device_put(device); 642 vfio_device_put(device);
588 vfio_group_put(group); 643 vfio_group_put(group);
589 iommu_group_put(iommu_group);
590 return -EBUSY; 644 return -EBUSY;
591 } 645 }
592 646
593 device = vfio_group_create_device(group, dev, ops, device_data); 647 device = vfio_group_create_device(group, dev, ops, device_data);
594 if (IS_ERR(device)) { 648 if (IS_ERR(device)) {
595 vfio_group_put(group); 649 vfio_group_put(group);
596 iommu_group_put(iommu_group);
597 return PTR_ERR(device); 650 return PTR_ERR(device);
598 } 651 }
599 652
600 /* 653 /*
601 * Added device holds reference to iommu_group and vfio_device 654 * Drop all but the vfio_device reference. The vfio_device holds
602 * (which in turn holds reference to vfio_group). Drop extra 655 * a reference to the vfio_group, which holds a reference to the
603 * group reference used while acquiring device. 656 * iommu_group.
604 */ 657 */
605 vfio_group_put(group); 658 vfio_group_put(group);
606 659
@@ -655,8 +708,9 @@ void *vfio_del_group_dev(struct device *dev)
655{ 708{
656 struct vfio_device *device = dev_get_drvdata(dev); 709 struct vfio_device *device = dev_get_drvdata(dev);
657 struct vfio_group *group = device->group; 710 struct vfio_group *group = device->group;
658 struct iommu_group *iommu_group = group->iommu_group;
659 void *device_data = device->device_data; 711 void *device_data = device->device_data;
712 struct vfio_unbound_dev *unbound;
713 unsigned int i = 0;
660 714
661 /* 715 /*
662 * The group exists so long as we have a device reference. Get 716 * The group exists so long as we have a device reference. Get
@@ -664,14 +718,49 @@ void *vfio_del_group_dev(struct device *dev)
664 */ 718 */
665 vfio_group_get(group); 719 vfio_group_get(group);
666 720
721 /*
722 * When the device is removed from the group, the group suddenly
723 * becomes non-viable; the device has a driver (until the unbind
724 * completes), but it's not present in the group. This is bad news
725 * for any external users that need to re-acquire a group reference
726 * in order to match and release their existing reference. To
727 * solve this, we track such devices on the unbound_list to bridge
728 * the gap until they're fully unbound.
729 */
730 unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
731 if (unbound) {
732 unbound->dev = dev;
733 mutex_lock(&group->unbound_lock);
734 list_add(&unbound->unbound_next, &group->unbound_list);
735 mutex_unlock(&group->unbound_lock);
736 }
737 WARN_ON(!unbound);
738
667 vfio_device_put(device); 739 vfio_device_put(device);
668 740
669 /* TODO send a signal to encourage this to be released */ 741 /*
670 wait_event(vfio.release_q, !vfio_dev_present(group, dev)); 742 * If the device is still present in the group after the above
743 * 'put', then it is in use and we need to request it from the
744 * bus driver. The driver may in turn need to request the
745 * device from the user. We send the request on an arbitrary
746 * interval with counter to allow the driver to take escalating
747 * measures to release the device if it has the ability to do so.
748 */
749 do {
750 device = vfio_group_get_device(group, dev);
751 if (!device)
752 break;
671 753
672 vfio_group_put(group); 754 if (device->ops->request)
755 device->ops->request(device_data, i++);
673 756
674 iommu_group_put(iommu_group); 757 vfio_device_put(device);
758
759 } while (wait_event_interruptible_timeout(vfio.release_q,
760 !vfio_dev_present(group, dev),
761 HZ * 10) <= 0);
762
763 vfio_group_put(group);
675 764
676 return device_data; 765 return device_data;
677} 766}
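
The removal path above replaces the old unconditional wait with a request/poll cycle: every ten seconds it re-invokes .request with an escalating counter until the device has left the group. The cooperating userspace side watches the request eventfd and closes the device when asked; a minimal sketch, reusing the eventfd from the registration example and standing in for one arm of a larger event loop:

#include <poll.h>
#include <stdint.h>
#include <unistd.h>

/* Block until the kernel requests the device back, then release it. */
static void serve_device_request(int req_efd, int device_fd)
{
	struct pollfd pfd = { .fd = req_efd, .events = POLLIN };
	uint64_t count;

	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		(void)read(req_efd, &count, sizeof(count));	/* drain */
		close(device_fd);	/* dropping the fd lets the unbind finish */
	}
}
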
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 4a9d666f1e91..57d8c37a002b 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -66,6 +66,7 @@ struct vfio_domain {
66 struct list_head next; 66 struct list_head next;
67 struct list_head group_list; 67 struct list_head group_list;
68 int prot; /* IOMMU_CACHE */ 68 int prot; /* IOMMU_CACHE */
69 bool fgsp; /* Fine-grained super pages */
69}; 70};
70 71
71struct vfio_dma { 72struct vfio_dma {
@@ -264,6 +265,7 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
264 unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; 265 unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
265 bool lock_cap = capable(CAP_IPC_LOCK); 266 bool lock_cap = capable(CAP_IPC_LOCK);
266 long ret, i; 267 long ret, i;
268 bool rsvd;
267 269
268 if (!current->mm) 270 if (!current->mm)
269 return -ENODEV; 271 return -ENODEV;
@@ -272,10 +274,9 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
272 if (ret) 274 if (ret)
273 return ret; 275 return ret;
274 276
275 if (is_invalid_reserved_pfn(*pfn_base)) 277 rsvd = is_invalid_reserved_pfn(*pfn_base);
276 return 1;
277 278
278 if (!lock_cap && current->mm->locked_vm + 1 > limit) { 279 if (!rsvd && !lock_cap && current->mm->locked_vm + 1 > limit) {
279 put_pfn(*pfn_base, prot); 280 put_pfn(*pfn_base, prot);
280 pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__, 281 pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
281 limit << PAGE_SHIFT); 282 limit << PAGE_SHIFT);
@@ -283,7 +284,8 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
283 } 284 }
284 285
285 if (unlikely(disable_hugepages)) { 286 if (unlikely(disable_hugepages)) {
286 vfio_lock_acct(1); 287 if (!rsvd)
288 vfio_lock_acct(1);
287 return 1; 289 return 1;
288 } 290 }
289 291
@@ -295,12 +297,14 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
295 if (ret) 297 if (ret)
296 break; 298 break;
297 299
298 if (pfn != *pfn_base + i || is_invalid_reserved_pfn(pfn)) { 300 if (pfn != *pfn_base + i ||
301 rsvd != is_invalid_reserved_pfn(pfn)) {
299 put_pfn(pfn, prot); 302 put_pfn(pfn, prot);
300 break; 303 break;
301 } 304 }
302 305
303 if (!lock_cap && current->mm->locked_vm + i + 1 > limit) { 306 if (!rsvd && !lock_cap &&
307 current->mm->locked_vm + i + 1 > limit) {
304 put_pfn(pfn, prot); 308 put_pfn(pfn, prot);
305 pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", 309 pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
306 __func__, limit << PAGE_SHIFT); 310 __func__, limit << PAGE_SHIFT);
@@ -308,7 +312,8 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
308 } 312 }
309 } 313 }
310 314
311 vfio_lock_acct(i); 315 if (!rsvd)
316 vfio_lock_acct(i);
312 317
313 return i; 318 return i;
314} 319}
@@ -346,12 +351,14 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
346 domain = d = list_first_entry(&iommu->domain_list, 351 domain = d = list_first_entry(&iommu->domain_list,
347 struct vfio_domain, next); 352 struct vfio_domain, next);
348 353
349 list_for_each_entry_continue(d, &iommu->domain_list, next) 354 list_for_each_entry_continue(d, &iommu->domain_list, next) {
350 iommu_unmap(d->domain, dma->iova, dma->size); 355 iommu_unmap(d->domain, dma->iova, dma->size);
356 cond_resched();
357 }
351 358
352 while (iova < end) { 359 while (iova < end) {
353 size_t unmapped; 360 size_t unmapped, len;
354 phys_addr_t phys; 361 phys_addr_t phys, next;
355 362
356 phys = iommu_iova_to_phys(domain->domain, iova); 363 phys = iommu_iova_to_phys(domain->domain, iova);
357 if (WARN_ON(!phys)) { 364 if (WARN_ON(!phys)) {
@@ -359,7 +366,19 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
359 continue; 366 continue;
360 } 367 }
361 368
362 unmapped = iommu_unmap(domain->domain, iova, PAGE_SIZE); 369 /*
370 * To optimize for fewer iommu_unmap() calls, each of which
371 * may require hardware cache flushing, try to find the
372 * largest contiguous physical memory chunk to unmap.
373 */
374 for (len = PAGE_SIZE;
375 !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
376 next = iommu_iova_to_phys(domain->domain, iova + len);
377 if (next != phys + len)
378 break;
379 }
380
381 unmapped = iommu_unmap(domain->domain, iova, len);
363 if (WARN_ON(!unmapped)) 382 if (WARN_ON(!unmapped))
364 break; 383 break;
365 384
@@ -367,6 +386,8 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
367 unmapped >> PAGE_SHIFT, 386 unmapped >> PAGE_SHIFT,
368 dma->prot, false); 387 dma->prot, false);
369 iova += unmapped; 388 iova += unmapped;
389
390 cond_resched();
370 } 391 }
371 392
372 vfio_lock_acct(-unlocked); 393 vfio_lock_acct(-unlocked);
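
The reworked unmap loop above probes forward with iommu_iova_to_phys() so that a single iommu_unmap() call, and hence a single expensive IOTLB flush, can cover a whole physically contiguous run. The core of that probe as a standalone sketch over a hypothetical phys-lookup callback (4K pages assumed):

#include <stddef.h>

#define PG 4096ULL

/* Length in bytes of the physically contiguous run starting at iova,
 * capped at end; mirrors the probe loop in vfio_unmap_unpin() above. */
static size_t contig_len(unsigned long long (*iova_to_phys)(unsigned long long),
			 unsigned long long iova, unsigned long long end)
{
	unsigned long long phys = iova_to_phys(iova);
	size_t len = PG;

	while (iova + len < end && iova_to_phys(iova + len) == phys + len)
		len += PG;

	return len;
}
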
@@ -511,6 +532,8 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
511 map_try_harder(d, iova, pfn, npage, prot)) 532 map_try_harder(d, iova, pfn, npage, prot))
512 goto unwind; 533 goto unwind;
513 } 534 }
535
536 cond_resched();
514 } 537 }
515 538
516 return 0; 539 return 0;
@@ -665,6 +688,39 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
665 return 0; 688 return 0;
666} 689}
667 690
691/*
692 * We change our unmap behavior slightly depending on whether the IOMMU
693 * supports fine-grained superpages. IOMMUs like AMD-Vi will use a superpage
694 * for practically any contiguous power-of-two mapping we give it. This means
695 * we don't need to look for contiguous chunks ourselves to make unmapping
696 * more efficient. On IOMMUs with coarse-grained super pages, like Intel VT-d
697 * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
698 * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
699 * hugetlbfs is in use.
700 */
701static void vfio_test_domain_fgsp(struct vfio_domain *domain)
702{
703 struct page *pages;
704 int ret, order = get_order(PAGE_SIZE * 2);
705
706 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
707 if (!pages)
708 return;
709
710 ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
711 IOMMU_READ | IOMMU_WRITE | domain->prot);
712 if (!ret) {
713 size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
714
715 if (unmapped == PAGE_SIZE)
716 iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
717 else
718 domain->fgsp = true;
719 }
720
721 __free_pages(pages, order);
722}
723
668static int vfio_iommu_type1_attach_group(void *iommu_data, 724static int vfio_iommu_type1_attach_group(void *iommu_data,
669 struct iommu_group *iommu_group) 725 struct iommu_group *iommu_group)
670{ 726{
@@ -758,6 +814,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
758 } 814 }
759 } 815 }
760 816
817 vfio_test_domain_fgsp(domain);
818
761 /* replay mappings on new domains */ 819 /* replay mappings on new domains */
762 ret = vfio_iommu_replay(iommu, domain); 820 ret = vfio_iommu_replay(iommu, domain);
763 if (ret) 821 if (ret)
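
The probe added above maps two zeroed pages at IOVA 0 and then tries to unmap only the first: an IOMMU using plain 4K PTEs hands back exactly PAGE_SIZE, while one that silently fused the pair into a superpage it cannot split returns more, in which case manual run-coalescing is pointless and fgsp is set. The decision, restated as a tiny helper:

/* Interpreting the two-page probe: getting back more than one page
 * means the IOMMU built an unsplittable superpage on its own, so the
 * contiguous-run search in the unmap path can be skipped. */
static int probe_says_fgsp(size_t unmapped, size_t page_size)
{
	return unmapped != page_size;
}
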
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 633012cc9a57..18f05bff8826 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -591,11 +591,6 @@ static void handle_rx(struct vhost_net *net)
591 * TODO: support TSO. 591 * TODO: support TSO.
592 */ 592 */
593 iov_iter_advance(&msg.msg_iter, vhost_hlen); 593 iov_iter_advance(&msg.msg_iter, vhost_hlen);
594 } else {
595 /* It'll come from socket; we'll need to patch
596 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
597 */
598 iov_iter_advance(&fixup, sizeof(hdr));
599 } 594 }
600 err = sock->ops->recvmsg(sock, &msg, 595 err = sock->ops->recvmsg(sock, &msg,
601 sock_len, MSG_DONTWAIT | MSG_TRUNC); 596 sock_len, MSG_DONTWAIT | MSG_TRUNC);
@@ -609,17 +604,25 @@ static void handle_rx(struct vhost_net *net)
609 continue; 604 continue;
610 } 605 }
611 /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */ 606 /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
612 if (unlikely(vhost_hlen) && 607 if (unlikely(vhost_hlen)) {
613 copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) { 608 if (copy_to_iter(&hdr, sizeof(hdr),
614 vq_err(vq, "Unable to write vnet_hdr at addr %p\n", 609 &fixup) != sizeof(hdr)) {
615 vq->iov->iov_base); 610 vq_err(vq, "Unable to write vnet_hdr "
616 break; 611 "at addr %p\n", vq->iov->iov_base);
612 break;
613 }
614 } else {
615 /* Header came from socket; we'll need to patch
616 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
617 */
618 iov_iter_advance(&fixup, sizeof(hdr));
617 } 619 }
618 /* TODO: Should check and handle checksum. */ 620 /* TODO: Should check and handle checksum. */
619 621
620 num_buffers = cpu_to_vhost16(vq, headcount); 622 num_buffers = cpu_to_vhost16(vq, headcount);
621 if (likely(mergeable) && 623 if (likely(mergeable) &&
622 copy_to_iter(&num_buffers, 2, &fixup) != 2) { 624 copy_to_iter(&num_buffers, sizeof num_buffers,
625 &fixup) != sizeof num_buffers) {
623 vq_err(vq, "Failed num_buffers write"); 626 vq_err(vq, "Failed num_buffers write");
624 vhost_discard_vq_desc(vq, headcount); 627 vhost_discard_vq_desc(vq, headcount);
625 break; 628 break;
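
The restructured receive path above exists because, with mergeable RX buffers, the vnet header carries a num_buffers field that vhost can only fill in after it knows how many descriptors the frame consumed, hence the deferred two-byte write through the fixup iterator. The layout, simplified from include/uapi/linux/virtio_net.h (the real fields are endian-annotated __virtio16):

#include <stdint.h>

struct virtio_net_hdr {
	uint8_t  flags;
	uint8_t  gso_type;
	uint16_t hdr_len;
	uint16_t gso_size;
	uint16_t csum_start;
	uint16_t csum_offset;
};

/* The 2-byte num_buffers sits directly after the basic header, which
 * is why the code above advances the fixup iterator by sizeof(hdr)
 * when the header comes from the socket, then patches exactly
 * sizeof(num_buffers) == 2 bytes. */
struct virtio_net_hdr_mrg_rxbuf {
	struct virtio_net_hdr hdr;
	uint16_t num_buffers;	/* only valid with VIRTIO_NET_F_MRG_RXBUF */
};
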
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index dc78d87e0fc2..8d4f3f1ff799 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -38,7 +38,6 @@
38#include <linux/miscdevice.h> 38#include <linux/miscdevice.h>
39#include <asm/unaligned.h> 39#include <asm/unaligned.h>
40#include <scsi/scsi.h> 40#include <scsi/scsi.h>
41#include <scsi/scsi_tcq.h>
42#include <target/target_core_base.h> 41#include <target/target_core_base.h>
43#include <target/target_core_fabric.h> 42#include <target/target_core_fabric.h>
44#include <target/target_core_fabric_configfs.h> 43#include <target/target_core_fabric_configfs.h>
@@ -52,13 +51,13 @@
52 51
53#include "vhost.h" 52#include "vhost.h"
54 53
55#define TCM_VHOST_VERSION "v0.1" 54#define VHOST_SCSI_VERSION "v0.1"
56#define TCM_VHOST_NAMELEN 256 55#define VHOST_SCSI_NAMELEN 256
57#define TCM_VHOST_MAX_CDB_SIZE 32 56#define VHOST_SCSI_MAX_CDB_SIZE 32
58#define TCM_VHOST_DEFAULT_TAGS 256 57#define VHOST_SCSI_DEFAULT_TAGS 256
59#define TCM_VHOST_PREALLOC_SGLS 2048 58#define VHOST_SCSI_PREALLOC_SGLS 2048
60#define TCM_VHOST_PREALLOC_UPAGES 2048 59#define VHOST_SCSI_PREALLOC_UPAGES 2048
61#define TCM_VHOST_PREALLOC_PROT_SGLS 512 60#define VHOST_SCSI_PREALLOC_PROT_SGLS 512
62 61
63struct vhost_scsi_inflight { 62struct vhost_scsi_inflight {
64 /* Wait for the flush operation to finish */ 63 /* Wait for the flush operation to finish */
@@ -67,11 +66,13 @@ struct vhost_scsi_inflight {
67 struct kref kref; 66 struct kref kref;
68}; 67};
69 68
70struct tcm_vhost_cmd { 69struct vhost_scsi_cmd {
71 /* Descriptor from vhost_get_vq_desc() for virt_queue segment */ 70 /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
72 int tvc_vq_desc; 71 int tvc_vq_desc;
73 /* virtio-scsi initiator task attribute */ 72 /* virtio-scsi initiator task attribute */
74 int tvc_task_attr; 73 int tvc_task_attr;
74 /* virtio-scsi response incoming iovecs */
75 int tvc_in_iovs;
75 /* virtio-scsi initiator data direction */ 76 /* virtio-scsi initiator data direction */
76 enum dma_data_direction tvc_data_direction; 77 enum dma_data_direction tvc_data_direction;
77 /* Expected data transfer length from virtio-scsi header */ 78 /* Expected data transfer length from virtio-scsi header */
@@ -81,26 +82,26 @@ struct tcm_vhost_cmd {
81 /* The number of scatterlists associated with this cmd */ 82 /* The number of scatterlists associated with this cmd */
82 u32 tvc_sgl_count; 83 u32 tvc_sgl_count;
83 u32 tvc_prot_sgl_count; 84 u32 tvc_prot_sgl_count;
84 /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */ 85 /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
85 u32 tvc_lun; 86 u32 tvc_lun;
86 /* Pointer to the SGL formatted memory from virtio-scsi */ 87 /* Pointer to the SGL formatted memory from virtio-scsi */
87 struct scatterlist *tvc_sgl; 88 struct scatterlist *tvc_sgl;
88 struct scatterlist *tvc_prot_sgl; 89 struct scatterlist *tvc_prot_sgl;
89 struct page **tvc_upages; 90 struct page **tvc_upages;
90 /* Pointer to response */ 91 /* Pointer to response header iovec */
91 struct virtio_scsi_cmd_resp __user *tvc_resp; 92 struct iovec *tvc_resp_iov;
92 /* Pointer to vhost_scsi for our device */ 93 /* Pointer to vhost_scsi for our device */
93 struct vhost_scsi *tvc_vhost; 94 struct vhost_scsi *tvc_vhost;
94 /* Pointer to vhost_virtqueue for the cmd */ 95 /* Pointer to vhost_virtqueue for the cmd */
95 struct vhost_virtqueue *tvc_vq; 96 struct vhost_virtqueue *tvc_vq;
96 /* Pointer to vhost nexus memory */ 97 /* Pointer to vhost nexus memory */
97 struct tcm_vhost_nexus *tvc_nexus; 98 struct vhost_scsi_nexus *tvc_nexus;
98 /* The TCM I/O descriptor that is accessed via container_of() */ 99 /* The TCM I/O descriptor that is accessed via container_of() */
99 struct se_cmd tvc_se_cmd; 100 struct se_cmd tvc_se_cmd;
100 /* work item used for cmwq dispatch to tcm_vhost_submission_work() */ 101 /* work item used for cmwq dispatch to vhost_scsi_submission_work() */
101 struct work_struct work; 102 struct work_struct work;
102 /* Copy of the incoming SCSI command descriptor block (CDB) */ 103 /* Copy of the incoming SCSI command descriptor block (CDB) */
103 unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE]; 104 unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
104 /* Sense buffer that will be mapped into outgoing status */ 105 /* Sense buffer that will be mapped into outgoing status */
105 unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER]; 106 unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
106 /* Completed commands list, serviced from vhost worker thread */ 107 /* Completed commands list, serviced from vhost worker thread */
@@ -109,53 +110,53 @@ struct tcm_vhost_cmd {
109 struct vhost_scsi_inflight *inflight; 110 struct vhost_scsi_inflight *inflight;
110}; 111};
111 112
112struct tcm_vhost_nexus { 113struct vhost_scsi_nexus {
113 /* Pointer to TCM session for I_T Nexus */ 114 /* Pointer to TCM session for I_T Nexus */
114 struct se_session *tvn_se_sess; 115 struct se_session *tvn_se_sess;
115}; 116};
116 117
117struct tcm_vhost_nacl { 118struct vhost_scsi_nacl {
118 /* Binary World Wide unique Port Name for Vhost Initiator port */ 119 /* Binary World Wide unique Port Name for Vhost Initiator port */
119 u64 iport_wwpn; 120 u64 iport_wwpn;
120 /* ASCII formatted WWPN for Sas Initiator port */ 121 /* ASCII formatted WWPN for Sas Initiator port */
121 char iport_name[TCM_VHOST_NAMELEN]; 122 char iport_name[VHOST_SCSI_NAMELEN];
122 /* Returned by tcm_vhost_make_nodeacl() */ 123 /* Returned by vhost_scsi_make_nodeacl() */
123 struct se_node_acl se_node_acl; 124 struct se_node_acl se_node_acl;
124}; 125};
125 126
126struct tcm_vhost_tpg { 127struct vhost_scsi_tpg {
127 /* Vhost port target portal group tag for TCM */ 128 /* Vhost port target portal group tag for TCM */
128 u16 tport_tpgt; 129 u16 tport_tpgt;
129 /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */ 130 /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */
130 int tv_tpg_port_count; 131 int tv_tpg_port_count;
131 /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */ 132 /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
132 int tv_tpg_vhost_count; 133 int tv_tpg_vhost_count;
133 /* list for tcm_vhost_list */ 134 /* list for vhost_scsi_list */
134 struct list_head tv_tpg_list; 135 struct list_head tv_tpg_list;
135 /* Used to protect access for tpg_nexus */ 136 /* Used to protect access for tpg_nexus */
136 struct mutex tv_tpg_mutex; 137 struct mutex tv_tpg_mutex;
137 /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */ 138 /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
138 struct tcm_vhost_nexus *tpg_nexus; 139 struct vhost_scsi_nexus *tpg_nexus;
139 /* Pointer back to tcm_vhost_tport */ 140 /* Pointer back to vhost_scsi_tport */
140 struct tcm_vhost_tport *tport; 141 struct vhost_scsi_tport *tport;
141 /* Returned by tcm_vhost_make_tpg() */ 142 /* Returned by vhost_scsi_make_tpg() */
142 struct se_portal_group se_tpg; 143 struct se_portal_group se_tpg;
143 /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */ 144 /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
144 struct vhost_scsi *vhost_scsi; 145 struct vhost_scsi *vhost_scsi;
145}; 146};
146 147
147struct tcm_vhost_tport { 148struct vhost_scsi_tport {
148 /* SCSI protocol the tport is providing */ 149 /* SCSI protocol the tport is providing */
149 u8 tport_proto_id; 150 u8 tport_proto_id;
150 /* Binary World Wide unique Port Name for Vhost Target port */ 151 /* Binary World Wide unique Port Name for Vhost Target port */
151 u64 tport_wwpn; 152 u64 tport_wwpn;
152 /* ASCII formatted WWPN for Vhost Target port */ 153 /* ASCII formatted WWPN for Vhost Target port */
153 char tport_name[TCM_VHOST_NAMELEN]; 154 char tport_name[VHOST_SCSI_NAMELEN];
154 /* Returned by tcm_vhost_make_tport() */ 155 /* Returned by vhost_scsi_make_tport() */
155 struct se_wwn tport_wwn; 156 struct se_wwn tport_wwn;
156}; 157};
157 158
158struct tcm_vhost_evt { 159struct vhost_scsi_evt {
159 /* event to be sent to guest */ 160 /* event to be sent to guest */
160 struct virtio_scsi_event event; 161 struct virtio_scsi_event event;
161 /* event list, serviced from vhost worker thread */ 162 /* event list, serviced from vhost worker thread */
@@ -171,7 +172,9 @@ enum {
171/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */ 172/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
172enum { 173enum {
173 VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) | 174 VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
174 (1ULL << VIRTIO_SCSI_F_T10_PI) 175 (1ULL << VIRTIO_SCSI_F_T10_PI) |
176 (1ULL << VIRTIO_F_ANY_LAYOUT) |
177 (1ULL << VIRTIO_F_VERSION_1)
175}; 178};
176 179
177#define VHOST_SCSI_MAX_TARGET 256 180#define VHOST_SCSI_MAX_TARGET 256
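
The feature-mask change above is ordinary 64-bit bit arithmetic: each VIRTIO feature is one bit position, and the 1ULL shifts matter because VIRTIO_F_VERSION_1 lives above bit 31, where shifting a plain int would be undefined. A minimal sketch with hypothetical feature names (the real bit numbers come from the virtio uapi headers):

#include <stdio.h>
#include <stdint.h>

#define FEAT_HOTPLUG     1
#define FEAT_ANY_LAYOUT  27
#define FEAT_VERSION_1   32	/* needs a 64-bit mask: (1 << 32) is UB */

int main(void)
{
	uint64_t features = (1ULL << FEAT_HOTPLUG) |
			    (1ULL << FEAT_ANY_LAYOUT) |
			    (1ULL << FEAT_VERSION_1);

	printf("version_1 negotiated: %d\n",
	       !!(features & (1ULL << FEAT_VERSION_1)));
	return 0;
}
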
@@ -195,7 +198,7 @@ struct vhost_scsi_virtqueue {
195 198
196struct vhost_scsi { 199struct vhost_scsi {
197 /* Protected by vhost_scsi->dev.mutex */ 200 /* Protected by vhost_scsi->dev.mutex */
198 struct tcm_vhost_tpg **vs_tpg; 201 struct vhost_scsi_tpg **vs_tpg;
199 char vs_vhost_wwpn[TRANSPORT_IQN_LEN]; 202 char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
200 203
201 struct vhost_dev dev; 204 struct vhost_dev dev;
@@ -212,21 +215,21 @@ struct vhost_scsi {
212}; 215};
213 216
214/* Local pointer to allocated TCM configfs fabric module */ 217/* Local pointer to allocated TCM configfs fabric module */
215static struct target_fabric_configfs *tcm_vhost_fabric_configfs; 218static struct target_fabric_configfs *vhost_scsi_fabric_configfs;
216 219
217static struct workqueue_struct *tcm_vhost_workqueue; 220static struct workqueue_struct *vhost_scsi_workqueue;
218 221
219/* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */ 222/* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
220static DEFINE_MUTEX(tcm_vhost_mutex); 223static DEFINE_MUTEX(vhost_scsi_mutex);
221static LIST_HEAD(tcm_vhost_list); 224static LIST_HEAD(vhost_scsi_list);
222 225
223static int iov_num_pages(struct iovec *iov) 226static int iov_num_pages(void __user *iov_base, size_t iov_len)
224{ 227{
225 return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) - 228 return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
226 ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT; 229 ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
227} 230}
228 231
229static void tcm_vhost_done_inflight(struct kref *kref) 232static void vhost_scsi_done_inflight(struct kref *kref)
230{ 233{
231 struct vhost_scsi_inflight *inflight; 234 struct vhost_scsi_inflight *inflight;
232 235
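
The reworked iov_num_pages() a few lines up counts how many pages a user buffer touches: round the end of the buffer up to a page boundary, round the base down, and divide by the page size. A quick standalone check of that arithmetic (4K pages assumed):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE_	4096UL
#define PAGE_MASK_	(~(PAGE_SIZE_ - 1))
#define PAGE_ALIGN_(x)	(((x) + PAGE_SIZE_ - 1) & PAGE_MASK_)

static unsigned long iov_num_pages(uintptr_t base, size_t len)
{
	return (PAGE_ALIGN_(base + len) - (base & PAGE_MASK_)) / PAGE_SIZE_;
}

int main(void)
{
	/* 16 bytes straddling a page boundary touch two pages... */
	printf("%lu\n", iov_num_pages(0x1ff8, 16));	/* -> 2 */
	/* ...while a page-aligned full page touches exactly one. */
	printf("%lu\n", iov_num_pages(0x2000, 4096));	/* -> 1 */
	return 0;
}
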
@@ -234,7 +237,7 @@ static void tcm_vhost_done_inflight(struct kref *kref)
234 complete(&inflight->comp); 237 complete(&inflight->comp);
235} 238}
236 239
237static void tcm_vhost_init_inflight(struct vhost_scsi *vs, 240static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
238 struct vhost_scsi_inflight *old_inflight[]) 241 struct vhost_scsi_inflight *old_inflight[])
239{ 242{
240 struct vhost_scsi_inflight *new_inflight; 243 struct vhost_scsi_inflight *new_inflight;
@@ -262,7 +265,7 @@ static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
262} 265}
263 266
264static struct vhost_scsi_inflight * 267static struct vhost_scsi_inflight *
265tcm_vhost_get_inflight(struct vhost_virtqueue *vq) 268vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
266{ 269{
267 struct vhost_scsi_inflight *inflight; 270 struct vhost_scsi_inflight *inflight;
268 struct vhost_scsi_virtqueue *svq; 271 struct vhost_scsi_virtqueue *svq;
@@ -274,31 +277,31 @@ tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
274 return inflight; 277 return inflight;
275} 278}
276 279
277static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight) 280static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
278{ 281{
279 kref_put(&inflight->kref, tcm_vhost_done_inflight); 282 kref_put(&inflight->kref, vhost_scsi_done_inflight);
280} 283}
281 284
282static int tcm_vhost_check_true(struct se_portal_group *se_tpg) 285static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
283{ 286{
284 return 1; 287 return 1;
285} 288}
286 289
287static int tcm_vhost_check_false(struct se_portal_group *se_tpg) 290static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
288{ 291{
289 return 0; 292 return 0;
290} 293}
291 294
292static char *tcm_vhost_get_fabric_name(void) 295static char *vhost_scsi_get_fabric_name(void)
293{ 296{
294 return "vhost"; 297 return "vhost";
295} 298}
296 299
297static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg) 300static u8 vhost_scsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
298{ 301{
299 struct tcm_vhost_tpg *tpg = container_of(se_tpg, 302 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
300 struct tcm_vhost_tpg, se_tpg); 303 struct vhost_scsi_tpg, se_tpg);
301 struct tcm_vhost_tport *tport = tpg->tport; 304 struct vhost_scsi_tport *tport = tpg->tport;
302 305
303 switch (tport->tport_proto_id) { 306 switch (tport->tport_proto_id) {
304 case SCSI_PROTOCOL_SAS: 307 case SCSI_PROTOCOL_SAS:
@@ -316,37 +319,37 @@ static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
316 return sas_get_fabric_proto_ident(se_tpg); 319 return sas_get_fabric_proto_ident(se_tpg);
317} 320}
318 321
319static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg) 322static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
320{ 323{
321 struct tcm_vhost_tpg *tpg = container_of(se_tpg, 324 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
322 struct tcm_vhost_tpg, se_tpg); 325 struct vhost_scsi_tpg, se_tpg);
323 struct tcm_vhost_tport *tport = tpg->tport; 326 struct vhost_scsi_tport *tport = tpg->tport;
324 327
325 return &tport->tport_name[0]; 328 return &tport->tport_name[0];
326} 329}
327 330
328static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg) 331static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
329{ 332{
330 struct tcm_vhost_tpg *tpg = container_of(se_tpg, 333 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
331 struct tcm_vhost_tpg, se_tpg); 334 struct vhost_scsi_tpg, se_tpg);
332 return tpg->tport_tpgt; 335 return tpg->tport_tpgt;
333} 336}
334 337
335static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg) 338static u32 vhost_scsi_get_default_depth(struct se_portal_group *se_tpg)
336{ 339{
337 return 1; 340 return 1;
338} 341}
339 342
340static u32 343static u32
341tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg, 344vhost_scsi_get_pr_transport_id(struct se_portal_group *se_tpg,
342 struct se_node_acl *se_nacl, 345 struct se_node_acl *se_nacl,
343 struct t10_pr_registration *pr_reg, 346 struct t10_pr_registration *pr_reg,
344 int *format_code, 347 int *format_code,
345 unsigned char *buf) 348 unsigned char *buf)
346{ 349{
347 struct tcm_vhost_tpg *tpg = container_of(se_tpg, 350 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
348 struct tcm_vhost_tpg, se_tpg); 351 struct vhost_scsi_tpg, se_tpg);
349 struct tcm_vhost_tport *tport = tpg->tport; 352 struct vhost_scsi_tport *tport = tpg->tport;
350 353
351 switch (tport->tport_proto_id) { 354 switch (tport->tport_proto_id) {
352 case SCSI_PROTOCOL_SAS: 355 case SCSI_PROTOCOL_SAS:
@@ -369,14 +372,14 @@ tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
369} 372}
370 373
371static u32 374static u32
372tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg, 375vhost_scsi_get_pr_transport_id_len(struct se_portal_group *se_tpg,
373 struct se_node_acl *se_nacl, 376 struct se_node_acl *se_nacl,
374 struct t10_pr_registration *pr_reg, 377 struct t10_pr_registration *pr_reg,
375 int *format_code) 378 int *format_code)
376{ 379{
377 struct tcm_vhost_tpg *tpg = container_of(se_tpg, 380 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
378 struct tcm_vhost_tpg, se_tpg); 381 struct vhost_scsi_tpg, se_tpg);
379 struct tcm_vhost_tport *tport = tpg->tport; 382 struct vhost_scsi_tport *tport = tpg->tport;
380 383
381 switch (tport->tport_proto_id) { 384 switch (tport->tport_proto_id) {
382 case SCSI_PROTOCOL_SAS: 385 case SCSI_PROTOCOL_SAS:
@@ -399,14 +402,14 @@ tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
399} 402}
400 403
401static char * 404static char *
402tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg, 405vhost_scsi_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
403 const char *buf, 406 const char *buf,
404 u32 *out_tid_len, 407 u32 *out_tid_len,
405 char **port_nexus_ptr) 408 char **port_nexus_ptr)
406{ 409{
407 struct tcm_vhost_tpg *tpg = container_of(se_tpg, 410 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
408 struct tcm_vhost_tpg, se_tpg); 411 struct vhost_scsi_tpg, se_tpg);
409 struct tcm_vhost_tport *tport = tpg->tport; 412 struct vhost_scsi_tport *tport = tpg->tport;
410 413
411 switch (tport->tport_proto_id) { 414 switch (tport->tport_proto_id) {
412 case SCSI_PROTOCOL_SAS: 415 case SCSI_PROTOCOL_SAS:
@@ -429,13 +432,13 @@ tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
429} 432}
430 433
431static struct se_node_acl * 434static struct se_node_acl *
432tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg) 435vhost_scsi_alloc_fabric_acl(struct se_portal_group *se_tpg)
433{ 436{
434 struct tcm_vhost_nacl *nacl; 437 struct vhost_scsi_nacl *nacl;
435 438
436 nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL); 439 nacl = kzalloc(sizeof(struct vhost_scsi_nacl), GFP_KERNEL);
437 if (!nacl) { 440 if (!nacl) {
438 pr_err("Unable to allocate struct tcm_vhost_nacl\n"); 441 pr_err("Unable to allocate struct vhost_scsi_nacl\n");
439 return NULL; 442 return NULL;
440 } 443 }
441 444
@@ -443,24 +446,24 @@ tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
443} 446}
444 447
445static void 448static void
446tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg, 449vhost_scsi_release_fabric_acl(struct se_portal_group *se_tpg,
447 struct se_node_acl *se_nacl) 450 struct se_node_acl *se_nacl)
448{ 451{
449 struct tcm_vhost_nacl *nacl = container_of(se_nacl, 452 struct vhost_scsi_nacl *nacl = container_of(se_nacl,
450 struct tcm_vhost_nacl, se_node_acl); 453 struct vhost_scsi_nacl, se_node_acl);
451 kfree(nacl); 454 kfree(nacl);
452} 455}
453 456
454static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg) 457static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
455{ 458{
456 return 1; 459 return 1;
457} 460}
458 461
459static void tcm_vhost_release_cmd(struct se_cmd *se_cmd) 462static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
460{ 463{
461 struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd, 464 struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
462 struct tcm_vhost_cmd, tvc_se_cmd); 465 struct vhost_scsi_cmd, tvc_se_cmd);
463 struct se_session *se_sess = se_cmd->se_sess; 466 struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
464 int i; 467 int i;
465 468
466 if (tv_cmd->tvc_sgl_count) { 469 if (tv_cmd->tvc_sgl_count) {
@@ -472,53 +475,53 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
472 put_page(sg_page(&tv_cmd->tvc_prot_sgl[i])); 475 put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
473 } 476 }
474 477
475 tcm_vhost_put_inflight(tv_cmd->inflight); 478 vhost_scsi_put_inflight(tv_cmd->inflight);
476 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); 479 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
477} 480}
478 481
479static int tcm_vhost_shutdown_session(struct se_session *se_sess) 482static int vhost_scsi_shutdown_session(struct se_session *se_sess)
480{ 483{
481 return 0; 484 return 0;
482} 485}
483 486
484static void tcm_vhost_close_session(struct se_session *se_sess) 487static void vhost_scsi_close_session(struct se_session *se_sess)
485{ 488{
486 return; 489 return;
487} 490}
488 491
489static u32 tcm_vhost_sess_get_index(struct se_session *se_sess) 492static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
490{ 493{
491 return 0; 494 return 0;
492} 495}
493 496
494static int tcm_vhost_write_pending(struct se_cmd *se_cmd) 497static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
495{ 498{
496 /* Go ahead and process the write immediately */ 499 /* Go ahead and process the write immediately */
497 target_execute_cmd(se_cmd); 500 target_execute_cmd(se_cmd);
498 return 0; 501 return 0;
499} 502}
500 503
501static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd) 504static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
502{ 505{
503 return 0; 506 return 0;
504} 507}
505 508
506static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl) 509static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
507{ 510{
508 return; 511 return;
509} 512}
510 513
511static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd) 514static u32 vhost_scsi_get_task_tag(struct se_cmd *se_cmd)
512{ 515{
513 return 0; 516 return 0;
514} 517}
515 518
516static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd) 519static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
517{ 520{
518 return 0; 521 return 0;
519} 522}
520 523
521static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd) 524static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
522{ 525{
523 struct vhost_scsi *vs = cmd->tvc_vhost; 526 struct vhost_scsi *vs = cmd->tvc_vhost;
524 527
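
The release path in the hunk above recovers the driver-private command from the embedded struct se_cmd and recycles its tag into the session-wide pool. A minimal sketch of that pattern, with my_cmd standing in for struct vhost_scsi_cmd (hypothetical names, not the driver's):

    #include <linux/percpu_ida.h>
    #include <target/target_core_base.h>

    struct my_cmd {
            struct se_cmd se_cmd;           /* embedded generic descriptor */
    };

    static void my_release_cmd(struct se_cmd *se_cmd)
    {
            /* Recover the containing command from its embedded member. */
            struct my_cmd *cmd = container_of(se_cmd, struct my_cmd, se_cmd);
            /* Simplified: the hunk takes the session from the nexus instead,
             * since se_cmd->se_sess is not relied upon at release time. */
            struct se_session *se_sess = se_cmd->se_sess;

            pr_debug("releasing cmd %p tag %d\n", cmd, se_cmd->map_tag);
            percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
    }
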
@@ -527,44 +530,44 @@ static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
527 vhost_work_queue(&vs->dev, &vs->vs_completion_work); 530 vhost_work_queue(&vs->dev, &vs->vs_completion_work);
528} 531}
529 532
530static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd) 533static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
531{ 534{
532 struct tcm_vhost_cmd *cmd = container_of(se_cmd, 535 struct vhost_scsi_cmd *cmd = container_of(se_cmd,
533 struct tcm_vhost_cmd, tvc_se_cmd); 536 struct vhost_scsi_cmd, tvc_se_cmd);
534 vhost_scsi_complete_cmd(cmd); 537 vhost_scsi_complete_cmd(cmd);
535 return 0; 538 return 0;
536} 539}
537 540
538static int tcm_vhost_queue_status(struct se_cmd *se_cmd) 541static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
539{ 542{
540 struct tcm_vhost_cmd *cmd = container_of(se_cmd, 543 struct vhost_scsi_cmd *cmd = container_of(se_cmd,
541 struct tcm_vhost_cmd, tvc_se_cmd); 544 struct vhost_scsi_cmd, tvc_se_cmd);
542 vhost_scsi_complete_cmd(cmd); 545 vhost_scsi_complete_cmd(cmd);
543 return 0; 546 return 0;
544} 547}
545 548
546static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd) 549static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
547{ 550{
548 return; 551 return;
549} 552}
550 553
551static void tcm_vhost_aborted_task(struct se_cmd *se_cmd) 554static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
552{ 555{
553 return; 556 return;
554} 557}
555 558
556static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt) 559static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
557{ 560{
558 vs->vs_events_nr--; 561 vs->vs_events_nr--;
559 kfree(evt); 562 kfree(evt);
560} 563}
561 564
562static struct tcm_vhost_evt * 565static struct vhost_scsi_evt *
563tcm_vhost_allocate_evt(struct vhost_scsi *vs, 566vhost_scsi_allocate_evt(struct vhost_scsi *vs,
564 u32 event, u32 reason) 567 u32 event, u32 reason)
565{ 568{
566 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; 569 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
567 struct tcm_vhost_evt *evt; 570 struct vhost_scsi_evt *evt;
568 571
569 if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) { 572 if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
570 vs->vs_events_missed = true; 573 vs->vs_events_missed = true;
@@ -573,7 +576,7 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs,
573 576
574 evt = kzalloc(sizeof(*evt), GFP_KERNEL); 577 evt = kzalloc(sizeof(*evt), GFP_KERNEL);
575 if (!evt) { 578 if (!evt) {
576 vq_err(vq, "Failed to allocate tcm_vhost_evt\n"); 579 vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
577 vs->vs_events_missed = true; 580 vs->vs_events_missed = true;
578 return NULL; 581 return NULL;
579 } 582 }
@@ -585,7 +588,7 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs,
585 return evt; 588 return evt;
586} 589}
587 590
588static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd) 591static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
589{ 592{
590 struct se_cmd *se_cmd = &cmd->tvc_se_cmd; 593 struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
591 594
@@ -600,7 +603,7 @@ static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
600} 603}
601 604
602static void 605static void
603tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt) 606vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
604{ 607{
605 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; 608 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
606 struct virtio_scsi_event *event = &evt->event; 609 struct virtio_scsi_event *event = &evt->event;
@@ -646,24 +649,24 @@ again:
646 if (!ret) 649 if (!ret)
647 vhost_add_used_and_signal(&vs->dev, vq, head, 0); 650 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
648 else 651 else
649 vq_err(vq, "Faulted on tcm_vhost_send_event\n"); 652 vq_err(vq, "Faulted on vhost_scsi_send_event\n");
650} 653}
651 654
652static void tcm_vhost_evt_work(struct vhost_work *work) 655static void vhost_scsi_evt_work(struct vhost_work *work)
653{ 656{
654 struct vhost_scsi *vs = container_of(work, struct vhost_scsi, 657 struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
655 vs_event_work); 658 vs_event_work);
656 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; 659 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
657 struct tcm_vhost_evt *evt; 660 struct vhost_scsi_evt *evt;
658 struct llist_node *llnode; 661 struct llist_node *llnode;
659 662
660 mutex_lock(&vq->mutex); 663 mutex_lock(&vq->mutex);
661 llnode = llist_del_all(&vs->vs_event_list); 664 llnode = llist_del_all(&vs->vs_event_list);
662 while (llnode) { 665 while (llnode) {
663 evt = llist_entry(llnode, struct tcm_vhost_evt, list); 666 evt = llist_entry(llnode, struct vhost_scsi_evt, list);
664 llnode = llist_next(llnode); 667 llnode = llist_next(llnode);
665 tcm_vhost_do_evt_work(vs, evt); 668 vhost_scsi_do_evt_work(vs, evt);
666 tcm_vhost_free_evt(vs, evt); 669 vhost_scsi_free_evt(vs, evt);
667 } 670 }
668 mutex_unlock(&vq->mutex); 671 mutex_unlock(&vq->mutex);
669} 672}
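
Both work handlers drain their queues with the lock-free llist pattern: producers publish with llist_add() from completion context, and the worker detaches the entire list in one atomic llist_del_all() before walking it without locks. A condensed sketch (process() is a stand-in for vhost_scsi_do_evt_work()):

    struct llist_node *node;
    struct vhost_scsi_evt *evt;

    /* Producer side, safe from any context: */
    llist_add(&evt->list, &vs->vs_event_list);

    /* Consumer side, in the vhost work handler: */
    node = llist_del_all(&vs->vs_event_list);
    while (node) {
            evt = llist_entry(node, struct vhost_scsi_evt, list);
            node = llist_next(node);    /* read the link before freeing evt */
            process(evt);
            kfree(evt);
    }
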
@@ -679,15 +682,16 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
679 vs_completion_work); 682 vs_completion_work);
680 DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ); 683 DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
681 struct virtio_scsi_cmd_resp v_rsp; 684 struct virtio_scsi_cmd_resp v_rsp;
682 struct tcm_vhost_cmd *cmd; 685 struct vhost_scsi_cmd *cmd;
683 struct llist_node *llnode; 686 struct llist_node *llnode;
684 struct se_cmd *se_cmd; 687 struct se_cmd *se_cmd;
688 struct iov_iter iov_iter;
685 int ret, vq; 689 int ret, vq;
686 690
687 bitmap_zero(signal, VHOST_SCSI_MAX_VQ); 691 bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
688 llnode = llist_del_all(&vs->vs_completion_list); 692 llnode = llist_del_all(&vs->vs_completion_list);
689 while (llnode) { 693 while (llnode) {
690 cmd = llist_entry(llnode, struct tcm_vhost_cmd, 694 cmd = llist_entry(llnode, struct vhost_scsi_cmd,
691 tvc_completion_list); 695 tvc_completion_list);
692 llnode = llist_next(llnode); 696 llnode = llist_next(llnode);
693 se_cmd = &cmd->tvc_se_cmd; 697 se_cmd = &cmd->tvc_se_cmd;
@@ -703,8 +707,11 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
703 se_cmd->scsi_sense_length); 707 se_cmd->scsi_sense_length);
704 memcpy(v_rsp.sense, cmd->tvc_sense_buf, 708 memcpy(v_rsp.sense, cmd->tvc_sense_buf,
705 se_cmd->scsi_sense_length); 709 se_cmd->scsi_sense_length);
706 ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp)); 710
707 if (likely(ret == 0)) { 711 iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
712 cmd->tvc_in_iovs, sizeof(v_rsp));
713 ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
714 if (likely(ret == sizeof(v_rsp))) {
708 struct vhost_scsi_virtqueue *q; 715 struct vhost_scsi_virtqueue *q;
709 vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0); 716 vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
710 q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq); 717 q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
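
The completion hunk above swaps a single copy_to_user() for an iov_iter, so the response can span several guest iovecs under ANY_LAYOUT. A hedged sketch of the new idiom in isolation (iov and nvecs are stand-ins for the saved tvc_resp_iov / tvc_in_iovs):

    struct iov_iter iter;
    struct virtio_scsi_cmd_resp v_rsp = { .status = SAM_STAT_GOOD };

    /* READ direction: the guest will read what we copy in. */
    iov_iter_init(&iter, READ, iov, nvecs, sizeof(v_rsp));

    /* Unlike copy_to_user(), copy_to_iter() returns bytes copied. */
    if (copy_to_iter(&v_rsp, sizeof(v_rsp), &iter) != sizeof(v_rsp))
            pr_err("short copy of virtio-scsi response to guest\n");
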
@@ -722,13 +729,13 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
722 vhost_signal(&vs->dev, &vs->vqs[vq].vq); 729 vhost_signal(&vs->dev, &vs->vqs[vq].vq);
723} 730}
724 731
725static struct tcm_vhost_cmd * 732static struct vhost_scsi_cmd *
726vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg, 733vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
727 unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr, 734 unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
728 u32 exp_data_len, int data_direction) 735 u32 exp_data_len, int data_direction)
729{ 736{
730 struct tcm_vhost_cmd *cmd; 737 struct vhost_scsi_cmd *cmd;
731 struct tcm_vhost_nexus *tv_nexus; 738 struct vhost_scsi_nexus *tv_nexus;
732 struct se_session *se_sess; 739 struct se_session *se_sess;
733 struct scatterlist *sg, *prot_sg; 740 struct scatterlist *sg, *prot_sg;
734 struct page **pages; 741 struct page **pages;
@@ -736,22 +743,22 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
736 743
737 tv_nexus = tpg->tpg_nexus; 744 tv_nexus = tpg->tpg_nexus;
738 if (!tv_nexus) { 745 if (!tv_nexus) {
739 pr_err("Unable to locate active struct tcm_vhost_nexus\n"); 746 pr_err("Unable to locate active struct vhost_scsi_nexus\n");
740 return ERR_PTR(-EIO); 747 return ERR_PTR(-EIO);
741 } 748 }
742 se_sess = tv_nexus->tvn_se_sess; 749 se_sess = tv_nexus->tvn_se_sess;
743 750
744 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); 751 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
745 if (tag < 0) { 752 if (tag < 0) {
746 pr_err("Unable to obtain tag for tcm_vhost_cmd\n"); 753 pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
747 return ERR_PTR(-ENOMEM); 754 return ERR_PTR(-ENOMEM);
748 } 755 }
749 756
750 cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag]; 757 cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
751 sg = cmd->tvc_sgl; 758 sg = cmd->tvc_sgl;
752 prot_sg = cmd->tvc_prot_sgl; 759 prot_sg = cmd->tvc_prot_sgl;
753 pages = cmd->tvc_upages; 760 pages = cmd->tvc_upages;
754 memset(cmd, 0, sizeof(struct tcm_vhost_cmd)); 761 memset(cmd, 0, sizeof(struct vhost_scsi_cmd));
755 762
756 cmd->tvc_sgl = sg; 763 cmd->tvc_sgl = sg;
757 cmd->tvc_prot_sgl = prot_sg; 764 cmd->tvc_prot_sgl = prot_sg;
@@ -763,9 +770,9 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
763 cmd->tvc_exp_data_len = exp_data_len; 770 cmd->tvc_exp_data_len = exp_data_len;
764 cmd->tvc_data_direction = data_direction; 771 cmd->tvc_data_direction = data_direction;
765 cmd->tvc_nexus = tv_nexus; 772 cmd->tvc_nexus = tv_nexus;
766 cmd->inflight = tcm_vhost_get_inflight(vq); 773 cmd->inflight = vhost_scsi_get_inflight(vq);
767 774
768 memcpy(cmd->tvc_cdb, cdb, TCM_VHOST_MAX_CDB_SIZE); 775 memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
769 776
770 return cmd; 777 return cmd;
771} 778}
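
vhost_scsi_get_tag() above combines two tricks: percpu_ida_alloc() returns a slot index into the session's pre-built sess_cmd_map, and the recycled descriptor is zeroed while its per-slot arrays are kept. Restated with comments, under the hunk's own names:

    tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
    if (tag < 0)                        /* TASK_RUNNING: fail, don't sleep */
            return ERR_PTR(-ENOMEM);

    cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];

    /* Zero the descriptor for the new request, but keep the scatterlist
     * and page arrays that were allocated once per slot at nexus setup. */
    sg = cmd->tvc_sgl;
    prot_sg = cmd->tvc_prot_sgl;
    pages = cmd->tvc_upages;
    memset(cmd, 0, sizeof(*cmd));
    cmd->tvc_sgl = sg;
    cmd->tvc_prot_sgl = prot_sg;
    cmd->tvc_upages = pages;
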
@@ -776,29 +783,22 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
776 * Returns the number of scatterlist entries used or -errno on error. 783 * Returns the number of scatterlist entries used or -errno on error.
777 */ 784 */
778static int 785static int
779vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd, 786vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
787 void __user *ptr,
788 size_t len,
780 struct scatterlist *sgl, 789 struct scatterlist *sgl,
781 unsigned int sgl_count,
782 struct iovec *iov,
783 struct page **pages,
784 bool write) 790 bool write)
785{ 791{
786 unsigned int npages = 0, pages_nr, offset, nbytes; 792 unsigned int npages = 0, offset, nbytes;
793 unsigned int pages_nr = iov_num_pages(ptr, len);
787 struct scatterlist *sg = sgl; 794 struct scatterlist *sg = sgl;
788 void __user *ptr = iov->iov_base; 795 struct page **pages = cmd->tvc_upages;
789 size_t len = iov->iov_len;
790 int ret, i; 796 int ret, i;
791 797
792 pages_nr = iov_num_pages(iov); 798 if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) {
793 if (pages_nr > sgl_count) {
794 pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
795 " sgl_count: %u\n", pages_nr, sgl_count);
796 return -ENOBUFS;
797 }
798 if (pages_nr > TCM_VHOST_PREALLOC_UPAGES) {
799 pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than" 799 pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
800 " preallocated TCM_VHOST_PREALLOC_UPAGES: %u\n", 800 " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n",
801 pages_nr, TCM_VHOST_PREALLOC_UPAGES); 801 pages_nr, VHOST_SCSI_PREALLOC_UPAGES);
802 return -ENOBUFS; 802 return -ENOBUFS;
803 } 803 }
804 804
@@ -829,84 +829,94 @@ out:
829} 829}
830 830
831static int 831static int
832vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd, 832vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
833 struct iovec *iov,
834 int niov,
835 bool write)
836{ 833{
837 struct scatterlist *sg = cmd->tvc_sgl; 834 int sgl_count = 0;
838 unsigned int sgl_count = 0;
839 int ret, i;
840 835
841 for (i = 0; i < niov; i++) 836 if (!iter || !iter->iov) {
842 sgl_count += iov_num_pages(&iov[i]); 837 pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
838 " present\n", __func__, bytes);
839 return -EINVAL;
840 }
843 841
844 if (sgl_count > TCM_VHOST_PREALLOC_SGLS) { 842 sgl_count = iov_iter_npages(iter, 0xffff);
845 pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than" 843 if (sgl_count > max_sgls) {
846 " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n", 844 pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
847 sgl_count, TCM_VHOST_PREALLOC_SGLS); 845 " max_sgls: %d\n", __func__, sgl_count, max_sgls);
848 return -ENOBUFS; 846 return -EINVAL;
849 } 847 }
848 return sgl_count;
849}
850 850
851 pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count); 851static int
852 sg_init_table(sg, sgl_count); 852vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
853 cmd->tvc_sgl_count = sgl_count; 853 struct iov_iter *iter,
854 struct scatterlist *sg, int sg_count)
855{
856 size_t off = iter->iov_offset;
857 int i, ret;
854 858
855 pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count); 859 for (i = 0; i < iter->nr_segs; i++) {
860 void __user *base = iter->iov[i].iov_base + off;
861 size_t len = iter->iov[i].iov_len - off;
856 862
857 for (i = 0; i < niov; i++) { 863 ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
858 ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
859 cmd->tvc_upages, write);
860 if (ret < 0) { 864 if (ret < 0) {
861 for (i = 0; i < cmd->tvc_sgl_count; i++) 865 for (i = 0; i < sg_count; i++) {
862 put_page(sg_page(&cmd->tvc_sgl[i])); 866 struct page *page = sg_page(&sg[i]);
863 867 if (page)
864 cmd->tvc_sgl_count = 0; 868 put_page(page);
869 }
865 return ret; 870 return ret;
866 } 871 }
867 sg += ret; 872 sg += ret;
868 sgl_count -= ret; 873 off = 0;
869 } 874 }
870 return 0; 875 return 0;
871} 876}
872 877
873static int 878static int
874vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd, 879vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
875 struct iovec *iov, 880 size_t prot_bytes, struct iov_iter *prot_iter,
876 int niov, 881 size_t data_bytes, struct iov_iter *data_iter)
877 bool write) 882{
878{ 883 int sgl_count, ret;
879 struct scatterlist *prot_sg = cmd->tvc_prot_sgl; 884 bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
880 unsigned int prot_sgl_count = 0; 885
881 int ret, i; 886 if (prot_bytes) {
882 887 sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
883 for (i = 0; i < niov; i++) 888 VHOST_SCSI_PREALLOC_PROT_SGLS);
884 prot_sgl_count += iov_num_pages(&iov[i]); 889 if (sgl_count < 0)
885 890 return sgl_count;
886 if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) { 891
887 pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than" 892 sg_init_table(cmd->tvc_prot_sgl, sgl_count);
888 " preallocated TCM_VHOST_PREALLOC_PROT_SGLS: %u\n", 893 cmd->tvc_prot_sgl_count = sgl_count;
889 prot_sgl_count, TCM_VHOST_PREALLOC_PROT_SGLS); 894 pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
890 return -ENOBUFS; 895 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
891 } 896
892 897 ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
893 pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__, 898 cmd->tvc_prot_sgl,
894 prot_sg, prot_sgl_count); 899 cmd->tvc_prot_sgl_count);
895 sg_init_table(prot_sg, prot_sgl_count);
896 cmd->tvc_prot_sgl_count = prot_sgl_count;
897
898 for (i = 0; i < niov; i++) {
899 ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i],
900 cmd->tvc_upages, write);
901 if (ret < 0) { 900 if (ret < 0) {
902 for (i = 0; i < cmd->tvc_prot_sgl_count; i++)
903 put_page(sg_page(&cmd->tvc_prot_sgl[i]));
904
905 cmd->tvc_prot_sgl_count = 0; 901 cmd->tvc_prot_sgl_count = 0;
906 return ret; 902 return ret;
907 } 903 }
908 prot_sg += ret; 904 }
909 prot_sgl_count -= ret; 905 sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
906 VHOST_SCSI_PREALLOC_SGLS);
907 if (sgl_count < 0)
908 return sgl_count;
909
910 sg_init_table(cmd->tvc_sgl, sgl_count);
911 cmd->tvc_sgl_count = sgl_count;
912 pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
913 cmd->tvc_sgl, cmd->tvc_sgl_count);
914
915 ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
916 cmd->tvc_sgl, cmd->tvc_sgl_count);
917 if (ret < 0) {
918 cmd->tvc_sgl_count = 0;
919 return ret;
910 } 920 }
911 return 0; 921 return 0;
912} 922}
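
Taken together, the new helpers split mapping into sizing and pinning: iov_iter_npages() bounds the scatterlist before any page is touched, then the segments are walked by hand with the iterator's iov_offset honoured only on the first segment. A condensed sketch, reusing the hypothetical map_user_buf_to_sgl() from the sketch above:

    int sgl_count = iov_iter_npages(iter, 0xffff);  /* upper bound on pages */
    size_t off = iter->iov_offset;
    unsigned long seg;
    int used;

    if (sgl_count > max_sgls)
            return -EINVAL;                 /* larger than the prealloc pool */
    sg_init_table(sg, sgl_count);

    for (seg = 0; seg < iter->nr_segs; seg++) {
            void __user *base = iter->iov[seg].iov_base + off;
            size_t len = iter->iov[seg].iov_len - off;

            used = map_user_buf_to_sgl(base, len, write, pages, sg);
            if (used < 0)
                    return used;            /* caller unwinds pinned pages */
            sg += used;
            off = 0;                        /* offset applies only once */
    }
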
@@ -928,11 +938,11 @@ static int vhost_scsi_to_tcm_attr(int attr)
928 return TCM_SIMPLE_TAG; 938 return TCM_SIMPLE_TAG;
929} 939}
930 940
931static void tcm_vhost_submission_work(struct work_struct *work) 941static void vhost_scsi_submission_work(struct work_struct *work)
932{ 942{
933 struct tcm_vhost_cmd *cmd = 943 struct vhost_scsi_cmd *cmd =
934 container_of(work, struct tcm_vhost_cmd, work); 944 container_of(work, struct vhost_scsi_cmd, work);
935 struct tcm_vhost_nexus *tv_nexus; 945 struct vhost_scsi_nexus *tv_nexus;
936 struct se_cmd *se_cmd = &cmd->tvc_se_cmd; 946 struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
937 struct scatterlist *sg_ptr, *sg_prot_ptr = NULL; 947 struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
938 int rc; 948 int rc;
@@ -986,19 +996,20 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs,
986static void 996static void
987vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) 997vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
988{ 998{
989 struct tcm_vhost_tpg **vs_tpg; 999 struct vhost_scsi_tpg **vs_tpg, *tpg;
990 struct virtio_scsi_cmd_req v_req; 1000 struct virtio_scsi_cmd_req v_req;
991 struct virtio_scsi_cmd_req_pi v_req_pi; 1001 struct virtio_scsi_cmd_req_pi v_req_pi;
992 struct tcm_vhost_tpg *tpg; 1002 struct vhost_scsi_cmd *cmd;
993 struct tcm_vhost_cmd *cmd; 1003 struct iov_iter out_iter, in_iter, prot_iter, data_iter;
994 u64 tag; 1004 u64 tag;
995 u32 exp_data_len, data_first, data_num, data_direction, prot_first; 1005 u32 exp_data_len, data_direction;
996 unsigned out, in, i; 1006 unsigned out, in;
997 int head, ret, data_niov, prot_niov, prot_bytes; 1007 int head, ret, prot_bytes;
998 size_t req_size; 1008 size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
1009 size_t out_size, in_size;
999 u16 lun; 1010 u16 lun;
1000 u8 *target, *lunp, task_attr; 1011 u8 *target, *lunp, task_attr;
1001 bool hdr_pi; 1012 bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
1002 void *req, *cdb; 1013 void *req, *cdb;
1003 1014
1004 mutex_lock(&vq->mutex); 1015 mutex_lock(&vq->mutex);
@@ -1014,10 +1025,10 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1014 1025
1015 for (;;) { 1026 for (;;) {
1016 head = vhost_get_vq_desc(vq, vq->iov, 1027 head = vhost_get_vq_desc(vq, vq->iov,
1017 ARRAY_SIZE(vq->iov), &out, &in, 1028 ARRAY_SIZE(vq->iov), &out, &in,
1018 NULL, NULL); 1029 NULL, NULL);
1019 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n", 1030 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
1020 head, out, in); 1031 head, out, in);
1021 /* On error, stop handling until the next kick. */ 1032 /* On error, stop handling until the next kick. */
1022 if (unlikely(head < 0)) 1033 if (unlikely(head < 0))
1023 break; 1034 break;
@@ -1029,113 +1040,134 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1029 } 1040 }
1030 break; 1041 break;
1031 } 1042 }
1032
1033 /* FIXME: BIDI operation */
1034 if (out == 1 && in == 1) {
1035 data_direction = DMA_NONE;
1036 data_first = 0;
1037 data_num = 0;
1038 } else if (out == 1 && in > 1) {
1039 data_direction = DMA_FROM_DEVICE;
1040 data_first = out + 1;
1041 data_num = in - 1;
1042 } else if (out > 1 && in == 1) {
1043 data_direction = DMA_TO_DEVICE;
1044 data_first = 1;
1045 data_num = out - 1;
1046 } else {
1047 vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
1048 out, in);
1049 break;
1050 }
1051
1052 /* 1043 /*
1053 * Check for a sane resp buffer so we can report errors to 1044 * Check for a sane response buffer so we can report early
1054 * the guest. 1045 * errors back to the guest.
1055 */ 1046 */
1056 if (unlikely(vq->iov[out].iov_len != 1047 if (unlikely(vq->iov[out].iov_len < rsp_size)) {
1057 sizeof(struct virtio_scsi_cmd_resp))) { 1048 vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
1058 vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu" 1049 " size, got %zu bytes\n", vq->iov[out].iov_len);
1059 " bytes\n", vq->iov[out].iov_len);
1060 break; 1050 break;
1061 } 1051 }
1062 1052 /*
1063 if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI)) { 1053 * Set up pointers and values based upon the virtio-scsi
 1054 * request header variant when T10_PI is enabled in the KVM guest.
1055 */
1056 if (t10_pi) {
1064 req = &v_req_pi; 1057 req = &v_req_pi;
1058 req_size = sizeof(v_req_pi);
1065 lunp = &v_req_pi.lun[0]; 1059 lunp = &v_req_pi.lun[0];
1066 target = &v_req_pi.lun[1]; 1060 target = &v_req_pi.lun[1];
1067 req_size = sizeof(v_req_pi);
1068 hdr_pi = true;
1069 } else { 1061 } else {
1070 req = &v_req; 1062 req = &v_req;
1063 req_size = sizeof(v_req);
1071 lunp = &v_req.lun[0]; 1064 lunp = &v_req.lun[0];
1072 target = &v_req.lun[1]; 1065 target = &v_req.lun[1];
1073 req_size = sizeof(v_req);
1074 hdr_pi = false;
1075 } 1066 }
1067 /*
1068 * FIXME: Not correct for BIDI operation
1069 */
1070 out_size = iov_length(vq->iov, out);
1071 in_size = iov_length(&vq->iov[out], in);
1076 1072
1077 if (unlikely(vq->iov[0].iov_len < req_size)) { 1073 /*
1078 pr_err("Expecting virtio-scsi header: %zu, got %zu\n", 1074 * Copy over the virtio-scsi request header, which for an
1079 req_size, vq->iov[0].iov_len); 1075 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
1080 break; 1076 * single iovec may contain both the header + outgoing
1081 } 1077 * WRITE payloads.
1082 ret = copy_from_user(req, vq->iov[0].iov_base, req_size); 1078 *
1083 if (unlikely(ret)) { 1079 * copy_from_iter() will advance out_iter, so that it will
1084 vq_err(vq, "Faulted on virtio_scsi_cmd_req\n"); 1080 * point at the start of the outgoing WRITE payload, if
1085 break; 1081 * DMA_TO_DEVICE is set.
1086 } 1082 */
1083 iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
1087 1084
1085 ret = copy_from_iter(req, req_size, &out_iter);
1086 if (unlikely(ret != req_size)) {
1087 vq_err(vq, "Faulted on copy_from_iter\n");
1088 vhost_scsi_send_bad_target(vs, vq, head, out);
1089 continue;
1090 }
1088 /* virtio-scsi spec requires byte 0 of the lun to be 1 */ 1091 /* virtio-scsi spec requires byte 0 of the lun to be 1 */
1089 if (unlikely(*lunp != 1)) { 1092 if (unlikely(*lunp != 1)) {
1093 vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
1090 vhost_scsi_send_bad_target(vs, vq, head, out); 1094 vhost_scsi_send_bad_target(vs, vq, head, out);
1091 continue; 1095 continue;
1092 } 1096 }
1093 1097
1094 tpg = ACCESS_ONCE(vs_tpg[*target]); 1098 tpg = ACCESS_ONCE(vs_tpg[*target]);
1095
1096 /* Target does not exist, fail the request */
1097 if (unlikely(!tpg)) { 1099 if (unlikely(!tpg)) {
1100 /* Target does not exist, fail the request */
1098 vhost_scsi_send_bad_target(vs, vq, head, out); 1101 vhost_scsi_send_bad_target(vs, vq, head, out);
1099 continue; 1102 continue;
1100 } 1103 }
1101
1102 data_niov = data_num;
1103 prot_niov = prot_first = prot_bytes = 0;
1104 /* 1104 /*
1105 * Determine if any protection information iovecs are preceeding 1105 * Determine data_direction by calculating the total outgoing
1106 * the actual data payload, and adjust data_first + data_niov 1106 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
1107 * values accordingly for vhost_scsi_map_iov_to_sgl() below. 1107 * response headers respectively.
1108 * 1108 *
1109 * Also extract virtio_scsi header bits for vhost_scsi_get_tag() 1109 * For DMA_TO_DEVICE this is out_iter, which is already pointing
1110 * to the right place.
1111 *
1112 * For DMA_FROM_DEVICE, the iovec will be just past the end
1113 * of the virtio-scsi response header in either the same
1114 * or immediately following iovec.
1115 *
1116 * Any associated T10_PI bytes for the outgoing / incoming
1117 * payloads are included in calculation of exp_data_len here.
1110 */ 1118 */
1111 if (hdr_pi) { 1119 prot_bytes = 0;
1120
1121 if (out_size > req_size) {
1122 data_direction = DMA_TO_DEVICE;
1123 exp_data_len = out_size - req_size;
1124 data_iter = out_iter;
1125 } else if (in_size > rsp_size) {
1126 data_direction = DMA_FROM_DEVICE;
1127 exp_data_len = in_size - rsp_size;
1128
1129 iov_iter_init(&in_iter, READ, &vq->iov[out], in,
1130 rsp_size + exp_data_len);
1131 iov_iter_advance(&in_iter, rsp_size);
1132 data_iter = in_iter;
1133 } else {
1134 data_direction = DMA_NONE;
1135 exp_data_len = 0;
1136 }
1137 /*
1138 * If T10_PI header + payload is present, setup prot_iter values
1139 * and recalculate data_iter for vhost_scsi_mapal() mapping to
1140 * host scatterlists via get_user_pages_fast().
1141 */
1142 if (t10_pi) {
1112 if (v_req_pi.pi_bytesout) { 1143 if (v_req_pi.pi_bytesout) {
1113 if (data_direction != DMA_TO_DEVICE) { 1144 if (data_direction != DMA_TO_DEVICE) {
1114 vq_err(vq, "Received non zero do_pi_niov" 1145 vq_err(vq, "Received non zero pi_bytesout,"
1115 ", but wrong data_direction\n"); 1146 " but wrong data_direction\n");
1116 goto err_cmd; 1147 vhost_scsi_send_bad_target(vs, vq, head, out);
1148 continue;
1117 } 1149 }
1118 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout); 1150 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
1119 } else if (v_req_pi.pi_bytesin) { 1151 } else if (v_req_pi.pi_bytesin) {
1120 if (data_direction != DMA_FROM_DEVICE) { 1152 if (data_direction != DMA_FROM_DEVICE) {
1121 vq_err(vq, "Received non zero di_pi_niov" 1153 vq_err(vq, "Received non zero pi_bytesin,"
1122 ", but wrong data_direction\n"); 1154 " but wrong data_direction\n");
1123 goto err_cmd; 1155 vhost_scsi_send_bad_target(vs, vq, head, out);
1156 continue;
1124 } 1157 }
1125 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin); 1158 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
1126 } 1159 }
1160 /*
1161 * Set prot_iter to data_iter, and advance past any
1162 * preceding prot_bytes that may be present.
1163 *
1164 * Also fix up the exp_data_len to reflect only the
1165 * actual data payload length.
1166 */
1127 if (prot_bytes) { 1167 if (prot_bytes) {
1128 int tmp = 0; 1168 exp_data_len -= prot_bytes;
1129 1169 prot_iter = data_iter;
1130 for (i = 0; i < data_num; i++) { 1170 iov_iter_advance(&data_iter, prot_bytes);
1131 tmp += vq->iov[data_first + i].iov_len;
1132 prot_niov++;
1133 if (tmp >= prot_bytes)
1134 break;
1135 }
1136 prot_first = data_first;
1137 data_first += prot_niov;
1138 data_niov = data_num - prot_niov;
1139 } 1171 }
1140 tag = vhost64_to_cpu(vq, v_req_pi.tag); 1172 tag = vhost64_to_cpu(vq, v_req_pi.tag);
1141 task_attr = v_req_pi.task_attr; 1173 task_attr = v_req_pi.task_attr;
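
The ANY_LAYOUT rework above derives everything from byte counts instead of iovec counts: the out and in descriptor totals are compared against the fixed request/response header sizes, and any T10-PI bytes are peeled off the front of the payload by advancing the iterator. The decision, gathered into one place from the hunk:

    out_size = iov_length(vq->iov, out);        /* guest -> host total */
    in_size = iov_length(&vq->iov[out], in);    /* host -> guest total */

    if (out_size > req_size) {                  /* payload after the header */
            data_direction = DMA_TO_DEVICE;
            exp_data_len = out_size - req_size; /* out_iter already advanced */
    } else if (in_size > rsp_size) {            /* payload after the response */
            data_direction = DMA_FROM_DEVICE;
            exp_data_len = in_size - rsp_size;
    } else {
            data_direction = DMA_NONE;
            exp_data_len = 0;
    }

    if (prot_bytes) {                           /* PI precedes the data */
            exp_data_len -= prot_bytes;
            prot_iter = data_iter;
            iov_iter_advance(&data_iter, prot_bytes);
    }
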
@@ -1147,83 +1179,65 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1147 cdb = &v_req.cdb[0]; 1179 cdb = &v_req.cdb[0];
1148 lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; 1180 lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
1149 } 1181 }
1150 exp_data_len = 0;
1151 for (i = 0; i < data_niov; i++)
1152 exp_data_len += vq->iov[data_first + i].iov_len;
1153 /* 1182 /*
1154 * Check that the recieved CDB size does not exceeded our 1183 * Check that the received CDB size does not exceed our
1155 * hardcoded max for vhost-scsi 1184 * hardcoded max for vhost-scsi, then get a pre-allocated
1185 * cmd descriptor for the new virtio-scsi tag.
1156 * 1186 *
1157 * TODO what if cdb was too small for varlen cdb header? 1187 * TODO what if cdb was too small for varlen cdb header?
1158 */ 1188 */
1159 if (unlikely(scsi_command_size(cdb) > TCM_VHOST_MAX_CDB_SIZE)) { 1189 if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
1160 vq_err(vq, "Received SCSI CDB with command_size: %d that" 1190 vq_err(vq, "Received SCSI CDB with command_size: %d that"
1161 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1191 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1162 scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE); 1192 scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
1163 goto err_cmd; 1193 vhost_scsi_send_bad_target(vs, vq, head, out);
1194 continue;
1164 } 1195 }
1165
1166 cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr, 1196 cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
1167 exp_data_len + prot_bytes, 1197 exp_data_len + prot_bytes,
1168 data_direction); 1198 data_direction);
1169 if (IS_ERR(cmd)) { 1199 if (IS_ERR(cmd)) {
1170 vq_err(vq, "vhost_scsi_get_tag failed %ld\n", 1200 vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1171 PTR_ERR(cmd)); 1201 PTR_ERR(cmd));
1172 goto err_cmd; 1202 vhost_scsi_send_bad_target(vs, vq, head, out);
1203 continue;
1173 } 1204 }
1174
1175 pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
1176 ": %d\n", cmd, exp_data_len, data_direction);
1177
1178 cmd->tvc_vhost = vs; 1205 cmd->tvc_vhost = vs;
1179 cmd->tvc_vq = vq; 1206 cmd->tvc_vq = vq;
1180 cmd->tvc_resp = vq->iov[out].iov_base; 1207 cmd->tvc_resp_iov = &vq->iov[out];
1208 cmd->tvc_in_iovs = in;
1181 1209
1182 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", 1210 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1183 cmd->tvc_cdb[0], cmd->tvc_lun); 1211 cmd->tvc_cdb[0], cmd->tvc_lun);
1212 pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
1213 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1184 1214
1185 if (prot_niov) {
1186 ret = vhost_scsi_map_iov_to_prot(cmd,
1187 &vq->iov[prot_first], prot_niov,
1188 data_direction == DMA_FROM_DEVICE);
1189 if (unlikely(ret)) {
1190 vq_err(vq, "Failed to map iov to"
1191 " prot_sgl\n");
1192 goto err_free;
1193 }
1194 }
1195 if (data_direction != DMA_NONE) { 1215 if (data_direction != DMA_NONE) {
1196 ret = vhost_scsi_map_iov_to_sgl(cmd, 1216 ret = vhost_scsi_mapal(cmd,
1197 &vq->iov[data_first], data_niov, 1217 prot_bytes, &prot_iter,
1198 data_direction == DMA_FROM_DEVICE); 1218 exp_data_len, &data_iter);
1199 if (unlikely(ret)) { 1219 if (unlikely(ret)) {
1200 vq_err(vq, "Failed to map iov to sgl\n"); 1220 vq_err(vq, "Failed to map iov to sgl\n");
1201 goto err_free; 1221 vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
1222 vhost_scsi_send_bad_target(vs, vq, head, out);
1223 continue;
1202 } 1224 }
1203 } 1225 }
1204 /* 1226 /*
1205 * Save the descriptor from vhost_get_vq_desc() to be used to 1227 * Save the descriptor from vhost_get_vq_desc() to be used to
1206 * complete the virtio-scsi request in TCM callback context via 1228 * complete the virtio-scsi request in TCM callback context via
1207 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status() 1229 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
1208 */ 1230 */
1209 cmd->tvc_vq_desc = head; 1231 cmd->tvc_vq_desc = head;
1210 /* 1232 /*
1211 * Dispatch tv_cmd descriptor for cmwq execution in process 1233 * Dispatch cmd descriptor for cmwq execution in process
1212 * context provided by tcm_vhost_workqueue. This also ensures 1234 * context provided by vhost_scsi_workqueue. This also ensures
1213 * tv_cmd is executed on the same kworker CPU as this vhost 1235 * cmd is executed on the same kworker CPU as this vhost
1214 * thread to gain positive L2 cache locality effects.. 1236 * thread to gain positive L2 cache locality effects.
1215 */ 1237 */
1216 INIT_WORK(&cmd->work, tcm_vhost_submission_work); 1238 INIT_WORK(&cmd->work, vhost_scsi_submission_work);
1217 queue_work(tcm_vhost_workqueue, &cmd->work); 1239 queue_work(vhost_scsi_workqueue, &cmd->work);
1218 } 1240 }
1219
1220 mutex_unlock(&vq->mutex);
1221 return;
1222
1223err_free:
1224 vhost_scsi_free_cmd(cmd);
1225err_cmd:
1226 vhost_scsi_send_bad_target(vs, vq, head, out);
1227out: 1241out:
1228 mutex_unlock(&vq->mutex); 1242 mutex_unlock(&vq->mutex);
1229} 1243}
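
Both request layouts above carry an 8-byte virtio-scsi LUN: byte 0 must be 1, byte 1 names the target, and bytes 2-3 hold the flat-addressed LUN with 0x40 set in the high byte, which is what the 0x3FFF mask strips. As a hypothetical helper:

    static u16 virtio_scsi_lun_to_u16(const u8 *lun)
    {
            /* lun[] = { 1, target, 0x40 | lun_hi, lun_lo, 0, 0, 0, 0 } */
            return ((lun[2] << 8) | lun[3]) & 0x3FFF;
    }
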
@@ -1234,15 +1248,15 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1234} 1248}
1235 1249
1236static void 1250static void
1237tcm_vhost_send_evt(struct vhost_scsi *vs, 1251vhost_scsi_send_evt(struct vhost_scsi *vs,
1238 struct tcm_vhost_tpg *tpg, 1252 struct vhost_scsi_tpg *tpg,
1239 struct se_lun *lun, 1253 struct se_lun *lun,
1240 u32 event, 1254 u32 event,
1241 u32 reason) 1255 u32 reason)
1242{ 1256{
1243 struct tcm_vhost_evt *evt; 1257 struct vhost_scsi_evt *evt;
1244 1258
1245 evt = tcm_vhost_allocate_evt(vs, event, reason); 1259 evt = vhost_scsi_allocate_evt(vs, event, reason);
1246 if (!evt) 1260 if (!evt)
1247 return; 1261 return;
1248 1262
@@ -1253,7 +1267,7 @@ tcm_vhost_send_evt(struct vhost_scsi *vs,
1253 * lun[4-7] need to be zero according to virtio-scsi spec. 1267 * lun[4-7] need to be zero according to virtio-scsi spec.
1254 */ 1268 */
1255 evt->event.lun[0] = 0x01; 1269 evt->event.lun[0] = 0x01;
1256 evt->event.lun[1] = tpg->tport_tpgt & 0xFF; 1270 evt->event.lun[1] = tpg->tport_tpgt;
1257 if (lun->unpacked_lun >= 256) 1271 if (lun->unpacked_lun >= 256)
1258 evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40; 1272 evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
1259 evt->event.lun[3] = lun->unpacked_lun & 0xFF; 1273 evt->event.lun[3] = lun->unpacked_lun & 0xFF;
@@ -1274,7 +1288,7 @@ static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1274 goto out; 1288 goto out;
1275 1289
1276 if (vs->vs_events_missed) 1290 if (vs->vs_events_missed)
1277 tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); 1291 vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1278out: 1292out:
1279 mutex_unlock(&vq->mutex); 1293 mutex_unlock(&vq->mutex);
1280} 1294}
@@ -1300,7 +1314,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
1300 int i; 1314 int i;
1301 1315
1302 /* Init new inflight and remember the old inflight */ 1316 /* Init new inflight and remember the old inflight */
1303 tcm_vhost_init_inflight(vs, old_inflight); 1317 vhost_scsi_init_inflight(vs, old_inflight);
1304 1318
1305 /* 1319 /*
1306 * The inflight->kref was initialized to 1. We decrement it here to 1320 * The inflight->kref was initialized to 1. We decrement it here to
@@ -1308,7 +1322,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
1308 * when all the reqs are finished. 1322 * when all the reqs are finished.
1309 */ 1323 */
1310 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) 1324 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1311 kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight); 1325 kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
1312 1326
1313 /* Flush both the vhost poll and vhost work */ 1327 /* Flush both the vhost poll and vhost work */
1314 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) 1328 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
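
The flush above depends on the usual kref handshake: each inflight counter is born with one reference, every outstanding request holds another, and dropping the initial reference arms the release callback to fire only when the last request completes. The generic shape of that pattern (a sketch, not the driver's exact structures):

    struct inflight {
            struct kref kref;
            struct completion comp;
    };

    static void inflight_release(struct kref *kref)
    {
            struct inflight *inf = container_of(kref, struct inflight, kref);

            complete(&inf->comp);       /* last reference is gone */
    }

    /* Setup: kref_init() leaves the count at 1. */
    kref_init(&inf->kref);
    init_completion(&inf->comp);

    /* Per request: kref_get(&inf->kref); per completion: kref_put(). */

    /* Flush: drop the initial reference, then wait out the stragglers. */
    kref_put(&inf->kref, inflight_release);
    wait_for_completion(&inf->comp);
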
@@ -1323,24 +1337,24 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
1323 1337
1324/* 1338/*
1325 * Called from vhost_scsi_ioctl() context to walk the list of available 1339 * Called from vhost_scsi_ioctl() context to walk the list of available
1326 * tcm_vhost_tpg with an active struct tcm_vhost_nexus 1340 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1327 * 1341 *
1328 * The lock nesting rule is: 1342 * The lock nesting rule is:
1329 * tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex 1343 * vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1330 */ 1344 */
1331static int 1345static int
1332vhost_scsi_set_endpoint(struct vhost_scsi *vs, 1346vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1333 struct vhost_scsi_target *t) 1347 struct vhost_scsi_target *t)
1334{ 1348{
1335 struct se_portal_group *se_tpg; 1349 struct se_portal_group *se_tpg;
1336 struct tcm_vhost_tport *tv_tport; 1350 struct vhost_scsi_tport *tv_tport;
1337 struct tcm_vhost_tpg *tpg; 1351 struct vhost_scsi_tpg *tpg;
1338 struct tcm_vhost_tpg **vs_tpg; 1352 struct vhost_scsi_tpg **vs_tpg;
1339 struct vhost_virtqueue *vq; 1353 struct vhost_virtqueue *vq;
1340 int index, ret, i, len; 1354 int index, ret, i, len;
1341 bool match = false; 1355 bool match = false;
1342 1356
1343 mutex_lock(&tcm_vhost_mutex); 1357 mutex_lock(&vhost_scsi_mutex);
1344 mutex_lock(&vs->dev.mutex); 1358 mutex_lock(&vs->dev.mutex);
1345 1359
1346 /* Verify that ring has been setup correctly. */ 1360 /* Verify that ring has been setup correctly. */
@@ -1361,7 +1375,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1361 if (vs->vs_tpg) 1375 if (vs->vs_tpg)
1362 memcpy(vs_tpg, vs->vs_tpg, len); 1376 memcpy(vs_tpg, vs->vs_tpg, len);
1363 1377
1364 list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) { 1378 list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1365 mutex_lock(&tpg->tv_tpg_mutex); 1379 mutex_lock(&tpg->tv_tpg_mutex);
1366 if (!tpg->tpg_nexus) { 1380 if (!tpg->tpg_nexus) {
1367 mutex_unlock(&tpg->tv_tpg_mutex); 1381 mutex_unlock(&tpg->tv_tpg_mutex);
@@ -1429,7 +1443,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1429 1443
1430out: 1444out:
1431 mutex_unlock(&vs->dev.mutex); 1445 mutex_unlock(&vs->dev.mutex);
1432 mutex_unlock(&tcm_vhost_mutex); 1446 mutex_unlock(&vhost_scsi_mutex);
1433 return ret; 1447 return ret;
1434} 1448}
1435 1449
@@ -1438,14 +1452,14 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1438 struct vhost_scsi_target *t) 1452 struct vhost_scsi_target *t)
1439{ 1453{
1440 struct se_portal_group *se_tpg; 1454 struct se_portal_group *se_tpg;
1441 struct tcm_vhost_tport *tv_tport; 1455 struct vhost_scsi_tport *tv_tport;
1442 struct tcm_vhost_tpg *tpg; 1456 struct vhost_scsi_tpg *tpg;
1443 struct vhost_virtqueue *vq; 1457 struct vhost_virtqueue *vq;
1444 bool match = false; 1458 bool match = false;
1445 int index, ret, i; 1459 int index, ret, i;
1446 u8 target; 1460 u8 target;
1447 1461
1448 mutex_lock(&tcm_vhost_mutex); 1462 mutex_lock(&vhost_scsi_mutex);
1449 mutex_lock(&vs->dev.mutex); 1463 mutex_lock(&vs->dev.mutex);
1450 /* Verify that ring has been setup correctly. */ 1464 /* Verify that ring has been setup correctly. */
1451 for (index = 0; index < vs->dev.nvqs; ++index) { 1465 for (index = 0; index < vs->dev.nvqs; ++index) {
@@ -1511,14 +1525,14 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1511 vs->vs_tpg = NULL; 1525 vs->vs_tpg = NULL;
1512 WARN_ON(vs->vs_events_nr); 1526 WARN_ON(vs->vs_events_nr);
1513 mutex_unlock(&vs->dev.mutex); 1527 mutex_unlock(&vs->dev.mutex);
1514 mutex_unlock(&tcm_vhost_mutex); 1528 mutex_unlock(&vhost_scsi_mutex);
1515 return 0; 1529 return 0;
1516 1530
1517err_tpg: 1531err_tpg:
1518 mutex_unlock(&tpg->tv_tpg_mutex); 1532 mutex_unlock(&tpg->tv_tpg_mutex);
1519err_dev: 1533err_dev:
1520 mutex_unlock(&vs->dev.mutex); 1534 mutex_unlock(&vs->dev.mutex);
1521 mutex_unlock(&tcm_vhost_mutex); 1535 mutex_unlock(&vhost_scsi_mutex);
1522 return ret; 1536 return ret;
1523} 1537}
1524 1538
@@ -1565,7 +1579,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
1565 goto err_vqs; 1579 goto err_vqs;
1566 1580
1567 vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work); 1581 vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1568 vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work); 1582 vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1569 1583
1570 vs->vs_events_nr = 0; 1584 vs->vs_events_nr = 0;
1571 vs->vs_events_missed = false; 1585 vs->vs_events_missed = false;
@@ -1580,7 +1594,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
1580 } 1594 }
1581 vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ); 1595 vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1582 1596
1583 tcm_vhost_init_inflight(vs, NULL); 1597 vhost_scsi_init_inflight(vs, NULL);
1584 1598
1585 f->private_data = vs; 1599 f->private_data = vs;
1586 return 0; 1600 return 0;
@@ -1712,7 +1726,7 @@ static int vhost_scsi_deregister(void)
1712 return misc_deregister(&vhost_scsi_misc); 1726 return misc_deregister(&vhost_scsi_misc);
1713} 1727}
1714 1728
1715static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport) 1729static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1716{ 1730{
1717 switch (tport->tport_proto_id) { 1731 switch (tport->tport_proto_id) {
1718 case SCSI_PROTOCOL_SAS: 1732 case SCSI_PROTOCOL_SAS:
@@ -1729,7 +1743,7 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
1729} 1743}
1730 1744
1731static void 1745static void
1732tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg, 1746vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
1733 struct se_lun *lun, bool plug) 1747 struct se_lun *lun, bool plug)
1734{ 1748{
1735 1749
@@ -1750,71 +1764,71 @@ tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
1750 vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; 1764 vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1751 mutex_lock(&vq->mutex); 1765 mutex_lock(&vq->mutex);
1752 if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG)) 1766 if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1753 tcm_vhost_send_evt(vs, tpg, lun, 1767 vhost_scsi_send_evt(vs, tpg, lun,
1754 VIRTIO_SCSI_T_TRANSPORT_RESET, reason); 1768 VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1755 mutex_unlock(&vq->mutex); 1769 mutex_unlock(&vq->mutex);
1756 mutex_unlock(&vs->dev.mutex); 1770 mutex_unlock(&vs->dev.mutex);
1757} 1771}
1758 1772
1759static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun) 1773static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1760{ 1774{
1761 tcm_vhost_do_plug(tpg, lun, true); 1775 vhost_scsi_do_plug(tpg, lun, true);
1762} 1776}
1763 1777
1764static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun) 1778static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1765{ 1779{
1766 tcm_vhost_do_plug(tpg, lun, false); 1780 vhost_scsi_do_plug(tpg, lun, false);
1767} 1781}
1768 1782
1769static int tcm_vhost_port_link(struct se_portal_group *se_tpg, 1783static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
1770 struct se_lun *lun) 1784 struct se_lun *lun)
1771{ 1785{
1772 struct tcm_vhost_tpg *tpg = container_of(se_tpg, 1786 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1773 struct tcm_vhost_tpg, se_tpg); 1787 struct vhost_scsi_tpg, se_tpg);
1774 1788
1775 mutex_lock(&tcm_vhost_mutex); 1789 mutex_lock(&vhost_scsi_mutex);
1776 1790
1777 mutex_lock(&tpg->tv_tpg_mutex); 1791 mutex_lock(&tpg->tv_tpg_mutex);
1778 tpg->tv_tpg_port_count++; 1792 tpg->tv_tpg_port_count++;
1779 mutex_unlock(&tpg->tv_tpg_mutex); 1793 mutex_unlock(&tpg->tv_tpg_mutex);
1780 1794
1781 tcm_vhost_hotplug(tpg, lun); 1795 vhost_scsi_hotplug(tpg, lun);
1782 1796
1783 mutex_unlock(&tcm_vhost_mutex); 1797 mutex_unlock(&vhost_scsi_mutex);
1784 1798
1785 return 0; 1799 return 0;
1786} 1800}
1787 1801
1788static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg, 1802static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
1789 struct se_lun *lun) 1803 struct se_lun *lun)
1790{ 1804{
1791 struct tcm_vhost_tpg *tpg = container_of(se_tpg, 1805 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1792 struct tcm_vhost_tpg, se_tpg); 1806 struct vhost_scsi_tpg, se_tpg);
1793 1807
1794 mutex_lock(&tcm_vhost_mutex); 1808 mutex_lock(&vhost_scsi_mutex);
1795 1809
1796 mutex_lock(&tpg->tv_tpg_mutex); 1810 mutex_lock(&tpg->tv_tpg_mutex);
1797 tpg->tv_tpg_port_count--; 1811 tpg->tv_tpg_port_count--;
1798 mutex_unlock(&tpg->tv_tpg_mutex); 1812 mutex_unlock(&tpg->tv_tpg_mutex);
1799 1813
1800 tcm_vhost_hotunplug(tpg, lun); 1814 vhost_scsi_hotunplug(tpg, lun);
1801 1815
1802 mutex_unlock(&tcm_vhost_mutex); 1816 mutex_unlock(&vhost_scsi_mutex);
1803} 1817}
1804 1818
1805static struct se_node_acl * 1819static struct se_node_acl *
1806tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg, 1820vhost_scsi_make_nodeacl(struct se_portal_group *se_tpg,
1807 struct config_group *group, 1821 struct config_group *group,
1808 const char *name) 1822 const char *name)
1809{ 1823{
1810 struct se_node_acl *se_nacl, *se_nacl_new; 1824 struct se_node_acl *se_nacl, *se_nacl_new;
1811 struct tcm_vhost_nacl *nacl; 1825 struct vhost_scsi_nacl *nacl;
1812 u64 wwpn = 0; 1826 u64 wwpn = 0;
1813 u32 nexus_depth; 1827 u32 nexus_depth;
1814 1828
1815 /* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0) 1829 /* vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
1816 return ERR_PTR(-EINVAL); */ 1830 return ERR_PTR(-EINVAL); */
1817 se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg); 1831 se_nacl_new = vhost_scsi_alloc_fabric_acl(se_tpg);
1818 if (!se_nacl_new) 1832 if (!se_nacl_new)
1819 return ERR_PTR(-ENOMEM); 1833 return ERR_PTR(-ENOMEM);
1820 1834
@@ -1826,37 +1840,37 @@ tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
1826 se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, 1840 se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
1827 name, nexus_depth); 1841 name, nexus_depth);
1828 if (IS_ERR(se_nacl)) { 1842 if (IS_ERR(se_nacl)) {
1829 tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new); 1843 vhost_scsi_release_fabric_acl(se_tpg, se_nacl_new);
1830 return se_nacl; 1844 return se_nacl;
1831 } 1845 }
1832 /* 1846 /*
1833 * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN 1847 * Locate our struct vhost_scsi_nacl and set the FC Nport WWPN
1834 */ 1848 */
1835 nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl); 1849 nacl = container_of(se_nacl, struct vhost_scsi_nacl, se_node_acl);
1836 nacl->iport_wwpn = wwpn; 1850 nacl->iport_wwpn = wwpn;
1837 1851
1838 return se_nacl; 1852 return se_nacl;
1839} 1853}
1840 1854
1841static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl) 1855static void vhost_scsi_drop_nodeacl(struct se_node_acl *se_acl)
1842{ 1856{
1843 struct tcm_vhost_nacl *nacl = container_of(se_acl, 1857 struct vhost_scsi_nacl *nacl = container_of(se_acl,
1844 struct tcm_vhost_nacl, se_node_acl); 1858 struct vhost_scsi_nacl, se_node_acl);
1845 core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1); 1859 core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
1846 kfree(nacl); 1860 kfree(nacl);
1847} 1861}
1848 1862
1849static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus, 1863static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus,
1850 struct se_session *se_sess) 1864 struct se_session *se_sess)
1851{ 1865{
1852 struct tcm_vhost_cmd *tv_cmd; 1866 struct vhost_scsi_cmd *tv_cmd;
1853 unsigned int i; 1867 unsigned int i;
1854 1868
1855 if (!se_sess->sess_cmd_map) 1869 if (!se_sess->sess_cmd_map)
1856 return; 1870 return;
1857 1871
1858 for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) { 1872 for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1859 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i]; 1873 tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1860 1874
1861 kfree(tv_cmd->tvc_sgl); 1875 kfree(tv_cmd->tvc_sgl);
1862 kfree(tv_cmd->tvc_prot_sgl); 1876 kfree(tv_cmd->tvc_prot_sgl);
@@ -1864,13 +1878,13 @@ static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
1864 } 1878 }
1865} 1879}
1866 1880
1867static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, 1881static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
1868 const char *name) 1882 const char *name)
1869{ 1883{
1870 struct se_portal_group *se_tpg; 1884 struct se_portal_group *se_tpg;
1871 struct se_session *se_sess; 1885 struct se_session *se_sess;
1872 struct tcm_vhost_nexus *tv_nexus; 1886 struct vhost_scsi_nexus *tv_nexus;
1873 struct tcm_vhost_cmd *tv_cmd; 1887 struct vhost_scsi_cmd *tv_cmd;
1874 unsigned int i; 1888 unsigned int i;
1875 1889
1876 mutex_lock(&tpg->tv_tpg_mutex); 1890 mutex_lock(&tpg->tv_tpg_mutex);
@@ -1881,19 +1895,19 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1881 } 1895 }
1882 se_tpg = &tpg->se_tpg; 1896 se_tpg = &tpg->se_tpg;
1883 1897
1884 tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL); 1898 tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
1885 if (!tv_nexus) { 1899 if (!tv_nexus) {
1886 mutex_unlock(&tpg->tv_tpg_mutex); 1900 mutex_unlock(&tpg->tv_tpg_mutex);
1887 pr_err("Unable to allocate struct tcm_vhost_nexus\n"); 1901 pr_err("Unable to allocate struct vhost_scsi_nexus\n");
1888 return -ENOMEM; 1902 return -ENOMEM;
1889 } 1903 }
1890 /* 1904 /*
1891 * Initialize the struct se_session pointer and setup tagpool 1905 * Initialize the struct se_session pointer and setup tagpool
1892 * for struct tcm_vhost_cmd descriptors 1906 * for struct vhost_scsi_cmd descriptors
1893 */ 1907 */
1894 tv_nexus->tvn_se_sess = transport_init_session_tags( 1908 tv_nexus->tvn_se_sess = transport_init_session_tags(
1895 TCM_VHOST_DEFAULT_TAGS, 1909 VHOST_SCSI_DEFAULT_TAGS,
1896 sizeof(struct tcm_vhost_cmd), 1910 sizeof(struct vhost_scsi_cmd),
1897 TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS); 1911 TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
1898 if (IS_ERR(tv_nexus->tvn_se_sess)) { 1912 if (IS_ERR(tv_nexus->tvn_se_sess)) {
1899 mutex_unlock(&tpg->tv_tpg_mutex); 1913 mutex_unlock(&tpg->tv_tpg_mutex);
@@ -1901,11 +1915,11 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1901 return -ENOMEM; 1915 return -ENOMEM;
1902 } 1916 }
1903 se_sess = tv_nexus->tvn_se_sess; 1917 se_sess = tv_nexus->tvn_se_sess;
1904 for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) { 1918 for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1905 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i]; 1919 tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1906 1920
1907 tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) * 1921 tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
1908 TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL); 1922 VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
1909 if (!tv_cmd->tvc_sgl) { 1923 if (!tv_cmd->tvc_sgl) {
1910 mutex_unlock(&tpg->tv_tpg_mutex); 1924 mutex_unlock(&tpg->tv_tpg_mutex);
1911 pr_err("Unable to allocate tv_cmd->tvc_sgl\n"); 1925 pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
@@ -1913,7 +1927,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1913 } 1927 }
1914 1928
1915 tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) * 1929 tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
1916 TCM_VHOST_PREALLOC_UPAGES, GFP_KERNEL); 1930 VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
1917 if (!tv_cmd->tvc_upages) { 1931 if (!tv_cmd->tvc_upages) {
1918 mutex_unlock(&tpg->tv_tpg_mutex); 1932 mutex_unlock(&tpg->tv_tpg_mutex);
1919 pr_err("Unable to allocate tv_cmd->tvc_upages\n"); 1933 pr_err("Unable to allocate tv_cmd->tvc_upages\n");
@@ -1921,7 +1935,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1921 } 1935 }
1922 1936
1923 tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) * 1937 tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
1924 TCM_VHOST_PREALLOC_PROT_SGLS, GFP_KERNEL); 1938 VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
1925 if (!tv_cmd->tvc_prot_sgl) { 1939 if (!tv_cmd->tvc_prot_sgl) {
1926 mutex_unlock(&tpg->tv_tpg_mutex); 1940 mutex_unlock(&tpg->tv_tpg_mutex);
1927 pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n"); 1941 pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
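
The nexus-creation loop above front-loads every allocation the I/O path would otherwise need: one scatterlist array, one protection scatterlist array, and one page-pointer array per tag slot. A compressed sketch of one iteration, using kcalloc() as the overflow-safe spelling of the kzalloc(sizeof * N) calls in the hunk:

    tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];

    tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
                              sizeof(struct scatterlist), GFP_KERNEL);
    tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
                                 sizeof(struct page *), GFP_KERNEL);
    tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
                                   sizeof(struct scatterlist), GFP_KERNEL);
    if (!tv_cmd->tvc_sgl || !tv_cmd->tvc_upages || !tv_cmd->tvc_prot_sgl)
            goto out;   /* vhost_scsi_free_cmd_map_res() unwinds all slots */
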
@@ -1930,7 +1944,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1930 } 1944 }
1931 /* 1945 /*
1932 * Since we are running in 'demo mode' this call will generate a 1946 * Since we are running in 'demo mode' this call will generate a
1933 * struct se_node_acl for the tcm_vhost struct se_portal_group with 1947 * struct se_node_acl for the vhost_scsi struct se_portal_group with
1934 * the SCSI Initiator port name of the passed configfs group 'name'. 1948 * the SCSI Initiator port name of the passed configfs group 'name'.
1935 */ 1949 */
1936 tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl( 1950 tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
@@ -1953,16 +1967,16 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1953 return 0; 1967 return 0;
1954 1968
1955out: 1969out:
1956 tcm_vhost_free_cmd_map_res(tv_nexus, se_sess); 1970 vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
1957 transport_free_session(se_sess); 1971 transport_free_session(se_sess);
1958 kfree(tv_nexus); 1972 kfree(tv_nexus);
1959 return -ENOMEM; 1973 return -ENOMEM;
1960} 1974}
1961 1975
1962static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg) 1976static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
1963{ 1977{
1964 struct se_session *se_sess; 1978 struct se_session *se_sess;
1965 struct tcm_vhost_nexus *tv_nexus; 1979 struct vhost_scsi_nexus *tv_nexus;
1966 1980
1967 mutex_lock(&tpg->tv_tpg_mutex); 1981 mutex_lock(&tpg->tv_tpg_mutex);
1968 tv_nexus = tpg->tpg_nexus; 1982 tv_nexus = tpg->tpg_nexus;
@@ -1994,10 +2008,10 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
1994 } 2008 }
1995 2009
1996 pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated" 2010 pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1997 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport), 2011 " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
1998 tv_nexus->tvn_se_sess->se_node_acl->initiatorname); 2012 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1999 2013
2000 tcm_vhost_free_cmd_map_res(tv_nexus, se_sess); 2014 vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
2001 /* 2015 /*
2002 * Release the SCSI I_T Nexus to the emulated vhost Target Port 2016 * Release the SCSI I_T Nexus to the emulated vhost Target Port
2003 */ 2017 */
@@ -2009,12 +2023,12 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
2009 return 0; 2023 return 0;
2010} 2024}
2011 2025
2012static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg, 2026static ssize_t vhost_scsi_tpg_show_nexus(struct se_portal_group *se_tpg,
2013 char *page) 2027 char *page)
2014{ 2028{
2015 struct tcm_vhost_tpg *tpg = container_of(se_tpg, 2029 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2016 struct tcm_vhost_tpg, se_tpg); 2030 struct vhost_scsi_tpg, se_tpg);
2017 struct tcm_vhost_nexus *tv_nexus; 2031 struct vhost_scsi_nexus *tv_nexus;
2018 ssize_t ret; 2032 ssize_t ret;
2019 2033
2020 mutex_lock(&tpg->tv_tpg_mutex); 2034 mutex_lock(&tpg->tv_tpg_mutex);
@@ -2030,40 +2044,40 @@ static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
2030 return ret; 2044 return ret;
2031} 2045}
2032 2046
2033static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg, 2047static ssize_t vhost_scsi_tpg_store_nexus(struct se_portal_group *se_tpg,
2034 const char *page, 2048 const char *page,
2035 size_t count) 2049 size_t count)
2036{ 2050{
2037 struct tcm_vhost_tpg *tpg = container_of(se_tpg, 2051 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2038 struct tcm_vhost_tpg, se_tpg); 2052 struct vhost_scsi_tpg, se_tpg);
2039 struct tcm_vhost_tport *tport_wwn = tpg->tport; 2053 struct vhost_scsi_tport *tport_wwn = tpg->tport;
2040 unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr; 2054 unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
2041 int ret; 2055 int ret;
2042 /* 2056 /*
2043 * Shutdown the active I_T nexus if 'NULL' is passed.. 2057 * Shutdown the active I_T nexus if 'NULL' is passed..
2044 */ 2058 */
2045 if (!strncmp(page, "NULL", 4)) { 2059 if (!strncmp(page, "NULL", 4)) {
2046 ret = tcm_vhost_drop_nexus(tpg); 2060 ret = vhost_scsi_drop_nexus(tpg);
2047 return (!ret) ? count : ret; 2061 return (!ret) ? count : ret;
2048 } 2062 }
2049 /* 2063 /*
2050 * Otherwise make sure the passed virtual Initiator port WWN matches 2064 * Otherwise make sure the passed virtual Initiator port WWN matches
2051 * the fabric protocol_id set in tcm_vhost_make_tport(), and call 2065 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
2052 * tcm_vhost_make_nexus(). 2066 * vhost_scsi_make_nexus().
2053 */ 2067 */
2054 if (strlen(page) >= TCM_VHOST_NAMELEN) { 2068 if (strlen(page) >= VHOST_SCSI_NAMELEN) {
2055 pr_err("Emulated NAA Sas Address: %s, exceeds" 2069 pr_err("Emulated NAA Sas Address: %s, exceeds"
2056 " max: %d\n", page, TCM_VHOST_NAMELEN); 2070 " max: %d\n", page, VHOST_SCSI_NAMELEN);
2057 return -EINVAL; 2071 return -EINVAL;
2058 } 2072 }
2059 snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page); 2073 snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
2060 2074
2061 ptr = strstr(i_port, "naa."); 2075 ptr = strstr(i_port, "naa.");
2062 if (ptr) { 2076 if (ptr) {
2063 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) { 2077 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2064 pr_err("Passed SAS Initiator Port %s does not" 2078 pr_err("Passed SAS Initiator Port %s does not"
2065 " match target port protoid: %s\n", i_port, 2079 " match target port protoid: %s\n", i_port,
2066 tcm_vhost_dump_proto_id(tport_wwn)); 2080 vhost_scsi_dump_proto_id(tport_wwn));
2067 return -EINVAL; 2081 return -EINVAL;
2068 } 2082 }
2069 port_ptr = &i_port[0]; 2083 port_ptr = &i_port[0];
@@ -2074,7 +2088,7 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
2074 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) { 2088 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2075 pr_err("Passed FCP Initiator Port %s does not" 2089 pr_err("Passed FCP Initiator Port %s does not"
2076 " match target port protoid: %s\n", i_port, 2090 " match target port protoid: %s\n", i_port,
2077 tcm_vhost_dump_proto_id(tport_wwn)); 2091 vhost_scsi_dump_proto_id(tport_wwn));
2078 return -EINVAL; 2092 return -EINVAL;
2079 } 2093 }
2080 port_ptr = &i_port[3]; /* Skip over "fc." */ 2094 port_ptr = &i_port[3]; /* Skip over "fc." */
@@ -2085,7 +2099,7 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
2085 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) { 2099 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2086 pr_err("Passed iSCSI Initiator Port %s does not" 2100 pr_err("Passed iSCSI Initiator Port %s does not"
2087 " match target port protoid: %s\n", i_port, 2101 " match target port protoid: %s\n", i_port,
2088 tcm_vhost_dump_proto_id(tport_wwn)); 2102 vhost_scsi_dump_proto_id(tport_wwn));
2089 return -EINVAL; 2103 return -EINVAL;
2090 } 2104 }
2091 port_ptr = &i_port[0]; 2105 port_ptr = &i_port[0];
@@ -2101,40 +2115,40 @@ check_newline:
2101 if (i_port[strlen(i_port)-1] == '\n') 2115 if (i_port[strlen(i_port)-1] == '\n')
2102 i_port[strlen(i_port)-1] = '\0'; 2116 i_port[strlen(i_port)-1] = '\0';
2103 2117
2104 ret = tcm_vhost_make_nexus(tpg, port_ptr); 2118 ret = vhost_scsi_make_nexus(tpg, port_ptr);
2105 if (ret < 0) 2119 if (ret < 0)
2106 return ret; 2120 return ret;
2107 2121
2108 return count; 2122 return count;
2109} 2123}
2110 2124
2111TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR); 2125TF_TPG_BASE_ATTR(vhost_scsi, nexus, S_IRUGO | S_IWUSR);
2112 2126
2113static struct configfs_attribute *tcm_vhost_tpg_attrs[] = { 2127static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
2114 &tcm_vhost_tpg_nexus.attr, 2128 &vhost_scsi_tpg_nexus.attr,
2115 NULL, 2129 NULL,
2116}; 2130};
2117 2131
2118static struct se_portal_group * 2132static struct se_portal_group *
2119tcm_vhost_make_tpg(struct se_wwn *wwn, 2133vhost_scsi_make_tpg(struct se_wwn *wwn,
2120 struct config_group *group, 2134 struct config_group *group,
2121 const char *name) 2135 const char *name)
2122{ 2136{
2123 struct tcm_vhost_tport *tport = container_of(wwn, 2137 struct vhost_scsi_tport *tport = container_of(wwn,
2124 struct tcm_vhost_tport, tport_wwn); 2138 struct vhost_scsi_tport, tport_wwn);
2125 2139
2126 struct tcm_vhost_tpg *tpg; 2140 struct vhost_scsi_tpg *tpg;
2127 unsigned long tpgt; 2141 u16 tpgt;
2128 int ret; 2142 int ret;
2129 2143
2130 if (strstr(name, "tpgt_") != name) 2144 if (strstr(name, "tpgt_") != name)
2131 return ERR_PTR(-EINVAL); 2145 return ERR_PTR(-EINVAL);
2132 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX) 2146 if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2133 return ERR_PTR(-EINVAL); 2147 return ERR_PTR(-EINVAL);
2134 2148
2135 tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL); 2149 tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
2136 if (!tpg) { 2150 if (!tpg) {
2137 pr_err("Unable to allocate struct tcm_vhost_tpg"); 2151 pr_err("Unable to allocate struct vhost_scsi_tpg");
2138 return ERR_PTR(-ENOMEM); 2152 return ERR_PTR(-ENOMEM);
2139 } 2153 }
2140 mutex_init(&tpg->tv_tpg_mutex); 2154 mutex_init(&tpg->tv_tpg_mutex);
@@ -2142,31 +2156,31 @@ tcm_vhost_make_tpg(struct se_wwn *wwn,
2142 tpg->tport = tport; 2156 tpg->tport = tport;
2143 tpg->tport_tpgt = tpgt; 2157 tpg->tport_tpgt = tpgt;
2144 2158
2145 ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn, 2159 ret = core_tpg_register(&vhost_scsi_fabric_configfs->tf_ops, wwn,
2146 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); 2160 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
2147 if (ret < 0) { 2161 if (ret < 0) {
2148 kfree(tpg); 2162 kfree(tpg);
2149 return NULL; 2163 return NULL;
2150 } 2164 }
2151 mutex_lock(&tcm_vhost_mutex); 2165 mutex_lock(&vhost_scsi_mutex);
2152 list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list); 2166 list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
2153 mutex_unlock(&tcm_vhost_mutex); 2167 mutex_unlock(&vhost_scsi_mutex);
2154 2168
2155 return &tpg->se_tpg; 2169 return &tpg->se_tpg;
2156} 2170}
2157 2171
2158static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg) 2172static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
2159{ 2173{
2160 struct tcm_vhost_tpg *tpg = container_of(se_tpg, 2174 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2161 struct tcm_vhost_tpg, se_tpg); 2175 struct vhost_scsi_tpg, se_tpg);
2162 2176
2163 mutex_lock(&tcm_vhost_mutex); 2177 mutex_lock(&vhost_scsi_mutex);
2164 list_del(&tpg->tv_tpg_list); 2178 list_del(&tpg->tv_tpg_list);
2165 mutex_unlock(&tcm_vhost_mutex); 2179 mutex_unlock(&vhost_scsi_mutex);
2166 /* 2180 /*
2167 * Release the virtual I_T Nexus for this vhost TPG 2181 * Release the virtual I_T Nexus for this vhost TPG
2168 */ 2182 */
2169 tcm_vhost_drop_nexus(tpg); 2183 vhost_scsi_drop_nexus(tpg);
2170 /* 2184 /*
2171 * Deregister the se_tpg from TCM.. 2185 * Deregister the se_tpg from TCM..
2172 */ 2186 */
@@ -2175,21 +2189,21 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
2175} 2189}
2176 2190
2177static struct se_wwn * 2191static struct se_wwn *
2178tcm_vhost_make_tport(struct target_fabric_configfs *tf, 2192vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2179 struct config_group *group, 2193 struct config_group *group,
2180 const char *name) 2194 const char *name)
2181{ 2195{
2182 struct tcm_vhost_tport *tport; 2196 struct vhost_scsi_tport *tport;
2183 char *ptr; 2197 char *ptr;
2184 u64 wwpn = 0; 2198 u64 wwpn = 0;
2185 int off = 0; 2199 int off = 0;
2186 2200
2187 /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0) 2201 /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2188 return ERR_PTR(-EINVAL); */ 2202 return ERR_PTR(-EINVAL); */
2189 2203
2190 tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL); 2204 tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL);
2191 if (!tport) { 2205 if (!tport) {
2192 pr_err("Unable to allocate struct tcm_vhost_tport"); 2206 pr_err("Unable to allocate struct vhost_scsi_tport");
2193 return ERR_PTR(-ENOMEM); 2207 return ERR_PTR(-ENOMEM);
2194 } 2208 }
2195 tport->tport_wwpn = wwpn; 2209 tport->tport_wwpn = wwpn;
@@ -2220,102 +2234,102 @@ tcm_vhost_make_tport(struct target_fabric_configfs *tf,
2220 return ERR_PTR(-EINVAL); 2234 return ERR_PTR(-EINVAL);
2221 2235
2222check_len: 2236check_len:
2223 if (strlen(name) >= TCM_VHOST_NAMELEN) { 2237 if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2224 pr_err("Emulated %s Address: %s, exceeds" 2238 pr_err("Emulated %s Address: %s, exceeds"
2225 " max: %d\n", name, tcm_vhost_dump_proto_id(tport), 2239 " max: %d\n", name, vhost_scsi_dump_proto_id(tport),
2226 TCM_VHOST_NAMELEN); 2240 VHOST_SCSI_NAMELEN);
2227 kfree(tport); 2241 kfree(tport);
2228 return ERR_PTR(-EINVAL); 2242 return ERR_PTR(-EINVAL);
2229 } 2243 }
2230 snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]); 2244 snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2231 2245
2232 pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target" 2246 pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2233 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name); 2247 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2234 2248
2235 return &tport->tport_wwn; 2249 return &tport->tport_wwn;
2236} 2250}
2237 2251
2238static void tcm_vhost_drop_tport(struct se_wwn *wwn) 2252static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2239{ 2253{
2240 struct tcm_vhost_tport *tport = container_of(wwn, 2254 struct vhost_scsi_tport *tport = container_of(wwn,
2241 struct tcm_vhost_tport, tport_wwn); 2255 struct vhost_scsi_tport, tport_wwn);
2242 2256
2243 pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target" 2257 pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2244 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), 2258 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2245 tport->tport_name); 2259 tport->tport_name);
2246 2260
2247 kfree(tport); 2261 kfree(tport);
2248} 2262}
2249 2263
2250static ssize_t 2264static ssize_t
2251tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf, 2265vhost_scsi_wwn_show_attr_version(struct target_fabric_configfs *tf,
2252 char *page) 2266 char *page)
2253{ 2267{
2254 return sprintf(page, "TCM_VHOST fabric module %s on %s/%s" 2268 return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2255 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname, 2269 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2256 utsname()->machine); 2270 utsname()->machine);
2257} 2271}
2258 2272
2259TF_WWN_ATTR_RO(tcm_vhost, version); 2273TF_WWN_ATTR_RO(vhost_scsi, version);
2260 2274
2261static struct configfs_attribute *tcm_vhost_wwn_attrs[] = { 2275static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2262 &tcm_vhost_wwn_version.attr, 2276 &vhost_scsi_wwn_version.attr,
2263 NULL, 2277 NULL,
2264}; 2278};
2265 2279
2266static struct target_core_fabric_ops tcm_vhost_ops = { 2280static struct target_core_fabric_ops vhost_scsi_ops = {
2267 .get_fabric_name = tcm_vhost_get_fabric_name, 2281 .get_fabric_name = vhost_scsi_get_fabric_name,
2268 .get_fabric_proto_ident = tcm_vhost_get_fabric_proto_ident, 2282 .get_fabric_proto_ident = vhost_scsi_get_fabric_proto_ident,
2269 .tpg_get_wwn = tcm_vhost_get_fabric_wwn, 2283 .tpg_get_wwn = vhost_scsi_get_fabric_wwn,
2270 .tpg_get_tag = tcm_vhost_get_tag, 2284 .tpg_get_tag = vhost_scsi_get_tpgt,
2271 .tpg_get_default_depth = tcm_vhost_get_default_depth, 2285 .tpg_get_default_depth = vhost_scsi_get_default_depth,
2272 .tpg_get_pr_transport_id = tcm_vhost_get_pr_transport_id, 2286 .tpg_get_pr_transport_id = vhost_scsi_get_pr_transport_id,
2273 .tpg_get_pr_transport_id_len = tcm_vhost_get_pr_transport_id_len, 2287 .tpg_get_pr_transport_id_len = vhost_scsi_get_pr_transport_id_len,
2274 .tpg_parse_pr_out_transport_id = tcm_vhost_parse_pr_out_transport_id, 2288 .tpg_parse_pr_out_transport_id = vhost_scsi_parse_pr_out_transport_id,
2275 .tpg_check_demo_mode = tcm_vhost_check_true, 2289 .tpg_check_demo_mode = vhost_scsi_check_true,
2276 .tpg_check_demo_mode_cache = tcm_vhost_check_true, 2290 .tpg_check_demo_mode_cache = vhost_scsi_check_true,
2277 .tpg_check_demo_mode_write_protect = tcm_vhost_check_false, 2291 .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
2278 .tpg_check_prod_mode_write_protect = tcm_vhost_check_false, 2292 .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
2279 .tpg_alloc_fabric_acl = tcm_vhost_alloc_fabric_acl, 2293 .tpg_alloc_fabric_acl = vhost_scsi_alloc_fabric_acl,
2280 .tpg_release_fabric_acl = tcm_vhost_release_fabric_acl, 2294 .tpg_release_fabric_acl = vhost_scsi_release_fabric_acl,
2281 .tpg_get_inst_index = tcm_vhost_tpg_get_inst_index, 2295 .tpg_get_inst_index = vhost_scsi_tpg_get_inst_index,
2282 .release_cmd = tcm_vhost_release_cmd, 2296 .release_cmd = vhost_scsi_release_cmd,
2283 .check_stop_free = vhost_scsi_check_stop_free, 2297 .check_stop_free = vhost_scsi_check_stop_free,
2284 .shutdown_session = tcm_vhost_shutdown_session, 2298 .shutdown_session = vhost_scsi_shutdown_session,
2285 .close_session = tcm_vhost_close_session, 2299 .close_session = vhost_scsi_close_session,
2286 .sess_get_index = tcm_vhost_sess_get_index, 2300 .sess_get_index = vhost_scsi_sess_get_index,
2287 .sess_get_initiator_sid = NULL, 2301 .sess_get_initiator_sid = NULL,
2288 .write_pending = tcm_vhost_write_pending, 2302 .write_pending = vhost_scsi_write_pending,
2289 .write_pending_status = tcm_vhost_write_pending_status, 2303 .write_pending_status = vhost_scsi_write_pending_status,
2290 .set_default_node_attributes = tcm_vhost_set_default_node_attrs, 2304 .set_default_node_attributes = vhost_scsi_set_default_node_attrs,
2291 .get_task_tag = tcm_vhost_get_task_tag, 2305 .get_task_tag = vhost_scsi_get_task_tag,
2292 .get_cmd_state = tcm_vhost_get_cmd_state, 2306 .get_cmd_state = vhost_scsi_get_cmd_state,
2293 .queue_data_in = tcm_vhost_queue_data_in, 2307 .queue_data_in = vhost_scsi_queue_data_in,
2294 .queue_status = tcm_vhost_queue_status, 2308 .queue_status = vhost_scsi_queue_status,
2295 .queue_tm_rsp = tcm_vhost_queue_tm_rsp, 2309 .queue_tm_rsp = vhost_scsi_queue_tm_rsp,
2296 .aborted_task = tcm_vhost_aborted_task, 2310 .aborted_task = vhost_scsi_aborted_task,
2297 /* 2311 /*
2298 * Setup callers for generic logic in target_core_fabric_configfs.c 2312 * Setup callers for generic logic in target_core_fabric_configfs.c
2299 */ 2313 */
2300 .fabric_make_wwn = tcm_vhost_make_tport, 2314 .fabric_make_wwn = vhost_scsi_make_tport,
2301 .fabric_drop_wwn = tcm_vhost_drop_tport, 2315 .fabric_drop_wwn = vhost_scsi_drop_tport,
2302 .fabric_make_tpg = tcm_vhost_make_tpg, 2316 .fabric_make_tpg = vhost_scsi_make_tpg,
2303 .fabric_drop_tpg = tcm_vhost_drop_tpg, 2317 .fabric_drop_tpg = vhost_scsi_drop_tpg,
2304 .fabric_post_link = tcm_vhost_port_link, 2318 .fabric_post_link = vhost_scsi_port_link,
2305 .fabric_pre_unlink = tcm_vhost_port_unlink, 2319 .fabric_pre_unlink = vhost_scsi_port_unlink,
2306 .fabric_make_np = NULL, 2320 .fabric_make_np = NULL,
2307 .fabric_drop_np = NULL, 2321 .fabric_drop_np = NULL,
2308 .fabric_make_nodeacl = tcm_vhost_make_nodeacl, 2322 .fabric_make_nodeacl = vhost_scsi_make_nodeacl,
2309 .fabric_drop_nodeacl = tcm_vhost_drop_nodeacl, 2323 .fabric_drop_nodeacl = vhost_scsi_drop_nodeacl,
2310}; 2324};
2311 2325
2312static int tcm_vhost_register_configfs(void) 2326static int vhost_scsi_register_configfs(void)
2313{ 2327{
2314 struct target_fabric_configfs *fabric; 2328 struct target_fabric_configfs *fabric;
2315 int ret; 2329 int ret;
2316 2330
2317 pr_debug("TCM_VHOST fabric module %s on %s/%s" 2331 pr_debug("vhost-scsi fabric module %s on %s/%s"
2318 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname, 2332 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2319 utsname()->machine); 2333 utsname()->machine);
2320 /* 2334 /*
2321 * Register the top level struct config_item_type with TCM core 2335 * Register the top level struct config_item_type with TCM core
@@ -2326,14 +2340,14 @@ static int tcm_vhost_register_configfs(void)
2326 return PTR_ERR(fabric); 2340 return PTR_ERR(fabric);
2327 } 2341 }
2328 /* 2342 /*
2329 * Setup fabric->tf_ops from our local tcm_vhost_ops 2343 * Setup fabric->tf_ops from our local vhost_scsi_ops
2330 */ 2344 */
2331 fabric->tf_ops = tcm_vhost_ops; 2345 fabric->tf_ops = vhost_scsi_ops;
2332 /* 2346 /*
2333 * Setup default attribute lists for various fabric->tf_cit_tmpl 2347 * Setup default attribute lists for various fabric->tf_cit_tmpl
2334 */ 2348 */
2335 fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs; 2349 fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = vhost_scsi_wwn_attrs;
2336 fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs; 2350 fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = vhost_scsi_tpg_attrs;
2337 fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; 2351 fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
2338 fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; 2352 fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
2339 fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; 2353 fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
@@ -2353,37 +2367,37 @@ static int tcm_vhost_register_configfs(void)
2353 /* 2367 /*
2354 * Setup our local pointer to *fabric 2368 * Setup our local pointer to *fabric
2355 */ 2369 */
2356 tcm_vhost_fabric_configfs = fabric; 2370 vhost_scsi_fabric_configfs = fabric;
2357 pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n"); 2371 pr_debug("TCM_VHOST[0] - Set fabric -> vhost_scsi_fabric_configfs\n");
2358 return 0; 2372 return 0;
2359}; 2373};
2360 2374
2361static void tcm_vhost_deregister_configfs(void) 2375static void vhost_scsi_deregister_configfs(void)
2362{ 2376{
2363 if (!tcm_vhost_fabric_configfs) 2377 if (!vhost_scsi_fabric_configfs)
2364 return; 2378 return;
2365 2379
2366 target_fabric_configfs_deregister(tcm_vhost_fabric_configfs); 2380 target_fabric_configfs_deregister(vhost_scsi_fabric_configfs);
2367 tcm_vhost_fabric_configfs = NULL; 2381 vhost_scsi_fabric_configfs = NULL;
2368 pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n"); 2382 pr_debug("TCM_VHOST[0] - Cleared vhost_scsi_fabric_configfs\n");
2369}; 2383};
2370 2384
2371static int __init tcm_vhost_init(void) 2385static int __init vhost_scsi_init(void)
2372{ 2386{
2373 int ret = -ENOMEM; 2387 int ret = -ENOMEM;
2374 /* 2388 /*
2375 * Use our own dedicated workqueue for submitting I/O into 2389 * Use our own dedicated workqueue for submitting I/O into
2376 * target core to avoid contention within system_wq. 2390 * target core to avoid contention within system_wq.
2377 */ 2391 */
2378 tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0); 2392 vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
2379 if (!tcm_vhost_workqueue) 2393 if (!vhost_scsi_workqueue)
2380 goto out; 2394 goto out;
2381 2395
2382 ret = vhost_scsi_register(); 2396 ret = vhost_scsi_register();
2383 if (ret < 0) 2397 if (ret < 0)
2384 goto out_destroy_workqueue; 2398 goto out_destroy_workqueue;
2385 2399
2386 ret = tcm_vhost_register_configfs(); 2400 ret = vhost_scsi_register_configfs();
2387 if (ret < 0) 2401 if (ret < 0)
2388 goto out_vhost_scsi_deregister; 2402 goto out_vhost_scsi_deregister;
2389 2403
@@ -2392,20 +2406,20 @@ static int __init tcm_vhost_init(void)
2392out_vhost_scsi_deregister: 2406out_vhost_scsi_deregister:
2393 vhost_scsi_deregister(); 2407 vhost_scsi_deregister();
2394out_destroy_workqueue: 2408out_destroy_workqueue:
2395 destroy_workqueue(tcm_vhost_workqueue); 2409 destroy_workqueue(vhost_scsi_workqueue);
2396out: 2410out:
2397 return ret; 2411 return ret;
2398}; 2412};
2399 2413
2400static void tcm_vhost_exit(void) 2414static void vhost_scsi_exit(void)
2401{ 2415{
2402 tcm_vhost_deregister_configfs(); 2416 vhost_scsi_deregister_configfs();
2403 vhost_scsi_deregister(); 2417 vhost_scsi_deregister();
2404 destroy_workqueue(tcm_vhost_workqueue); 2418 destroy_workqueue(vhost_scsi_workqueue);
2405}; 2419};
2406 2420
2407MODULE_DESCRIPTION("VHOST_SCSI series fabric driver"); 2421MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2408MODULE_ALIAS("tcm_vhost"); 2422MODULE_ALIAS("tcm_vhost");
2409MODULE_LICENSE("GPL"); 2423MODULE_LICENSE("GPL");
2410module_init(tcm_vhost_init); 2424module_init(vhost_scsi_init);
2411module_exit(tcm_vhost_exit); 2425module_exit(vhost_scsi_exit);
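
A minimal standalone sketch of the stricter "tpgt_<n>" parsing introduced above: the old path accepted any unsigned long up to UINT_MAX, while the new one parses into a u16 and rejects anything at or above VHOST_SCSI_MAX_TARGET (assumed to be 256 here; parse_tpgt is an illustrative name, not part of the patch).

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define VHOST_SCSI_MAX_TARGET 256	/* assumption for this sketch */

static int parse_tpgt(const char *name, unsigned short *tpgt)
{
	char *end;
	unsigned long val;

	if (strncmp(name, "tpgt_", 5) != 0)
		return -EINVAL;	/* mirrors the strstr(name, "tpgt_") != name check */
	errno = 0;
	val = strtoul(name + 5, &end, 10);
	if (errno || end == name + 5 || *end != '\0' ||
	    val >= VHOST_SCSI_MAX_TARGET)
		return -EINVAL;	/* kstrtou16() plus range check, user-space style */
	*tpgt = (unsigned short)val;
	return 0;
}
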
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 00b228638274..b546da5d8ea3 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -12,16 +12,32 @@ config VIRTIO_PCI
12 depends on PCI 12 depends on PCI
13 select VIRTIO 13 select VIRTIO
14 ---help--- 14 ---help---
15 This drivers provides support for virtio based paravirtual device 15 This driver provides support for virtio based paravirtual device
16 drivers over PCI. This requires that your VMM has appropriate PCI 16 drivers over PCI. This requires that your VMM has appropriate PCI
17 virtio backends. Most QEMU based VMMs should support these devices 17 virtio backends. Most QEMU based VMMs should support these devices
18 (like KVM or Xen). 18 (like KVM or Xen).
19 19
20 Currently, the ABI is not considered stable so there is no guarantee
21 that this version of the driver will work with your VMM.
22
23 If unsure, say M. 20 If unsure, say M.
24 21
22config VIRTIO_PCI_LEGACY
23 bool "Support for legacy virtio draft 0.9.X and older devices"
24 default y
25 depends on VIRTIO_PCI
26 ---help---
27 Virtio PCI Card 0.9.X Draft (circa 2014) and older device support.
28
29 This option enables building a transitional driver, supporting
30 both devices conforming to Virtio 1 specification, and legacy devices.
31 If disabled, you get a slightly smaller, non-transitional driver,
32 with no legacy compatibility.
33
34 So look out into your driveway. Do you have a flying car? If
35 so, you can happily disable this option and virtio will not
36 break. Otherwise, leave it set. Unless you're testing what
37 life will be like in The Future.
38
39 If unsure, say Y.
40
25config VIRTIO_BALLOON 41config VIRTIO_BALLOON
26 tristate "Virtio balloon driver" 42 tristate "Virtio balloon driver"
27 depends on VIRTIO 43 depends on VIRTIO
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index bf5104b56894..d85565b8ea46 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -1,5 +1,6 @@
1obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o 1obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o
2obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o 2obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
3obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o 3obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
4virtio_pci-y := virtio_pci_legacy.o virtio_pci_common.o 4virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
5virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
5obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o 6obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index b9f70dfc4751..5ce2aa48fc6e 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -236,7 +236,10 @@ static int virtio_dev_probe(struct device *_d)
236 if (err) 236 if (err)
237 goto err; 237 goto err;
238 238
239 add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); 239 /* If probe didn't do it, mark device DRIVER_OK ourselves. */
240 if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
241 virtio_device_ready(dev);
242
240 if (drv->scan) 243 if (drv->scan)
241 drv->scan(dev); 244 drv->scan(dev);
242 245
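
For reference, the virtio_device_ready() call used above is, as far as I know, a small inline along these lines (a sketch of the include/linux/virtio_config.h helper, not a verbatim copy):

/* Set DRIVER_OK exactly once, whether from the driver's own probe or,
 * as in the hunk above, from virtio core after probe returns. */
static inline void virtio_device_ready(struct virtio_device *dev)
{
	unsigned status = dev->config->get_status(dev);

	BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
	dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}
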
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 50c5f42d7a9f..0413157f3b49 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -44,8 +44,7 @@ static int oom_pages = OOM_VBALLOON_DEFAULT_PAGES;
44module_param(oom_pages, int, S_IRUSR | S_IWUSR); 44module_param(oom_pages, int, S_IRUSR | S_IWUSR);
45MODULE_PARM_DESC(oom_pages, "pages to free on OOM"); 45MODULE_PARM_DESC(oom_pages, "pages to free on OOM");
46 46
47struct virtio_balloon 47struct virtio_balloon {
48{
49 struct virtio_device *vdev; 48 struct virtio_device *vdev;
50 struct virtqueue *inflate_vq, *deflate_vq, *stats_vq; 49 struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
51 50
@@ -466,6 +465,12 @@ static int virtballoon_probe(struct virtio_device *vdev)
466 struct virtio_balloon *vb; 465 struct virtio_balloon *vb;
467 int err; 466 int err;
468 467
468 if (!vdev->config->get) {
469 dev_err(&vdev->dev, "%s failure: config access disabled\n",
470 __func__);
471 return -EINVAL;
472 }
473
469 vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); 474 vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
470 if (!vb) { 475 if (!vb) {
471 err = -ENOMEM; 476 err = -ENOMEM;
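
The new guard is not paranoia: later in this series the modern virtio-pci transport registers a config-ops table whose .get is left NULL when the device exposes no device-specific config window (see virtio_pci_config_nodev_ops in virtio_pci_modern.c below). A hedged sketch of the same defensive pattern for any config-reading driver (the helper name is illustrative):

/* Bail out before touching config space when the transport provides
 * no accessor; the value read here is the raw little-endian wire form. */
static int example_read_num_pages(struct virtio_device *vdev, __u32 *num)
{
	if (!vdev->config->get)
		return -EINVAL;
	vdev->config->get(vdev,
			  offsetof(struct virtio_balloon_config, num_pages),
			  num, sizeof(*num));
	return 0;
}
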
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 00d115b22bd8..cad569890908 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Virtio memory mapped device driver 2 * Virtio memory mapped device driver
3 * 3 *
4 * Copyright 2011, ARM Ltd. 4 * Copyright 2011-2014, ARM Ltd.
5 * 5 *
6 * This module allows virtio devices to be used over a virtual, memory mapped 6 * This module allows virtio devices to be used over a virtual, memory mapped
7 * platform device. 7 * platform device.
@@ -50,36 +50,6 @@
50 * 50 *
51 * 51 *
52 * 52 *
53 * Registers layout (all 32-bit wide):
54 *
55 * offset d. name description
56 * ------ -- ---------------- -----------------
57 *
58 * 0x000 R MagicValue Magic value "virt"
59 * 0x004 R Version Device version (current max. 1)
60 * 0x008 R DeviceID Virtio device ID
61 * 0x00c R VendorID Virtio vendor ID
62 *
63 * 0x010 R HostFeatures Features supported by the host
64 * 0x014 W HostFeaturesSel Set of host features to access via HostFeatures
65 *
66 * 0x020 W GuestFeatures Features activated by the guest
67 * 0x024 W GuestFeaturesSel Set of activated features to set via GuestFeatures
68 * 0x028 W GuestPageSize Size of guest's memory page in bytes
69 *
70 * 0x030 W QueueSel Queue selector
71 * 0x034 R QueueNumMax Maximum size of the currently selected queue
72 * 0x038 W QueueNum Queue size for the currently selected queue
73 * 0x03c W QueueAlign Used Ring alignment for the current queue
74 * 0x040 RW QueuePFN PFN for the currently selected queue
75 *
76 * 0x050 W QueueNotify Queue notifier
77 * 0x060 R InterruptStatus Interrupt status register
78 * 0x064 W InterruptACK Interrupt acknowledge register
79 * 0x070 RW Status Device status register
80 *
81 * 0x100+ RW Device-specific configuration space
82 *
83 * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007 53 * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007
84 * 54 *
85 * This work is licensed under the terms of the GNU GPL, version 2 or later. 55 * This work is licensed under the terms of the GNU GPL, version 2 or later.
@@ -145,11 +115,16 @@ struct virtio_mmio_vq_info {
145static u64 vm_get_features(struct virtio_device *vdev) 115static u64 vm_get_features(struct virtio_device *vdev)
146{ 116{
147 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 117 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
118 u64 features;
119
120 writel(1, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
121 features = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
122 features <<= 32;
148 123
149 /* TODO: Features > 32 bits */ 124 writel(0, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
150 writel(0, vm_dev->base + VIRTIO_MMIO_HOST_FEATURES_SEL); 125 features |= readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
151 126
152 return readl(vm_dev->base + VIRTIO_MMIO_HOST_FEATURES); 127 return features;
153} 128}
154 129
155static int vm_finalize_features(struct virtio_device *vdev) 130static int vm_finalize_features(struct virtio_device *vdev)
@@ -159,11 +134,20 @@ static int vm_finalize_features(struct virtio_device *vdev)
159 /* Give virtio_ring a chance to accept features. */ 134 /* Give virtio_ring a chance to accept features. */
160 vring_transport_features(vdev); 135 vring_transport_features(vdev);
161 136
162 /* Make sure we don't have any features > 32 bits! */ 137 /* Make sure there are no mixed devices */
163 BUG_ON((u32)vdev->features != vdev->features); 138 if (vm_dev->version == 2 &&
139 !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
140 dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
141 return -EINVAL;
142 }
143
144 writel(1, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
145 writel((u32)(vdev->features >> 32),
146 vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);
164 147
165 writel(0, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL); 148 writel(0, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
166 writel(vdev->features, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES); 149 writel((u32)vdev->features,
150 vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);
167 151
168 return 0; 152 return 0;
169} 153}
@@ -275,7 +259,12 @@ static void vm_del_vq(struct virtqueue *vq)
275 259
276 /* Select and deactivate the queue */ 260 /* Select and deactivate the queue */
277 writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL); 261 writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
278 writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); 262 if (vm_dev->version == 1) {
263 writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
264 } else {
265 writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
266 WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
267 }
279 268
280 size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN)); 269 size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN));
281 free_pages_exact(info->queue, size); 270 free_pages_exact(info->queue, size);
@@ -312,7 +301,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
312 writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL); 301 writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
313 302
314 /* Queue shouldn't already be set up. */ 303 /* Queue shouldn't already be set up. */
315 if (readl(vm_dev->base + VIRTIO_MMIO_QUEUE_PFN)) { 304 if (readl(vm_dev->base + (vm_dev->version == 1 ?
305 VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
316 err = -ENOENT; 306 err = -ENOENT;
317 goto error_available; 307 goto error_available;
318 } 308 }
@@ -356,13 +346,6 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
356 info->num /= 2; 346 info->num /= 2;
357 } 347 }
358 348
359 /* Activate the queue */
360 writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
361 writel(VIRTIO_MMIO_VRING_ALIGN,
362 vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
363 writel(virt_to_phys(info->queue) >> PAGE_SHIFT,
364 vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
365
366 /* Create the vring */ 349 /* Create the vring */
367 vq = vring_new_virtqueue(index, info->num, VIRTIO_MMIO_VRING_ALIGN, vdev, 350 vq = vring_new_virtqueue(index, info->num, VIRTIO_MMIO_VRING_ALIGN, vdev,
368 true, info->queue, vm_notify, callback, name); 351 true, info->queue, vm_notify, callback, name);
@@ -371,6 +354,33 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
371 goto error_new_virtqueue; 354 goto error_new_virtqueue;
372 } 355 }
373 356
357 /* Activate the queue */
358 writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
359 if (vm_dev->version == 1) {
360 writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
361 writel(virt_to_phys(info->queue) >> PAGE_SHIFT,
362 vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
363 } else {
364 u64 addr;
365
366 addr = virt_to_phys(info->queue);
367 writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
368 writel((u32)(addr >> 32),
369 vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);
370
371 addr = virt_to_phys(virtqueue_get_avail(vq));
372 writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
373 writel((u32)(addr >> 32),
374 vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);
375
376 addr = virt_to_phys(virtqueue_get_used(vq));
377 writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW);
378 writel((u32)(addr >> 32),
379 vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH);
380
381 writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
382 }
383
374 vq->priv = info; 384 vq->priv = info;
375 info->vq = vq; 385 info->vq = vq;
376 386
@@ -381,7 +391,12 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
381 return vq; 391 return vq;
382 392
383error_new_virtqueue: 393error_new_virtqueue:
384 writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); 394 if (vm_dev->version == 1) {
395 writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
396 } else {
397 writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
398 WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
399 }
385 free_pages_exact(info->queue, size); 400 free_pages_exact(info->queue, size);
386error_alloc_pages: 401error_alloc_pages:
387 kfree(info); 402 kfree(info);
@@ -476,16 +491,32 @@ static int virtio_mmio_probe(struct platform_device *pdev)
476 491
477 /* Check device version */ 492 /* Check device version */
478 vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION); 493 vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION);
479 if (vm_dev->version != 1) { 494 if (vm_dev->version < 1 || vm_dev->version > 2) {
480 dev_err(&pdev->dev, "Version %ld not supported!\n", 495 dev_err(&pdev->dev, "Version %ld not supported!\n",
481 vm_dev->version); 496 vm_dev->version);
482 return -ENXIO; 497 return -ENXIO;
483 } 498 }
484 499
485 vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID); 500 vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
501 if (vm_dev->vdev.id.device == 0) {
502 /*
503 * virtio-mmio device with an ID 0 is a (dummy) placeholder
504 * with no function. End probing now with no error reported.
505 */
506 return -ENODEV;
507 }
486 vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID); 508 vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
487 509
488 writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE); 510 /* Reject legacy-only IDs for version 2 devices */
511 if (vm_dev->version == 2 &&
512 virtio_device_is_legacy_only(vm_dev->vdev.id)) {
513 dev_err(&pdev->dev, "Version 2 not supported for devices %u!\n",
514 vm_dev->vdev.id.device);
515 return -ENODEV;
516 }
517
518 if (vm_dev->version == 1)
519 writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
489 520
490 platform_set_drvdata(pdev, vm_dev); 521 platform_set_drvdata(pdev, vm_dev);
491 522
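
Condensing the version-2 feature handshake above into one place: 64-bit feature words move 32 bits at a time through a selector register, and the DRIVER_FEATURES writes mirror the DEVICE_FEATURES reads. A minimal sketch (the function name is mine):

static u64 example_read_device_features(void __iomem *base)
{
	u64 features;

	writel(1, base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);	/* high half */
	features = (u64)readl(base + VIRTIO_MMIO_DEVICE_FEATURES) << 32;

	writel(0, base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);	/* low half */
	features |= readl(base + VIRTIO_MMIO_DEVICE_FEATURES);

	return features;
}
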
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 9756f21b809e..e894eb278d83 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -19,6 +19,14 @@
19 19
20#include "virtio_pci_common.h" 20#include "virtio_pci_common.h"
21 21
22static bool force_legacy = false;
23
24#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
25module_param(force_legacy, bool, 0444);
26MODULE_PARM_DESC(force_legacy,
27 "Force legacy mode for transitional virtio 1 devices");
28#endif
29
22/* wait for pending irq handlers */ 30/* wait for pending irq handlers */
23void vp_synchronize_vectors(struct virtio_device *vdev) 31void vp_synchronize_vectors(struct virtio_device *vdev)
24{ 32{
@@ -464,15 +472,97 @@ static const struct pci_device_id virtio_pci_id_table[] = {
464 472
465MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); 473MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
466 474
475static void virtio_pci_release_dev(struct device *_d)
476{
477 struct virtio_device *vdev = dev_to_virtio(_d);
478 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
479
480 /* As struct device is a kobject, it's not safe to
481 * free the memory (including the reference counter itself)
482 * until its release callback. */
483 kfree(vp_dev);
484}
485
467static int virtio_pci_probe(struct pci_dev *pci_dev, 486static int virtio_pci_probe(struct pci_dev *pci_dev,
468 const struct pci_device_id *id) 487 const struct pci_device_id *id)
469{ 488{
470 return virtio_pci_legacy_probe(pci_dev, id); 489 struct virtio_pci_device *vp_dev;
490 int rc;
491
492 /* allocate our structure and fill it out */
493 vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
494 if (!vp_dev)
495 return -ENOMEM;
496
497 pci_set_drvdata(pci_dev, vp_dev);
498 vp_dev->vdev.dev.parent = &pci_dev->dev;
499 vp_dev->vdev.dev.release = virtio_pci_release_dev;
500 vp_dev->pci_dev = pci_dev;
501 INIT_LIST_HEAD(&vp_dev->virtqueues);
502 spin_lock_init(&vp_dev->lock);
503
504 /* Disable MSI/MSIX to bring device to a known good state. */
505 pci_msi_off(pci_dev);
506
507 /* enable the device */
508 rc = pci_enable_device(pci_dev);
509 if (rc)
510 goto err_enable_device;
511
512 rc = pci_request_regions(pci_dev, "virtio-pci");
513 if (rc)
514 goto err_request_regions;
515
516 if (force_legacy) {
517 rc = virtio_pci_legacy_probe(vp_dev);
518 /* Also try modern mode if we can't map BAR0 (no IO space). */
519 if (rc == -ENODEV || rc == -ENOMEM)
520 rc = virtio_pci_modern_probe(vp_dev);
521 if (rc)
522 goto err_probe;
523 } else {
524 rc = virtio_pci_modern_probe(vp_dev);
525 if (rc == -ENODEV)
526 rc = virtio_pci_legacy_probe(vp_dev);
527 if (rc)
528 goto err_probe;
529 }
530
531 pci_set_master(pci_dev);
532
533 rc = register_virtio_device(&vp_dev->vdev);
534 if (rc)
535 goto err_register;
536
537 return 0;
538
539err_register:
540 if (vp_dev->ioaddr)
541 virtio_pci_legacy_remove(vp_dev);
542 else
543 virtio_pci_modern_remove(vp_dev);
544err_probe:
545 pci_release_regions(pci_dev);
546err_request_regions:
547 pci_disable_device(pci_dev);
548err_enable_device:
549 kfree(vp_dev);
550 return rc;
471} 551}
472 552
473static void virtio_pci_remove(struct pci_dev *pci_dev) 553static void virtio_pci_remove(struct pci_dev *pci_dev)
474{ 554{
475 virtio_pci_legacy_remove(pci_dev); 555 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
556
557 unregister_virtio_device(&vp_dev->vdev);
558
559 if (vp_dev->ioaddr)
560 virtio_pci_legacy_remove(vp_dev);
561 else
562 virtio_pci_modern_remove(vp_dev);
563
564 pci_release_regions(pci_dev);
565 pci_disable_device(pci_dev);
476} 566}
477 567
478static struct pci_driver virtio_pci_driver = { 568static struct pci_driver virtio_pci_driver = {
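
One design note on the probe/remove pair above: the only state recording which backend bound the device is ->ioaddr, which is set solely by the legacy probe; both the error-unwind and remove paths key off it. A sketch of that invariant (the helper is hypothetical, not in the patch):

static bool vp_is_legacy(const struct virtio_pci_device *vp_dev)
{
	/* ->ioaddr is only ever set by virtio_pci_legacy_probe() */
	return vp_dev->ioaddr != NULL;
}

With CONFIG_VIRTIO_PCI_LEGACY enabled, loading the module with force_legacy=1 inverts the preference for transitional devices; since the parameter is registered with mode 0444 it is load-time only, visible read-only under /sys/module/virtio_pci/parameters/.
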
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 5a497289b7e9..28ee4e56badf 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -53,12 +53,32 @@ struct virtio_pci_device {
53 struct virtio_device vdev; 53 struct virtio_device vdev;
54 struct pci_dev *pci_dev; 54 struct pci_dev *pci_dev;
55 55
56 /* In legacy mode, these two point to within ->legacy. */
57 /* Where to read and clear interrupt */
58 u8 __iomem *isr;
59
60 /* Modern only fields */
61 /* The IO mapping for the PCI config space (non-legacy mode) */
62 struct virtio_pci_common_cfg __iomem *common;
63 /* Device-specific data (non-legacy mode) */
64 void __iomem *device;
65 /* Base of vq notifications (non-legacy mode). */
66 void __iomem *notify_base;
67
68 /* So we can sanity-check accesses. */
69 size_t notify_len;
70 size_t device_len;
71
72 /* Capability for when we need to map notifications per-vq. */
73 int notify_map_cap;
74
75 /* Multiply queue_notify_off by this value. (non-legacy mode). */
76 u32 notify_offset_multiplier;
77
78 /* Legacy only field */
56 /* the IO mapping for the PCI config space */ 79 /* the IO mapping for the PCI config space */
57 void __iomem *ioaddr; 80 void __iomem *ioaddr;
58 81
59 /* the IO mapping for ISR operation */
60 void __iomem *isr;
61
62 /* a list of queues so we can dispatch IRQs */ 82 /* a list of queues so we can dispatch IRQs */
63 spinlock_t lock; 83 spinlock_t lock;
64 struct list_head virtqueues; 84 struct list_head virtqueues;
@@ -127,8 +147,19 @@ const char *vp_bus_name(struct virtio_device *vdev);
127 */ 147 */
128int vp_set_vq_affinity(struct virtqueue *vq, int cpu); 148int vp_set_vq_affinity(struct virtqueue *vq, int cpu);
129 149
130int virtio_pci_legacy_probe(struct pci_dev *pci_dev, 150#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
131 const struct pci_device_id *id); 151int virtio_pci_legacy_probe(struct virtio_pci_device *);
132void virtio_pci_legacy_remove(struct pci_dev *pci_dev); 152void virtio_pci_legacy_remove(struct virtio_pci_device *);
153#else
154static inline int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
155{
156 return -ENODEV;
157}
158static inline void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
159{
160}
161#endif
162int virtio_pci_modern_probe(struct virtio_pci_device *);
163void virtio_pci_modern_remove(struct virtio_pci_device *);
133 164
134#endif 165#endif
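
The modern-only fields added above combine into a queue's notification address as notify_base + queue_notify_off * notify_offset_multiplier; the real setup_vq() in virtio_pci_modern.c below does the same arithmetic with a bounds check against notify_len. A minimal sketch (the helper name is mine):

static void __iomem *
example_vq_notify_addr(const struct virtio_pci_device *vp_dev,
		       u16 queue_notify_off)
{
	/* unchecked form of the computation setup_vq() validates */
	return vp_dev->notify_base +
	       (u32)queue_notify_off * vp_dev->notify_offset_multiplier;
}
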
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index a5486e65e04b..256a5278a515 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -211,23 +211,10 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
211 .set_vq_affinity = vp_set_vq_affinity, 211 .set_vq_affinity = vp_set_vq_affinity,
212}; 212};
213 213
214static void virtio_pci_release_dev(struct device *_d)
215{
216 struct virtio_device *vdev = dev_to_virtio(_d);
217 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
218
219 /* As struct device is a kobject, it's not safe to
220 * free the memory (including the reference counter itself)
221 * until it's release callback. */
222 kfree(vp_dev);
223}
224
225/* the PCI probing function */ 214/* the PCI probing function */
226int virtio_pci_legacy_probe(struct pci_dev *pci_dev, 215int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
227 const struct pci_device_id *id)
228{ 216{
229 struct virtio_pci_device *vp_dev; 217 struct pci_dev *pci_dev = vp_dev->pci_dev;
230 int err;
231 218
232 /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */ 219 /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
233 if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f) 220 if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
@@ -239,41 +226,12 @@ int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
239 return -ENODEV; 226 return -ENODEV;
240 } 227 }
241 228
242 /* allocate our structure and fill it out */
243 vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
244 if (vp_dev == NULL)
245 return -ENOMEM;
246
247 vp_dev->vdev.dev.parent = &pci_dev->dev;
248 vp_dev->vdev.dev.release = virtio_pci_release_dev;
249 vp_dev->vdev.config = &virtio_pci_config_ops;
250 vp_dev->pci_dev = pci_dev;
251 INIT_LIST_HEAD(&vp_dev->virtqueues);
252 spin_lock_init(&vp_dev->lock);
253
254 /* Disable MSI/MSIX to bring device to a known good state. */
255 pci_msi_off(pci_dev);
256
257 /* enable the device */
258 err = pci_enable_device(pci_dev);
259 if (err)
260 goto out;
261
262 err = pci_request_regions(pci_dev, "virtio-pci");
263 if (err)
264 goto out_enable_device;
265
266 vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0); 229 vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
267 if (vp_dev->ioaddr == NULL) { 230 if (!vp_dev->ioaddr)
268 err = -ENOMEM; 231 return -ENOMEM;
269 goto out_req_regions;
270 }
271 232
272 vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR; 233 vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;
273 234
274 pci_set_drvdata(pci_dev, vp_dev);
275 pci_set_master(pci_dev);
276
277 /* we use the subsystem vendor/device id as the virtio vendor/device 235 /* we use the subsystem vendor/device id as the virtio vendor/device
278 * id. this allows us to use the same PCI vendor/device id for all 236 * id. this allows us to use the same PCI vendor/device id for all
279 * virtio devices and to identify the particular virtio driver by 237 * virtio devices and to identify the particular virtio driver by
@@ -281,36 +239,18 @@ int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
281 vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; 239 vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
282 vp_dev->vdev.id.device = pci_dev->subsystem_device; 240 vp_dev->vdev.id.device = pci_dev->subsystem_device;
283 241
242 vp_dev->vdev.config = &virtio_pci_config_ops;
243
284 vp_dev->config_vector = vp_config_vector; 244 vp_dev->config_vector = vp_config_vector;
285 vp_dev->setup_vq = setup_vq; 245 vp_dev->setup_vq = setup_vq;
286 vp_dev->del_vq = del_vq; 246 vp_dev->del_vq = del_vq;
287 247
288 /* finally register the virtio device */
289 err = register_virtio_device(&vp_dev->vdev);
290 if (err)
291 goto out_set_drvdata;
292
293 return 0; 248 return 0;
294
295out_set_drvdata:
296 pci_iounmap(pci_dev, vp_dev->ioaddr);
297out_req_regions:
298 pci_release_regions(pci_dev);
299out_enable_device:
300 pci_disable_device(pci_dev);
301out:
302 kfree(vp_dev);
303 return err;
304} 249}
305 250
306void virtio_pci_legacy_remove(struct pci_dev *pci_dev) 251void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
307{ 252{
308 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); 253 struct pci_dev *pci_dev = vp_dev->pci_dev;
309
310 unregister_virtio_device(&vp_dev->vdev);
311 254
312 vp_del_vqs(&vp_dev->vdev);
313 pci_iounmap(pci_dev, vp_dev->ioaddr); 255 pci_iounmap(pci_dev, vp_dev->ioaddr);
314 pci_release_regions(pci_dev);
315 pci_disable_device(pci_dev);
316} 256}
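
For context on the device-ID check retained above: under the virtio 1.0 spec, 0x1000-0x103f is the transitional (legacy-capable) PCI device-ID window, while modern-only devices start at 0x1040 plus the virtio device ID. A trivial sketch of the predicate:

static bool is_transitional_virtio_id(u16 device)
{
	/* transitional window per virtio 1.0; 0x1040+ is modern-only */
	return device >= 0x1000 && device <= 0x103f;
}
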
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
new file mode 100644
index 000000000000..2aa38e59db2e
--- /dev/null
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -0,0 +1,695 @@
1/*
2 * Virtio PCI driver - modern (virtio 1.0) device support
3 *
4 * This module allows virtio devices to be used over a virtual PCI device.
5 * This can be used with QEMU based VMMs like KVM or Xen.
6 *
7 * Copyright IBM Corp. 2007
8 * Copyright Red Hat, Inc. 2014
9 *
10 * Authors:
11 * Anthony Liguori <aliguori@us.ibm.com>
12 * Rusty Russell <rusty@rustcorp.com.au>
13 * Michael S. Tsirkin <mst@redhat.com>
14 *
15 * This work is licensed under the terms of the GNU GPL, version 2 or later.
16 * See the COPYING file in the top-level directory.
17 *
18 */
19
20#define VIRTIO_PCI_NO_LEGACY
21#include "virtio_pci_common.h"
22
23static void __iomem *map_capability(struct pci_dev *dev, int off,
24 size_t minlen,
25 u32 align,
26 u32 start, u32 size,
27 size_t *len)
28{
29 u8 bar;
30 u32 offset, length;
31 void __iomem *p;
32
33 pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
34 bar),
35 &bar);
36 pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
37 &offset);
38 pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
39 &length);
40
41 if (length <= start) {
42 dev_err(&dev->dev,
43 "virtio_pci: bad capability len %u (>%u expected)\n",
44 length, start);
45 return NULL;
46 }
47
48 if (length - start < minlen) {
49 dev_err(&dev->dev,
50 "virtio_pci: bad capability len %u (>=%zu expected)\n",
51 length, minlen);
52 return NULL;
53 }
54
55 length -= start;
56
57 if (start + offset < offset) {
58 dev_err(&dev->dev,
59 "virtio_pci: map wrap-around %u+%u\n",
60 start, offset);
61 return NULL;
62 }
63
64 offset += start;
65
66 if (offset & (align - 1)) {
67 dev_err(&dev->dev,
68 "virtio_pci: offset %u not aligned to %u\n",
69 offset, align);
70 return NULL;
71 }
72
73 if (length > size)
74 length = size;
75
76 if (len)
77 *len = length;
78
79 if (minlen + offset < minlen ||
80 minlen + offset > pci_resource_len(dev, bar)) {
81 dev_err(&dev->dev,
82 "virtio_pci: map virtio %zu@%u "
83 "out of range on bar %i length %lu\n",
84 minlen, offset,
85 bar, (unsigned long)pci_resource_len(dev, bar));
86 return NULL;
87 }
88
89 p = pci_iomap_range(dev, bar, offset, length);
90 if (!p)
91 dev_err(&dev->dev,
92 "virtio_pci: unable to map virtio %u@%u on bar %i\n",
93 length, offset, bar);
94 return p;
95}
96
97static void iowrite64_twopart(u64 val, __le32 __iomem *lo, __le32 __iomem *hi)
98{
99 iowrite32((u32)val, lo);
100 iowrite32(val >> 32, hi);
101}
102
103/* virtio config->get_features() implementation */
104static u64 vp_get_features(struct virtio_device *vdev)
105{
106 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
107 u64 features;
108
109 iowrite32(0, &vp_dev->common->device_feature_select);
110 features = ioread32(&vp_dev->common->device_feature);
111 iowrite32(1, &vp_dev->common->device_feature_select);
112 features |= ((u64)ioread32(&vp_dev->common->device_feature) << 32);
113
114 return features;
115}
116
117/* virtio config->finalize_features() implementation */
118static int vp_finalize_features(struct virtio_device *vdev)
119{
120 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
121
122 /* Give virtio_ring a chance to accept features. */
123 vring_transport_features(vdev);
124
125 if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
126 dev_err(&vdev->dev, "virtio: device uses modern interface "
127 "but does not have VIRTIO_F_VERSION_1\n");
128 return -EINVAL;
129 }
130
131 iowrite32(0, &vp_dev->common->guest_feature_select);
132 iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
133 iowrite32(1, &vp_dev->common->guest_feature_select);
134 iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);
135
136 return 0;
137}
138
139/* virtio config->get() implementation */
140static void vp_get(struct virtio_device *vdev, unsigned offset,
141 void *buf, unsigned len)
142{
143 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
144 u8 b;
145 __le16 w;
146 __le32 l;
147
148 BUG_ON(offset + len > vp_dev->device_len);
149
150 switch (len) {
151 case 1:
152 b = ioread8(vp_dev->device + offset);
153 memcpy(buf, &b, sizeof b);
154 break;
155 case 2:
156 w = cpu_to_le16(ioread16(vp_dev->device + offset));
157 memcpy(buf, &w, sizeof w);
158 break;
159 case 4:
160 l = cpu_to_le32(ioread32(vp_dev->device + offset));
161 memcpy(buf, &l, sizeof l);
162 break;
163 case 8:
164 l = cpu_to_le32(ioread32(vp_dev->device + offset));
165 memcpy(buf, &l, sizeof l);
166 l = cpu_to_le32(ioread32(vp_dev->device + offset + sizeof l));
167 memcpy(buf + sizeof l, &l, sizeof l);
168 break;
169 default:
170 BUG();
171 }
172}
173
174/* the config->set() implementation. it's symmetric to the config->get()
175 * implementation */
176static void vp_set(struct virtio_device *vdev, unsigned offset,
177 const void *buf, unsigned len)
178{
179 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
180 u8 b;
181 __le16 w;
182 __le32 l;
183
184 BUG_ON(offset + len > vp_dev->device_len);
185
186 switch (len) {
187 case 1:
188 memcpy(&b, buf, sizeof b);
189 iowrite8(b, vp_dev->device + offset);
190 break;
191 case 2:
192 memcpy(&w, buf, sizeof w);
193 iowrite16(le16_to_cpu(w), vp_dev->device + offset);
194 break;
195 case 4:
196 memcpy(&l, buf, sizeof l);
197 iowrite32(le32_to_cpu(l), vp_dev->device + offset);
198 break;
199 case 8:
200 memcpy(&l, buf, sizeof l);
201 iowrite32(le32_to_cpu(l), vp_dev->device + offset);
202 memcpy(&l, buf + sizeof l, sizeof l);
203 iowrite32(le32_to_cpu(l), vp_dev->device + offset + sizeof l);
204 break;
205 default:
206 BUG();
207 }
208}
209
210static u32 vp_generation(struct virtio_device *vdev)
211{
212 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
213 return ioread8(&vp_dev->common->config_generation);
214}
215
216/* config->{get,set}_status() implementations */
217static u8 vp_get_status(struct virtio_device *vdev)
218{
219 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
220 return ioread8(&vp_dev->common->device_status);
221}
222
223static void vp_set_status(struct virtio_device *vdev, u8 status)
224{
225 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
226 /* We should never be setting status to 0. */
227 BUG_ON(status == 0);
228 iowrite8(status, &vp_dev->common->device_status);
229}
230
231static void vp_reset(struct virtio_device *vdev)
232{
233 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
234 /* 0 status means a reset. */
235 iowrite8(0, &vp_dev->common->device_status);
236 /* Flush out the status write, and flush in device writes,
237 * including MSI-X interrupts, if any. */
238 ioread8(&vp_dev->common->device_status);
239 /* Flush pending VQ/configuration callbacks. */
240 vp_synchronize_vectors(vdev);
241}
242
243static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
244{
245 /* Setup the vector used for configuration events */
246 iowrite16(vector, &vp_dev->common->msix_config);
247 /* Verify we had enough resources to assign the vector */
248 /* Will also flush the write out to device */
249 return ioread16(&vp_dev->common->msix_config);
250}
251
252static size_t vring_pci_size(u16 num)
253{
254 /* We only need a cacheline separation. */
255 return PAGE_ALIGN(vring_size(num, SMP_CACHE_BYTES));
256}
257
258static void *alloc_virtqueue_pages(int *num)
259{
260 void *pages;
261
262 /* TODO: allocate each queue chunk individually */
263 for (; *num && vring_pci_size(*num) > PAGE_SIZE; *num /= 2) {
264 pages = alloc_pages_exact(vring_pci_size(*num),
265 GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
266 if (pages)
267 return pages;
268 }
269
270 if (!*num)
271 return NULL;
272
273 /* Try to get a single page. You are my only hope! */
274 return alloc_pages_exact(vring_pci_size(*num), GFP_KERNEL|__GFP_ZERO);
275}
276
277static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
278 struct virtio_pci_vq_info *info,
279 unsigned index,
280 void (*callback)(struct virtqueue *vq),
281 const char *name,
282 u16 msix_vec)
283{
284 struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
285 struct virtqueue *vq;
286 u16 num, off;
287 int err;
288
289 if (index >= ioread16(&cfg->num_queues))
290 return ERR_PTR(-ENOENT);
291
292 /* Select the queue we're interested in */
293 iowrite16(index, &cfg->queue_select);
294
295 /* Check if queue is either not available or already active. */
296 num = ioread16(&cfg->queue_size);
297 if (!num || ioread16(&cfg->queue_enable))
298 return ERR_PTR(-ENOENT);
299
300 if (num & (num - 1)) {
301 dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
302 return ERR_PTR(-EINVAL);
303 }
304
305 /* get offset of notification word for this vq */
306 off = ioread16(&cfg->queue_notify_off);
307
308 info->num = num;
309 info->msix_vector = msix_vec;
310
311 info->queue = alloc_virtqueue_pages(&info->num);
312 if (info->queue == NULL)
313 return ERR_PTR(-ENOMEM);
314
315 /* create the vring */
316 vq = vring_new_virtqueue(index, info->num,
317 SMP_CACHE_BYTES, &vp_dev->vdev,
318 true, info->queue, vp_notify, callback, name);
319 if (!vq) {
320 err = -ENOMEM;
321 goto err_new_queue;
322 }
323
324 /* activate the queue */
325 iowrite16(num, &cfg->queue_size);
326 iowrite64_twopart(virt_to_phys(info->queue),
327 &cfg->queue_desc_lo, &cfg->queue_desc_hi);
328 iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)),
329 &cfg->queue_avail_lo, &cfg->queue_avail_hi);
330 iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)),
331 &cfg->queue_used_lo, &cfg->queue_used_hi);
332
333 if (vp_dev->notify_base) {
334 /* offset should not wrap */
335 if ((u64)off * vp_dev->notify_offset_multiplier + 2
336 > vp_dev->notify_len) {
337 dev_warn(&vp_dev->pci_dev->dev,
338 "bad notification offset %u (x %u) "
339 "for queue %u > %zd",
340 off, vp_dev->notify_offset_multiplier,
341 index, vp_dev->notify_len);
342 err = -EINVAL;
343 goto err_map_notify;
344 }
345 vq->priv = (void __force *)vp_dev->notify_base +
346 off * vp_dev->notify_offset_multiplier;
347 } else {
348 vq->priv = (void __force *)map_capability(vp_dev->pci_dev,
349 vp_dev->notify_map_cap, 2, 2,
350 off * vp_dev->notify_offset_multiplier, 2,
351 NULL);
352 }
353
354 if (!vq->priv) {
355 err = -ENOMEM;
356 goto err_map_notify;
357 }
358
359 if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
360 iowrite16(msix_vec, &cfg->queue_msix_vector);
361 msix_vec = ioread16(&cfg->queue_msix_vector);
362 if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
363 err = -EBUSY;
364 goto err_assign_vector;
365 }
366 }
367
368 return vq;
369
370err_assign_vector:
371 if (!vp_dev->notify_base)
372 pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv);
373err_map_notify:
374 vring_del_virtqueue(vq);
375err_new_queue:
376 free_pages_exact(info->queue, vring_pci_size(info->num));
377 return ERR_PTR(err);
378}
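
setup_vq() derives each queue's doorbell address as notify_base plus queue_notify_off times the multiplier, rejecting any offset whose 2-byte write would land outside the mapped notify region. A stand-alone sketch of just that bounds check (names hypothetical):

#include <stdint.h>
#include <stdio.h>

/* The 2-byte notify write at off * mult must fit inside notify_len. */
static int notify_off_ok(uint16_t off, uint32_t mult, uint64_t notify_len)
{
	return (uint64_t)off * mult + 2 <= notify_len;
}

int main(void)
{
	printf("%d\n", notify_off_ok(3, 4, 1024));	/* 1: 14 <= 1024 */
	printf("%d\n", notify_off_ok(512, 4, 1024));	/* 0: 2050 > 1024 */
	return 0;
}
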
379
380static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
381 struct virtqueue *vqs[],
382 vq_callback_t *callbacks[],
383 const char *names[])
384{
385 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
386 struct virtqueue *vq;
387 int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names);
388
389 if (rc)
390 return rc;
391
392 /* Select and activate all queues. Has to be done last: once we do
393 * this, there's no way to go back except reset.
394 */
395 list_for_each_entry(vq, &vdev->vqs, list) {
396 iowrite16(vq->index, &vp_dev->common->queue_select);
397 iowrite16(1, &vp_dev->common->queue_enable);
398 }
399
400 return 0;
401}
402
403static void del_vq(struct virtio_pci_vq_info *info)
404{
405 struct virtqueue *vq = info->vq;
406 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
407
408 iowrite16(vq->index, &vp_dev->common->queue_select);
409
410 if (vp_dev->msix_enabled) {
411 iowrite16(VIRTIO_MSI_NO_VECTOR,
412 &vp_dev->common->queue_msix_vector);
413 /* Flush the write out to device */
414 ioread16(&vp_dev->common->queue_msix_vector);
415 }
416
417 if (!vp_dev->notify_base)
418 pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv);
419
420 vring_del_virtqueue(vq);
421
422 free_pages_exact(info->queue, vring_pci_size(info->num));
423}
424
425static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
426 .get = NULL,
427 .set = NULL,
428 .generation = vp_generation,
429 .get_status = vp_get_status,
430 .set_status = vp_set_status,
431 .reset = vp_reset,
432 .find_vqs = vp_modern_find_vqs,
433 .del_vqs = vp_del_vqs,
434 .get_features = vp_get_features,
435 .finalize_features = vp_finalize_features,
436 .bus_name = vp_bus_name,
437 .set_vq_affinity = vp_set_vq_affinity,
438};
439
440static const struct virtio_config_ops virtio_pci_config_ops = {
441 .get = vp_get,
442 .set = vp_set,
443 .generation = vp_generation,
444 .get_status = vp_get_status,
445 .set_status = vp_set_status,
446 .reset = vp_reset,
447 .find_vqs = vp_modern_find_vqs,
448 .del_vqs = vp_del_vqs,
449 .get_features = vp_get_features,
450 .finalize_features = vp_finalize_features,
451 .bus_name = vp_bus_name,
452 .set_vq_affinity = vp_set_vq_affinity,
453};
454
455/**
456 * virtio_pci_find_capability - walk capabilities to find device info.
457 * @dev: the pci device
458 * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
459 * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
460 *
461 * Returns offset of the capability, or 0.
462 */
463static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
464 u32 ioresource_types)
465{
466 int pos;
467
468 for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
469 pos > 0;
470 pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
471 u8 type, bar;
472 pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
473 cfg_type),
474 &type);
475 pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
476 bar),
477 &bar);
478
479 /* Ignore structures with reserved BAR values */
480 if (bar > 0x5)
481 continue;
482
483 if (type == cfg_type) {
484 if (pci_resource_len(dev, bar) &&
485 pci_resource_flags(dev, bar) & ioresource_types)
486 return pos;
487 }
488 }
489 return 0;
490}
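
The walk above relies on pci_find_capability()/pci_find_next_capability() to chase the config-space capability chain. A userspace sketch of the same chain walk over a fake in-memory config space, assuming the virtio_pci_cap layout (cfg_type at byte 3) and omitting the BAR/resource checks:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PCI_CAP_ID_VNDR 0x09

/* Return the offset of the first vendor capability whose cfg_type
 * byte (at +3, mirroring struct virtio_pci_cap) matches, else 0. */
static int find_vndr_cap(const uint8_t *cfg, uint8_t cfg_type)
{
	uint8_t pos = cfg[0x34];	/* capabilities pointer */

	while (pos) {
		if (cfg[pos] == PCI_CAP_ID_VNDR && cfg[pos + 3] == cfg_type)
			return pos;
		pos = cfg[pos + 1];	/* next capability in the chain */
	}
	return 0;
}

int main(void)
{
	uint8_t cfg[256];

	memset(cfg, 0, sizeof(cfg));
	cfg[0x34] = 0x40;			/* first capability at 0x40 */
	cfg[0x40] = 0x05; cfg[0x41] = 0x50;	/* MSI, chains to 0x50 */
	cfg[0x50] = PCI_CAP_ID_VNDR;		/* vendor cap, end of chain */
	cfg[0x53] = 1;				/* cfg_type 1 (common cfg) */

	printf("found at 0x%02x\n", find_vndr_cap(cfg, 1));
	return 0;
}
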
491
492/* This is part of the ABI. Don't screw with it. */
493static inline void check_offsets(void)
494{
495 /* Note: disk space was harmed in compilation of this function. */
496 BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
497 offsetof(struct virtio_pci_cap, cap_vndr));
498 BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
499 offsetof(struct virtio_pci_cap, cap_next));
500 BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
501 offsetof(struct virtio_pci_cap, cap_len));
502 BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
503 offsetof(struct virtio_pci_cap, cfg_type));
504 BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
505 offsetof(struct virtio_pci_cap, bar));
506 BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
507 offsetof(struct virtio_pci_cap, offset));
508 BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
509 offsetof(struct virtio_pci_cap, length));
510 BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
511 offsetof(struct virtio_pci_notify_cap,
512 notify_off_multiplier));
513 BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
514 offsetof(struct virtio_pci_common_cfg,
515 device_feature_select));
516 BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
517 offsetof(struct virtio_pci_common_cfg, device_feature));
518 BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
519 offsetof(struct virtio_pci_common_cfg,
520 guest_feature_select));
521 BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
522 offsetof(struct virtio_pci_common_cfg, guest_feature));
523 BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
524 offsetof(struct virtio_pci_common_cfg, msix_config));
525 BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
526 offsetof(struct virtio_pci_common_cfg, num_queues));
527 BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
528 offsetof(struct virtio_pci_common_cfg, device_status));
529 BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
530 offsetof(struct virtio_pci_common_cfg, config_generation));
531 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
532 offsetof(struct virtio_pci_common_cfg, queue_select));
533 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
534 offsetof(struct virtio_pci_common_cfg, queue_size));
535 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
536 offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
537 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
538 offsetof(struct virtio_pci_common_cfg, queue_enable));
539 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
540 offsetof(struct virtio_pci_common_cfg, queue_notify_off));
541 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
542 offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
543 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
544 offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
545 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
546 offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
547 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
548 offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
549 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
550 offsetof(struct virtio_pci_common_cfg, queue_used_lo));
551 BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
552 offsetof(struct virtio_pci_common_cfg, queue_used_hi));
553}
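
check_offsets() pins the register layout at compile time via BUILD_BUG_ON(), so any drift between the ABI constants and the struct fails the build. The same technique in plain C11 uses _Static_assert; a minimal sketch over a hypothetical cut-down struct:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical, cut-down mirror of the layout being pinned above. */
struct cap {
	uint8_t cap_vndr;	/* expected at offset 0 */
	uint8_t cap_next;	/* expected at offset 1 */
	uint8_t cap_len;	/* expected at offset 2 */
	uint8_t cfg_type;	/* expected at offset 3 */
};

/* C11 equivalent of BUILD_BUG_ON(): compilation fails on a mismatch. */
_Static_assert(offsetof(struct cap, cap_next) == 1, "ABI drift");
_Static_assert(offsetof(struct cap, cfg_type) == 3, "ABI drift");

int main(void) { return 0; }
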
554
555/* the PCI probing function */
556int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
557{
558 struct pci_dev *pci_dev = vp_dev->pci_dev;
559 int err, common, isr, notify, device;
560 u32 notify_length;
561 u32 notify_offset;
562
563 check_offsets();
564
565 /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
566 if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
567 return -ENODEV;
568
569 if (pci_dev->device < 0x1040) {
570 /* Transitional devices: use the PCI subsystem device id as
571 * virtio device id, same as legacy driver always did.
572 */
573 vp_dev->vdev.id.device = pci_dev->subsystem_device;
574 } else {
575 /* Modern devices: simply use PCI device id, but start from 0x1040. */
576 vp_dev->vdev.id.device = pci_dev->device - 0x1040;
577 }
578 vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
579
580 if (virtio_device_is_legacy_only(vp_dev->vdev.id))
581 return -ENODEV;
582
583 /* check for a common config: if not, use legacy mode (bar 0). */
584 common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
585 IORESOURCE_IO | IORESOURCE_MEM);
586 if (!common) {
587 dev_info(&pci_dev->dev,
588 "virtio_pci: leaving for legacy driver\n");
589 return -ENODEV;
590 }
591
592 /* If common is there, these should be too... */
593 isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
594 IORESOURCE_IO | IORESOURCE_MEM);
595 notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
596 IORESOURCE_IO | IORESOURCE_MEM);
597 if (!isr || !notify) {
598 dev_err(&pci_dev->dev,
599 "virtio_pci: missing capabilities %i/%i/%i\n",
600 common, isr, notify);
601 return -EINVAL;
602 }
603
604 /* Device capability is only mandatory for devices that have
605 * device-specific configuration.
606 */
607 device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
608 IORESOURCE_IO | IORESOURCE_MEM);
609
610 err = -EINVAL;
611 vp_dev->common = map_capability(pci_dev, common,
612 sizeof(struct virtio_pci_common_cfg), 4,
613 0, sizeof(struct virtio_pci_common_cfg),
614 NULL);
615 if (!vp_dev->common)
616 goto err_map_common;
617 vp_dev->isr = map_capability(pci_dev, isr, sizeof(u8), 1,
618 0, 1,
619 NULL);
620 if (!vp_dev->isr)
621 goto err_map_isr;
622
623 /* Read notify_off_multiplier from config space. */
624 pci_read_config_dword(pci_dev,
625 notify + offsetof(struct virtio_pci_notify_cap,
626 notify_off_multiplier),
627 &vp_dev->notify_offset_multiplier);
628 /* Read notify length and offset from config space. */
629 pci_read_config_dword(pci_dev,
630 notify + offsetof(struct virtio_pci_notify_cap,
631 cap.length),
632 &notify_length);
633
634 pci_read_config_dword(pci_dev,
635 notify + offsetof(struct virtio_pci_notify_cap,
636						cap.offset),
637 &notify_offset);
638
639	/* We don't know how many VQs we'll map ahead of time.
640 * If notify length is small, map it all now.
641 * Otherwise, map each VQ individually later.
642 */
643 if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
644 vp_dev->notify_base = map_capability(pci_dev, notify, 2, 2,
645 0, notify_length,
646 &vp_dev->notify_len);
647 if (!vp_dev->notify_base)
648 goto err_map_notify;
649 } else {
650 vp_dev->notify_map_cap = notify;
651 }
652
653 /* Again, we don't know how much we should map, but PAGE_SIZE
654 * is more than enough for all existing devices.
655 */
656 if (device) {
657 vp_dev->device = map_capability(pci_dev, device, 0, 4,
658 0, PAGE_SIZE,
659 &vp_dev->device_len);
660 if (!vp_dev->device)
661 goto err_map_device;
662
663 vp_dev->vdev.config = &virtio_pci_config_ops;
664 } else {
665 vp_dev->vdev.config = &virtio_pci_config_nodev_ops;
666 }
667
668 vp_dev->config_vector = vp_config_vector;
669 vp_dev->setup_vq = setup_vq;
670 vp_dev->del_vq = del_vq;
671
672 return 0;
673
674err_map_device:
675 if (vp_dev->notify_base)
676 pci_iounmap(pci_dev, vp_dev->notify_base);
677err_map_notify:
678 pci_iounmap(pci_dev, vp_dev->isr);
679err_map_isr:
680 pci_iounmap(pci_dev, vp_dev->common);
681err_map_common:
682 return err;
683}
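
The probe's ID mapping (transitional devices report the virtio device id in the PCI subsystem device id, modern ones encode it as the PCI device id minus 0x1040) can be exercised in isolation; a small sketch, with example values chosen purely for illustration:

#include <stdint.h>
#include <stdio.h>

static uint16_t virtio_id_from_pci(uint16_t pci_device, uint16_t subsys)
{
	/* Transitional range 0x1000..0x103f uses the subsystem id;
	 * modern devices start at 0x1040. */
	if (pci_device < 0x1040)
		return subsys;
	return pci_device - 0x1040;
}

int main(void)
{
	printf("%u\n", virtio_id_from_pci(0x1041, 0));	/* modern: 1 */
	printf("%u\n", virtio_id_from_pci(0x1000, 1));	/* transitional: 1 */
	return 0;
}
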
684
685void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
686{
687 struct pci_dev *pci_dev = vp_dev->pci_dev;
688
689 if (vp_dev->device)
690 pci_iounmap(pci_dev, vp_dev->device);
691 if (vp_dev->notify_base)
692 pci_iounmap(pci_dev, vp_dev->notify_base);
693 pci_iounmap(pci_dev, vp_dev->isr);
694 pci_iounmap(pci_dev, vp_dev->common);
695}
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 00ec6b3f96b2..096b857e7b75 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -54,8 +54,7 @@
54#define END_USE(vq) 54#define END_USE(vq)
55#endif 55#endif
56 56
57struct vring_virtqueue 57struct vring_virtqueue {
58{
59 struct virtqueue vq; 58 struct virtqueue vq;
60 59
61 /* Actual memory layout for this queue */ 60 /* Actual memory layout for this queue */
@@ -245,14 +244,14 @@ static inline int virtqueue_add(struct virtqueue *_vq,
245 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1); 244 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1);
246 vq->num_added++; 245 vq->num_added++;
247 246
247 pr_debug("Added buffer head %i to %p\n", head, vq);
248 END_USE(vq);
249
248 /* This is very unlikely, but theoretically possible. Kick 250 /* This is very unlikely, but theoretically possible. Kick
249 * just in case. */ 251 * just in case. */
250 if (unlikely(vq->num_added == (1 << 16) - 1)) 252 if (unlikely(vq->num_added == (1 << 16) - 1))
251 virtqueue_kick(_vq); 253 virtqueue_kick(_vq);
252 254
253 pr_debug("Added buffer head %i to %p\n", head, vq);
254 END_USE(vq);
255
256 return 0; 255 return 0;
257} 256}
258 257
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 08f41add1461..16f202350997 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -505,6 +505,16 @@ config MESON_WATCHDOG
505 To compile this driver as a module, choose M here: the 505 To compile this driver as a module, choose M here: the
506 module will be called meson_wdt. 506 module will be called meson_wdt.
507 507
508config MEDIATEK_WATCHDOG
509 tristate "Mediatek SoCs watchdog support"
510 depends on ARCH_MEDIATEK
511 select WATCHDOG_CORE
512 help
513 Say Y here to include support for the watchdog timer
514 in Mediatek SoCs.
515 To compile this driver as a module, choose M here: the
516 module will be called mtk_wdt.
517
508# AVR32 Architecture 518# AVR32 Architecture
509 519
510config AT32AP700X_WDT 520config AT32AP700X_WDT
@@ -1005,6 +1015,8 @@ config W83627HF_WDT
1005 NCT6775 1015 NCT6775
1006 NCT6776 1016 NCT6776
1007 NCT6779 1017 NCT6779
1018 NCT6791
1019 NCT6792
1008 1020
1009 This watchdog simply watches your kernel to make sure it doesn't 1021 This watchdog simply watches your kernel to make sure it doesn't
1010 freeze, and if it does, it reboots your computer after a certain 1022 freeze, and if it does, it reboots your computer after a certain
@@ -1101,7 +1113,7 @@ config ATH79_WDT
1101 1113
1102config BCM47XX_WDT 1114config BCM47XX_WDT
1103 tristate "Broadcom BCM47xx Watchdog Timer" 1115 tristate "Broadcom BCM47xx Watchdog Timer"
1104 depends on BCM47XX 1116 depends on BCM47XX || ARCH_BCM_5301X
1105 select WATCHDOG_CORE 1117 select WATCHDOG_CORE
1106 help 1118 help
1107 Hardware driver for the Broadcom BCM47xx Watchdog Timer. 1119 Hardware driver for the Broadcom BCM47xx Watchdog Timer.
@@ -1235,6 +1247,17 @@ config BCM_KONA_WDT_DEBUG
1235 1247
1236 If in doubt, say 'N'. 1248 If in doubt, say 'N'.
1237 1249
1250config IMGPDC_WDT
1251 tristate "Imagination Technologies PDC Watchdog Timer"
1252 depends on HAS_IOMEM
1253 depends on METAG || MIPS || COMPILE_TEST
1254 help
1255 Driver for Imagination Technologies PowerDown Controller
1256 Watchdog Timer.
1257
1258 To compile this driver as a loadable module, choose M here.
1259 The module will be called imgpdc_wdt.
1260
1238config LANTIQ_WDT 1261config LANTIQ_WDT
1239 tristate "Lantiq SoC watchdog" 1262 tristate "Lantiq SoC watchdog"
1240 depends on LANTIQ 1263 depends on LANTIQ
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index c569ec8f8a76..5c19294d1c30 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_QCOM_WDT) += qcom-wdt.o
63obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o 63obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o
64obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o 64obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o
65obj-$(CONFIG_MESON_WATCHDOG) += meson_wdt.o 65obj-$(CONFIG_MESON_WATCHDOG) += meson_wdt.o
66obj-$(CONFIG_MEDIATEK_WATCHDOG) += mtk_wdt.o
66 67
67# AVR32 Architecture 68# AVR32 Architecture
68obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o 69obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
@@ -142,6 +143,7 @@ obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o
142octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o 143octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o
143obj-$(CONFIG_LANTIQ_WDT) += lantiq_wdt.o 144obj-$(CONFIG_LANTIQ_WDT) += lantiq_wdt.o
144obj-$(CONFIG_RALINK_WDT) += rt2880_wdt.o 145obj-$(CONFIG_RALINK_WDT) += rt2880_wdt.o
146obj-$(CONFIG_IMGPDC_WDT) += imgpdc_wdt.o
145 147
146# PARISC Architecture 148# PARISC Architecture
147 149
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index 9816485f6825..b28a072abf78 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -169,6 +169,17 @@ static int bcm47xx_wdt_notify_sys(struct notifier_block *this,
169 return NOTIFY_DONE; 169 return NOTIFY_DONE;
170} 170}
171 171
172static int bcm47xx_wdt_restart(struct notifier_block *this, unsigned long mode,
173 void *cmd)
174{
175 struct bcm47xx_wdt *wdt;
176
177 wdt = container_of(this, struct bcm47xx_wdt, restart_handler);
178 wdt->timer_set(wdt, 1);
179
180 return NOTIFY_DONE;
181}
182
172static struct watchdog_ops bcm47xx_wdt_soft_ops = { 183static struct watchdog_ops bcm47xx_wdt_soft_ops = {
173 .owner = THIS_MODULE, 184 .owner = THIS_MODULE,
174 .start = bcm47xx_wdt_soft_start, 185 .start = bcm47xx_wdt_soft_start,
@@ -209,15 +220,23 @@ static int bcm47xx_wdt_probe(struct platform_device *pdev)
209 if (ret) 220 if (ret)
210 goto err_timer; 221 goto err_timer;
211 222
212 ret = watchdog_register_device(&wdt->wdd); 223 wdt->restart_handler.notifier_call = &bcm47xx_wdt_restart;
224 wdt->restart_handler.priority = 64;
225 ret = register_restart_handler(&wdt->restart_handler);
213 if (ret) 226 if (ret)
214 goto err_notifier; 227 goto err_notifier;
215 228
229 ret = watchdog_register_device(&wdt->wdd);
230 if (ret)
231 goto err_handler;
232
216 dev_info(&pdev->dev, "BCM47xx Watchdog Timer enabled (%d seconds%s%s)\n", 233 dev_info(&pdev->dev, "BCM47xx Watchdog Timer enabled (%d seconds%s%s)\n",
217 timeout, nowayout ? ", nowayout" : "", 234 timeout, nowayout ? ", nowayout" : "",
218 soft ? ", Software Timer" : ""); 235 soft ? ", Software Timer" : "");
219 return 0; 236 return 0;
220 237
238err_handler:
239 unregister_restart_handler(&wdt->restart_handler);
221err_notifier: 240err_notifier:
222 unregister_reboot_notifier(&wdt->notifier); 241 unregister_reboot_notifier(&wdt->notifier);
223err_timer: 242err_timer:
diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c
index 2cd6b2c2dd2a..e2fe2ebdebd4 100644
--- a/drivers/watchdog/da9063_wdt.c
+++ b/drivers/watchdog/da9063_wdt.c
@@ -20,6 +20,7 @@
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/mfd/da9063/registers.h> 21#include <linux/mfd/da9063/registers.h>
22#include <linux/mfd/da9063/core.h> 22#include <linux/mfd/da9063/core.h>
23#include <linux/reboot.h>
23#include <linux/regmap.h> 24#include <linux/regmap.h>
24 25
25/* 26/*
@@ -38,6 +39,7 @@ static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 };
38struct da9063_watchdog { 39struct da9063_watchdog {
39 struct da9063 *da9063; 40 struct da9063 *da9063;
40 struct watchdog_device wdtdev; 41 struct watchdog_device wdtdev;
42 struct notifier_block restart_handler;
41}; 43};
42 44
43static unsigned int da9063_wdt_timeout_to_sel(unsigned int secs) 45static unsigned int da9063_wdt_timeout_to_sel(unsigned int secs)
@@ -119,6 +121,23 @@ static int da9063_wdt_set_timeout(struct watchdog_device *wdd,
119 return ret; 121 return ret;
120} 122}
121 123
124static int da9063_wdt_restart_handler(struct notifier_block *this,
125 unsigned long mode, void *cmd)
126{
127 struct da9063_watchdog *wdt = container_of(this,
128 struct da9063_watchdog,
129 restart_handler);
130 int ret;
131
132 ret = regmap_write(wdt->da9063->regmap, DA9063_REG_CONTROL_F,
133 DA9063_SHUTDOWN);
134 if (ret)
135 dev_alert(wdt->da9063->dev, "Failed to shutdown (err = %d)\n",
136 ret);
137
138 return NOTIFY_DONE;
139}
140
122static const struct watchdog_info da9063_watchdog_info = { 141static const struct watchdog_info da9063_watchdog_info = {
123 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, 142 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
124 .identity = "DA9063 Watchdog", 143 .identity = "DA9063 Watchdog",
@@ -163,14 +182,25 @@ static int da9063_wdt_probe(struct platform_device *pdev)
163 dev_set_drvdata(&pdev->dev, wdt); 182 dev_set_drvdata(&pdev->dev, wdt);
164 183
165 ret = watchdog_register_device(&wdt->wdtdev); 184 ret = watchdog_register_device(&wdt->wdtdev);
185 if (ret)
186 return ret;
166 187
167 return ret; 188 wdt->restart_handler.notifier_call = da9063_wdt_restart_handler;
189 wdt->restart_handler.priority = 128;
190 ret = register_restart_handler(&wdt->restart_handler);
191 if (ret)
192 dev_err(wdt->da9063->dev,
193 "Failed to register restart handler (err = %d)\n", ret);
194
195 return 0;
168} 196}
169 197
170static int da9063_wdt_remove(struct platform_device *pdev) 198static int da9063_wdt_remove(struct platform_device *pdev)
171{ 199{
172 struct da9063_watchdog *wdt = dev_get_drvdata(&pdev->dev); 200 struct da9063_watchdog *wdt = dev_get_drvdata(&pdev->dev);
173 201
202 unregister_restart_handler(&wdt->restart_handler);
203
174 watchdog_unregister_device(&wdt->wdtdev); 204 watchdog_unregister_device(&wdt->wdtdev);
175 205
176 return 0; 206 return 0;
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index b34a2e4e4e43..d0bb9499d12c 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -51,6 +51,8 @@
51/* The maximum TOP (timeout period) value that can be set in the watchdog. */ 51/* The maximum TOP (timeout period) value that can be set in the watchdog. */
52#define DW_WDT_MAX_TOP 15 52#define DW_WDT_MAX_TOP 15
53 53
54#define DW_WDT_DEFAULT_SECONDS 30
55
54static bool nowayout = WATCHDOG_NOWAYOUT; 56static bool nowayout = WATCHDOG_NOWAYOUT;
55module_param(nowayout, bool, 0); 57module_param(nowayout, bool, 0);
56MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " 58MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
@@ -96,6 +98,12 @@ static inline void dw_wdt_set_next_heartbeat(void)
96 dw_wdt.next_heartbeat = jiffies + dw_wdt_get_top() * HZ; 98 dw_wdt.next_heartbeat = jiffies + dw_wdt_get_top() * HZ;
97} 99}
98 100
101static void dw_wdt_keepalive(void)
102{
103 writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt.regs +
104 WDOG_COUNTER_RESTART_REG_OFFSET);
105}
106
99static int dw_wdt_set_top(unsigned top_s) 107static int dw_wdt_set_top(unsigned top_s)
100{ 108{
101 int i, top_val = DW_WDT_MAX_TOP; 109 int i, top_val = DW_WDT_MAX_TOP;
@@ -110,21 +118,27 @@ static int dw_wdt_set_top(unsigned top_s)
110 break; 118 break;
111 } 119 }
112 120
113 /* Set the new value in the watchdog. */ 121 /*
122 * Set the new value in the watchdog. Some versions of dw_wdt
	123	 * have TOPINIT in the TIMEOUT_RANGE register (as per
124 * CP_WDT_DUAL_TOP in WDT_COMP_PARAMS_1). On those we
125 * effectively get a pat of the watchdog right here.
126 */
114 writel(top_val | top_val << WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT, 127 writel(top_val | top_val << WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT,
115 dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET); 128 dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
116 129
130 /*
131 * Add an explicit pat to handle versions of the watchdog that
132 * don't have TOPINIT. This won't hurt on versions that have
133 * it.
134 */
135 dw_wdt_keepalive();
136
117 dw_wdt_set_next_heartbeat(); 137 dw_wdt_set_next_heartbeat();
118 138
119 return dw_wdt_top_in_seconds(top_val); 139 return dw_wdt_top_in_seconds(top_val);
120} 140}
121 141
122static void dw_wdt_keepalive(void)
123{
124 writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt.regs +
125 WDOG_COUNTER_RESTART_REG_OFFSET);
126}
127
128static int dw_wdt_restart_handle(struct notifier_block *this, 142static int dw_wdt_restart_handle(struct notifier_block *this,
129 unsigned long mode, void *cmd) 143 unsigned long mode, void *cmd)
130{ 144{
@@ -167,9 +181,9 @@ static int dw_wdt_open(struct inode *inode, struct file *filp)
167 if (!dw_wdt_is_enabled()) { 181 if (!dw_wdt_is_enabled()) {
168 /* 182 /*
169 * The watchdog is not currently enabled. Set the timeout to 183 * The watchdog is not currently enabled. Set the timeout to
170 * the maximum and then start it. 184 * something reasonable and then start it.
171 */ 185 */
172 dw_wdt_set_top(DW_WDT_MAX_TOP); 186 dw_wdt_set_top(DW_WDT_DEFAULT_SECONDS);
173 writel(WDOG_CONTROL_REG_WDT_EN_MASK, 187 writel(WDOG_CONTROL_REG_WDT_EN_MASK,
174 dw_wdt.regs + WDOG_CONTROL_REG_OFFSET); 188 dw_wdt.regs + WDOG_CONTROL_REG_OFFSET);
175 } 189 }
diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c
index bbdb19b45332..cbc313d37c59 100644
--- a/drivers/watchdog/gpio_wdt.c
+++ b/drivers/watchdog/gpio_wdt.c
@@ -31,6 +31,8 @@ struct gpio_wdt_priv {
31 int gpio; 31 int gpio;
32 bool active_low; 32 bool active_low;
33 bool state; 33 bool state;
34 bool always_running;
35 bool armed;
34 unsigned int hw_algo; 36 unsigned int hw_algo;
35 unsigned int hw_margin; 37 unsigned int hw_margin;
36 unsigned long last_jiffies; 38 unsigned long last_jiffies;
@@ -48,14 +50,20 @@ static void gpio_wdt_disable(struct gpio_wdt_priv *priv)
48 gpio_direction_input(priv->gpio); 50 gpio_direction_input(priv->gpio);
49} 51}
50 52
51static int gpio_wdt_start(struct watchdog_device *wdd) 53static void gpio_wdt_start_impl(struct gpio_wdt_priv *priv)
52{ 54{
53 struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
54
55 priv->state = priv->active_low; 55 priv->state = priv->active_low;
56 gpio_direction_output(priv->gpio, priv->state); 56 gpio_direction_output(priv->gpio, priv->state);
57 priv->last_jiffies = jiffies; 57 priv->last_jiffies = jiffies;
58 mod_timer(&priv->timer, priv->last_jiffies + priv->hw_margin); 58 mod_timer(&priv->timer, priv->last_jiffies + priv->hw_margin);
59}
60
61static int gpio_wdt_start(struct watchdog_device *wdd)
62{
63 struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
64
65 gpio_wdt_start_impl(priv);
66 priv->armed = true;
59 67
60 return 0; 68 return 0;
61} 69}
@@ -64,8 +72,11 @@ static int gpio_wdt_stop(struct watchdog_device *wdd)
64{ 72{
65 struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd); 73 struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
66 74
67 mod_timer(&priv->timer, 0); 75 priv->armed = false;
68 gpio_wdt_disable(priv); 76 if (!priv->always_running) {
77 mod_timer(&priv->timer, 0);
78 gpio_wdt_disable(priv);
79 }
69 80
70 return 0; 81 return 0;
71} 82}
@@ -91,8 +102,8 @@ static void gpio_wdt_hwping(unsigned long data)
91 struct watchdog_device *wdd = (struct watchdog_device *)data; 102 struct watchdog_device *wdd = (struct watchdog_device *)data;
92 struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd); 103 struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
93 104
94 if (time_after(jiffies, priv->last_jiffies + 105 if (priv->armed && time_after(jiffies, priv->last_jiffies +
95 msecs_to_jiffies(wdd->timeout * 1000))) { 106 msecs_to_jiffies(wdd->timeout * 1000))) {
96 dev_crit(wdd->dev, "Timer expired. System will reboot soon!\n"); 107 dev_crit(wdd->dev, "Timer expired. System will reboot soon!\n");
97 return; 108 return;
98 } 109 }
@@ -197,6 +208,9 @@ static int gpio_wdt_probe(struct platform_device *pdev)
197 /* Use safe value (1/2 of real timeout) */ 208 /* Use safe value (1/2 of real timeout) */
198 priv->hw_margin = msecs_to_jiffies(hw_margin / 2); 209 priv->hw_margin = msecs_to_jiffies(hw_margin / 2);
199 210
211 priv->always_running = of_property_read_bool(pdev->dev.of_node,
212 "always-running");
213
200 watchdog_set_drvdata(&priv->wdd, priv); 214 watchdog_set_drvdata(&priv->wdd, priv);
201 215
202 priv->wdd.info = &gpio_wdt_ident; 216 priv->wdd.info = &gpio_wdt_ident;
@@ -216,8 +230,15 @@ static int gpio_wdt_probe(struct platform_device *pdev)
216 priv->notifier.notifier_call = gpio_wdt_notify_sys; 230 priv->notifier.notifier_call = gpio_wdt_notify_sys;
217 ret = register_reboot_notifier(&priv->notifier); 231 ret = register_reboot_notifier(&priv->notifier);
218 if (ret) 232 if (ret)
219 watchdog_unregister_device(&priv->wdd); 233 goto error_unregister;
220 234
235 if (priv->always_running)
236 gpio_wdt_start_impl(priv);
237
238 return 0;
239
240error_unregister:
241 watchdog_unregister_device(&priv->wdd);
221 return ret; 242 return ret;
222} 243}
223 244
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 75d2243b94f5..ada3e44f9932 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -745,7 +745,7 @@ static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
745 745
746 dev_info(&dev->dev, 746 dev_info(&dev->dev,
747 "HP Watchdog Timer Driver: NMI decoding initialized" 747 "HP Watchdog Timer Driver: NMI decoding initialized"
748 ", allow kernel dump: %s (default = 0/OFF)\n", 748 ", allow kernel dump: %s (default = 1/ON)\n",
749 (allow_kdump == 0) ? "OFF" : "ON"); 749 (allow_kdump == 0) ? "OFF" : "ON");
750 return 0; 750 return 0;
751 751
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c
new file mode 100644
index 000000000000..c8def68d9e4c
--- /dev/null
+++ b/drivers/watchdog/imgpdc_wdt.c
@@ -0,0 +1,289 @@
1/*
2 * Imagination Technologies PowerDown Controller Watchdog Timer.
3 *
4 * Copyright (c) 2014 Imagination Technologies Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
9 *
10 * Based on drivers/watchdog/sunxi_wdt.c Copyright (c) 2013 Carlo Caione
11 * 2012 Henrik Nordstrom
12 */
13
14#include <linux/clk.h>
15#include <linux/io.h>
16#include <linux/log2.h>
17#include <linux/module.h>
18#include <linux/platform_device.h>
19#include <linux/slab.h>
20#include <linux/watchdog.h>
21
22/* registers */
23#define PDC_WDT_SOFT_RESET 0x00
24#define PDC_WDT_CONFIG 0x04
25 #define PDC_WDT_CONFIG_ENABLE BIT(31)
26 #define PDC_WDT_CONFIG_DELAY_MASK 0x1f
27
28#define PDC_WDT_TICKLE1 0x08
29#define PDC_WDT_TICKLE1_MAGIC 0xabcd1234
30#define PDC_WDT_TICKLE2 0x0c
31#define PDC_WDT_TICKLE2_MAGIC 0x4321dcba
32
33#define PDC_WDT_TICKLE_STATUS_MASK 0x7
34#define PDC_WDT_TICKLE_STATUS_SHIFT 0
35#define PDC_WDT_TICKLE_STATUS_HRESET 0x0 /* Hard reset */
36#define PDC_WDT_TICKLE_STATUS_TIMEOUT 0x1 /* Timeout */
37#define PDC_WDT_TICKLE_STATUS_TICKLE 0x2 /* Tickled incorrectly */
38#define PDC_WDT_TICKLE_STATUS_SRESET 0x3 /* Soft reset */
39#define PDC_WDT_TICKLE_STATUS_USER 0x4 /* User reset */
40
41/* Timeout values are in seconds */
42#define PDC_WDT_MIN_TIMEOUT 1
43#define PDC_WDT_DEF_TIMEOUT 64
44
45static int heartbeat;
46module_param(heartbeat, int, 0);
47MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. "
48 "(default = " __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");
49
50static bool nowayout = WATCHDOG_NOWAYOUT;
51module_param(nowayout, bool, 0);
52MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
53 "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
54
55struct pdc_wdt_dev {
56 struct watchdog_device wdt_dev;
57 struct clk *wdt_clk;
58 struct clk *sys_clk;
59 void __iomem *base;
60};
61
62static int pdc_wdt_keepalive(struct watchdog_device *wdt_dev)
63{
64 struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev);
65
66 writel(PDC_WDT_TICKLE1_MAGIC, wdt->base + PDC_WDT_TICKLE1);
67 writel(PDC_WDT_TICKLE2_MAGIC, wdt->base + PDC_WDT_TICKLE2);
68
69 return 0;
70}
71
72static int pdc_wdt_stop(struct watchdog_device *wdt_dev)
73{
74 unsigned int val;
75 struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev);
76
77 val = readl(wdt->base + PDC_WDT_CONFIG);
78 val &= ~PDC_WDT_CONFIG_ENABLE;
79 writel(val, wdt->base + PDC_WDT_CONFIG);
80
81 /* Must tickle to finish the stop */
82 pdc_wdt_keepalive(wdt_dev);
83
84 return 0;
85}
86
87static int pdc_wdt_set_timeout(struct watchdog_device *wdt_dev,
88 unsigned int new_timeout)
89{
90 unsigned int val;
91 struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev);
92 unsigned long clk_rate = clk_get_rate(wdt->wdt_clk);
93
94 wdt->wdt_dev.timeout = new_timeout;
95
96 val = readl(wdt->base + PDC_WDT_CONFIG) & ~PDC_WDT_CONFIG_DELAY_MASK;
97 val |= order_base_2(new_timeout * clk_rate) - 1;
98 writel(val, wdt->base + PDC_WDT_CONFIG);
99
100 return 0;
101}
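
The delay field written above is order_base_2(timeout * clk_rate) - 1, i.e. the watchdog presumably counts the smallest power of two of wdt-clock ticks that covers the requested timeout. A userspace sketch of that encoding (the 2^(delay+1) tick interpretation is inferred from the formula, and the clock rate below is hypothetical):

#include <stdio.h>

/* order_base_2(n): ceil(log2(n)), with order_base_2(1) == 0,
 * matching the kernel helper used above. */
static unsigned order_base_2(unsigned long n)
{
	unsigned order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned long clk_rate = 32768;	/* hypothetical wdt clock */
	unsigned timeout_s = 10;
	unsigned delay = order_base_2(timeout_s * clk_rate) - 1;

	/* 10 s at 32768 Hz rounds up to 2^19 ticks, i.e. 16 s. */
	printf("delay field %u -> %lu ticks (~%lus)\n",
	       delay, 1UL << (delay + 1), (1UL << (delay + 1)) / clk_rate);
	return 0;
}
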
102
103/* Start the watchdog timer (delay should already be set) */
104static int pdc_wdt_start(struct watchdog_device *wdt_dev)
105{
106 unsigned int val;
107 struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev);
108
109 val = readl(wdt->base + PDC_WDT_CONFIG);
110 val |= PDC_WDT_CONFIG_ENABLE;
111 writel(val, wdt->base + PDC_WDT_CONFIG);
112
113 return 0;
114}
115
116static struct watchdog_info pdc_wdt_info = {
117 .identity = "IMG PDC Watchdog",
118 .options = WDIOF_SETTIMEOUT |
119 WDIOF_KEEPALIVEPING |
120 WDIOF_MAGICCLOSE,
121};
122
123static const struct watchdog_ops pdc_wdt_ops = {
124 .owner = THIS_MODULE,
125 .start = pdc_wdt_start,
126 .stop = pdc_wdt_stop,
127 .ping = pdc_wdt_keepalive,
128 .set_timeout = pdc_wdt_set_timeout,
129};
130
131static int pdc_wdt_probe(struct platform_device *pdev)
132{
133 int ret, val;
134 unsigned long clk_rate;
135 struct resource *res;
136 struct pdc_wdt_dev *pdc_wdt;
137
138 pdc_wdt = devm_kzalloc(&pdev->dev, sizeof(*pdc_wdt), GFP_KERNEL);
139 if (!pdc_wdt)
140 return -ENOMEM;
141
142 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
143 pdc_wdt->base = devm_ioremap_resource(&pdev->dev, res);
144 if (IS_ERR(pdc_wdt->base))
145 return PTR_ERR(pdc_wdt->base);
146
147 pdc_wdt->sys_clk = devm_clk_get(&pdev->dev, "sys");
148 if (IS_ERR(pdc_wdt->sys_clk)) {
149 dev_err(&pdev->dev, "failed to get the sys clock\n");
150 return PTR_ERR(pdc_wdt->sys_clk);
151 }
152
153 pdc_wdt->wdt_clk = devm_clk_get(&pdev->dev, "wdt");
154 if (IS_ERR(pdc_wdt->wdt_clk)) {
155 dev_err(&pdev->dev, "failed to get the wdt clock\n");
156 return PTR_ERR(pdc_wdt->wdt_clk);
157 }
158
159 ret = clk_prepare_enable(pdc_wdt->sys_clk);
160 if (ret) {
161 dev_err(&pdev->dev, "could not prepare or enable sys clock\n");
162 return ret;
163 }
164
165 ret = clk_prepare_enable(pdc_wdt->wdt_clk);
166 if (ret) {
167 dev_err(&pdev->dev, "could not prepare or enable wdt clock\n");
168 goto disable_sys_clk;
169 }
170
171 /* We use the clock rate to calculate the max timeout */
172 clk_rate = clk_get_rate(pdc_wdt->wdt_clk);
173 if (clk_rate == 0) {
174 dev_err(&pdev->dev, "failed to get clock rate\n");
175 ret = -EINVAL;
176 goto disable_wdt_clk;
177 }
178
179 if (order_base_2(clk_rate) > PDC_WDT_CONFIG_DELAY_MASK + 1) {
180 dev_err(&pdev->dev, "invalid clock rate\n");
181 ret = -EINVAL;
182 goto disable_wdt_clk;
183 }
184
185 if (order_base_2(clk_rate) == 0)
186 pdc_wdt->wdt_dev.min_timeout = PDC_WDT_MIN_TIMEOUT + 1;
187 else
188 pdc_wdt->wdt_dev.min_timeout = PDC_WDT_MIN_TIMEOUT;
189
190 pdc_wdt->wdt_dev.info = &pdc_wdt_info;
191 pdc_wdt->wdt_dev.ops = &pdc_wdt_ops;
192 pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK;
193 pdc_wdt->wdt_dev.parent = &pdev->dev;
194
195 ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev);
196 if (ret < 0) {
197 pdc_wdt->wdt_dev.timeout = pdc_wdt->wdt_dev.max_timeout;
198 dev_warn(&pdev->dev,
199 "Initial timeout out of range! setting max timeout\n");
200 }
201
202 pdc_wdt_stop(&pdc_wdt->wdt_dev);
203
204 /* Find what caused the last reset */
205 val = readl(pdc_wdt->base + PDC_WDT_TICKLE1);
206 val = (val & PDC_WDT_TICKLE_STATUS_MASK) >> PDC_WDT_TICKLE_STATUS_SHIFT;
207 switch (val) {
208 case PDC_WDT_TICKLE_STATUS_TICKLE:
209 case PDC_WDT_TICKLE_STATUS_TIMEOUT:
210 pdc_wdt->wdt_dev.bootstatus |= WDIOF_CARDRESET;
211 dev_info(&pdev->dev,
212 "watchdog module last reset due to timeout\n");
213 break;
214 case PDC_WDT_TICKLE_STATUS_HRESET:
215 dev_info(&pdev->dev,
216 "watchdog module last reset due to hard reset\n");
217 break;
218 case PDC_WDT_TICKLE_STATUS_SRESET:
219 dev_info(&pdev->dev,
220 "watchdog module last reset due to soft reset\n");
221 break;
222 case PDC_WDT_TICKLE_STATUS_USER:
223 dev_info(&pdev->dev,
224 "watchdog module last reset due to user reset\n");
225 break;
226 default:
227 dev_info(&pdev->dev,
228 "contains an illegal status code (%08x)\n", val);
229 break;
230 }
231
232 watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout);
233
234 platform_set_drvdata(pdev, pdc_wdt);
235 watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
236
237 ret = watchdog_register_device(&pdc_wdt->wdt_dev);
238 if (ret)
239 goto disable_wdt_clk;
240
241 return 0;
242
243disable_wdt_clk:
244 clk_disable_unprepare(pdc_wdt->wdt_clk);
245disable_sys_clk:
246 clk_disable_unprepare(pdc_wdt->sys_clk);
247 return ret;
248}
249
250static void pdc_wdt_shutdown(struct platform_device *pdev)
251{
252 struct pdc_wdt_dev *pdc_wdt = platform_get_drvdata(pdev);
253
254 pdc_wdt_stop(&pdc_wdt->wdt_dev);
255}
256
257static int pdc_wdt_remove(struct platform_device *pdev)
258{
259 struct pdc_wdt_dev *pdc_wdt = platform_get_drvdata(pdev);
260
261 pdc_wdt_stop(&pdc_wdt->wdt_dev);
262 watchdog_unregister_device(&pdc_wdt->wdt_dev);
263 clk_disable_unprepare(pdc_wdt->wdt_clk);
264 clk_disable_unprepare(pdc_wdt->sys_clk);
265
266 return 0;
267}
268
269static const struct of_device_id pdc_wdt_match[] = {
270 { .compatible = "img,pdc-wdt" },
271 {}
272};
273MODULE_DEVICE_TABLE(of, pdc_wdt_match);
274
275static struct platform_driver pdc_wdt_driver = {
276 .driver = {
277 .name = "imgpdc-wdt",
278 .of_match_table = pdc_wdt_match,
279 },
280 .probe = pdc_wdt_probe,
281 .remove = pdc_wdt_remove,
282 .shutdown = pdc_wdt_shutdown,
283};
284module_platform_driver(pdc_wdt_driver);
285
286MODULE_AUTHOR("Jude Abraham <Jude.Abraham@imgtec.com>");
287MODULE_AUTHOR("Naidu Tellapati <Naidu.Tellapati@imgtec.com>");
288MODULE_DESCRIPTION("Imagination Technologies PDC Watchdog Timer Driver");
289MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 5142bbabe027..5e6d808d358a 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -205,7 +205,7 @@ static inline void imx2_wdt_ping_if_active(struct watchdog_device *wdog)
205 } 205 }
206} 206}
207 207
208static struct watchdog_ops imx2_wdt_ops = { 208static const struct watchdog_ops imx2_wdt_ops = {
209 .owner = THIS_MODULE, 209 .owner = THIS_MODULE,
210 .start = imx2_wdt_start, 210 .start = imx2_wdt_start,
211 .stop = imx2_wdt_stop, 211 .stop = imx2_wdt_stop,
@@ -213,7 +213,7 @@ static struct watchdog_ops imx2_wdt_ops = {
213 .set_timeout = imx2_wdt_set_timeout, 213 .set_timeout = imx2_wdt_set_timeout,
214}; 214};
215 215
216static struct regmap_config imx2_wdt_regmap_config = { 216static const struct regmap_config imx2_wdt_regmap_config = {
217 .reg_bits = 16, 217 .reg_bits = 16,
218 .reg_stride = 2, 218 .reg_stride = 2,
219 .val_bits = 16, 219 .val_bits = 16,
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index 0b93739c0106..e54839b12650 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -12,8 +12,8 @@
12 * http://www.ite.com.tw/ 12 * http://www.ite.com.tw/
13 * 13 *
14 * Support of the watchdog timers, which are available on 14 * Support of the watchdog timers, which are available on
15 * IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726 15 * IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726,
16 * and IT8728. 16 * IT8728 and IT8783.
17 * 17 *
18 * This program is free software; you can redistribute it and/or 18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License 19 * modify it under the terms of the GNU General Public License
@@ -87,6 +87,7 @@
87#define IT8721_ID 0x8721 87#define IT8721_ID 0x8721
 88#define IT8726_ID	0x8726	/* the data sheet wrongly suggests 0x8716 */	 88#define IT8726_ID	0x8726	/* the data sheet wrongly suggests 0x8716 */
89#define IT8728_ID 0x8728 89#define IT8728_ID 0x8728
90#define IT8783_ID 0x8783
90 91
91/* GPIO Configuration Registers LDN=0x07 */ 92/* GPIO Configuration Registers LDN=0x07 */
92#define WDTCTRL 0x71 93#define WDTCTRL 0x71
@@ -633,6 +634,7 @@ static int __init it87_wdt_init(void)
633 case IT8720_ID: 634 case IT8720_ID:
634 case IT8721_ID: 635 case IT8721_ID:
635 case IT8728_ID: 636 case IT8728_ID:
637 case IT8783_ID:
636 max_units = 65535; 638 max_units = 65535;
637 try_gameport = 0; 639 try_gameport = 0;
638 break; 640 break;
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index 18e41afa4da3..4c2cc09c0c57 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -24,6 +24,7 @@
24#include <linux/clk.h> 24#include <linux/clk.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/err.h> 26#include <linux/err.h>
27#include <linux/of.h>
27 28
28#include <asm/mach-jz4740/timer.h> 29#include <asm/mach-jz4740/timer.h>
29 30
@@ -142,6 +143,14 @@ static const struct watchdog_ops jz4740_wdt_ops = {
142 .set_timeout = jz4740_wdt_set_timeout, 143 .set_timeout = jz4740_wdt_set_timeout,
143}; 144};
144 145
146#ifdef CONFIG_OF
147static const struct of_device_id jz4740_wdt_of_matches[] = {
148 { .compatible = "ingenic,jz4740-watchdog", },
149 { /* sentinel */ }
150};
151MODULE_DEVICE_TABLE(of, jz4740_wdt_of_matches);
152#endif
153
145static int jz4740_wdt_probe(struct platform_device *pdev) 154static int jz4740_wdt_probe(struct platform_device *pdev)
146{ 155{
147 struct jz4740_wdt_drvdata *drvdata; 156 struct jz4740_wdt_drvdata *drvdata;
@@ -211,6 +220,7 @@ static struct platform_driver jz4740_wdt_driver = {
211 .remove = jz4740_wdt_remove, 220 .remove = jz4740_wdt_remove,
212 .driver = { 221 .driver = {
213 .name = "jz4740-wdt", 222 .name = "jz4740-wdt",
223 .of_match_table = of_match_ptr(jz4740_wdt_of_matches),
214 }, 224 },
215}; 225};
216 226
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
new file mode 100644
index 000000000000..a87f6df6e85f
--- /dev/null
+++ b/drivers/watchdog/mtk_wdt.c
@@ -0,0 +1,251 @@
1/*
2 * Mediatek Watchdog Driver
3 *
4 * Copyright (C) 2014 Matthias Brugger
5 *
6 * Matthias Brugger <matthias.bgg@gmail.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * Based on sunxi_wdt.c
19 */
20
21#include <linux/err.h>
22#include <linux/init.h>
23#include <linux/io.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/of.h>
28#include <linux/platform_device.h>
29#include <linux/types.h>
30#include <linux/watchdog.h>
31#include <linux/notifier.h>
32#include <linux/reboot.h>
33#include <linux/delay.h>
34
35#define WDT_MAX_TIMEOUT 31
36#define WDT_MIN_TIMEOUT 1
37#define WDT_LENGTH_TIMEOUT(n) ((n) << 5)
38
39#define WDT_LENGTH 0x04
40#define WDT_LENGTH_KEY 0x8
41
42#define WDT_RST 0x08
43#define WDT_RST_RELOAD 0x1971
44
45#define WDT_MODE 0x00
46#define WDT_MODE_EN (1 << 0)
47#define WDT_MODE_EXT_POL_LOW (0 << 1)
48#define WDT_MODE_EXT_POL_HIGH (1 << 1)
49#define WDT_MODE_EXRST_EN (1 << 2)
50#define WDT_MODE_IRQ_EN (1 << 3)
51#define WDT_MODE_AUTO_START (1 << 4)
52#define WDT_MODE_DUAL_EN (1 << 6)
53#define WDT_MODE_KEY 0x22000000
54
55#define WDT_SWRST 0x14
56#define WDT_SWRST_KEY 0x1209
57
58#define DRV_NAME "mtk-wdt"
59#define DRV_VERSION "1.0"
60
61static bool nowayout = WATCHDOG_NOWAYOUT;
62static unsigned int timeout = WDT_MAX_TIMEOUT;
63
64struct mtk_wdt_dev {
65 struct watchdog_device wdt_dev;
66 void __iomem *wdt_base;
67 struct notifier_block restart_handler;
68};
69
70static int mtk_reset_handler(struct notifier_block *this, unsigned long mode,
71 void *cmd)
72{
73 struct mtk_wdt_dev *mtk_wdt;
74 void __iomem *wdt_base;
75
76 mtk_wdt = container_of(this, struct mtk_wdt_dev, restart_handler);
77 wdt_base = mtk_wdt->wdt_base;
78
79 while (1) {
80 writel(WDT_SWRST_KEY, wdt_base + WDT_SWRST);
81 mdelay(5);
82 }
83
84 return NOTIFY_DONE;
85}
86
87static int mtk_wdt_ping(struct watchdog_device *wdt_dev)
88{
89 struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
90 void __iomem *wdt_base = mtk_wdt->wdt_base;
91
92 iowrite32(WDT_RST_RELOAD, wdt_base + WDT_RST);
93
94 return 0;
95}
96
97static int mtk_wdt_set_timeout(struct watchdog_device *wdt_dev,
98 unsigned int timeout)
99{
100 struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
101 void __iomem *wdt_base = mtk_wdt->wdt_base;
102 u32 reg;
103
104 wdt_dev->timeout = timeout;
105
106 /*
	107	 * One unit of the length field is 512 ticks of the 32 KHz
	108	 * watchdog clock, so one second corresponds to 64 units.
109 */
110 reg = WDT_LENGTH_TIMEOUT(timeout << 6) | WDT_LENGTH_KEY;
111 iowrite32(reg, wdt_base + WDT_LENGTH);
112
113 mtk_wdt_ping(wdt_dev);
114
115 return 0;
116}
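
Worked example of the encoding above, taking "32 KHz" as 32768 Hz: seconds * 32768 ticks / 512 ticks-per-unit = seconds * 64, which is exactly the timeout << 6 the driver writes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned timeout_s = 31;	/* WDT_MAX_TIMEOUT */
	uint32_t units = (timeout_s * 32768) / 512;

	/* 31 s -> 1984 units, identical to 31 << 6. */
	printf("%u s -> %u units (== %u << 6)\n",
	       timeout_s, units, timeout_s);
	return 0;
}
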
117
118static int mtk_wdt_stop(struct watchdog_device *wdt_dev)
119{
120 struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
121 void __iomem *wdt_base = mtk_wdt->wdt_base;
122 u32 reg;
123
124 reg = readl(wdt_base + WDT_MODE);
125 reg &= ~WDT_MODE_EN;
126 iowrite32(reg, wdt_base + WDT_MODE);
127
128 return 0;
129}
130
131static int mtk_wdt_start(struct watchdog_device *wdt_dev)
132{
133 u32 reg;
134 struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
135 void __iomem *wdt_base = mtk_wdt->wdt_base;
	136	int ret;
137
138 ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
139 if (ret < 0)
140 return ret;
141
142 reg = ioread32(wdt_base + WDT_MODE);
143 reg &= ~(WDT_MODE_IRQ_EN | WDT_MODE_DUAL_EN);
144 reg |= (WDT_MODE_EN | WDT_MODE_KEY);
145 iowrite32(reg, wdt_base + WDT_MODE);
146
147 return 0;
148}
149
150static const struct watchdog_info mtk_wdt_info = {
151 .identity = DRV_NAME,
152 .options = WDIOF_SETTIMEOUT |
153 WDIOF_KEEPALIVEPING |
154 WDIOF_MAGICCLOSE,
155};
156
157static const struct watchdog_ops mtk_wdt_ops = {
158 .owner = THIS_MODULE,
159 .start = mtk_wdt_start,
160 .stop = mtk_wdt_stop,
161 .ping = mtk_wdt_ping,
162 .set_timeout = mtk_wdt_set_timeout,
163};
164
165static int mtk_wdt_probe(struct platform_device *pdev)
166{
167 struct mtk_wdt_dev *mtk_wdt;
168 struct resource *res;
169 int err;
170
171 mtk_wdt = devm_kzalloc(&pdev->dev, sizeof(*mtk_wdt), GFP_KERNEL);
172 if (!mtk_wdt)
173 return -ENOMEM;
174
175 platform_set_drvdata(pdev, mtk_wdt);
176
177 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
178 mtk_wdt->wdt_base = devm_ioremap_resource(&pdev->dev, res);
179 if (IS_ERR(mtk_wdt->wdt_base))
180 return PTR_ERR(mtk_wdt->wdt_base);
181
182 mtk_wdt->wdt_dev.info = &mtk_wdt_info;
183 mtk_wdt->wdt_dev.ops = &mtk_wdt_ops;
184 mtk_wdt->wdt_dev.timeout = WDT_MAX_TIMEOUT;
185 mtk_wdt->wdt_dev.max_timeout = WDT_MAX_TIMEOUT;
186 mtk_wdt->wdt_dev.min_timeout = WDT_MIN_TIMEOUT;
187 mtk_wdt->wdt_dev.parent = &pdev->dev;
188
189 watchdog_init_timeout(&mtk_wdt->wdt_dev, timeout, &pdev->dev);
190 watchdog_set_nowayout(&mtk_wdt->wdt_dev, nowayout);
191
192 watchdog_set_drvdata(&mtk_wdt->wdt_dev, mtk_wdt);
193
194 mtk_wdt_stop(&mtk_wdt->wdt_dev);
195
196 err = watchdog_register_device(&mtk_wdt->wdt_dev);
197 if (unlikely(err))
198 return err;
199
200 mtk_wdt->restart_handler.notifier_call = mtk_reset_handler;
201 mtk_wdt->restart_handler.priority = 128;
202 err = register_restart_handler(&mtk_wdt->restart_handler);
203 if (err)
204 dev_warn(&pdev->dev,
205 "cannot register restart handler (err=%d)\n", err);
206
207 dev_info(&pdev->dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)\n",
208 mtk_wdt->wdt_dev.timeout, nowayout);
209
210 return 0;
211}
212
213static int mtk_wdt_remove(struct platform_device *pdev)
214{
215 struct mtk_wdt_dev *mtk_wdt = platform_get_drvdata(pdev);
216
217 unregister_restart_handler(&mtk_wdt->restart_handler);
218
219 watchdog_unregister_device(&mtk_wdt->wdt_dev);
220
221 return 0;
222}
223
224static const struct of_device_id mtk_wdt_dt_ids[] = {
225 { .compatible = "mediatek,mt6589-wdt" },
226 { /* sentinel */ }
227};
228MODULE_DEVICE_TABLE(of, mtk_wdt_dt_ids);
229
230static struct platform_driver mtk_wdt_driver = {
231 .probe = mtk_wdt_probe,
232 .remove = mtk_wdt_remove,
233 .driver = {
234 .name = DRV_NAME,
235 .of_match_table = mtk_wdt_dt_ids,
236 },
237};
238
239module_platform_driver(mtk_wdt_driver);
240
241module_param(timeout, uint, 0);
242MODULE_PARM_DESC(timeout, "Watchdog heartbeat in seconds");
243
244module_param(nowayout, bool, 0);
245MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
246 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
247
248MODULE_LICENSE("GPL");
249MODULE_AUTHOR("Matthias Brugger <matthias.bgg@gmail.com>");
250MODULE_DESCRIPTION("Mediatek WatchDog Timer Driver");
251MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 9f2709db61ca..1e6be9e40577 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -189,7 +189,7 @@ static int omap_wdt_set_timeout(struct watchdog_device *wdog,
189} 189}
190 190
191static const struct watchdog_info omap_wdt_info = { 191static const struct watchdog_info omap_wdt_info = {
192 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, 192 .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
193 .identity = "OMAP Watchdog", 193 .identity = "OMAP Watchdog",
194}; 194};
195 195
diff --git a/drivers/watchdog/retu_wdt.c b/drivers/watchdog/retu_wdt.c
index a7a0695971e4..b7c68e275aeb 100644
--- a/drivers/watchdog/retu_wdt.c
+++ b/drivers/watchdog/retu_wdt.c
@@ -94,7 +94,7 @@ static int retu_wdt_set_timeout(struct watchdog_device *wdog,
94} 94}
95 95
96static const struct watchdog_info retu_wdt_info = { 96static const struct watchdog_info retu_wdt_info = {
97 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, 97 .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
98 .identity = "Retu watchdog", 98 .identity = "Retu watchdog",
99}; 99};
100 100
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index 11aad5b7aafe..a6f7e2e29beb 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -45,6 +45,7 @@
45static struct clk *rt288x_wdt_clk; 45static struct clk *rt288x_wdt_clk;
46static unsigned long rt288x_wdt_freq; 46static unsigned long rt288x_wdt_freq;
47static void __iomem *rt288x_wdt_base; 47static void __iomem *rt288x_wdt_base;
48static struct reset_control *rt288x_wdt_reset;
48 49
49static bool nowayout = WATCHDOG_NOWAYOUT; 50static bool nowayout = WATCHDOG_NOWAYOUT;
50module_param(nowayout, bool, 0); 51module_param(nowayout, bool, 0);
@@ -151,16 +152,18 @@ static int rt288x_wdt_probe(struct platform_device *pdev)
151 if (IS_ERR(rt288x_wdt_clk)) 152 if (IS_ERR(rt288x_wdt_clk))
152 return PTR_ERR(rt288x_wdt_clk); 153 return PTR_ERR(rt288x_wdt_clk);
153 154
154 device_reset(&pdev->dev); 155 rt288x_wdt_reset = devm_reset_control_get(&pdev->dev, NULL);
156 if (!IS_ERR(rt288x_wdt_reset))
157 reset_control_deassert(rt288x_wdt_reset);
155 158
156 rt288x_wdt_freq = clk_get_rate(rt288x_wdt_clk) / RALINK_WDT_PRESCALE; 159 rt288x_wdt_freq = clk_get_rate(rt288x_wdt_clk) / RALINK_WDT_PRESCALE;
157 160
158 rt288x_wdt_dev.dev = &pdev->dev; 161 rt288x_wdt_dev.dev = &pdev->dev;
159 rt288x_wdt_dev.bootstatus = rt288x_wdt_bootcause(); 162 rt288x_wdt_dev.bootstatus = rt288x_wdt_bootcause();
160
161 rt288x_wdt_dev.max_timeout = (0xfffful / rt288x_wdt_freq); 163 rt288x_wdt_dev.max_timeout = (0xfffful / rt288x_wdt_freq);
162 rt288x_wdt_dev.timeout = rt288x_wdt_dev.max_timeout;
163 164
165 watchdog_init_timeout(&rt288x_wdt_dev, rt288x_wdt_dev.max_timeout,
166 &pdev->dev);
164 watchdog_set_nowayout(&rt288x_wdt_dev, nowayout); 167 watchdog_set_nowayout(&rt288x_wdt_dev, nowayout);
165 168
166 ret = watchdog_register_device(&rt288x_wdt_dev); 169 ret = watchdog_register_device(&rt288x_wdt_dev);
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index 12c15903d098..2c1db6fa9a27 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -57,7 +57,7 @@ static int twl4030_wdt_set_timeout(struct watchdog_device *wdt,
57} 57}
58 58
59static const struct watchdog_info twl4030_wdt_info = { 59static const struct watchdog_info twl4030_wdt_info = {
60 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, 60 .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
61 .identity = "TWL4030 Watchdog", 61 .identity = "TWL4030 Watchdog",
62}; 62};
63 63
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index 7165704a3e33..5824e25eebbb 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -50,7 +50,7 @@ static int cr_wdt_control; /* WDT control register */
50 50
51enum chips { w83627hf, w83627s, w83697hf, w83697ug, w83637hf, w83627thf, 51enum chips { w83627hf, w83627s, w83697hf, w83697ug, w83637hf, w83627thf,
52 w83687thf, w83627ehf, w83627dhg, w83627uhg, w83667hg, w83627dhg_p, 52 w83687thf, w83627ehf, w83627dhg, w83627uhg, w83667hg, w83627dhg_p,
53 w83667hg_b, nct6775, nct6776, nct6779 }; 53 w83667hg_b, nct6775, nct6776, nct6779, nct6791, nct6792 };
54 54
55static int timeout; /* in seconds */ 55static int timeout; /* in seconds */
56module_param(timeout, int, 0); 56module_param(timeout, int, 0);
@@ -95,6 +95,8 @@ MODULE_PARM_DESC(early_disable, "Disable watchdog at boot time (default=0)");
95#define NCT6775_ID 0xb4 95#define NCT6775_ID 0xb4
96#define NCT6776_ID 0xc3 96#define NCT6776_ID 0xc3
97#define NCT6779_ID 0xc5 97#define NCT6779_ID 0xc5
98#define NCT6791_ID 0xc8
99#define NCT6792_ID 0xc9
98 100
99#define W83627HF_WDT_TIMEOUT 0xf6 101#define W83627HF_WDT_TIMEOUT 0xf6
100#define W83697HF_WDT_TIMEOUT 0xf4 102#define W83697HF_WDT_TIMEOUT 0xf4
@@ -195,6 +197,8 @@ static int w83627hf_init(struct watchdog_device *wdog, enum chips chip)
195 case nct6775: 197 case nct6775:
196 case nct6776: 198 case nct6776:
197 case nct6779: 199 case nct6779:
200 case nct6791:
201 case nct6792:
198 /* 202 /*
199 * These chips have a fixed WDTO# output pin (W83627UHG), 203 * These chips have a fixed WDTO# output pin (W83627UHG),
200 * or support more than one WDTO# output pin. 204 * or support more than one WDTO# output pin.
@@ -395,6 +399,12 @@ static int wdt_find(int addr)
395 case NCT6779_ID: 399 case NCT6779_ID:
396 ret = nct6779; 400 ret = nct6779;
397 break; 401 break;
402 case NCT6791_ID:
403 ret = nct6791;
404 break;
405 case NCT6792_ID:
406 ret = nct6792;
407 break;
398 case 0xff: 408 case 0xff:
399 ret = -ENODEV; 409 ret = -ENODEV;
400 break; 410 break;
@@ -428,6 +438,8 @@ static int __init wdt_init(void)
428 "NCT6775", 438 "NCT6775",
429 "NCT6776", 439 "NCT6776",
430 "NCT6779", 440 "NCT6779",
441 "NCT6791",
442 "NCT6792",
431 }; 443 };
432 444
433 wdt_io = 0x2e; 445 wdt_io = 0x2e;
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 2140398a2a8c..2ccd3592d41f 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -2,7 +2,7 @@ ifeq ($(filter y, $(CONFIG_ARM) $(CONFIG_ARM64)),)
2obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o 2obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
3endif 3endif
4obj-$(CONFIG_X86) += fallback.o 4obj-$(CONFIG_X86) += fallback.o
5obj-y += grant-table.o features.o balloon.o manage.o 5obj-y += grant-table.o features.o balloon.o manage.o preempt.o
6obj-y += events/ 6obj-y += events/
7obj-y += xenbus/ 7obj-y += xenbus/
8 8
diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
new file mode 100644
index 000000000000..a1800c150839
--- /dev/null
+++ b/drivers/xen/preempt.c
@@ -0,0 +1,44 @@
1/*
2 * Preemptible hypercalls
3 *
4 * Copyright (C) 2014 Citrix Systems R&D ltd.
5 *
6 * This source code is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation; either version 2 of the
9 * License, or (at your option) any later version.
10 */
11
12#include <linux/sched.h>
13#include <xen/xen-ops.h>
14
15#ifndef CONFIG_PREEMPT
16
17/*
18 * Some hypercalls issued by the toolstack can take many 10s of
19 * seconds. Allow tasks running hypercalls via the privcmd driver to
20 * be voluntarily preempted even if full kernel preemption is
21 * disabled.
22 *
23 * Such preemptible hypercalls are bracketed by
24 * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
25 * calls.
26 */
27
28DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
29EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
30
31asmlinkage __visible void xen_maybe_preempt_hcall(void)
32{
33 if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
34 && should_resched())) {
35 /*
36 * Clear flag as we may be rescheduled on a different
37 * cpu.
38 */
39 __this_cpu_write(xen_in_preemptible_hcall, false);
40 _cond_resched();
41 __this_cpu_write(xen_in_preemptible_hcall, true);
42 }
43}
44#endif /* CONFIG_PREEMPT */
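
A userspace analogue of the bracketing pattern above (all names hypothetical): a per-thread flag marks the region where a long-running call may be voluntarily preempted, and the flag is cleared across the yield because execution may resume on another CPU:

#include <stdbool.h>
#include <stdio.h>

static _Thread_local bool in_preemptible_call;

/* Stand-in for xen_maybe_preempt_hcall(): yield only inside the
 * begin/end bracket, dropping the flag around the reschedule. */
static void maybe_preempt(void)
{
	if (in_preemptible_call) {
		in_preemptible_call = false;
		/* sched_yield() or equivalent would go here */
		in_preemptible_call = true;
	}
}

int main(void)
{
	in_preemptible_call = true;	/* ..._hcall_begin() */
	maybe_preempt();		/* long call may yield here */
	in_preemptible_call = false;	/* ..._hcall_end() */
	printf("ok\n");
	return 0;
}
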
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 569a13b9e856..59ac71c4a043 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -56,10 +56,12 @@ static long privcmd_ioctl_hypercall(void __user *udata)
56 if (copy_from_user(&hypercall, udata, sizeof(hypercall))) 56 if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
57 return -EFAULT; 57 return -EFAULT;
58 58
59 xen_preemptible_hcall_begin();
59 ret = privcmd_call(hypercall.op, 60 ret = privcmd_call(hypercall.op,
60 hypercall.arg[0], hypercall.arg[1], 61 hypercall.arg[0], hypercall.arg[1],
61 hypercall.arg[2], hypercall.arg[3], 62 hypercall.arg[2], hypercall.arg[3],
62 hypercall.arg[4]); 63 hypercall.arg[4]);
64 xen_preemptible_hcall_end();
63 65
64 return ret; 66 return ret;
65} 67}
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 61653a03a8f5..9faca6a60bb0 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -709,12 +709,11 @@ static int prepare_pending_reqs(struct vscsibk_info *info,
709static int scsiback_do_cmd_fn(struct vscsibk_info *info) 709static int scsiback_do_cmd_fn(struct vscsibk_info *info)
710{ 710{
711 struct vscsiif_back_ring *ring = &info->ring; 711 struct vscsiif_back_ring *ring = &info->ring;
712 struct vscsiif_request *ring_req; 712 struct vscsiif_request ring_req;
713 struct vscsibk_pend *pending_req; 713 struct vscsibk_pend *pending_req;
714 RING_IDX rc, rp; 714 RING_IDX rc, rp;
715 int err, more_to_do; 715 int err, more_to_do;
716 uint32_t result; 716 uint32_t result;
717 uint8_t act;
718 717
719 rc = ring->req_cons; 718 rc = ring->req_cons;
720 rp = ring->sring->req_prod; 719 rp = ring->sring->req_prod;
@@ -735,11 +734,10 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
735 if (!pending_req) 734 if (!pending_req)
736 return 1; 735 return 1;
737 736
738 ring_req = RING_GET_REQUEST(ring, rc); 737 ring_req = *RING_GET_REQUEST(ring, rc);
739 ring->req_cons = ++rc; 738 ring->req_cons = ++rc;
740 739
741 act = ring_req->act; 740 err = prepare_pending_reqs(info, &ring_req, pending_req);
742 err = prepare_pending_reqs(info, ring_req, pending_req);
743 if (err) { 741 if (err) {
744 switch (err) { 742 switch (err) {
745 case -ENODEV: 743 case -ENODEV:
@@ -755,9 +753,9 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
755 return 1; 753 return 1;
756 } 754 }
757 755
758 switch (act) { 756 switch (ring_req.act) {
759 case VSCSIIF_ACT_SCSI_CDB: 757 case VSCSIIF_ACT_SCSI_CDB:
760 if (scsiback_gnttab_data_map(ring_req, pending_req)) { 758 if (scsiback_gnttab_data_map(&ring_req, pending_req)) {
761 scsiback_fast_flush_area(pending_req); 759 scsiback_fast_flush_area(pending_req);
762 scsiback_do_resp_with_sense(NULL, 760 scsiback_do_resp_with_sense(NULL,
763 DRIVER_ERROR << 24, 0, pending_req); 761 DRIVER_ERROR << 24, 0, pending_req);
@@ -768,7 +766,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
768 break; 766 break;
769 case VSCSIIF_ACT_SCSI_ABORT: 767 case VSCSIIF_ACT_SCSI_ABORT:
770 scsiback_device_action(pending_req, TMR_ABORT_TASK, 768 scsiback_device_action(pending_req, TMR_ABORT_TASK,
771 ring_req->ref_rqid); 769 ring_req.ref_rqid);
772 break; 770 break;
773 case VSCSIIF_ACT_SCSI_RESET: 771 case VSCSIIF_ACT_SCSI_RESET:
774 scsiback_device_action(pending_req, TMR_LUN_RESET, 0); 772 scsiback_device_action(pending_req, TMR_LUN_RESET, 0);