aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorDavid Woodhouse <David.Woodhouse@intel.com>2012-11-21 05:38:13 -0500
committerDavid Woodhouse <David.Woodhouse@intel.com>2012-11-21 05:38:13 -0500
commit851462444d421c223965b12b836bef63da61b57f (patch)
tree495baa14e638817941496c36e1443aed7dae0ea0 /drivers
parent5a6ea4af0907f995dc06df21a9c9ef764c7cd3bc (diff)
parent6924d99fcdf1a688538a3cdebd1f135c22eec191 (diff)
Merge branch 'for-3.7' of git://git.infradead.org/users/dedekind/l2-mtd
Conflicts: drivers/mtd/nand/nand_base.c
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile4
-rw-r--r--drivers/acpi/Makefile5
-rw-r--r--drivers/acpi/ec.c30
-rw-r--r--drivers/acpi/glue.c1
-rw-r--r--drivers/acpi/processor_driver.c1
-rw-r--r--drivers/acpi/processor_idle.c3
-rw-r--r--drivers/acpi/thermal.c93
-rw-r--r--drivers/acpi/video.c11
-rw-r--r--drivers/base/Kconfig2
-rw-r--r--drivers/base/dma-coherent.c5
-rw-r--r--drivers/base/dma-contiguous.c5
-rw-r--r--drivers/base/firmware_class.c266
-rw-r--r--drivers/base/power/domain.c5
-rw-r--r--drivers/base/regmap/Kconfig2
-rw-r--r--drivers/bcma/main.c5
-rw-r--r--drivers/block/Kconfig15
-rw-r--r--drivers/block/cciss.c1
-rw-r--r--drivers/block/drbd/drbd_main.c13
-rw-r--r--drivers/block/floppy.c90
-rw-r--r--drivers/block/loop.c17
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c19
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h3
-rw-r--r--drivers/block/osdblk.c3
-rw-r--r--drivers/block/pktcdvd.c52
-rw-r--r--drivers/block/xen-blkback/common.h4
-rw-r--r--drivers/block/xen-blkback/xenbus.c9
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/agp/intel-gtt.c2
-rw-r--r--drivers/char/ds1620.c8
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c36
-rw-r--r--drivers/char/nwflash.c4
-rw-r--r--drivers/char/raw.c2
-rw-r--r--drivers/char/sonypi.c2
-rw-r--r--drivers/char/tpm/tpm.c24
-rw-r--r--drivers/char/tpm/tpm.h9
-rw-r--r--drivers/char/tpm/tpm_ppi.c18
-rw-r--r--drivers/cpufreq/cpufreq_stats.c1
-rw-r--r--drivers/cpufreq/omap-cpufreq.c36
-rw-r--r--drivers/cpufreq/powernow-k8.c11
-rw-r--r--drivers/cpuidle/cpuidle.c2
-rw-r--r--drivers/dma/Kconfig28
-rw-r--r--drivers/dma/Makefile2
-rw-r--r--drivers/dma/amba-pl08x.c2
-rw-r--r--drivers/dma/dw_dmac.c258
-rw-r--r--drivers/dma/dw_dmac_regs.h64
-rw-r--r--drivers/dma/edma.c671
-rw-r--r--drivers/dma/imx-dma.c4
-rw-r--r--drivers/dma/ioat/dma_v2.c3
-rw-r--r--drivers/dma/ioat/pci.c22
-rw-r--r--drivers/dma/mmp_pdma.c875
-rw-r--r--drivers/dma/mmp_tdma.c51
-rw-r--r--drivers/dma/mxs-dma.c14
-rw-r--r--drivers/dma/pl330.c78
-rw-r--r--drivers/dma/sirf-dma.c27
-rw-r--r--drivers/dma/ste_dma40.c14
-rw-r--r--drivers/dma/tegra20-apb-dma.c12
-rw-r--r--drivers/edac/amd64_edac.c11
-rw-r--r--drivers/extcon/extcon-adc-jack.c10
-rw-r--r--drivers/extcon/extcon-class.c142
-rw-r--r--drivers/extcon/extcon-gpio.c1
-rw-r--r--drivers/extcon/extcon-max77693.c46
-rw-r--r--drivers/extcon/extcon-max8997.c6
-rw-r--r--drivers/firewire/core-cdev.c4
-rw-r--r--drivers/firmware/memmap.c4
-rw-r--r--drivers/gpio/gpio-74x164.c2
-rw-r--r--drivers/gpio/gpio-mvebu.c7
-rw-r--r--drivers/gpio/gpio-omap.c35
-rw-r--r--drivers/gpio/gpio-timberdale.c4
-rw-r--r--drivers/gpio/gpiolib.c10
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c4
-rw-r--r--drivers/gpu/drm/drm_info.c4
-rw-r--r--drivers/gpu/drm/drm_platform.c1
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c33
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c2
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c6
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c3
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c13
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h10
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c26
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c17
-rw-r--r--drivers/gpu/drm/i915/intel_display.c79
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c18
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c86
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c2
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c4
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c76
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo_regs.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/core/gpuobj.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/core/mm.c11
-rw-r--r--drivers/gpu/drm/nouveau/core/core/parent.c17
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/mm.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/parent.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/timer.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c43
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/pll.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fan.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c50
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c16
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c14
-rw-r--r--drivers/gpu/drm/nouveau/nv04_tv.c9
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c8
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c5
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c7
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c3
-rw-r--r--drivers/gpu/drm/radeon/ni.c57
-rw-r--r--drivers/gpu/drm/radeon/nid.h1
-rw-r--r--drivers/gpu/drm/radeon/r600.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon.h14
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c28
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c64
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c386
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c228
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c2
-rw-r--r--drivers/gpu/drm/radeon/si.c54
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c12
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c24
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h2
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c12
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c5
-rw-r--r--drivers/hid/hid-apple.c6
-rw-r--r--drivers/hid/hid-core.c6
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-microsoft.c18
-rw-r--r--drivers/hid/hid-multitouch.c5
-rw-r--r--drivers/hv/channel.c24
-rw-r--r--drivers/hwmon/Kconfig10
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/acpi_power_meter.c1
-rw-r--r--drivers/hwmon/adm9240.c1
-rw-r--r--drivers/hwmon/adt7411.c1
-rw-r--r--drivers/hwmon/adt7462.c1
-rw-r--r--drivers/hwmon/adt7475.c1
-rw-r--r--drivers/hwmon/applesmc.c1
-rw-r--r--drivers/hwmon/asus_atk0110.c2
-rw-r--r--drivers/hwmon/coretemp.c7
-rw-r--r--drivers/hwmon/da9052-hwmon.c1
-rw-r--r--drivers/hwmon/emc1403.c1
-rw-r--r--drivers/hwmon/emc6w201.c1
-rw-r--r--drivers/hwmon/exynos4_tmu.c518
-rw-r--r--drivers/hwmon/fam15h_power.c4
-rw-r--r--drivers/hwmon/gpio-fan.c2
-rw-r--r--drivers/hwmon/hih6130.c1
-rw-r--r--drivers/hwmon/i5k_amb.c2
-rw-r--r--drivers/hwmon/ibmaem.c1
-rw-r--r--drivers/hwmon/ibmpex.c1
-rw-r--r--drivers/hwmon/ina2xx.c1
-rw-r--r--drivers/hwmon/k8temp.c1
-rw-r--r--drivers/hwmon/lineage-pem.c1
-rw-r--r--drivers/hwmon/lm92.c1
-rw-r--r--drivers/hwmon/lm93.c1
-rw-r--r--drivers/hwmon/ltc4151.c1
-rw-r--r--drivers/hwmon/ltc4215.c1
-rw-r--r--drivers/hwmon/ltc4245.c1
-rw-r--r--drivers/hwmon/ltc4261.c1
-rw-r--r--drivers/hwmon/max16065.c1
-rw-r--r--drivers/hwmon/max1619.c4
-rw-r--r--drivers/hwmon/max6642.c2
-rw-r--r--drivers/hwmon/pmbus/Kconfig2
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c2
-rw-r--r--drivers/hwmon/s3c-hwmon.c1
-rw-r--r--drivers/hwmon/sht21.c1
-rw-r--r--drivers/hwmon/smm665.c1
-rw-r--r--drivers/hwmon/thmc50.c1
-rw-r--r--drivers/hwmon/tmp102.c1
-rw-r--r--drivers/hwmon/ultra45_env.c1
-rw-r--r--drivers/hwmon/w83791d.c1
-rw-r--r--drivers/hwmon/w83792d.c1
-rw-r--r--drivers/hwmon/w83793.c1
-rw-r--r--drivers/hwmon/w83795.c2
-rw-r--r--drivers/hwmon/w83l786ng.c1
-rw-r--r--drivers/i2c/Makefile1
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c27
-rw-r--r--drivers/i2c/busses/Kconfig28
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-at91.c667
-rw-r--r--drivers/i2c/busses/i2c-davinci.c58
-rw-r--r--drivers/i2c/busses/i2c-i801.c11
-rw-r--r--drivers/i2c/busses/i2c-imx.c6
-rw-r--r--drivers/i2c/busses/i2c-mpc.c18
-rw-r--r--drivers/i2c/busses/i2c-mxs.c313
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c61
-rw-r--r--drivers/i2c/busses/i2c-omap.c474
-rw-r--r--drivers/i2c/busses/i2c-rcar.c709
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c18
-rw-r--r--drivers/i2c/busses/i2c-tegra.c2
-rw-r--r--drivers/i2c/i2c-stub.c (renamed from drivers/i2c/busses/i2c-stub.c)66
-rw-r--r--drivers/iio/Kconfig1
-rw-r--r--drivers/iio/Makefile1
-rw-r--r--drivers/infiniband/core/cma.c3
-rw-r--r--drivers/infiniband/core/netlink.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c85
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c18
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c12
-rw-r--r--drivers/input/evdev.c100
-rw-r--r--drivers/input/input.c114
-rw-r--r--drivers/input/joydev.c89
-rw-r--r--drivers/input/keyboard/Kconfig1
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c3
-rw-r--r--drivers/input/keyboard/samsung-keypad.c11
-rw-r--r--drivers/input/misc/xen-kbdfront.c5
-rw-r--r--drivers/input/mouse/bcm5974.c21
-rw-r--r--drivers/input/mousedev.c225
-rw-r--r--drivers/input/tablet/wacom_sys.c196
-rw-r--r--drivers/input/tablet/wacom_wac.c121
-rw-r--r--drivers/input/tablet/wacom_wac.h5
-rw-r--r--drivers/input/touchscreen/Kconfig2
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c6
-rw-r--r--drivers/input/touchscreen/egalax_ts.c23
-rw-r--r--drivers/input/touchscreen/tsc40.c1
-rw-r--r--drivers/iommu/amd_iommu_init.c39
-rw-r--r--drivers/iommu/tegra-smmu.c2
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c19
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c2
-rw-r--r--drivers/leds/Kconfig26
-rw-r--r--drivers/leds/Makefile3
-rw-r--r--drivers/leds/led-class.c15
-rw-r--r--drivers/leds/led-core.c16
-rw-r--r--drivers/leds/led-triggers.c17
-rw-r--r--drivers/leds/leds-clevo-mail.c10
-rw-r--r--drivers/leds/leds-gpio.c19
-rw-r--r--drivers/leds/leds-lm3530.c16
-rw-r--r--drivers/leds/leds-lm3556.c512
-rw-r--r--drivers/leds/leds-lm355x.c572
-rw-r--r--drivers/leds/leds-lm3642.c462
-rw-r--r--drivers/leds/leds-lp5523.c75
-rw-r--r--drivers/leds/leds-pca9633.c19
-rw-r--r--drivers/leds/leds-wm8350.c29
-rw-r--r--drivers/leds/leds.h2
-rw-r--r--drivers/md/Kconfig8
-rw-r--r--drivers/md/Makefile1
-rw-r--r--drivers/md/bitmap.c17
-rw-r--r--drivers/md/dm-bio-prison.c415
-rw-r--r--drivers/md/dm-bio-prison.h72
-rw-r--r--drivers/md/dm-bufio.c13
-rw-r--r--drivers/md/dm-crypt.c16
-rw-r--r--drivers/md/dm-io.c11
-rw-r--r--drivers/md/dm-mpath.c3
-rw-r--r--drivers/md/dm-raid.c124
-rw-r--r--drivers/md/dm-thin.c521
-rw-r--r--drivers/md/dm-verity.c2
-rw-r--r--drivers/md/dm.c178
-rw-r--r--drivers/md/faulty.c5
-rw-r--r--drivers/md/linear.c25
-rw-r--r--drivers/md/md.c189
-rw-r--r--drivers/md/md.h9
-rw-r--r--drivers/md/multipath.c3
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c4
-rw-r--r--drivers/md/raid0.c20
-rw-r--r--drivers/md/raid1.c39
-rw-r--r--drivers/md/raid10.c98
-rw-r--r--drivers/md/raid5.c219
-rw-r--r--drivers/md/raid5.h1
-rw-r--r--drivers/media/Kconfig18
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c20
-rw-r--r--drivers/media/dvb-core/dvb_frontend.h4
-rw-r--r--drivers/media/dvb-frontends/a8293.c2
-rw-r--r--drivers/media/dvb-frontends/af9013.c6
-rw-r--r--drivers/media/dvb-frontends/af9033.c16
-rw-r--r--drivers/media/dvb-frontends/bcm3510.c2
-rw-r--r--drivers/media/dvb-frontends/cx24110.c6
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_core.c3
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c2
-rw-r--r--drivers/media/dvb-frontends/ds3000.c12
-rw-r--r--drivers/media/dvb-frontends/dvb_dummy_fe.c21
-rw-r--r--drivers/media/dvb-frontends/isl6405.c2
-rw-r--r--drivers/media/dvb-frontends/isl6421.c2
-rw-r--r--drivers/media/dvb-frontends/itd1000.c2
-rw-r--r--drivers/media/dvb-frontends/lg2160.c8
-rw-r--r--drivers/media/dvb-frontends/lnbp21.c4
-rw-r--r--drivers/media/dvb-frontends/lnbp22.c2
-rw-r--r--drivers/media/dvb-frontends/s5h1432.c8
-rw-r--r--drivers/media/dvb-frontends/s921.c9
-rw-r--r--drivers/media/dvb-frontends/si21xx.c4
-rw-r--r--drivers/media/dvb-frontends/sp8870.c6
-rw-r--r--drivers/media/dvb-frontends/sp887x.c6
-rw-r--r--drivers/media/dvb-frontends/stb6100.c8
-rw-r--r--drivers/media/dvb-frontends/stv0299.c6
-rw-r--r--drivers/media/dvb-frontends/stv0900_core.c4
-rw-r--r--drivers/media/dvb-frontends/tda665x.c8
-rw-r--r--drivers/media/dvb-frontends/tda8083.c4
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c2
-rw-r--r--drivers/media/i2c/m5mols/m5mols.h10
-rw-r--r--drivers/media/i2c/m5mols/m5mols_capture.c3
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c47
-rw-r--r--drivers/media/i2c/m5mols/m5mols_reg.h1
-rw-r--r--drivers/media/i2c/mt9p031.c19
-rw-r--r--drivers/media/i2c/mt9t001.c22
-rw-r--r--drivers/media/i2c/mt9v032.c54
-rw-r--r--drivers/media/i2c/soc_camera/ov2640.c5
-rw-r--r--drivers/media/i2c/ths7303.c106
-rw-r--r--drivers/media/i2c/tvp514x.c77
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c30
-rw-r--r--drivers/media/pci/bt8xx/bttvp.h1
-rw-r--r--drivers/media/pci/bt8xx/dst_ca.c2
-rw-r--r--drivers/media/pci/cx23885/altera-ci.c4
-rw-r--r--drivers/media/pci/cx23885/cimax2.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c3
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c3
-rw-r--r--drivers/media/pci/cx25821/cx25821-video-upstream-ch2.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821-video-upstream.c2
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c6
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c2
-rw-r--r--drivers/media/pci/cx88/cx88-mpeg.c2
-rw-r--r--drivers/media/pci/cx88/cx88-tvaudio.c4
-rw-r--r--drivers/media/pci/cx88/cx88-video.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c2
-rw-r--r--drivers/media/platform/Kconfig4
-rw-r--r--drivers/media/platform/davinci/vpbe.c136
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c80
-rw-r--r--drivers/media/platform/davinci/vpbe_venc.c25
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c17
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c370
-rw-r--r--drivers/media/platform/davinci/vpif_capture.h16
-rw-r--r--drivers/media/platform/davinci/vpif_display.c275
-rw-r--r--drivers/media/platform/davinci/vpif_display.h18
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-regs.c4
-rw-r--r--drivers/media/platform/fsl-viu.c2
-rw-r--r--drivers/media/platform/mem2mem_testdev.c14
-rw-r--r--drivers/media/platform/omap/omap_vout.c75
-rw-r--r--drivers/media/platform/omap3isp/ispreg.h6
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-capture.c135
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-core.c19
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-core.h28
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-m2m.c25
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-reg.c23
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-reg.h3
-rw-r--r--drivers/media/platform/s5p-fimc/mipi-csis.c75
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c2
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c6
-rw-r--r--drivers/media/platform/s5p-mfc/Makefile7
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc-v6.h408
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc.h41
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c294
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c111
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd.h17
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c166
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.h20
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c156
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.h20
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h191
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c202
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h1
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c258
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.h1
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c236
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.h1
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_intr.c11
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr.c1418
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr.h137
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c1794
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.h (renamed from drivers/media/platform/s5p-mfc/s5p_mfc_shm.h)41
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c1956
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.h50
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_pm.c3
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_shm.c47
-rw-r--r--drivers/media/platform/soc_camera/mx2_camera.c7
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c40
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c2
-rw-r--r--drivers/media/radio/si4713-i2c.c12
-rw-r--r--drivers/media/rc/ene_ir.c5
-rw-r--r--drivers/media/rc/iguanair.c147
-rw-r--r--drivers/media/rc/ir-lirc-codec.c4
-rw-r--r--drivers/media/rc/keymaps/rc-msi-digivox-ii.c36
-rw-r--r--drivers/media/rc/nuvoton-cir.c3
-rw-r--r--drivers/media/rc/redrat3.c3
-rw-r--r--drivers/media/rc/winbond-cir.c2
-rw-r--r--drivers/media/tuners/mt2063.c44
-rw-r--r--drivers/media/tuners/mt2063.h4
-rw-r--r--drivers/media/tuners/tda18271-common.c104
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c2
-rw-r--r--drivers/media/usb/dvb-usb/a800.c2
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-core.c3
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-common.c2
-rw-r--r--drivers/media/usb/dvb-usb/digitv.c2
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u.c2
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c2
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c1
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c13
-rw-r--r--drivers/media/usb/stk1160/stk1160-core.c15
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c7
-rw-r--r--drivers/media/usb/stk1160/stk1160.h3
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c10
-rw-r--r--drivers/media/usb/uvc/uvc_video.c4
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c74
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c8
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c79
-rw-r--r--drivers/misc/Kconfig10
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/mmc/core/core.c240
-rw-r--r--drivers/mmc/core/debugfs.c2
-rw-r--r--drivers/mmc/core/mmc.c57
-rw-r--r--drivers/mmc/core/mmc_ops.c84
-rw-r--r--drivers/mmc/core/sdio_bus.c7
-rw-r--r--drivers/mmc/core/slot-gpio.c8
-rw-r--r--drivers/mmc/host/Kconfig9
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/atmel-mci-regs.h7
-rw-r--r--drivers/mmc/host/atmel-mci.c113
-rw-r--r--drivers/mmc/host/bfin_sdh.c210
-rw-r--r--drivers/mmc/host/davinci_mmc.c271
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c253
-rw-r--r--drivers/mmc/host/dw_mmc-pci.c15
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c62
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.h20
-rw-r--r--drivers/mmc/host/dw_mmc.c326
-rw-r--r--drivers/mmc/host/dw_mmc.h24
-rw-r--r--drivers/mmc/host/mmc_spi.c15
-rw-r--r--drivers/mmc/host/mmci.c13
-rw-r--r--drivers/mmc/host/mxcmmc.c76
-rw-r--r--drivers/mmc/host/mxs-mmc.c2
-rw-r--r--drivers/mmc/host/omap.c37
-rw-r--r--drivers/mmc/host/omap_hsmmc.c125
-rw-r--r--drivers/mmc/host/pxamci.c52
-rw-r--r--drivers/mmc/host/sdhci-dove.c8
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c36
-rw-r--r--drivers/mmc/host/sdhci-pci.c19
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c3
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c6
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c35
-rw-r--r--drivers/mmc/host/sdhci-s3c.c216
-rw-r--r--drivers/mmc/host/sdhci-spear.c67
-rw-r--r--drivers/mmc/host/sdhci-tegra.c8
-rw-r--r--drivers/mmc/host/sdhci.c205
-rw-r--r--drivers/mmc/host/sh_mmcif.c8
-rw-r--r--drivers/mmc/host/via-sdmmc.c16
-rw-r--r--drivers/mmc/host/vub300.c4
-rw-r--r--drivers/mtd/devices/slram.c2
-rw-r--r--drivers/mtd/nand/nand_base.c1
-rw-r--r--drivers/mtd/onenand/onenand_base.c2
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/bonding/bond_sysfs.c4
-rw-r--r--drivers/net/can/flexcan.c19
-rw-r--r--drivers/net/can/sja1000/peak_pci.c13
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c2
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c158
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c2
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c19
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h20
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c93
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c15
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c12
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c7
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c27
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h2
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c29
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h6
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c7
-rw-r--r--drivers/net/ethernet/jme.c4
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c18
-rw-r--r--drivers/net/ethernet/marvell/skge.c13
-rw-r--r--drivers/net/ethernet/marvell/sky2.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c6
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c4
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c1
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c1
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/Kconfig3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c116
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c3
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c1
-rw-r--r--drivers/net/ethernet/sfc/ptp.c9
-rw-r--r--drivers/net/ethernet/sis/sis900.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c20
-rw-r--r--drivers/net/ethernet/sun/niu.c1
-rw-r--r--drivers/net/ethernet/sun/sungem.c3
-rw-r--r--drivers/net/ethernet/ti/Kconfig2
-rw-r--r--drivers/net/ethernet/tile/tilegx.c35
-rw-r--r--drivers/net/irda/irtty-sir.c4
-rw-r--r--drivers/net/irda/mcs7780.c4
-rw-r--r--drivers/net/irda/pxaficp_ir.c4
-rw-r--r--drivers/net/irda/sa1100_ir.c4
-rw-r--r--drivers/net/irda/sh_irda.c4
-rw-r--r--drivers/net/irda/sh_sir.c5
-rw-r--r--drivers/net/phy/Kconfig5
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/at803x.c176
-rw-r--r--drivers/net/phy/mdio-bitbang.c1
-rw-r--r--drivers/net/phy/mdio_bus.c1
-rw-r--r--drivers/net/usb/cdc_eem.c4
-rw-r--r--drivers/net/usb/cdc_ether.c41
-rw-r--r--drivers/net/usb/ipheth.c5
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/mcs7830.c30
-rw-r--r--drivers/net/usb/qmi_wwan.c55
-rw-r--r--drivers/net/usb/usbnet.c20
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c65
-rw-r--r--drivers/net/vxlan.c155
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h164
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c63
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h1
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c29
-rw-r--r--drivers/net/wireless/b43/main.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c70
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c39
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c34
-rw-r--r--drivers/net/wireless/mwifiex/join.c6
-rw-r--r--drivers/net/wireless/mwifiex/main.h2
-rw-r--r--drivers/net/wireless/mwifiex/scan.c51
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c4
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c31
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c1
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c2
-rw-r--r--drivers/net/xen-netback/netback.c40
-rw-r--r--drivers/of/address.c24
-rw-r--r--drivers/of/irq.c8
-rw-r--r--drivers/of/platform.c2
-rw-r--r--drivers/pcmcia/pxa2xx_sharpsl.c2
-rw-r--r--drivers/pinctrl/core.c4
-rw-r--r--drivers/pinctrl/pinconf.c4
-rw-r--r--drivers/pinctrl/pinctrl-bcm2835.c5
-rw-r--r--drivers/pinctrl/pinctrl-nomadik-db8500.c289
-rw-r--r--drivers/pinctrl/pinctrl-nomadik-db8540.c344
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c125
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.h78
-rw-r--r--drivers/pinctrl/pinctrl-samsung.c10
-rw-r--r--drivers/pinctrl/pinctrl-sirf.c35
-rw-r--r--drivers/pinctrl/pinctrl-tegra.c2
-rw-r--r--drivers/pinctrl/pinctrl-tegra30.c24
-rw-r--r--drivers/pinctrl/pinctrl-xway.c2
-rw-r--r--drivers/platform/x86/acerhdf.c5
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c2
-rw-r--r--drivers/power/avs/smartreflex.c8
-rw-r--r--drivers/power/power_supply_core.c2
-rw-r--r--drivers/pwm/Kconfig29
-rw-r--r--drivers/pwm/Makefile3
-rw-r--r--drivers/pwm/core.c82
-rw-r--r--drivers/pwm/pwm-ab8500.c (renamed from drivers/misc/ab8500-pwm.c)116
-rw-r--r--drivers/pwm/pwm-bfin.c3
-rw-r--r--drivers/pwm/pwm-imx.c278
-rw-r--r--drivers/pwm/pwm-jz4740.c221
-rw-r--r--drivers/pwm/pwm-puv3.c161
-rw-r--r--drivers/pwm/pwm-pxa.c3
-rw-r--r--drivers/pwm/pwm-samsung.c3
-rw-r--r--drivers/pwm/pwm-tiecap.c24
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c75
-rw-r--r--drivers/rapidio/rio-scan.c40
-rw-r--r--drivers/rapidio/rio.c75
-rw-r--r--drivers/rtc/Kconfig3
-rw-r--r--drivers/rtc/rtc-imxdi.c2
-rw-r--r--drivers/s390/block/dcssblk.c52
-rw-r--r--drivers/s390/cio/css.c7
-rw-r--r--drivers/s390/cio/idset.c26
-rw-r--r--drivers/s390/cio/idset.h3
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c1
-rw-r--r--drivers/s390/net/qeth_l2_main.c11
-rw-r--r--drivers/s390/net/qeth_l3_main.c11
-rw-r--r--drivers/s390/net/smsgiucv.c2
-rw-r--r--drivers/scsi/arm/fas216.c2
-rw-r--r--drivers/scsi/arm/oak.c1
-rw-r--r--drivers/scsi/bfa/bfa_core.c85
-rw-r--r--drivers/scsi/bfa/bfa_defs.h61
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h119
-rw-r--r--drivers/scsi/bfa/bfa_fc.h5
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.c4
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c123
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h13
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c64
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h23
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c155
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c288
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c494
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h63
-rw-r--r--drivers/scsi/bfa/bfa_ioc_ct.c236
-rw-r--r--drivers/scsi/bfa/bfa_modules.h1
-rw-r--r--drivers/scsi/bfa/bfa_port.c32
-rw-r--r--drivers/scsi/bfa/bfa_port.h3
-rw-r--r--drivers/scsi/bfa/bfa_svc.c732
-rw-r--r--drivers/scsi/bfa/bfa_svc.h30
-rw-r--r--drivers/scsi/bfa/bfad.c6
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c375
-rw-r--r--drivers/scsi/bfa/bfad_bsg.h63
-rw-r--r--drivers/scsi/bfa/bfad_drv.h2
-rw-r--r--drivers/scsi/bfa/bfi.h72
-rw-r--r--drivers/scsi/bfa/bfi_ms.h14
-rw-r--r--drivers/scsi/bfa/bfi_reg.h3
-rw-r--r--drivers/scsi/fcoe/fcoe.c14
-rw-r--r--drivers/scsi/fcoe/fcoe.h2
-rw-r--r--drivers/scsi/libfc/fc_fcp.c3
-rw-r--r--drivers/scsi/lpfc/lpfc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h20
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c256
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c39
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c96
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h8
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c13
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c27
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h2
-rw-r--r--drivers/scsi/mvumi.c1093
-rw-r--r--drivers/scsi/mvumi.h235
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c27
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h1
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c92
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h2
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/scsi/scsi_error.c8
-rw-r--r--drivers/scsi/storvsc_drv.c5
-rw-r--r--drivers/scsi/virtio_scsi.c39
-rw-r--r--drivers/sh/intc/access.c45
-rw-r--r--drivers/sh/intc/chip.c4
-rw-r--r--drivers/spi/Kconfig1
-rw-r--r--drivers/spi/spi-davinci.c292
-rw-r--r--drivers/spi/spi-mxs.c3
-rw-r--r--drivers/spi/spi-pl022.c3
-rw-r--r--drivers/spi/spi-rspi.c56
-rw-r--r--drivers/spi/spi-s3c64xx.c6
-rw-r--r--drivers/staging/android/binder.c30
-rw-r--r--drivers/staging/comedi/drivers/8255_pci.c2
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200.c2
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc236.c5
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc263.c2
-rw-r--r--drivers/staging/comedi/drivers/das08.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c2
-rw-r--r--drivers/staging/dgrp/dgrp_mon_ops.c1
-rw-r--r--drivers/staging/dgrp/dgrp_specproc.c7
-rw-r--r--drivers/staging/dgrp/dgrp_tty.c10
-rw-r--r--drivers/staging/iio/accel/adis16201_core.c24
-rw-r--r--drivers/staging/iio/accel/adis16203_core.c20
-rw-r--r--drivers/staging/iio/accel/adis16204_core.c22
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c18
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c19
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c23
-rw-r--r--drivers/staging/iio/gyro/adis16260_core.c29
-rw-r--r--drivers/staging/iio/imu/adis16400.h2
-rw-r--r--drivers/staging/iio/imu/adis16400_core.c70
-rw-r--r--drivers/staging/ipack/bridges/tpci200.c1
-rw-r--r--drivers/staging/omap-thermal/omap-thermal-common.c5
-rw-r--r--drivers/staging/omapdrm/omap_drv.c5
-rw-r--r--drivers/staging/omapdrm/omap_gem.c4
-rw-r--r--drivers/staging/ramster/Kconfig1
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430.c37
-rw-r--r--drivers/staging/tidspbridge/hw/hw_mmu.c115
-rw-r--r--drivers/staging/tidspbridge/hw/hw_mmu.h31
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h4
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/host_os.h4
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv.c8
-rw-r--r--drivers/staging/tidspbridge/rmgr/node.c21
-rw-r--r--drivers/staging/zram/zram_drv.c12
-rw-r--r--drivers/target/iscsi/iscsi_target.c200
-rw-r--r--drivers/target/iscsi/iscsi_target.h5
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c42
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h16
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c94
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.h4
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c9
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c21
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c31
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c71
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.h7
-rw-r--r--drivers/target/iscsi/iscsi_target_seq_pdu_list.c61
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c31
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c12
-rw-r--r--drivers/target/iscsi/iscsi_target_tq.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_tq.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c38
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h9
-rw-r--r--drivers/target/loopback/tcm_loop.c74
-rw-r--r--drivers/target/sbp/sbp_target.c27
-rw-r--r--drivers/target/target_core_alua.c2
-rw-r--r--drivers/target/target_core_configfs.c21
-rw-r--r--drivers/target/target_core_device.c34
-rw-r--r--drivers/target/target_core_fabric_configfs.c1
-rw-r--r--drivers/target/target_core_fabric_lib.c8
-rw-r--r--drivers/target/target_core_file.c43
-rw-r--r--drivers/target/target_core_file.h1
-rw-r--r--drivers/target/target_core_iblock.c26
-rw-r--r--drivers/target/target_core_pr.c14
-rw-r--r--drivers/target/target_core_pscsi.c2
-rw-r--r--drivers/target/target_core_sbc.c41
-rw-r--r--drivers/target/target_core_spc.c48
-rw-r--r--drivers/target/target_core_stat.c1
-rw-r--r--drivers/target/target_core_tmr.c6
-rw-r--r--drivers/target/target_core_tpg.c2
-rw-r--r--drivers/target/target_core_transport.c269
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c1
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c12
-rw-r--r--drivers/target/tcm_fc/tfc_io.c4
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c1
-rw-r--r--drivers/thermal/Kconfig28
-rw-r--r--drivers/thermal/Makefile5
-rw-r--r--drivers/thermal/cpu_cooling.c449
-rw-r--r--drivers/thermal/exynos_thermal.c997
-rw-r--r--drivers/thermal/rcar_thermal.c260
-rw-r--r--drivers/thermal/spear_thermal.c2
-rw-r--r--drivers/thermal/thermal_sys.c321
-rw-r--r--drivers/tty/hvc/hvc_xen.c5
-rw-r--r--drivers/tty/serial/8250/8250_hp300.c20
-rw-r--r--drivers/tty/serial/Kconfig4
-rw-r--r--drivers/tty/serial/kgdboc.c3
-rw-r--r--drivers/tty/serial/omap-serial.c12
-rw-r--r--drivers/tty/serial/sccnxp.c1
-rw-r--r--drivers/tty/serial/sh-sci.c3
-rw-r--r--drivers/tty/sysrq.c1
-rw-r--r--drivers/tty/vt/vt.c13
-rw-r--r--drivers/usb/class/cdc-acm.c25
-rw-r--r--drivers/usb/core/devio.c1
-rw-r--r--drivers/usb/core/driver.c4
-rw-r--r--drivers/usb/core/hub.c43
-rw-r--r--drivers/usb/dwc3/core.c4
-rw-r--r--drivers/usb/dwc3/gadget.c2
-rw-r--r--drivers/usb/gadget/Kconfig1
-rw-r--r--drivers/usb/gadget/lpc32xx_udc.c4
-rw-r--r--drivers/usb/gadget/net2272.c4
-rw-r--r--drivers/usb/gadget/tcm_usb_gadget.c15
-rw-r--r--drivers/usb/host/ehci-fsl.c2
-rw-r--r--drivers/usb/host/ehci-orion.c2
-rw-r--r--drivers/usb/host/ehci-vt8500.c10
-rw-r--r--drivers/usb/host/pci-quirks.c9
-rw-r--r--drivers/usb/host/uhci-platform.c9
-rw-r--r--drivers/usb/host/xhci-dbg.c2
-rw-r--r--drivers/usb/host/xhci-hub.c9
-rw-r--r--drivers/usb/host/xhci-ring.c11
-rw-r--r--drivers/usb/host/xhci.c11
-rw-r--r--drivers/usb/misc/ezusb.c1
-rw-r--r--drivers/usb/musb/am35x.c6
-rw-r--r--drivers/usb/musb/musb_dsps.c8
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c1
-rw-r--r--drivers/usb/renesas_usbhs/mod.c6
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c5
-rw-r--r--drivers/usb/renesas_usbhs/pipe.h2
-rw-r--r--drivers/usb/serial/ark3116.c26
-rw-r--r--drivers/usb/serial/belkin_sa.c31
-rw-r--r--drivers/usb/serial/ch341.c23
-rw-r--r--drivers/usb/serial/cp210x.c40
-rw-r--r--drivers/usb/serial/cyberjack.c49
-rw-r--r--drivers/usb/serial/cypress_m8.c75
-rw-r--r--drivers/usb/serial/digi_acceleport.c117
-rw-r--r--drivers/usb/serial/f81232.c43
-rw-r--r--drivers/usb/serial/garmin_gps.c24
-rw-r--r--drivers/usb/serial/io_edgeport.c54
-rw-r--r--drivers/usb/serial/io_tables.h8
-rw-r--r--drivers/usb/serial/io_ti.c91
-rw-r--r--drivers/usb/serial/ipw.c7
-rw-r--r--drivers/usb/serial/iuu_phoenix.c76
-rw-r--r--drivers/usb/serial/keyspan.c181
-rw-r--r--drivers/usb/serial/keyspan.h8
-rw-r--r--drivers/usb/serial/keyspan_pda.c30
-rw-r--r--drivers/usb/serial/kl5kusb105.c68
-rw-r--r--drivers/usb/serial/kobil_sct.c23
-rw-r--r--drivers/usb/serial/mct_u232.c59
-rw-r--r--drivers/usb/serial/metro-usb.c65
-rw-r--r--drivers/usb/serial/mos7720.c62
-rw-r--r--drivers/usb/serial/mos7840.c495
-rw-r--r--drivers/usb/serial/omninet.c36
-rw-r--r--drivers/usb/serial/opticon.c11
-rw-r--r--drivers/usb/serial/option.c107
-rw-r--r--drivers/usb/serial/oti6858.c68
-rw-r--r--drivers/usb/serial/pl2303.c90
-rw-r--r--drivers/usb/serial/qcserial.c33
-rw-r--r--drivers/usb/serial/quatech2.c135
-rw-r--r--drivers/usb/serial/sierra.c133
-rw-r--r--drivers/usb/serial/spcp8x5.c46
-rw-r--r--drivers/usb/serial/ssu100.c34
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c88
-rw-r--r--drivers/usb/serial/usb-wwan.h2
-rw-r--r--drivers/usb/serial/usb_wwan.c124
-rw-r--r--drivers/usb/serial/whiteheat.c60
-rw-r--r--drivers/usb/storage/unusual_devs.h6
-rw-r--r--drivers/vfio/pci/vfio_pci.c7
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c18
-rw-r--r--drivers/vhost/net.c3
-rw-r--r--drivers/vhost/tcm_vhost.c81
-rw-r--r--drivers/vhost/tcm_vhost.h8
-rw-r--r--drivers/video/Kconfig15
-rw-r--r--drivers/video/Makefile2
-rw-r--r--drivers/video/amifb.c2
-rw-r--r--drivers/video/arcfb.c1
-rw-r--r--drivers/video/atmel_lcdfb.c5
-rw-r--r--drivers/video/backlight/Kconfig3
-rw-r--r--drivers/video/backlight/lm3639_bl.c4
-rw-r--r--drivers/video/backlight/pwm_bl.c7
-rw-r--r--drivers/video/bf537-lq035.c12
-rw-r--r--drivers/video/bf54x-lq043fb.c9
-rw-r--r--drivers/video/bfin-lq035q1-fb.c13
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c1
-rw-r--r--drivers/video/bw2.c4
-rw-r--r--drivers/video/cg3.c3
-rw-r--r--drivers/video/cobalt_lcdfb.c5
-rw-r--r--drivers/video/console/font_mini_4x6.c2
-rw-r--r--drivers/video/console/font_sun8x16.c2
-rw-r--r--drivers/video/cyber2000fb.c4
-rw-r--r--drivers/video/da8xx-fb.c283
-rw-r--r--drivers/video/ep93xx-fb.c17
-rw-r--r--drivers/video/exynos/exynos_dp_core.c322
-rw-r--r--drivers/video/exynos/exynos_dp_core.h6
-rw-r--r--drivers/video/exynos/exynos_dp_reg.c58
-rw-r--r--drivers/video/exynos/exynos_dp_reg.h3
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi.c9
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi_common.c8
-rw-r--r--drivers/video/fsl-diu-fb.c10
-rw-r--r--drivers/video/gbefb.c17
-rw-r--r--drivers/video/hpfb.c28
-rw-r--r--drivers/video/imxfb.c1
-rw-r--r--drivers/video/jz4740_fb.c44
-rw-r--r--drivers/video/mb862xx/mb862xxfbdrv.c10
-rw-r--r--drivers/video/mbx/mbxfb.c25
-rw-r--r--drivers/video/msm/mddi.c3
-rw-r--r--drivers/video/msm/mddi_client_nt35399.c6
-rw-r--r--drivers/video/msm/mdp.c1
-rw-r--r--drivers/video/msm/mdp_hw.h1
-rw-r--r--drivers/video/mx3fb.c3
-rw-r--r--drivers/video/nuc900fb.c2
-rw-r--r--drivers/video/omap/hwa742.c1
-rw-r--r--drivers/video/omap/lcd_palmte.c1
-rw-r--r--drivers/video/omap/omapfb_main.c9
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c14
-rw-r--r--drivers/video/omap2/displays/panel-generic-dpi.c76
-rw-r--r--drivers/video/omap2/displays/panel-lgphilips-lb035q02.c3
-rw-r--r--drivers/video/omap2/displays/panel-n8x0.c31
-rw-r--r--drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c3
-rw-r--r--drivers/video/omap2/displays/panel-picodlp.c4
-rw-r--r--drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c3
-rw-r--r--drivers/video/omap2/displays/panel-taal.c239
-rw-r--r--drivers/video/omap2/displays/panel-tfp410.c20
-rw-r--r--drivers/video/omap2/displays/panel-tpo-td043mtea1.c7
-rw-r--r--drivers/video/omap2/dss/Kconfig2
-rw-r--r--drivers/video/omap2/dss/Makefile4
-rw-r--r--drivers/video/omap2/dss/apply.c330
-rw-r--r--drivers/video/omap2/dss/core.c91
-rw-r--r--drivers/video/omap2/dss/dispc.c1018
-rw-r--r--drivers/video/omap2/dss/dispc.h37
-rw-r--r--drivers/video/omap2/dss/display.c108
-rw-r--r--drivers/video/omap2/dss/dpi.c181
-rw-r--r--drivers/video/omap2/dss/dsi.c675
-rw-r--r--drivers/video/omap2/dss/dss.c257
-rw-r--r--drivers/video/omap2/dss/dss.h79
-rw-r--r--drivers/video/omap2/dss/dss_features.c278
-rw-r--r--drivers/video/omap2/dss/dss_features.h7
-rw-r--r--drivers/video/omap2/dss/hdmi.c247
-rw-r--r--drivers/video/omap2/dss/hdmi_panel.c31
-rw-r--r--drivers/video/omap2/dss/manager-sysfs.c512
-rw-r--r--drivers/video/omap2/dss/manager.c473
-rw-r--r--drivers/video/omap2/dss/output.c148
-rw-r--r--drivers/video/omap2/dss/overlay-sysfs.c456
-rw-r--r--drivers/video/omap2/dss/overlay.c492
-rw-r--r--drivers/video/omap2/dss/rfbi.c222
-rw-r--r--drivers/video/omap2/dss/sdi.c126
-rw-r--r--drivers/video/omap2/dss/venc.c337
-rw-r--r--drivers/video/omap2/dss/venc_panel.c251
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c7
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c32
-rw-r--r--drivers/video/omap2/omapfb/omapfb.h5
-rw-r--r--drivers/video/omap2/vram.c56
-rw-r--r--drivers/video/pnx4008/Makefile7
-rw-r--r--drivers/video/pnx4008/dum.h211
-rw-r--r--drivers/video/pnx4008/fbcommon.h43
-rw-r--r--drivers/video/pnx4008/pnxrgbfb.c198
-rw-r--r--drivers/video/pnx4008/sdum.c861
-rw-r--r--drivers/video/pnx4008/sdum.h136
-rw-r--r--drivers/video/ps3fb.c7
-rw-r--r--drivers/video/s3c-fb.c54
-rw-r--r--drivers/video/s3c2410fb.c34
-rw-r--r--drivers/video/savage/savagefb_driver.c4
-rw-r--r--drivers/video/sis/initextlfb.c2
-rw-r--r--drivers/video/sunxvr1000.c4
-rw-r--r--drivers/video/sunxvr2500.c4
-rw-r--r--drivers/video/sunxvr500.c8
-rw-r--r--drivers/video/udlfb.c2
-rw-r--r--drivers/video/uvesafb.c2
-rw-r--r--drivers/video/vermilion/vermilion.c3
-rw-r--r--drivers/video/via/via_clock.c19
-rw-r--r--drivers/video/xen-fbfront.c5
-rw-r--r--drivers/xen/Kconfig2
-rw-r--r--drivers/xen/balloon.c3
-rw-r--r--drivers/xen/dbgp.c2
-rw-r--r--drivers/xen/events.c4
-rw-r--r--drivers/xen/gntdev.c36
-rw-r--r--drivers/xen/grant-table.c8
-rw-r--r--drivers/xen/sys-hypervisor.c4
-rw-r--r--drivers/xen/xen-pciback/vpci.c14
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c2
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c23
944 files changed, 34674 insertions, 17239 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 03da5b663aef..a16a8d001ae0 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -17,6 +17,10 @@ obj-$(CONFIG_PARISC) += parisc/
17obj-$(CONFIG_RAPIDIO) += rapidio/ 17obj-$(CONFIG_RAPIDIO) += rapidio/
18obj-y += video/ 18obj-y += video/
19obj-y += idle/ 19obj-y += idle/
20
21# IPMI must come before ACPI in order to provide IPMI opregion support
22obj-$(CONFIG_IPMI_HANDLER) += char/ipmi/
23
20obj-$(CONFIG_ACPI) += acpi/ 24obj-$(CONFIG_ACPI) += acpi/
21obj-$(CONFIG_SFI) += sfi/ 25obj-$(CONFIG_SFI) += sfi/
22# PnP must come after ACPI since it will eventually need to check if acpi 26# PnP must come after ACPI since it will eventually need to check if acpi
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 47199e2a9130..82422fe90f81 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -47,6 +47,10 @@ acpi-y += video_detect.o
47endif 47endif
48 48
49# These are (potentially) separate modules 49# These are (potentially) separate modules
50
51# IPMI may be used by other drivers, so it has to initialise before them
52obj-$(CONFIG_ACPI_IPMI) += acpi_ipmi.o
53
50obj-$(CONFIG_ACPI_AC) += ac.o 54obj-$(CONFIG_ACPI_AC) += ac.o
51obj-$(CONFIG_ACPI_BUTTON) += button.o 55obj-$(CONFIG_ACPI_BUTTON) += button.o
52obj-$(CONFIG_ACPI_FAN) += fan.o 56obj-$(CONFIG_ACPI_FAN) += fan.o
@@ -70,6 +74,5 @@ processor-y += processor_idle.o processor_thermal.o
70processor-$(CONFIG_CPU_FREQ) += processor_perflib.o 74processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
71 75
72obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o 76obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
73obj-$(CONFIG_ACPI_IPMI) += acpi_ipmi.o
74 77
75obj-$(CONFIG_ACPI_APEI) += apei/ 78obj-$(CONFIG_ACPI_APEI) += apei/
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 7edaccce6640..a51df9681319 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -71,9 +71,6 @@ enum ec_command {
71#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ 71#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
72#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */ 72#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
73 73
74#define ACPI_EC_STORM_THRESHOLD 8 /* number of false interrupts
75 per one transaction */
76
77enum { 74enum {
78 EC_FLAGS_QUERY_PENDING, /* Query is pending */ 75 EC_FLAGS_QUERY_PENDING, /* Query is pending */
79 EC_FLAGS_GPE_STORM, /* GPE storm detected */ 76 EC_FLAGS_GPE_STORM, /* GPE storm detected */
@@ -87,6 +84,15 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
87module_param(ec_delay, uint, 0644); 84module_param(ec_delay, uint, 0644);
88MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes"); 85MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
89 86
87/*
88 * If the number of false interrupts per one transaction exceeds
89 * this threshold, will think there is a GPE storm happened and
90 * will disable the GPE for normal transaction.
91 */
92static unsigned int ec_storm_threshold __read_mostly = 8;
93module_param(ec_storm_threshold, uint, 0644);
94MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm");
95
90/* If we find an EC via the ECDT, we need to keep a ptr to its context */ 96/* If we find an EC via the ECDT, we need to keep a ptr to its context */
91/* External interfaces use first EC only, so remember */ 97/* External interfaces use first EC only, so remember */
92typedef int (*acpi_ec_query_func) (void *data); 98typedef int (*acpi_ec_query_func) (void *data);
@@ -319,7 +325,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
319 msleep(1); 325 msleep(1);
320 /* It is safe to enable the GPE outside of the transaction. */ 326 /* It is safe to enable the GPE outside of the transaction. */
321 acpi_enable_gpe(NULL, ec->gpe); 327 acpi_enable_gpe(NULL, ec->gpe);
322 } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) { 328 } else if (t->irq_count > ec_storm_threshold) {
323 pr_info(PREFIX "GPE storm detected, " 329 pr_info(PREFIX "GPE storm detected, "
324 "transactions will use polling mode\n"); 330 "transactions will use polling mode\n");
325 set_bit(EC_FLAGS_GPE_STORM, &ec->flags); 331 set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
@@ -924,6 +930,17 @@ static int ec_flag_msi(const struct dmi_system_id *id)
924 return 0; 930 return 0;
925} 931}
926 932
933/*
934 * Clevo M720 notebook actually works ok with IRQ mode, if we lifted
935 * the GPE storm threshold back to 20
936 */
937static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
938{
939 pr_debug("Setting the EC GPE storm threshold to 20\n");
940 ec_storm_threshold = 20;
941 return 0;
942}
943
927static struct dmi_system_id __initdata ec_dmi_table[] = { 944static struct dmi_system_id __initdata ec_dmi_table[] = {
928 { 945 {
929 ec_skip_dsdt_scan, "Compal JFL92", { 946 ec_skip_dsdt_scan, "Compal JFL92", {
@@ -955,10 +972,13 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
955 { 972 {
956 ec_validate_ecdt, "ASUS hardware", { 973 ec_validate_ecdt, "ASUS hardware", {
957 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL}, 974 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
975 {
976 ec_enlarge_storm_threshold, "CLEVO hardware", {
977 DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
978 DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL},
958 {}, 979 {},
959}; 980};
960 981
961
962int __init acpi_ec_ecdt_probe(void) 982int __init acpi_ec_ecdt_probe(void)
963{ 983{
964 acpi_status status; 984 acpi_status status;
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index d1a2d74033e9..08373086cd7e 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -159,6 +159,7 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle)
159 if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) { 159 if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) {
160 retval = -ENOSPC; 160 retval = -ENOSPC;
161 mutex_unlock(&acpi_dev->physical_node_lock); 161 mutex_unlock(&acpi_dev->physical_node_lock);
162 kfree(physical_node);
162 goto err; 163 goto err;
163 } 164 }
164 165
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index e78c2a52ea46..bd4e5dca3ff7 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -409,6 +409,7 @@ static void acpi_processor_notify(struct acpi_device *device, u32 event)
409 acpi_bus_generate_proc_event(device, event, 0); 409 acpi_bus_generate_proc_event(device, event, 0);
410 acpi_bus_generate_netlink_event(device->pnp.device_class, 410 acpi_bus_generate_netlink_event(device->pnp.device_class,
411 dev_name(&device->dev), event, 0); 411 dev_name(&device->dev), event, 0);
412 break;
412 default: 413 default:
413 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 414 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
414 "Unsupported event [0x%x]\n", event)); 415 "Unsupported event [0x%x]\n", event));
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 3655ab923812..e8086c725305 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -1132,7 +1132,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
1132int acpi_processor_hotplug(struct acpi_processor *pr) 1132int acpi_processor_hotplug(struct acpi_processor *pr)
1133{ 1133{
1134 int ret = 0; 1134 int ret = 0;
1135 struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id); 1135 struct cpuidle_device *dev;
1136 1136
1137 if (disabled_by_idle_boot_param()) 1137 if (disabled_by_idle_boot_param())
1138 return 0; 1138 return 0;
@@ -1147,6 +1147,7 @@ int acpi_processor_hotplug(struct acpi_processor *pr)
1147 if (!pr->flags.power_setup_done) 1147 if (!pr->flags.power_setup_done)
1148 return -ENODEV; 1148 return -ENODEV;
1149 1149
1150 dev = per_cpu(acpi_cpuidle_device, pr->id);
1150 cpuidle_pause_and_lock(); 1151 cpuidle_pause_and_lock();
1151 cpuidle_disable_device(dev); 1152 cpuidle_disable_device(dev);
1152 acpi_processor_get_power_info(pr); 1153 acpi_processor_get_power_info(pr);
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index edda74a43406..804204d41999 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -708,6 +708,40 @@ static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
708 return -EINVAL; 708 return -EINVAL;
709} 709}
710 710
711static int thermal_get_trend(struct thermal_zone_device *thermal,
712 int trip, enum thermal_trend *trend)
713{
714 struct acpi_thermal *tz = thermal->devdata;
715 enum thermal_trip_type type;
716 int i;
717
718 if (thermal_get_trip_type(thermal, trip, &type))
719 return -EINVAL;
720
721 if (type == THERMAL_TRIP_ACTIVE) {
722 /* aggressive active cooling */
723 *trend = THERMAL_TREND_RAISING;
724 return 0;
725 }
726
727 /*
728 * tz->temperature has already been updated by generic thermal layer,
729 * before this callback being invoked
730 */
731 i = (tz->trips.passive.tc1 * (tz->temperature - tz->last_temperature))
732 + (tz->trips.passive.tc2
733 * (tz->temperature - tz->trips.passive.temperature));
734
735 if (i > 0)
736 *trend = THERMAL_TREND_RAISING;
737 else if (i < 0)
738 *trend = THERMAL_TREND_DROPPING;
739 else
740 *trend = THERMAL_TREND_STABLE;
741 return 0;
742}
743
744
711static int thermal_notify(struct thermal_zone_device *thermal, int trip, 745static int thermal_notify(struct thermal_zone_device *thermal, int trip,
712 enum thermal_trip_type trip_type) 746 enum thermal_trip_type trip_type)
713{ 747{
@@ -731,11 +765,9 @@ static int thermal_notify(struct thermal_zone_device *thermal, int trip,
731 return 0; 765 return 0;
732} 766}
733 767
734typedef int (*cb)(struct thermal_zone_device *, int,
735 struct thermal_cooling_device *);
736static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal, 768static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
737 struct thermal_cooling_device *cdev, 769 struct thermal_cooling_device *cdev,
738 cb action) 770 bool bind)
739{ 771{
740 struct acpi_device *device = cdev->devdata; 772 struct acpi_device *device = cdev->devdata;
741 struct acpi_thermal *tz = thermal->devdata; 773 struct acpi_thermal *tz = thermal->devdata;
@@ -759,11 +791,19 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
759 i++) { 791 i++) {
760 handle = tz->trips.passive.devices.handles[i]; 792 handle = tz->trips.passive.devices.handles[i];
761 status = acpi_bus_get_device(handle, &dev); 793 status = acpi_bus_get_device(handle, &dev);
762 if (ACPI_SUCCESS(status) && (dev == device)) { 794 if (ACPI_FAILURE(status) || dev != device)
763 result = action(thermal, trip, cdev); 795 continue;
764 if (result) 796 if (bind)
765 goto failed; 797 result =
766 } 798 thermal_zone_bind_cooling_device
799 (thermal, trip, cdev,
800 THERMAL_NO_LIMIT, THERMAL_NO_LIMIT);
801 else
802 result =
803 thermal_zone_unbind_cooling_device
804 (thermal, trip, cdev);
805 if (result)
806 goto failed;
767 } 807 }
768 } 808 }
769 809
@@ -776,11 +816,17 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
776 j++) { 816 j++) {
777 handle = tz->trips.active[i].devices.handles[j]; 817 handle = tz->trips.active[i].devices.handles[j];
778 status = acpi_bus_get_device(handle, &dev); 818 status = acpi_bus_get_device(handle, &dev);
779 if (ACPI_SUCCESS(status) && (dev == device)) { 819 if (ACPI_FAILURE(status) || dev != device)
780 result = action(thermal, trip, cdev); 820 continue;
781 if (result) 821 if (bind)
782 goto failed; 822 result = thermal_zone_bind_cooling_device
783 } 823 (thermal, trip, cdev,
824 THERMAL_NO_LIMIT, THERMAL_NO_LIMIT);
825 else
826 result = thermal_zone_unbind_cooling_device
827 (thermal, trip, cdev);
828 if (result)
829 goto failed;
784 } 830 }
785 } 831 }
786 832
@@ -788,7 +834,14 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
788 handle = tz->devices.handles[i]; 834 handle = tz->devices.handles[i];
789 status = acpi_bus_get_device(handle, &dev); 835 status = acpi_bus_get_device(handle, &dev);
790 if (ACPI_SUCCESS(status) && (dev == device)) { 836 if (ACPI_SUCCESS(status) && (dev == device)) {
791 result = action(thermal, -1, cdev); 837 if (bind)
838 result = thermal_zone_bind_cooling_device
839 (thermal, -1, cdev,
840 THERMAL_NO_LIMIT,
841 THERMAL_NO_LIMIT);
842 else
843 result = thermal_zone_unbind_cooling_device
844 (thermal, -1, cdev);
792 if (result) 845 if (result)
793 goto failed; 846 goto failed;
794 } 847 }
@@ -802,16 +855,14 @@ static int
802acpi_thermal_bind_cooling_device(struct thermal_zone_device *thermal, 855acpi_thermal_bind_cooling_device(struct thermal_zone_device *thermal,
803 struct thermal_cooling_device *cdev) 856 struct thermal_cooling_device *cdev)
804{ 857{
805 return acpi_thermal_cooling_device_cb(thermal, cdev, 858 return acpi_thermal_cooling_device_cb(thermal, cdev, true);
806 thermal_zone_bind_cooling_device);
807} 859}
808 860
809static int 861static int
810acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal, 862acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal,
811 struct thermal_cooling_device *cdev) 863 struct thermal_cooling_device *cdev)
812{ 864{
813 return acpi_thermal_cooling_device_cb(thermal, cdev, 865 return acpi_thermal_cooling_device_cb(thermal, cdev, false);
814 thermal_zone_unbind_cooling_device);
815} 866}
816 867
817static const struct thermal_zone_device_ops acpi_thermal_zone_ops = { 868static const struct thermal_zone_device_ops acpi_thermal_zone_ops = {
@@ -823,6 +874,7 @@ static const struct thermal_zone_device_ops acpi_thermal_zone_ops = {
823 .get_trip_type = thermal_get_trip_type, 874 .get_trip_type = thermal_get_trip_type,
824 .get_trip_temp = thermal_get_trip_temp, 875 .get_trip_temp = thermal_get_trip_temp,
825 .get_crit_temp = thermal_get_crit_temp, 876 .get_crit_temp = thermal_get_crit_temp,
877 .get_trend = thermal_get_trend,
826 .notify = thermal_notify, 878 .notify = thermal_notify,
827}; 879};
828 880
@@ -849,15 +901,12 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
849 tz->thermal_zone = 901 tz->thermal_zone =
850 thermal_zone_device_register("acpitz", trips, 0, tz, 902 thermal_zone_device_register("acpitz", trips, 0, tz,
851 &acpi_thermal_zone_ops, 903 &acpi_thermal_zone_ops,
852 tz->trips.passive.tc1,
853 tz->trips.passive.tc2,
854 tz->trips.passive.tsp*100, 904 tz->trips.passive.tsp*100,
855 tz->polling_frequency*100); 905 tz->polling_frequency*100);
856 else 906 else
857 tz->thermal_zone = 907 tz->thermal_zone =
858 thermal_zone_device_register("acpitz", trips, 0, tz, 908 thermal_zone_device_register("acpitz", trips, 0, tz,
859 &acpi_thermal_zone_ops, 909 &acpi_thermal_zone_ops, 0,
860 0, 0, 0,
861 tz->polling_frequency*100); 910 tz->polling_frequency*100);
862 if (IS_ERR(tz->thermal_zone)) 911 if (IS_ERR(tz->thermal_zone))
863 return -ENODEV; 912 return -ENODEV;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index f94d4c818fc7..0230cb6cbb3a 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -1345,12 +1345,15 @@ static int
1345acpi_video_bus_get_devices(struct acpi_video_bus *video, 1345acpi_video_bus_get_devices(struct acpi_video_bus *video,
1346 struct acpi_device *device) 1346 struct acpi_device *device)
1347{ 1347{
1348 int status; 1348 int status = 0;
1349 struct acpi_device *dev; 1349 struct acpi_device *dev;
1350 1350
1351 status = acpi_video_device_enumerate(video); 1351 /*
1352 if (status) 1352 * There are systems where video module known to work fine regardless
1353 return status; 1353 * of broken _DOD and ignoring returned value here doesn't cause
1354 * any issues later.
1355 */
1356 acpi_video_device_enumerate(video);
1354 1357
1355 list_for_each_entry(dev, &device->children, node) { 1358 list_for_each_entry(dev, &device->children, node) {
1356 1359
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 08b4c5209384..b34b5cda5ae1 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -236,7 +236,7 @@ config CMA_SIZE_PERCENTAGE
236 236
237choice 237choice
238 prompt "Selected region size" 238 prompt "Selected region size"
239 default CMA_SIZE_SEL_ABSOLUTE 239 default CMA_SIZE_SEL_MBYTES
240 240
241config CMA_SIZE_SEL_MBYTES 241config CMA_SIZE_SEL_MBYTES
242 bool "Use mega bytes value only" 242 bool "Use mega bytes value only"
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 560a7173f810..bc256b641027 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -191,9 +191,8 @@ EXPORT_SYMBOL(dma_release_from_coherent);
191 * This checks whether the memory was allocated from the per-device 191 * This checks whether the memory was allocated from the per-device
192 * coherent memory pool and if so, maps that memory to the provided vma. 192 * coherent memory pool and if so, maps that memory to the provided vma.
193 * 193 *
194 * Returns 1 if we correctly mapped the memory, or 0 if 194 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
195 * dma_release_coherent() should proceed with mapping memory from 195 * proceed with mapping memory from generic pools.
196 * generic pools.
197 */ 196 */
198int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, 197int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
199 void *vaddr, size_t size, int *ret) 198 void *vaddr, size_t size, int *ret)
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 9a1469474f55..612afcc5a938 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -27,15 +27,12 @@
27#include <linux/mm.h> 27#include <linux/mm.h>
28#include <linux/mutex.h> 28#include <linux/mutex.h>
29#include <linux/page-isolation.h> 29#include <linux/page-isolation.h>
30#include <linux/sizes.h>
30#include <linux/slab.h> 31#include <linux/slab.h>
31#include <linux/swap.h> 32#include <linux/swap.h>
32#include <linux/mm_types.h> 33#include <linux/mm_types.h>
33#include <linux/dma-contiguous.h> 34#include <linux/dma-contiguous.h>
34 35
35#ifndef SZ_1M
36#define SZ_1M (1 << 20)
37#endif
38
39struct cma { 36struct cma {
40 unsigned long base_pfn; 37 unsigned long base_pfn;
41 unsigned long count; 38 unsigned long count;
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 81541452887b..8945f4e489ed 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -36,68 +36,6 @@ MODULE_AUTHOR("Manuel Estrada Sainz");
36MODULE_DESCRIPTION("Multi purpose firmware loading support"); 36MODULE_DESCRIPTION("Multi purpose firmware loading support");
37MODULE_LICENSE("GPL"); 37MODULE_LICENSE("GPL");
38 38
39static const char *fw_path[] = {
40 "/lib/firmware/updates/" UTS_RELEASE,
41 "/lib/firmware/updates",
42 "/lib/firmware/" UTS_RELEASE,
43 "/lib/firmware"
44};
45
46/* Don't inline this: 'struct kstat' is biggish */
47static noinline long fw_file_size(struct file *file)
48{
49 struct kstat st;
50 if (vfs_getattr(file->f_path.mnt, file->f_path.dentry, &st))
51 return -1;
52 if (!S_ISREG(st.mode))
53 return -1;
54 if (st.size != (long)st.size)
55 return -1;
56 return st.size;
57}
58
59static bool fw_read_file_contents(struct file *file, struct firmware *fw)
60{
61 long size;
62 char *buf;
63
64 size = fw_file_size(file);
65 if (size < 0)
66 return false;
67 buf = vmalloc(size);
68 if (!buf)
69 return false;
70 if (kernel_read(file, 0, buf, size) != size) {
71 vfree(buf);
72 return false;
73 }
74 fw->data = buf;
75 fw->size = size;
76 return true;
77}
78
79static bool fw_get_filesystem_firmware(struct firmware *fw, const char *name)
80{
81 int i;
82 bool success = false;
83 char *path = __getname();
84
85 for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
86 struct file *file;
87 snprintf(path, PATH_MAX, "%s/%s", fw_path[i], name);
88
89 file = filp_open(path, O_RDONLY, 0);
90 if (IS_ERR(file))
91 continue;
92 success = fw_read_file_contents(file, fw);
93 fput(file);
94 if (success)
95 break;
96 }
97 __putname(path);
98 return success;
99}
100
101/* Builtin firmware support */ 39/* Builtin firmware support */
102 40
103#ifdef CONFIG_FW_LOADER 41#ifdef CONFIG_FW_LOADER
@@ -150,6 +88,11 @@ enum {
150 FW_STATUS_ABORT, 88 FW_STATUS_ABORT,
151}; 89};
152 90
91enum fw_buf_fmt {
92 VMALLOC_BUF, /* used in direct loading */
93 PAGE_BUF, /* used in loading via userspace */
94};
95
153static int loading_timeout = 60; /* In seconds */ 96static int loading_timeout = 60; /* In seconds */
154 97
155static inline long firmware_loading_timeout(void) 98static inline long firmware_loading_timeout(void)
@@ -173,8 +116,6 @@ struct firmware_cache {
173 spinlock_t name_lock; 116 spinlock_t name_lock;
174 struct list_head fw_names; 117 struct list_head fw_names;
175 118
176 wait_queue_head_t wait_queue;
177 int cnt;
178 struct delayed_work work; 119 struct delayed_work work;
179 120
180 struct notifier_block pm_notify; 121 struct notifier_block pm_notify;
@@ -187,6 +128,7 @@ struct firmware_buf {
187 struct completion completion; 128 struct completion completion;
188 struct firmware_cache *fwc; 129 struct firmware_cache *fwc;
189 unsigned long status; 130 unsigned long status;
131 enum fw_buf_fmt fmt;
190 void *data; 132 void *data;
191 size_t size; 133 size_t size;
192 struct page **pages; 134 struct page **pages;
@@ -240,6 +182,7 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
240 strcpy(buf->fw_id, fw_name); 182 strcpy(buf->fw_id, fw_name);
241 buf->fwc = fwc; 183 buf->fwc = fwc;
242 init_completion(&buf->completion); 184 init_completion(&buf->completion);
185 buf->fmt = VMALLOC_BUF;
243 186
244 pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf); 187 pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
245 188
@@ -307,10 +250,14 @@ static void __fw_free_buf(struct kref *ref)
307 list_del(&buf->list); 250 list_del(&buf->list);
308 spin_unlock(&fwc->lock); 251 spin_unlock(&fwc->lock);
309 252
310 vunmap(buf->data); 253
311 for (i = 0; i < buf->nr_pages; i++) 254 if (buf->fmt == PAGE_BUF) {
312 __free_page(buf->pages[i]); 255 vunmap(buf->data);
313 kfree(buf->pages); 256 for (i = 0; i < buf->nr_pages; i++)
257 __free_page(buf->pages[i]);
258 kfree(buf->pages);
259 } else
260 vfree(buf->data);
314 kfree(buf); 261 kfree(buf);
315} 262}
316 263
@@ -319,6 +266,69 @@ static void fw_free_buf(struct firmware_buf *buf)
319 kref_put(&buf->ref, __fw_free_buf); 266 kref_put(&buf->ref, __fw_free_buf);
320} 267}
321 268
269/* direct firmware loading support */
270static const char *fw_path[] = {
271 "/lib/firmware/updates/" UTS_RELEASE,
272 "/lib/firmware/updates",
273 "/lib/firmware/" UTS_RELEASE,
274 "/lib/firmware"
275};
276
277/* Don't inline this: 'struct kstat' is biggish */
278static noinline long fw_file_size(struct file *file)
279{
280 struct kstat st;
281 if (vfs_getattr(file->f_path.mnt, file->f_path.dentry, &st))
282 return -1;
283 if (!S_ISREG(st.mode))
284 return -1;
285 if (st.size != (long)st.size)
286 return -1;
287 return st.size;
288}
289
290static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
291{
292 long size;
293 char *buf;
294
295 size = fw_file_size(file);
296 if (size < 0)
297 return false;
298 buf = vmalloc(size);
299 if (!buf)
300 return false;
301 if (kernel_read(file, 0, buf, size) != size) {
302 vfree(buf);
303 return false;
304 }
305 fw_buf->data = buf;
306 fw_buf->size = size;
307 return true;
308}
309
310static bool fw_get_filesystem_firmware(struct firmware_buf *buf)
311{
312 int i;
313 bool success = false;
314 char *path = __getname();
315
316 for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
317 struct file *file;
318 snprintf(path, PATH_MAX, "%s/%s", fw_path[i], buf->fw_id);
319
320 file = filp_open(path, O_RDONLY, 0);
321 if (IS_ERR(file))
322 continue;
323 success = fw_read_file_contents(file, buf);
324 fput(file);
325 if (success)
326 break;
327 }
328 __putname(path);
329 return success;
330}
331
322static struct firmware_priv *to_firmware_priv(struct device *dev) 332static struct firmware_priv *to_firmware_priv(struct device *dev)
323{ 333{
324 return container_of(dev, struct firmware_priv, dev); 334 return container_of(dev, struct firmware_priv, dev);
@@ -423,6 +433,21 @@ static void firmware_free_data(const struct firmware *fw)
423#ifndef PAGE_KERNEL_RO 433#ifndef PAGE_KERNEL_RO
424#define PAGE_KERNEL_RO PAGE_KERNEL 434#define PAGE_KERNEL_RO PAGE_KERNEL
425#endif 435#endif
436
437/* one pages buffer should be mapped/unmapped only once */
438static int fw_map_pages_buf(struct firmware_buf *buf)
439{
440 if (buf->fmt != PAGE_BUF)
441 return 0;
442
443 if (buf->data)
444 vunmap(buf->data);
445 buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
446 if (!buf->data)
447 return -ENOMEM;
448 return 0;
449}
450
426/** 451/**
427 * firmware_loading_store - set value in the 'loading' control file 452 * firmware_loading_store - set value in the 'loading' control file
428 * @dev: device pointer 453 * @dev: device pointer
@@ -467,6 +492,14 @@ static ssize_t firmware_loading_store(struct device *dev,
467 if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) { 492 if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
468 set_bit(FW_STATUS_DONE, &fw_buf->status); 493 set_bit(FW_STATUS_DONE, &fw_buf->status);
469 clear_bit(FW_STATUS_LOADING, &fw_buf->status); 494 clear_bit(FW_STATUS_LOADING, &fw_buf->status);
495
496 /*
497 * Several loading requests may be pending on
498 * one same firmware buf, so let all requests
499 * see the mapped 'buf->data' once the loading
500 * is completed.
501 * */
502 fw_map_pages_buf(fw_buf);
470 complete_all(&fw_buf->completion); 503 complete_all(&fw_buf->completion);
471 break; 504 break;
472 } 505 }
@@ -670,15 +703,6 @@ exit:
670 return fw_priv; 703 return fw_priv;
671} 704}
672 705
673/* one pages buffer is mapped/unmapped only once */
674static int fw_map_pages_buf(struct firmware_buf *buf)
675{
676 buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
677 if (!buf->data)
678 return -ENOMEM;
679 return 0;
680}
681
682/* store the pages buffer info firmware from buf */ 706/* store the pages buffer info firmware from buf */
683static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw) 707static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
684{ 708{
@@ -778,11 +802,6 @@ _request_firmware_prepare(const struct firmware **firmware_p, const char *name,
778 return NULL; 802 return NULL;
779 } 803 }
780 804
781 if (fw_get_filesystem_firmware(firmware, name)) {
782 dev_dbg(device, "firmware: direct-loading firmware %s\n", name);
783 return NULL;
784 }
785
786 ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf); 805 ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);
787 if (!ret) 806 if (!ret)
788 fw_priv = fw_create_instance(firmware, name, device, 807 fw_priv = fw_create_instance(firmware, name, device,
@@ -832,6 +851,21 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
832 struct device *f_dev = &fw_priv->dev; 851 struct device *f_dev = &fw_priv->dev;
833 struct firmware_buf *buf = fw_priv->buf; 852 struct firmware_buf *buf = fw_priv->buf;
834 struct firmware_cache *fwc = &fw_cache; 853 struct firmware_cache *fwc = &fw_cache;
854 int direct_load = 0;
855
856 /* try direct loading from fs first */
857 if (fw_get_filesystem_firmware(buf)) {
858 dev_dbg(f_dev->parent, "firmware: direct-loading"
859 " firmware %s\n", buf->fw_id);
860
861 set_bit(FW_STATUS_DONE, &buf->status);
862 complete_all(&buf->completion);
863 direct_load = 1;
864 goto handle_fw;
865 }
866
867 /* fall back on userspace loading */
868 buf->fmt = PAGE_BUF;
835 869
836 dev_set_uevent_suppress(f_dev, true); 870 dev_set_uevent_suppress(f_dev, true);
837 871
@@ -870,6 +904,7 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
870 904
871 del_timer_sync(&fw_priv->timeout); 905 del_timer_sync(&fw_priv->timeout);
872 906
907handle_fw:
873 mutex_lock(&fw_lock); 908 mutex_lock(&fw_lock);
874 if (!buf->size || test_bit(FW_STATUS_ABORT, &buf->status)) 909 if (!buf->size || test_bit(FW_STATUS_ABORT, &buf->status))
875 retval = -ENOENT; 910 retval = -ENOENT;
@@ -884,9 +919,6 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
884 if (!retval && f_dev->parent) 919 if (!retval && f_dev->parent)
885 fw_add_devm_name(f_dev->parent, buf->fw_id); 920 fw_add_devm_name(f_dev->parent, buf->fw_id);
886 921
887 if (!retval)
888 retval = fw_map_pages_buf(buf);
889
890 /* 922 /*
891 * After caching firmware image is started, let it piggyback 923 * After caching firmware image is started, let it piggyback
892 * on request firmware. 924 * on request firmware.
@@ -902,6 +934,9 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
902 fw_priv->buf = NULL; 934 fw_priv->buf = NULL;
903 mutex_unlock(&fw_lock); 935 mutex_unlock(&fw_lock);
904 936
937 if (direct_load)
938 goto err_put_dev;
939
905 device_remove_file(f_dev, &dev_attr_loading); 940 device_remove_file(f_dev, &dev_attr_loading);
906err_del_bin_attr: 941err_del_bin_attr:
907 device_remove_bin_file(f_dev, &firmware_attr_data); 942 device_remove_bin_file(f_dev, &firmware_attr_data);
@@ -1129,6 +1164,8 @@ int uncache_firmware(const char *fw_name)
1129} 1164}
1130 1165
1131#ifdef CONFIG_PM_SLEEP 1166#ifdef CONFIG_PM_SLEEP
1167static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
1168
1132static struct fw_cache_entry *alloc_fw_cache_entry(const char *name) 1169static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
1133{ 1170{
1134 struct fw_cache_entry *fce; 1171 struct fw_cache_entry *fce;
@@ -1142,17 +1179,27 @@ exit:
1142 return fce; 1179 return fce;
1143} 1180}
1144 1181
1145static int fw_cache_piggyback_on_request(const char *name) 1182static int __fw_entry_found(const char *name)
1146{ 1183{
1147 struct firmware_cache *fwc = &fw_cache; 1184 struct firmware_cache *fwc = &fw_cache;
1148 struct fw_cache_entry *fce; 1185 struct fw_cache_entry *fce;
1149 int ret = 0;
1150 1186
1151 spin_lock(&fwc->name_lock);
1152 list_for_each_entry(fce, &fwc->fw_names, list) { 1187 list_for_each_entry(fce, &fwc->fw_names, list) {
1153 if (!strcmp(fce->name, name)) 1188 if (!strcmp(fce->name, name))
1154 goto found; 1189 return 1;
1155 } 1190 }
1191 return 0;
1192}
1193
1194static int fw_cache_piggyback_on_request(const char *name)
1195{
1196 struct firmware_cache *fwc = &fw_cache;
1197 struct fw_cache_entry *fce;
1198 int ret = 0;
1199
1200 spin_lock(&fwc->name_lock);
1201 if (__fw_entry_found(name))
1202 goto found;
1156 1203
1157 fce = alloc_fw_cache_entry(name); 1204 fce = alloc_fw_cache_entry(name);
1158 if (fce) { 1205 if (fce) {
@@ -1185,12 +1232,6 @@ static void __async_dev_cache_fw_image(void *fw_entry,
1185 1232
1186 free_fw_cache_entry(fce); 1233 free_fw_cache_entry(fce);
1187 } 1234 }
1188
1189 spin_lock(&fwc->name_lock);
1190 fwc->cnt--;
1191 spin_unlock(&fwc->name_lock);
1192
1193 wake_up(&fwc->wait_queue);
1194} 1235}
1195 1236
1196/* called with dev->devres_lock held */ 1237/* called with dev->devres_lock held */
@@ -1229,11 +1270,19 @@ static void dev_cache_fw_image(struct device *dev, void *data)
1229 list_del(&fce->list); 1270 list_del(&fce->list);
1230 1271
1231 spin_lock(&fwc->name_lock); 1272 spin_lock(&fwc->name_lock);
1232 fwc->cnt++; 1273 /* only one cache entry for one firmware */
1233 list_add(&fce->list, &fwc->fw_names); 1274 if (!__fw_entry_found(fce->name)) {
1275 list_add(&fce->list, &fwc->fw_names);
1276 } else {
1277 free_fw_cache_entry(fce);
1278 fce = NULL;
1279 }
1234 spin_unlock(&fwc->name_lock); 1280 spin_unlock(&fwc->name_lock);
1235 1281
1236 async_schedule(__async_dev_cache_fw_image, (void *)fce); 1282 if (fce)
1283 async_schedule_domain(__async_dev_cache_fw_image,
1284 (void *)fce,
1285 &fw_cache_domain);
1237 } 1286 }
1238} 1287}
1239 1288
@@ -1275,6 +1324,9 @@ static void device_cache_fw_images(void)
1275 1324
1276 pr_debug("%s\n", __func__); 1325 pr_debug("%s\n", __func__);
1277 1326
1327 /* cancel uncache work */
1328 cancel_delayed_work_sync(&fwc->work);
1329
1278 /* 1330 /*
1279 * use small loading timeout for caching devices' firmware 1331 * use small loading timeout for caching devices' firmware
1280 * because all these firmware images have been loaded 1332 * because all these firmware images have been loaded
@@ -1292,21 +1344,7 @@ static void device_cache_fw_images(void)
1292 mutex_unlock(&fw_lock); 1344 mutex_unlock(&fw_lock);
1293 1345
1294 /* wait for completion of caching firmware for all devices */ 1346 /* wait for completion of caching firmware for all devices */
1295 spin_lock(&fwc->name_lock); 1347 async_synchronize_full_domain(&fw_cache_domain);
1296 for (;;) {
1297 prepare_to_wait(&fwc->wait_queue, &wait,
1298 TASK_UNINTERRUPTIBLE);
1299 if (!fwc->cnt)
1300 break;
1301
1302 spin_unlock(&fwc->name_lock);
1303
1304 schedule();
1305
1306 spin_lock(&fwc->name_lock);
1307 }
1308 spin_unlock(&fwc->name_lock);
1309 finish_wait(&fwc->wait_queue, &wait);
1310 1348
1311 loading_timeout = old_timeout; 1349 loading_timeout = old_timeout;
1312} 1350}
@@ -1394,9 +1432,7 @@ static void __init fw_cache_init(void)
1394#ifdef CONFIG_PM_SLEEP 1432#ifdef CONFIG_PM_SLEEP
1395 spin_lock_init(&fw_cache.name_lock); 1433 spin_lock_init(&fw_cache.name_lock);
1396 INIT_LIST_HEAD(&fw_cache.fw_names); 1434 INIT_LIST_HEAD(&fw_cache.fw_names);
1397 fw_cache.cnt = 0;
1398 1435
1399 init_waitqueue_head(&fw_cache.wait_queue);
1400 INIT_DELAYED_WORK(&fw_cache.work, 1436 INIT_DELAYED_WORK(&fw_cache.work,
1401 device_uncache_fw_images_work); 1437 device_uncache_fw_images_work);
1402 1438
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index c22b869245d9..96b71b6536d6 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1862,7 +1862,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1862 cpuidle_drv = cpuidle_driver_ref(); 1862 cpuidle_drv = cpuidle_driver_ref();
1863 if (!cpuidle_drv) { 1863 if (!cpuidle_drv) {
1864 ret = -ENODEV; 1864 ret = -ENODEV;
1865 goto out; 1865 goto err_drv;
1866 } 1866 }
1867 if (cpuidle_drv->state_count <= state) { 1867 if (cpuidle_drv->state_count <= state) {
1868 ret = -EINVAL; 1868 ret = -EINVAL;
@@ -1884,6 +1884,9 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1884 1884
1885 err: 1885 err:
1886 cpuidle_driver_unref(); 1886 cpuidle_driver_unref();
1887
1888 err_drv:
1889 kfree(cpu_data);
1887 goto out; 1890 goto out;
1888} 1891}
1889 1892
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 6be390bd8bd1..f0d30543fcce 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -3,7 +3,7 @@
3# subsystems should select the appropriate symbols. 3# subsystems should select the appropriate symbols.
4 4
5config REGMAP 5config REGMAP
6 default y if (REGMAP_I2C || REGMAP_SPI) 6 default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_MMIO || REGMAP_IRQ)
7 select LZO_COMPRESS 7 select LZO_COMPRESS
8 select LZO_DECOMPRESS 8 select LZO_DECOMPRESS
9 select IRQ_DOMAIN if REGMAP_IRQ 9 select IRQ_DOMAIN if REGMAP_IRQ
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 432aeeedfd5e..d865470bc951 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -158,9 +158,10 @@ static int bcma_register_cores(struct bcma_bus *bus)
158 158
159static void bcma_unregister_cores(struct bcma_bus *bus) 159static void bcma_unregister_cores(struct bcma_bus *bus)
160{ 160{
161 struct bcma_device *core; 161 struct bcma_device *core, *tmp;
162 162
163 list_for_each_entry(core, &bus->cores, list) { 163 list_for_each_entry_safe(core, tmp, &bus->cores, list) {
164 list_del(&core->list);
164 if (core->dev_registered) 165 if (core->dev_registered)
165 device_unregister(&core->dev); 166 device_unregister(&core->dev);
166 } 167 }
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index f529407db93f..824e09c4d0d7 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -131,6 +131,7 @@ config BLK_CPQ_DA
131config BLK_CPQ_CISS_DA 131config BLK_CPQ_CISS_DA
132 tristate "Compaq Smart Array 5xxx support" 132 tristate "Compaq Smart Array 5xxx support"
133 depends on PCI 133 depends on PCI
134 select CHECK_SIGNATURE
134 help 135 help
135 This is the driver for Compaq Smart Array 5xxx controllers. 136 This is the driver for Compaq Smart Array 5xxx controllers.
136 Everyone using these boards should say Y here. 137 Everyone using these boards should say Y here.
@@ -166,8 +167,8 @@ config BLK_DEV_DAC960
166 module will be called DAC960. 167 module will be called DAC960.
167 168
168config BLK_DEV_UMEM 169config BLK_DEV_UMEM
169 tristate "Micro Memory MM5415 Battery Backed RAM support (EXPERIMENTAL)" 170 tristate "Micro Memory MM5415 Battery Backed RAM support"
170 depends on PCI && EXPERIMENTAL 171 depends on PCI
171 ---help--- 172 ---help---
172 Saying Y here will include support for the MM5415 family of 173 Saying Y here will include support for the MM5415 family of
173 battery backed (Non-volatile) RAM cards. 174 battery backed (Non-volatile) RAM cards.
@@ -430,8 +431,8 @@ config CDROM_PKTCDVD_BUFFERS
430 a disc is opened for writing. 431 a disc is opened for writing.
431 432
432config CDROM_PKTCDVD_WCACHE 433config CDROM_PKTCDVD_WCACHE
433 bool "Enable write caching (EXPERIMENTAL)" 434 bool "Enable write caching"
434 depends on CDROM_PKTCDVD && EXPERIMENTAL 435 depends on CDROM_PKTCDVD
435 help 436 help
436 If enabled, write caching will be set for the CD-R/W device. For now 437 If enabled, write caching will be set for the CD-R/W device. For now
437 this option is dangerous unless the CD-RW media is known good, as we 438 this option is dangerous unless the CD-RW media is known good, as we
@@ -508,8 +509,8 @@ config XEN_BLKDEV_BACKEND
508 509
509 510
510config VIRTIO_BLK 511config VIRTIO_BLK
511 tristate "Virtio block driver (EXPERIMENTAL)" 512 tristate "Virtio block driver"
512 depends on EXPERIMENTAL && VIRTIO 513 depends on VIRTIO
513 ---help--- 514 ---help---
514 This is the virtual block driver for virtio. It can be used with 515 This is the virtual block driver for virtio. It can be used with
515 lguest or QEMU based VMMs (like KVM or Xen). Say Y or M. 516 lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
@@ -528,7 +529,7 @@ config BLK_DEV_HD
528 529
529config BLK_DEV_RBD 530config BLK_DEV_RBD
530 tristate "Rados block device (RBD)" 531 tristate "Rados block device (RBD)"
531 depends on INET && EXPERIMENTAL && BLOCK 532 depends on INET && BLOCK
532 select CEPH_LIB 533 select CEPH_LIB
533 select LIBCRC32C 534 select LIBCRC32C
534 select CRYPTO_AES 535 select CRYPTO_AES
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index b0f553b26d0f..ca83f96756ad 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -5205,7 +5205,6 @@ static void cciss_shutdown(struct pci_dev *pdev)
5205 return; 5205 return;
5206 } 5206 }
5207 /* write all data in the battery backed cache to disk */ 5207 /* write all data in the battery backed cache to disk */
5208 memset(flush_buf, 0, 4);
5209 return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf, 5208 return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf,
5210 4, 0, CTLR_LUNID, TYPE_CMD); 5209 4, 0, CTLR_LUNID, TYPE_CMD);
5211 kfree(flush_buf); 5210 kfree(flush_buf);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index f93a0320e952..f55683ad4ffa 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -162,23 +162,12 @@ static const struct block_device_operations drbd_ops = {
162 .release = drbd_release, 162 .release = drbd_release,
163}; 163};
164 164
165static void bio_destructor_drbd(struct bio *bio)
166{
167 bio_free(bio, drbd_md_io_bio_set);
168}
169
170struct bio *bio_alloc_drbd(gfp_t gfp_mask) 165struct bio *bio_alloc_drbd(gfp_t gfp_mask)
171{ 166{
172 struct bio *bio;
173
174 if (!drbd_md_io_bio_set) 167 if (!drbd_md_io_bio_set)
175 return bio_alloc(gfp_mask, 1); 168 return bio_alloc(gfp_mask, 1);
176 169
177 bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set); 170 return bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
178 if (!bio)
179 return NULL;
180 bio->bi_destructor = bio_destructor_drbd;
181 return bio;
182} 171}
183 172
184#ifdef __CHECKER__ 173#ifdef __CHECKER__
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 17c675c52295..1c49d7173966 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4109,12 +4109,19 @@ static struct platform_driver floppy_driver = {
4109 4109
4110static struct platform_device floppy_device[N_DRIVE]; 4110static struct platform_device floppy_device[N_DRIVE];
4111 4111
4112static bool floppy_available(int drive)
4113{
4114 if (!(allowed_drive_mask & (1 << drive)))
4115 return false;
4116 if (fdc_state[FDC(drive)].version == FDC_NONE)
4117 return false;
4118 return true;
4119}
4120
4112static struct kobject *floppy_find(dev_t dev, int *part, void *data) 4121static struct kobject *floppy_find(dev_t dev, int *part, void *data)
4113{ 4122{
4114 int drive = (*part & 3) | ((*part & 0x80) >> 5); 4123 int drive = (*part & 3) | ((*part & 0x80) >> 5);
4115 if (drive >= N_DRIVE || 4124 if (drive >= N_DRIVE || !floppy_available(drive))
4116 !(allowed_drive_mask & (1 << drive)) ||
4117 fdc_state[FDC(drive)].version == FDC_NONE)
4118 return NULL; 4125 return NULL;
4119 if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type)) 4126 if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type))
4120 return NULL; 4127 return NULL;
@@ -4124,8 +4131,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
4124 4131
4125static int __init do_floppy_init(void) 4132static int __init do_floppy_init(void)
4126{ 4133{
4127 int i, unit, drive; 4134 int i, unit, drive, err;
4128 int err, dr;
4129 4135
4130 set_debugt(); 4136 set_debugt();
4131 interruptjiffies = resultjiffies = jiffies; 4137 interruptjiffies = resultjiffies = jiffies;
@@ -4137,34 +4143,32 @@ static int __init do_floppy_init(void)
4137 4143
4138 raw_cmd = NULL; 4144 raw_cmd = NULL;
4139 4145
4140 for (dr = 0; dr < N_DRIVE; dr++) { 4146 floppy_wq = alloc_ordered_workqueue("floppy", 0);
4141 disks[dr] = alloc_disk(1); 4147 if (!floppy_wq)
4142 if (!disks[dr]) { 4148 return -ENOMEM;
4143 err = -ENOMEM;
4144 goto out_put_disk;
4145 }
4146 4149
4147 floppy_wq = alloc_ordered_workqueue("floppy", 0); 4150 for (drive = 0; drive < N_DRIVE; drive++) {
4148 if (!floppy_wq) { 4151 disks[drive] = alloc_disk(1);
4152 if (!disks[drive]) {
4149 err = -ENOMEM; 4153 err = -ENOMEM;
4150 goto out_put_disk; 4154 goto out_put_disk;
4151 } 4155 }
4152 4156
4153 disks[dr]->queue = blk_init_queue(do_fd_request, &floppy_lock); 4157 disks[drive]->queue = blk_init_queue(do_fd_request, &floppy_lock);
4154 if (!disks[dr]->queue) { 4158 if (!disks[drive]->queue) {
4155 err = -ENOMEM; 4159 err = -ENOMEM;
4156 goto out_destroy_workq; 4160 goto out_put_disk;
4157 } 4161 }
4158 4162
4159 blk_queue_max_hw_sectors(disks[dr]->queue, 64); 4163 blk_queue_max_hw_sectors(disks[drive]->queue, 64);
4160 disks[dr]->major = FLOPPY_MAJOR; 4164 disks[drive]->major = FLOPPY_MAJOR;
4161 disks[dr]->first_minor = TOMINOR(dr); 4165 disks[drive]->first_minor = TOMINOR(drive);
4162 disks[dr]->fops = &floppy_fops; 4166 disks[drive]->fops = &floppy_fops;
4163 sprintf(disks[dr]->disk_name, "fd%d", dr); 4167 sprintf(disks[drive]->disk_name, "fd%d", drive);
4164 4168
4165 init_timer(&motor_off_timer[dr]); 4169 init_timer(&motor_off_timer[drive]);
4166 motor_off_timer[dr].data = dr; 4170 motor_off_timer[drive].data = drive;
4167 motor_off_timer[dr].function = motor_off_callback; 4171 motor_off_timer[drive].function = motor_off_callback;
4168 } 4172 }
4169 4173
4170 err = register_blkdev(FLOPPY_MAJOR, "fd"); 4174 err = register_blkdev(FLOPPY_MAJOR, "fd");
@@ -4282,9 +4286,7 @@ static int __init do_floppy_init(void)
4282 } 4286 }
4283 4287
4284 for (drive = 0; drive < N_DRIVE; drive++) { 4288 for (drive = 0; drive < N_DRIVE; drive++) {
4285 if (!(allowed_drive_mask & (1 << drive))) 4289 if (!floppy_available(drive))
4286 continue;
4287 if (fdc_state[FDC(drive)].version == FDC_NONE)
4288 continue; 4290 continue;
4289 4291
4290 floppy_device[drive].name = floppy_device_name; 4292 floppy_device[drive].name = floppy_device_name;
@@ -4293,7 +4295,7 @@ static int __init do_floppy_init(void)
4293 4295
4294 err = platform_device_register(&floppy_device[drive]); 4296 err = platform_device_register(&floppy_device[drive]);
4295 if (err) 4297 if (err)
4296 goto out_release_dma; 4298 goto out_remove_drives;
4297 4299
4298 err = device_create_file(&floppy_device[drive].dev, 4300 err = device_create_file(&floppy_device[drive].dev,
4299 &dev_attr_cmos); 4301 &dev_attr_cmos);
@@ -4311,29 +4313,34 @@ static int __init do_floppy_init(void)
4311 4313
4312out_unreg_platform_dev: 4314out_unreg_platform_dev:
4313 platform_device_unregister(&floppy_device[drive]); 4315 platform_device_unregister(&floppy_device[drive]);
4316out_remove_drives:
4317 while (drive--) {
4318 if (floppy_available(drive)) {
4319 del_gendisk(disks[drive]);
4320 device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
4321 platform_device_unregister(&floppy_device[drive]);
4322 }
4323 }
4314out_release_dma: 4324out_release_dma:
4315 if (atomic_read(&usage_count)) 4325 if (atomic_read(&usage_count))
4316 floppy_release_irq_and_dma(); 4326 floppy_release_irq_and_dma();
4317out_unreg_region: 4327out_unreg_region:
4318 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); 4328 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
4319 platform_driver_unregister(&floppy_driver); 4329 platform_driver_unregister(&floppy_driver);
4320out_destroy_workq:
4321 destroy_workqueue(floppy_wq);
4322out_unreg_blkdev: 4330out_unreg_blkdev:
4323 unregister_blkdev(FLOPPY_MAJOR, "fd"); 4331 unregister_blkdev(FLOPPY_MAJOR, "fd");
4324out_put_disk: 4332out_put_disk:
4325 while (dr--) { 4333 for (drive = 0; drive < N_DRIVE; drive++) {
4326 del_timer_sync(&motor_off_timer[dr]); 4334 if (!disks[drive])
4327 if (disks[dr]->queue) { 4335 break;
4328 blk_cleanup_queue(disks[dr]->queue); 4336 if (disks[drive]->queue) {
4329 /* 4337 del_timer_sync(&motor_off_timer[drive]);
4330 * put_disk() is not paired with add_disk() and 4338 blk_cleanup_queue(disks[drive]->queue);
4331 * will put queue reference one extra time. fix it. 4339 disks[drive]->queue = NULL;
4332 */
4333 disks[dr]->queue = NULL;
4334 } 4340 }
4335 put_disk(disks[dr]); 4341 put_disk(disks[drive]);
4336 } 4342 }
4343 destroy_workqueue(floppy_wq);
4337 return err; 4344 return err;
4338} 4345}
4339 4346
@@ -4551,8 +4558,7 @@ static void __exit floppy_module_exit(void)
4551 for (drive = 0; drive < N_DRIVE; drive++) { 4558 for (drive = 0; drive < N_DRIVE; drive++) {
4552 del_timer_sync(&motor_off_timer[drive]); 4559 del_timer_sync(&motor_off_timer[drive]);
4553 4560
4554 if ((allowed_drive_mask & (1 << drive)) && 4561 if (floppy_available(drive)) {
4555 fdc_state[FDC(drive)].version != FDC_NONE) {
4556 del_gendisk(disks[drive]); 4562 del_gendisk(disks[drive]);
4557 device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos); 4563 device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
4558 platform_device_unregister(&floppy_device[drive]); 4564 platform_device_unregister(&floppy_device[drive]);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index e9d594fd12cb..54046e51160a 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -976,8 +976,21 @@ static int loop_clr_fd(struct loop_device *lo)
976 if (lo->lo_state != Lo_bound) 976 if (lo->lo_state != Lo_bound)
977 return -ENXIO; 977 return -ENXIO;
978 978
979 if (lo->lo_refcnt > 1) /* we needed one fd for the ioctl */ 979 /*
980 return -EBUSY; 980 * If we've explicitly asked to tear down the loop device,
981 * and it has an elevated reference count, set it for auto-teardown when
982 * the last reference goes away. This stops $!~#$@ udev from
983 * preventing teardown because it decided that it needs to run blkid on
984 * the loopback device whenever they appear. xfstests is notorious for
985 * failing tests because blkid via udev races with a losetup
986 * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
987 * command to fail with EBUSY.
988 */
989 if (lo->lo_refcnt > 1) {
990 lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
991 mutex_unlock(&lo->lo_ctl_mutex);
992 return 0;
993 }
981 994
982 if (filp == NULL) 995 if (filp == NULL)
983 return -EINVAL; 996 return -EINVAL;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index f946d31d6917..adc6f36564cf 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2035,8 +2035,9 @@ static unsigned int implicit_sector(unsigned char command,
2035 } 2035 }
2036 return rv; 2036 return rv;
2037} 2037}
2038 2038static void mtip_set_timeout(struct driver_data *dd,
2039static void mtip_set_timeout(struct host_to_dev_fis *fis, unsigned int *timeout) 2039 struct host_to_dev_fis *fis,
2040 unsigned int *timeout, u8 erasemode)
2040{ 2041{
2041 switch (fis->command) { 2042 switch (fis->command) {
2042 case ATA_CMD_DOWNLOAD_MICRO: 2043 case ATA_CMD_DOWNLOAD_MICRO:
@@ -2044,7 +2045,10 @@ static void mtip_set_timeout(struct host_to_dev_fis *fis, unsigned int *timeout)
2044 break; 2045 break;
2045 case ATA_CMD_SEC_ERASE_UNIT: 2046 case ATA_CMD_SEC_ERASE_UNIT:
2046 case 0xFC: 2047 case 0xFC:
2047 *timeout = 240000; /* 4 minutes */ 2048 if (erasemode)
2049 *timeout = ((*(dd->port->identify + 90) * 2) * 60000);
2050 else
2051 *timeout = ((*(dd->port->identify + 89) * 2) * 60000);
2048 break; 2052 break;
2049 case ATA_CMD_STANDBYNOW1: 2053 case ATA_CMD_STANDBYNOW1:
2050 *timeout = 120000; /* 2 minutes */ 2054 *timeout = 120000; /* 2 minutes */
@@ -2087,6 +2091,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
2087 unsigned int transfer_size; 2091 unsigned int transfer_size;
2088 unsigned long task_file_data; 2092 unsigned long task_file_data;
2089 int intotal = outtotal + req_task->out_size; 2093 int intotal = outtotal + req_task->out_size;
2094 int erasemode = 0;
2090 2095
2091 taskout = req_task->out_size; 2096 taskout = req_task->out_size;
2092 taskin = req_task->in_size; 2097 taskin = req_task->in_size;
@@ -2212,7 +2217,13 @@ static int exec_drive_taskfile(struct driver_data *dd,
2212 fis.lba_hi, 2217 fis.lba_hi,
2213 fis.device); 2218 fis.device);
2214 2219
2215 mtip_set_timeout(&fis, &timeout); 2220 /* check for erase mode support during secure erase.*/
2221 if ((fis.command == ATA_CMD_SEC_ERASE_UNIT)
2222 && (outbuf[0] & MTIP_SEC_ERASE_MODE)) {
2223 erasemode = 1;
2224 }
2225
2226 mtip_set_timeout(dd, &fis, &timeout, erasemode);
2216 2227
2217 /* Determine the correct transfer size.*/ 2228 /* Determine the correct transfer size.*/
2218 if (force_single_sector) 2229 if (force_single_sector)
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 18627a1d04c5..5f4a917bd8bb 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -33,6 +33,9 @@
33/* offset of Device Control register in PCIe extended capabilites space */ 33/* offset of Device Control register in PCIe extended capabilites space */
34#define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48 34#define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48
35 35
36/* check for erase mode support during secure erase */
37#define MTIP_SEC_ERASE_MODE 0x3
38
36/* # of times to retry timed out/failed IOs */ 39/* # of times to retry timed out/failed IOs */
37#define MTIP_MAX_RETRIES 2 40#define MTIP_MAX_RETRIES 2
38 41
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 87311ebac0db..1bbc681688e4 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -266,11 +266,10 @@ static struct bio *bio_chain_clone(struct bio *old_chain, gfp_t gfpmask)
266 struct bio *tmp, *new_chain = NULL, *tail = NULL; 266 struct bio *tmp, *new_chain = NULL, *tail = NULL;
267 267
268 while (old_chain) { 268 while (old_chain) {
269 tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs); 269 tmp = bio_clone_kmalloc(old_chain, gfpmask);
270 if (!tmp) 270 if (!tmp)
271 goto err_out; 271 goto err_out;
272 272
273 __bio_clone(tmp, old_chain);
274 tmp->bi_bdev = NULL; 273 tmp->bi_bdev = NULL;
275 gfpmask &= ~__GFP_WAIT; 274 gfpmask &= ~__GFP_WAIT;
276 tmp->bi_next = NULL; 275 tmp->bi_next = NULL;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ba66e4445f41..2e7de7a59bfc 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -522,38 +522,6 @@ static void pkt_bio_finished(struct pktcdvd_device *pd)
522 } 522 }
523} 523}
524 524
525static void pkt_bio_destructor(struct bio *bio)
526{
527 kfree(bio->bi_io_vec);
528 kfree(bio);
529}
530
531static struct bio *pkt_bio_alloc(int nr_iovecs)
532{
533 struct bio_vec *bvl = NULL;
534 struct bio *bio;
535
536 bio = kmalloc(sizeof(struct bio), GFP_KERNEL);
537 if (!bio)
538 goto no_bio;
539 bio_init(bio);
540
541 bvl = kcalloc(nr_iovecs, sizeof(struct bio_vec), GFP_KERNEL);
542 if (!bvl)
543 goto no_bvl;
544
545 bio->bi_max_vecs = nr_iovecs;
546 bio->bi_io_vec = bvl;
547 bio->bi_destructor = pkt_bio_destructor;
548
549 return bio;
550
551 no_bvl:
552 kfree(bio);
553 no_bio:
554 return NULL;
555}
556
557/* 525/*
558 * Allocate a packet_data struct 526 * Allocate a packet_data struct
559 */ 527 */
@@ -567,7 +535,7 @@ static struct packet_data *pkt_alloc_packet_data(int frames)
567 goto no_pkt; 535 goto no_pkt;
568 536
569 pkt->frames = frames; 537 pkt->frames = frames;
570 pkt->w_bio = pkt_bio_alloc(frames); 538 pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
571 if (!pkt->w_bio) 539 if (!pkt->w_bio)
572 goto no_bio; 540 goto no_bio;
573 541
@@ -581,9 +549,10 @@ static struct packet_data *pkt_alloc_packet_data(int frames)
581 bio_list_init(&pkt->orig_bios); 549 bio_list_init(&pkt->orig_bios);
582 550
583 for (i = 0; i < frames; i++) { 551 for (i = 0; i < frames; i++) {
584 struct bio *bio = pkt_bio_alloc(1); 552 struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
585 if (!bio) 553 if (!bio)
586 goto no_rd_bio; 554 goto no_rd_bio;
555
587 pkt->r_bios[i] = bio; 556 pkt->r_bios[i] = bio;
588 } 557 }
589 558
@@ -1111,21 +1080,17 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
1111 * Schedule reads for missing parts of the packet. 1080 * Schedule reads for missing parts of the packet.
1112 */ 1081 */
1113 for (f = 0; f < pkt->frames; f++) { 1082 for (f = 0; f < pkt->frames; f++) {
1114 struct bio_vec *vec;
1115
1116 int p, offset; 1083 int p, offset;
1084
1117 if (written[f]) 1085 if (written[f])
1118 continue; 1086 continue;
1087
1119 bio = pkt->r_bios[f]; 1088 bio = pkt->r_bios[f];
1120 vec = bio->bi_io_vec; 1089 bio_reset(bio);
1121 bio_init(bio);
1122 bio->bi_max_vecs = 1;
1123 bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); 1090 bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
1124 bio->bi_bdev = pd->bdev; 1091 bio->bi_bdev = pd->bdev;
1125 bio->bi_end_io = pkt_end_io_read; 1092 bio->bi_end_io = pkt_end_io_read;
1126 bio->bi_private = pkt; 1093 bio->bi_private = pkt;
1127 bio->bi_io_vec = vec;
1128 bio->bi_destructor = pkt_bio_destructor;
1129 1094
1130 p = (f * CD_FRAMESIZE) / PAGE_SIZE; 1095 p = (f * CD_FRAMESIZE) / PAGE_SIZE;
1131 offset = (f * CD_FRAMESIZE) % PAGE_SIZE; 1096 offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
@@ -1418,14 +1383,11 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
1418 } 1383 }
1419 1384
1420 /* Start the write request */ 1385 /* Start the write request */
1421 bio_init(pkt->w_bio); 1386 bio_reset(pkt->w_bio);
1422 pkt->w_bio->bi_max_vecs = PACKET_MAX_SIZE;
1423 pkt->w_bio->bi_sector = pkt->sector; 1387 pkt->w_bio->bi_sector = pkt->sector;
1424 pkt->w_bio->bi_bdev = pd->bdev; 1388 pkt->w_bio->bi_bdev = pd->bdev;
1425 pkt->w_bio->bi_end_io = pkt_end_io_packet_write; 1389 pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
1426 pkt->w_bio->bi_private = pkt; 1390 pkt->w_bio->bi_private = pkt;
1427 pkt->w_bio->bi_io_vec = bvec;
1428 pkt->w_bio->bi_destructor = pkt_bio_destructor;
1429 for (f = 0; f < pkt->frames; f++) 1391 for (f = 0; f < pkt->frames; f++)
1430 if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset)) 1392 if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
1431 BUG(); 1393 BUG();
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 9ad3b5ec1dc1..9a54623e52d7 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -158,8 +158,8 @@ struct xen_vbd {
158 struct block_device *bdev; 158 struct block_device *bdev;
159 /* Cached size parameter. */ 159 /* Cached size parameter. */
160 sector_t size; 160 sector_t size;
161 bool flush_support; 161 unsigned int flush_support:1;
162 bool discard_secure; 162 unsigned int discard_secure:1;
163}; 163};
164 164
165struct backend_info; 165struct backend_info;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 4f66171c6683..f58434c2617c 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -105,11 +105,10 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
105{ 105{
106 struct xen_blkif *blkif; 106 struct xen_blkif *blkif;
107 107
108 blkif = kmem_cache_alloc(xen_blkif_cachep, GFP_KERNEL); 108 blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
109 if (!blkif) 109 if (!blkif)
110 return ERR_PTR(-ENOMEM); 110 return ERR_PTR(-ENOMEM);
111 111
112 memset(blkif, 0, sizeof(*blkif));
113 blkif->domid = domid; 112 blkif->domid = domid;
114 spin_lock_init(&blkif->blk_ring_lock); 113 spin_lock_init(&blkif->blk_ring_lock);
115 atomic_set(&blkif->refcnt, 1); 114 atomic_set(&blkif->refcnt, 1);
@@ -196,7 +195,7 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)
196 } 195 }
197} 196}
198 197
199void xen_blkif_free(struct xen_blkif *blkif) 198static void xen_blkif_free(struct xen_blkif *blkif)
200{ 199{
201 if (!atomic_dec_and_test(&blkif->refcnt)) 200 if (!atomic_dec_and_test(&blkif->refcnt))
202 BUG(); 201 BUG();
@@ -257,7 +256,7 @@ static struct attribute_group xen_vbdstat_group = {
257VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor); 256VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
258VBD_SHOW(mode, "%s\n", be->mode); 257VBD_SHOW(mode, "%s\n", be->mode);
259 258
260int xenvbd_sysfs_addif(struct xenbus_device *dev) 259static int xenvbd_sysfs_addif(struct xenbus_device *dev)
261{ 260{
262 int error; 261 int error;
263 262
@@ -281,7 +280,7 @@ fail1: device_remove_file(&dev->dev, &dev_attr_physical_device);
281 return error; 280 return error;
282} 281}
283 282
284void xenvbd_sysfs_delif(struct xenbus_device *dev) 283static void xenvbd_sysfs_delif(struct xenbus_device *dev)
285{ 284{
286 sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group); 285 sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
287 device_remove_file(&dev->dev, &dev_attr_mode); 286 device_remove_file(&dev->dev, &dev_attr_mode);
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index d0b27a39f1d4..7ff1d0d208a7 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -52,7 +52,6 @@ obj-$(CONFIG_TELCLOCK) += tlclk.o
52obj-$(CONFIG_MWAVE) += mwave/ 52obj-$(CONFIG_MWAVE) += mwave/
53obj-$(CONFIG_AGP) += agp/ 53obj-$(CONFIG_AGP) += agp/
54obj-$(CONFIG_PCMCIA) += pcmcia/ 54obj-$(CONFIG_PCMCIA) += pcmcia/
55obj-$(CONFIG_IPMI_HANDLER) += ipmi/
56 55
57obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o 56obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
58obj-$(CONFIG_TCG_TPM) += tpm/ 57obj-$(CONFIG_TCG_TPM) += tpm/
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index e01f5eaaec82..38390f7c6ab6 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -667,7 +667,7 @@ static int intel_gtt_init(void)
667 gtt_map_size = intel_private.base.gtt_total_entries * 4; 667 gtt_map_size = intel_private.base.gtt_total_entries * 4;
668 668
669 intel_private.gtt = NULL; 669 intel_private.gtt = NULL;
670 if (INTEL_GTT_GEN < 6) 670 if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2)
671 intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr, 671 intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
672 gtt_map_size); 672 gtt_map_size);
673 if (intel_private.gtt == NULL) 673 if (intel_private.gtt == NULL)
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
index aab9605f0b43..24ffd8cec51e 100644
--- a/drivers/char/ds1620.c
+++ b/drivers/char/ds1620.c
@@ -74,21 +74,21 @@ static inline void netwinder_ds1620_reset(void)
74 74
75static inline void netwinder_lock(unsigned long *flags) 75static inline void netwinder_lock(unsigned long *flags)
76{ 76{
77 spin_lock_irqsave(&nw_gpio_lock, *flags); 77 raw_spin_lock_irqsave(&nw_gpio_lock, *flags);
78} 78}
79 79
80static inline void netwinder_unlock(unsigned long *flags) 80static inline void netwinder_unlock(unsigned long *flags)
81{ 81{
82 spin_unlock_irqrestore(&nw_gpio_lock, *flags); 82 raw_spin_unlock_irqrestore(&nw_gpio_lock, *flags);
83} 83}
84 84
85static inline void netwinder_set_fan(int i) 85static inline void netwinder_set_fan(int i)
86{ 86{
87 unsigned long flags; 87 unsigned long flags;
88 88
89 spin_lock_irqsave(&nw_gpio_lock, flags); 89 raw_spin_lock_irqsave(&nw_gpio_lock, flags);
90 nw_gpio_modify_op(GPIO_FAN, i ? GPIO_FAN : 0); 90 nw_gpio_modify_op(GPIO_FAN, i ? GPIO_FAN : 0);
91 spin_unlock_irqrestore(&nw_gpio_lock, flags); 91 raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
92} 92}
93 93
94static inline int netwinder_get_fan(void) 94static inline int netwinder_get_fan(void)
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 2c29942b1326..a0c84bb30856 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -1880,7 +1880,7 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
1880 struct ipmi_recv_msg *supplied_recv, 1880 struct ipmi_recv_msg *supplied_recv,
1881 int priority) 1881 int priority)
1882{ 1882{
1883 unsigned char saddr, lun; 1883 unsigned char saddr = 0, lun = 0;
1884 int rv; 1884 int rv;
1885 1885
1886 if (!user) 1886 if (!user)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 83f85cf7fb1b..32a6c7e256bd 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2424,6 +2424,38 @@ static void ipmi_pci_cleanup(struct smi_info *info)
2424 pci_disable_device(pdev); 2424 pci_disable_device(pdev);
2425} 2425}
2426 2426
2427static int __devinit ipmi_pci_probe_regspacing(struct smi_info *info)
2428{
2429 if (info->si_type == SI_KCS) {
2430 unsigned char status;
2431 int regspacing;
2432
2433 info->io.regsize = DEFAULT_REGSIZE;
2434 info->io.regshift = 0;
2435 info->io_size = 2;
2436 info->handlers = &kcs_smi_handlers;
2437
2438 /* detect 1, 4, 16byte spacing */
2439 for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
2440 info->io.regspacing = regspacing;
2441 if (info->io_setup(info)) {
2442 dev_err(info->dev,
2443 "Could not setup I/O space\n");
2444 return DEFAULT_REGSPACING;
2445 }
2446 /* write invalid cmd */
2447 info->io.outputb(&info->io, 1, 0x10);
2448 /* read status back */
2449 status = info->io.inputb(&info->io, 1);
2450 info->io_cleanup(info);
2451 if (status)
2452 return regspacing;
2453 regspacing *= 4;
2454 }
2455 }
2456 return DEFAULT_REGSPACING;
2457}
2458
2427static int __devinit ipmi_pci_probe(struct pci_dev *pdev, 2459static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2428 const struct pci_device_id *ent) 2460 const struct pci_device_id *ent)
2429{ 2461{
@@ -2476,8 +2508,8 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2476 } 2508 }
2477 info->io.addr_data = pci_resource_start(pdev, 0); 2509 info->io.addr_data = pci_resource_start(pdev, 0);
2478 2510
2479 info->io.regspacing = DEFAULT_REGSPACING; 2511 info->io.regspacing = ipmi_pci_probe_regspacing(info);
2480 info->io.regsize = DEFAULT_REGSPACING; 2512 info->io.regsize = DEFAULT_REGSIZE;
2481 info->io.regshift = 0; 2513 info->io.regshift = 0;
2482 2514
2483 info->irq = pdev->irq; 2515 info->irq = pdev->irq;
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
index a0e2f7d70355..e371480d3639 100644
--- a/drivers/char/nwflash.c
+++ b/drivers/char/nwflash.c
@@ -583,9 +583,9 @@ static void kick_open(void)
583 * we want to write a bit pattern XXX1 to Xilinx to enable 583 * we want to write a bit pattern XXX1 to Xilinx to enable
584 * the write gate, which will be open for about the next 2ms. 584 * the write gate, which will be open for about the next 2ms.
585 */ 585 */
586 spin_lock_irqsave(&nw_gpio_lock, flags); 586 raw_spin_lock_irqsave(&nw_gpio_lock, flags);
587 nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE); 587 nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
588 spin_unlock_irqrestore(&nw_gpio_lock, flags); 588 raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
589 589
590 /* 590 /*
591 * let the ISA bus to catch on... 591 * let the ISA bus to catch on...
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 54a3a6d09819..0bb207eaef2f 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -285,7 +285,7 @@ static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
285 285
286static const struct file_operations raw_fops = { 286static const struct file_operations raw_fops = {
287 .read = do_sync_read, 287 .read = do_sync_read,
288 .aio_read = generic_file_aio_read, 288 .aio_read = blkdev_aio_read,
289 .write = do_sync_write, 289 .write = do_sync_write,
290 .aio_write = blkdev_aio_write, 290 .aio_write = blkdev_aio_write,
291 .fsync = blkdev_fsync, 291 .fsync = blkdev_fsync,
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 320debbe32fa..9b4f0116ff21 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -1456,7 +1456,7 @@ static int __devexit sonypi_remove(struct platform_device *dev)
1456 return 0; 1456 return 0;
1457} 1457}
1458 1458
1459#ifdef CONFIG_PM 1459#ifdef CONFIG_PM_SLEEP
1460static int old_camera_power; 1460static int old_camera_power;
1461 1461
1462static int sonypi_suspend(struct device *dev) 1462static int sonypi_suspend(struct device *dev)
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index f26afdb1a702..93211df52aab 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -1182,17 +1182,20 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
1182 size_t size, loff_t *off) 1182 size_t size, loff_t *off)
1183{ 1183{
1184 struct tpm_chip *chip = file->private_data; 1184 struct tpm_chip *chip = file->private_data;
1185 size_t in_size = size, out_size; 1185 size_t in_size = size;
1186 ssize_t out_size;
1186 1187
1187 /* cannot perform a write until the read has cleared 1188 /* cannot perform a write until the read has cleared
1188 either via tpm_read or a user_read_timer timeout */ 1189 either via tpm_read or a user_read_timer timeout.
1189 while (atomic_read(&chip->data_pending) != 0) 1190 This also prevents splitted buffered writes from blocking here.
1190 msleep(TPM_TIMEOUT); 1191 */
1191 1192 if (atomic_read(&chip->data_pending) != 0)
1192 mutex_lock(&chip->buffer_mutex); 1193 return -EBUSY;
1193 1194
1194 if (in_size > TPM_BUFSIZE) 1195 if (in_size > TPM_BUFSIZE)
1195 in_size = TPM_BUFSIZE; 1196 return -E2BIG;
1197
1198 mutex_lock(&chip->buffer_mutex);
1196 1199
1197 if (copy_from_user 1200 if (copy_from_user
1198 (chip->data_buffer, (void __user *) buf, in_size)) { 1201 (chip->data_buffer, (void __user *) buf, in_size)) {
@@ -1202,6 +1205,10 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
1202 1205
1203 /* atomic tpm command send and result receive */ 1206 /* atomic tpm command send and result receive */
1204 out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE); 1207 out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
1208 if (out_size < 0) {
1209 mutex_unlock(&chip->buffer_mutex);
1210 return out_size;
1211 }
1205 1212
1206 atomic_set(&chip->data_pending, out_size); 1213 atomic_set(&chip->data_pending, out_size);
1207 mutex_unlock(&chip->buffer_mutex); 1214 mutex_unlock(&chip->buffer_mutex);
@@ -1259,6 +1266,7 @@ void tpm_remove_hardware(struct device *dev)
1259 1266
1260 misc_deregister(&chip->vendor.miscdev); 1267 misc_deregister(&chip->vendor.miscdev);
1261 sysfs_remove_group(&dev->kobj, chip->vendor.attr_group); 1268 sysfs_remove_group(&dev->kobj, chip->vendor.attr_group);
1269 tpm_remove_ppi(&dev->kobj);
1262 tpm_bios_log_teardown(chip->bios_dir); 1270 tpm_bios_log_teardown(chip->bios_dir);
1263 1271
1264 /* write it this way to be explicit (chip->dev == dev) */ 1272 /* write it this way to be explicit (chip->dev == dev) */
@@ -1476,7 +1484,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
1476 goto put_device; 1484 goto put_device;
1477 } 1485 }
1478 1486
1479 if (sys_add_ppi(&dev->kobj)) { 1487 if (tpm_add_ppi(&dev->kobj)) {
1480 misc_deregister(&chip->vendor.miscdev); 1488 misc_deregister(&chip->vendor.miscdev);
1481 goto put_device; 1489 goto put_device;
1482 } 1490 }
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 02c266aa2bf7..8ef7649a50aa 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -329,10 +329,15 @@ extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long,
329 wait_queue_head_t *); 329 wait_queue_head_t *);
330 330
331#ifdef CONFIG_ACPI 331#ifdef CONFIG_ACPI
332extern ssize_t sys_add_ppi(struct kobject *parent); 332extern int tpm_add_ppi(struct kobject *);
333extern void tpm_remove_ppi(struct kobject *);
333#else 334#else
334static inline ssize_t sys_add_ppi(struct kobject *parent) 335static inline int tpm_add_ppi(struct kobject *parent)
335{ 336{
336 return 0; 337 return 0;
337} 338}
339
340static inline void tpm_remove_ppi(struct kobject *parent)
341{
342}
338#endif 343#endif
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index f27b58cfae98..720ebcf29fdf 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -444,18 +444,20 @@ static struct attribute *ppi_attrs[] = {
444 &dev_attr_vs_operations.attr, NULL, 444 &dev_attr_vs_operations.attr, NULL,
445}; 445};
446static struct attribute_group ppi_attr_grp = { 446static struct attribute_group ppi_attr_grp = {
447 .name = "ppi",
447 .attrs = ppi_attrs 448 .attrs = ppi_attrs
448}; 449};
449 450
450ssize_t sys_add_ppi(struct kobject *parent) 451int tpm_add_ppi(struct kobject *parent)
451{ 452{
452 struct kobject *ppi; 453 return sysfs_create_group(parent, &ppi_attr_grp);
453 ppi = kobject_create_and_add("ppi", parent); 454}
454 if (sysfs_create_group(ppi, &ppi_attr_grp)) 455EXPORT_SYMBOL_GPL(tpm_add_ppi);
455 return -EFAULT; 456
456 else 457void tpm_remove_ppi(struct kobject *parent)
457 return 0; 458{
459 sysfs_remove_group(parent, &ppi_attr_grp);
458} 460}
459EXPORT_SYMBOL_GPL(sys_add_ppi); 461EXPORT_SYMBOL_GPL(tpm_remove_ppi);
460 462
461MODULE_LICENSE("GPL"); 463MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index b40ee1403be9..399831690fed 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -328,6 +328,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
328 cpufreq_update_policy(cpu); 328 cpufreq_update_policy(cpu);
329 break; 329 break;
330 case CPU_DOWN_PREPARE: 330 case CPU_DOWN_PREPARE:
331 case CPU_DOWN_PREPARE_FROZEN:
331 cpufreq_stats_free_sysfs(cpu); 332 cpufreq_stats_free_sysfs(cpu);
332 break; 333 break;
333 case CPU_DEAD: 334 case CPU_DEAD:
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 65f8e9a54975..1f3417a8322d 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -30,20 +30,12 @@
30#include <asm/smp_plat.h> 30#include <asm/smp_plat.h>
31#include <asm/cpu.h> 31#include <asm/cpu.h>
32 32
33#include <plat/clock.h>
34#include <plat/omap-pm.h>
35#include <plat/common.h>
36#include <plat/omap_device.h>
37
38#include <mach/hardware.h>
39
40/* OPP tolerance in percentage */ 33/* OPP tolerance in percentage */
41#define OPP_TOLERANCE 4 34#define OPP_TOLERANCE 4
42 35
43static struct cpufreq_frequency_table *freq_table; 36static struct cpufreq_frequency_table *freq_table;
44static atomic_t freq_table_users = ATOMIC_INIT(0); 37static atomic_t freq_table_users = ATOMIC_INIT(0);
45static struct clk *mpu_clk; 38static struct clk *mpu_clk;
46static char *mpu_clk_name;
47static struct device *mpu_dev; 39static struct device *mpu_dev;
48static struct regulator *mpu_reg; 40static struct regulator *mpu_reg;
49 41
@@ -108,6 +100,14 @@ static int omap_target(struct cpufreq_policy *policy,
108 } 100 }
109 101
110 freq = freqs.new * 1000; 102 freq = freqs.new * 1000;
103 ret = clk_round_rate(mpu_clk, freq);
104 if (IS_ERR_VALUE(ret)) {
105 dev_warn(mpu_dev,
106 "CPUfreq: Cannot find matching frequency for %lu\n",
107 freq);
108 return ret;
109 }
110 freq = ret;
111 111
112 if (mpu_reg) { 112 if (mpu_reg) {
113 opp = opp_find_freq_ceil(mpu_dev, &freq); 113 opp = opp_find_freq_ceil(mpu_dev, &freq);
@@ -172,7 +172,7 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
172{ 172{
173 int result = 0; 173 int result = 0;
174 174
175 mpu_clk = clk_get(NULL, mpu_clk_name); 175 mpu_clk = clk_get(NULL, "cpufreq_ck");
176 if (IS_ERR(mpu_clk)) 176 if (IS_ERR(mpu_clk))
177 return PTR_ERR(mpu_clk); 177 return PTR_ERR(mpu_clk);
178 178
@@ -253,22 +253,10 @@ static struct cpufreq_driver omap_driver = {
253 253
254static int __init omap_cpufreq_init(void) 254static int __init omap_cpufreq_init(void)
255{ 255{
256 if (cpu_is_omap24xx()) 256 mpu_dev = get_cpu_device(0);
257 mpu_clk_name = "virt_prcm_set"; 257 if (!mpu_dev) {
258 else if (cpu_is_omap34xx())
259 mpu_clk_name = "dpll1_ck";
260 else if (cpu_is_omap44xx())
261 mpu_clk_name = "dpll_mpu_ck";
262
263 if (!mpu_clk_name) {
264 pr_err("%s: unsupported Silicon?\n", __func__);
265 return -EINVAL;
266 }
267
268 mpu_dev = omap_device_get_by_hwmod_name("mpu");
269 if (IS_ERR(mpu_dev)) {
270 pr_warning("%s: unable to get the mpu device\n", __func__); 258 pr_warning("%s: unable to get the mpu device\n", __func__);
271 return PTR_ERR(mpu_dev); 259 return -EINVAL;
272 } 260 }
273 261
274 mpu_reg = regulator_get(mpu_dev, "vcc"); 262 mpu_reg = regulator_get(mpu_dev, "vcc");
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 129e80bfff22..e3ebb4fa2c3e 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -5,7 +5,7 @@
5 * http://www.gnu.org/licenses/gpl.html 5 * http://www.gnu.org/licenses/gpl.html
6 * 6 *
7 * Maintainer: 7 * Maintainer:
8 * Andreas Herrmann <andreas.herrmann3@amd.com> 8 * Andreas Herrmann <herrmann.der.user@googlemail.com>
9 * 9 *
10 * Based on the powernow-k7.c module written by Dave Jones. 10 * Based on the powernow-k7.c module written by Dave Jones.
11 * (C) 2003 Dave Jones on behalf of SuSE Labs 11 * (C) 2003 Dave Jones on behalf of SuSE Labs
@@ -1052,14 +1052,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
1052 struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq, 1052 struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq,
1053 .relation = relation }; 1053 .relation = relation };
1054 1054
1055 /* 1055 return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
1056 * Must run on @pol->cpu. cpufreq core is responsible for ensuring
1057 * that we're bound to the current CPU and pol->cpu stays online.
1058 */
1059 if (smp_processor_id() == pol->cpu)
1060 return powernowk8_target_fn(&pta);
1061 else
1062 return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
1063} 1056}
1064 1057
1065/* Driver entry point to verify the policy and range of frequencies */ 1058/* Driver entry point to verify the policy and range of frequencies */
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index e28f6ea46f1a..7f15b8514a18 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -368,7 +368,7 @@ EXPORT_SYMBOL_GPL(cpuidle_enable_device);
368 */ 368 */
369void cpuidle_disable_device(struct cpuidle_device *dev) 369void cpuidle_disable_device(struct cpuidle_device *dev)
370{ 370{
371 if (!dev->enabled) 371 if (!dev || !dev->enabled)
372 return; 372 return;
373 if (!cpuidle_get_driver() || !cpuidle_curr_governor) 373 if (!cpuidle_get_driver() || !cpuidle_curr_governor)
374 return; 374 return;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d06ea2950dd9..d4c12180c654 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -90,6 +90,17 @@ config DW_DMAC
90 Support the Synopsys DesignWare AHB DMA controller. This 90 Support the Synopsys DesignWare AHB DMA controller. This
91 can be integrated in chips such as the Atmel AT32ap7000. 91 can be integrated in chips such as the Atmel AT32ap7000.
92 92
93config DW_DMAC_BIG_ENDIAN_IO
94 bool "Use big endian I/O register access"
95 default y if AVR32
96 depends on DW_DMAC
97 help
98 Say yes here to use big endian I/O access when reading and writing
99 to the DMA controller registers. This is needed on some platforms,
100 like the Atmel AVR32 architecture.
101
102 If unsure, use the default setting.
103
93config AT_HDMAC 104config AT_HDMAC
94 tristate "Atmel AHB DMA support" 105 tristate "Atmel AHB DMA support"
95 depends on ARCH_AT91 106 depends on ARCH_AT91
@@ -208,6 +219,16 @@ config SIRF_DMA
208 help 219 help
209 Enable support for the CSR SiRFprimaII DMA engine. 220 Enable support for the CSR SiRFprimaII DMA engine.
210 221
222config TI_EDMA
223 tristate "TI EDMA support"
224 depends on ARCH_DAVINCI
225 select DMA_ENGINE
226 select DMA_VIRTUAL_CHANNELS
227 default n
228 help
229 Enable support for the TI EDMA controller. This DMA
230 engine is found on TI DaVinci and AM33xx parts.
231
211config ARCH_HAS_ASYNC_TX_FIND_CHANNEL 232config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
212 bool 233 bool
213 234
@@ -292,6 +313,13 @@ config DMA_OMAP
292 select DMA_ENGINE 313 select DMA_ENGINE
293 select DMA_VIRTUAL_CHANNELS 314 select DMA_VIRTUAL_CHANNELS
294 315
316config MMP_PDMA
317 bool "MMP PDMA support"
318 depends on (ARCH_MMP || ARCH_PXA)
319 select DMA_ENGINE
320 help
321 Support the MMP PDMA engine for PXA and MMP platfrom.
322
295config DMA_ENGINE 323config DMA_ENGINE
296 bool 324 bool
297 325
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 4cf6b128ab9a..7428feaa8705 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_IMX_DMA) += imx-dma.o
23obj-$(CONFIG_MXS_DMA) += mxs-dma.o 23obj-$(CONFIG_MXS_DMA) += mxs-dma.o
24obj-$(CONFIG_TIMB_DMA) += timb_dma.o 24obj-$(CONFIG_TIMB_DMA) += timb_dma.o
25obj-$(CONFIG_SIRF_DMA) += sirf-dma.o 25obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
26obj-$(CONFIG_TI_EDMA) += edma.o
26obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o 27obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
27obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o 28obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
28obj-$(CONFIG_PL330_DMA) += pl330.o 29obj-$(CONFIG_PL330_DMA) += pl330.o
@@ -32,3 +33,4 @@ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
32obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o 33obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
33obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o 34obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
34obj-$(CONFIG_DMA_OMAP) += omap-dma.o 35obj-$(CONFIG_DMA_OMAP) += omap-dma.o
36obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 6fbeebb9486f..d1cc5791476b 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1892,6 +1892,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1892 pl08x->pd = dev_get_platdata(&adev->dev); 1892 pl08x->pd = dev_get_platdata(&adev->dev);
1893 if (!pl08x->pd) { 1893 if (!pl08x->pd) {
1894 dev_err(&adev->dev, "no platform data supplied\n"); 1894 dev_err(&adev->dev, "no platform data supplied\n");
1895 ret = -EINVAL;
1895 goto out_no_platdata; 1896 goto out_no_platdata;
1896 } 1897 }
1897 1898
@@ -1943,6 +1944,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1943 dev_err(&adev->dev, "%s failed to allocate " 1944 dev_err(&adev->dev, "%s failed to allocate "
1944 "physical channel holders\n", 1945 "physical channel holders\n",
1945 __func__); 1946 __func__);
1947 ret = -ENOMEM;
1946 goto out_no_phychans; 1948 goto out_no_phychans;
1947 } 1949 }
1948 1950
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index d3c5a5a88f1e..c4b0eb3cde81 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -36,12 +36,22 @@
36 * which does not support descriptor writeback. 36 * which does not support descriptor writeback.
37 */ 37 */
38 38
39static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
40{
41 return slave ? slave->dst_master : 0;
42}
43
44static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
45{
46 return slave ? slave->src_master : 1;
47}
48
39#define DWC_DEFAULT_CTLLO(_chan) ({ \ 49#define DWC_DEFAULT_CTLLO(_chan) ({ \
40 struct dw_dma_slave *__slave = (_chan->private); \ 50 struct dw_dma_slave *__slave = (_chan->private); \
41 struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ 51 struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \
42 struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ 52 struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
43 int _dms = __slave ? __slave->dst_master : 0; \ 53 int _dms = dwc_get_dms(__slave); \
44 int _sms = __slave ? __slave->src_master : 1; \ 54 int _sms = dwc_get_sms(__slave); \
45 u8 _smsize = __slave ? _sconfig->src_maxburst : \ 55 u8 _smsize = __slave ? _sconfig->src_maxburst : \
46 DW_DMA_MSIZE_16; \ 56 DW_DMA_MSIZE_16; \
47 u8 _dmsize = __slave ? _sconfig->dst_maxburst : \ 57 u8 _dmsize = __slave ? _sconfig->dst_maxburst : \
@@ -56,16 +66,6 @@
56 }) 66 })
57 67
58/* 68/*
59 * This is configuration-dependent and usually a funny size like 4095.
60 *
61 * Note that this is a transfer count, i.e. if we transfer 32-bit
62 * words, we can do 16380 bytes per descriptor.
63 *
64 * This parameter is also system-specific.
65 */
66#define DWC_MAX_COUNT 4095U
67
68/*
69 * Number of descriptors to allocate for each channel. This should be 69 * Number of descriptors to allocate for each channel. This should be
70 * made configurable somehow; preferably, the clients (at least the 70 * made configurable somehow; preferably, the clients (at least the
71 * ones using slave transfers) should be able to give us a hint. 71 * ones using slave transfers) should be able to give us a hint.
@@ -177,6 +177,11 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
177 177
178 cfghi = dws->cfg_hi; 178 cfghi = dws->cfg_hi;
179 cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; 179 cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
180 } else {
181 if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
182 cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
183 else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
184 cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
180 } 185 }
181 186
182 channel_writel(dwc, CFG_LO, cfglo); 187 channel_writel(dwc, CFG_LO, cfglo);
@@ -206,7 +211,7 @@ static inline unsigned int dwc_fast_fls(unsigned long long v)
206 return 0; 211 return 0;
207} 212}
208 213
209static void dwc_dump_chan_regs(struct dw_dma_chan *dwc) 214static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
210{ 215{
211 dev_err(chan2dev(&dwc->chan), 216 dev_err(chan2dev(&dwc->chan),
212 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", 217 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
@@ -227,10 +232,29 @@ static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
227 232
228/*----------------------------------------------------------------------*/ 233/*----------------------------------------------------------------------*/
229 234
235/* Perform single block transfer */
236static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
237 struct dw_desc *desc)
238{
239 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
240 u32 ctllo;
241
242 /* Software emulation of LLP mode relies on interrupts to continue
243 * multi block transfer. */
244 ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
245
246 channel_writel(dwc, SAR, desc->lli.sar);
247 channel_writel(dwc, DAR, desc->lli.dar);
248 channel_writel(dwc, CTL_LO, ctllo);
249 channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
250 channel_set_bit(dw, CH_EN, dwc->mask);
251}
252
230/* Called with dwc->lock held and bh disabled */ 253/* Called with dwc->lock held and bh disabled */
231static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) 254static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
232{ 255{
233 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 256 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
257 unsigned long was_soft_llp;
234 258
235 /* ASSERT: channel is idle */ 259 /* ASSERT: channel is idle */
236 if (dma_readl(dw, CH_EN) & dwc->mask) { 260 if (dma_readl(dw, CH_EN) & dwc->mask) {
@@ -242,6 +266,26 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
242 return; 266 return;
243 } 267 }
244 268
269 if (dwc->nollp) {
270 was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
271 &dwc->flags);
272 if (was_soft_llp) {
273 dev_err(chan2dev(&dwc->chan),
274 "BUG: Attempted to start new LLP transfer "
275 "inside ongoing one\n");
276 return;
277 }
278
279 dwc_initialize(dwc);
280
281 dwc->tx_list = &first->tx_list;
282 dwc->tx_node_active = first->tx_list.next;
283
284 dwc_do_single_block(dwc, first);
285
286 return;
287 }
288
245 dwc_initialize(dwc); 289 dwc_initialize(dwc);
246 290
247 channel_writel(dwc, LLP, first->txd.phys); 291 channel_writel(dwc, LLP, first->txd.phys);
@@ -553,8 +597,36 @@ static void dw_dma_tasklet(unsigned long data)
553 dwc_handle_cyclic(dw, dwc, status_err, status_xfer); 597 dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
554 else if (status_err & (1 << i)) 598 else if (status_err & (1 << i))
555 dwc_handle_error(dw, dwc); 599 dwc_handle_error(dw, dwc);
556 else if (status_xfer & (1 << i)) 600 else if (status_xfer & (1 << i)) {
601 unsigned long flags;
602
603 spin_lock_irqsave(&dwc->lock, flags);
604 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
605 if (dwc->tx_node_active != dwc->tx_list) {
606 struct dw_desc *desc =
607 list_entry(dwc->tx_node_active,
608 struct dw_desc,
609 desc_node);
610
611 dma_writel(dw, CLEAR.XFER, dwc->mask);
612
613 /* move pointer to next descriptor */
614 dwc->tx_node_active =
615 dwc->tx_node_active->next;
616
617 dwc_do_single_block(dwc, desc);
618
619 spin_unlock_irqrestore(&dwc->lock, flags);
620 continue;
621 } else {
622 /* we are done here */
623 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
624 }
625 }
626 spin_unlock_irqrestore(&dwc->lock, flags);
627
557 dwc_scan_descriptors(dw, dwc); 628 dwc_scan_descriptors(dw, dwc);
629 }
558 } 630 }
559 631
560 /* 632 /*
@@ -636,6 +708,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
636 size_t len, unsigned long flags) 708 size_t len, unsigned long flags)
637{ 709{
638 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 710 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
711 struct dw_dma_slave *dws = chan->private;
639 struct dw_desc *desc; 712 struct dw_desc *desc;
640 struct dw_desc *first; 713 struct dw_desc *first;
641 struct dw_desc *prev; 714 struct dw_desc *prev;
@@ -643,6 +716,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
643 size_t offset; 716 size_t offset;
644 unsigned int src_width; 717 unsigned int src_width;
645 unsigned int dst_width; 718 unsigned int dst_width;
719 unsigned int data_width;
646 u32 ctllo; 720 u32 ctllo;
647 721
648 dev_vdbg(chan2dev(chan), 722 dev_vdbg(chan2dev(chan),
@@ -655,7 +729,11 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
655 return NULL; 729 return NULL;
656 } 730 }
657 731
658 src_width = dst_width = dwc_fast_fls(src | dest | len); 732 data_width = min_t(unsigned int, dwc->dw->data_width[dwc_get_sms(dws)],
733 dwc->dw->data_width[dwc_get_dms(dws)]);
734
735 src_width = dst_width = min_t(unsigned int, data_width,
736 dwc_fast_fls(src | dest | len));
659 737
660 ctllo = DWC_DEFAULT_CTLLO(chan) 738 ctllo = DWC_DEFAULT_CTLLO(chan)
661 | DWC_CTLL_DST_WIDTH(dst_width) 739 | DWC_CTLL_DST_WIDTH(dst_width)
@@ -667,7 +745,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
667 745
668 for (offset = 0; offset < len; offset += xfer_count << src_width) { 746 for (offset = 0; offset < len; offset += xfer_count << src_width) {
669 xfer_count = min_t(size_t, (len - offset) >> src_width, 747 xfer_count = min_t(size_t, (len - offset) >> src_width,
670 DWC_MAX_COUNT); 748 dwc->block_size);
671 749
672 desc = dwc_desc_get(dwc); 750 desc = dwc_desc_get(dwc);
673 if (!desc) 751 if (!desc)
@@ -725,6 +803,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
725 dma_addr_t reg; 803 dma_addr_t reg;
726 unsigned int reg_width; 804 unsigned int reg_width;
727 unsigned int mem_width; 805 unsigned int mem_width;
806 unsigned int data_width;
728 unsigned int i; 807 unsigned int i;
729 struct scatterlist *sg; 808 struct scatterlist *sg;
730 size_t total_len = 0; 809 size_t total_len = 0;
@@ -748,6 +827,8 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
748 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : 827 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
749 DWC_CTLL_FC(DW_DMA_FC_D_M2P); 828 DWC_CTLL_FC(DW_DMA_FC_D_M2P);
750 829
830 data_width = dwc->dw->data_width[dwc_get_sms(dws)];
831
751 for_each_sg(sgl, sg, sg_len, i) { 832 for_each_sg(sgl, sg, sg_len, i) {
752 struct dw_desc *desc; 833 struct dw_desc *desc;
753 u32 len, dlen, mem; 834 u32 len, dlen, mem;
@@ -755,7 +836,8 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
755 mem = sg_dma_address(sg); 836 mem = sg_dma_address(sg);
756 len = sg_dma_len(sg); 837 len = sg_dma_len(sg);
757 838
758 mem_width = dwc_fast_fls(mem | len); 839 mem_width = min_t(unsigned int,
840 data_width, dwc_fast_fls(mem | len));
759 841
760slave_sg_todev_fill_desc: 842slave_sg_todev_fill_desc:
761 desc = dwc_desc_get(dwc); 843 desc = dwc_desc_get(dwc);
@@ -768,8 +850,8 @@ slave_sg_todev_fill_desc:
768 desc->lli.sar = mem; 850 desc->lli.sar = mem;
769 desc->lli.dar = reg; 851 desc->lli.dar = reg;
770 desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); 852 desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
771 if ((len >> mem_width) > DWC_MAX_COUNT) { 853 if ((len >> mem_width) > dwc->block_size) {
772 dlen = DWC_MAX_COUNT << mem_width; 854 dlen = dwc->block_size << mem_width;
773 mem += dlen; 855 mem += dlen;
774 len -= dlen; 856 len -= dlen;
775 } else { 857 } else {
@@ -808,6 +890,8 @@ slave_sg_todev_fill_desc:
808 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : 890 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
809 DWC_CTLL_FC(DW_DMA_FC_D_P2M); 891 DWC_CTLL_FC(DW_DMA_FC_D_P2M);
810 892
893 data_width = dwc->dw->data_width[dwc_get_dms(dws)];
894
811 for_each_sg(sgl, sg, sg_len, i) { 895 for_each_sg(sgl, sg, sg_len, i) {
812 struct dw_desc *desc; 896 struct dw_desc *desc;
813 u32 len, dlen, mem; 897 u32 len, dlen, mem;
@@ -815,7 +899,8 @@ slave_sg_todev_fill_desc:
815 mem = sg_dma_address(sg); 899 mem = sg_dma_address(sg);
816 len = sg_dma_len(sg); 900 len = sg_dma_len(sg);
817 901
818 mem_width = dwc_fast_fls(mem | len); 902 mem_width = min_t(unsigned int,
903 data_width, dwc_fast_fls(mem | len));
819 904
820slave_sg_fromdev_fill_desc: 905slave_sg_fromdev_fill_desc:
821 desc = dwc_desc_get(dwc); 906 desc = dwc_desc_get(dwc);
@@ -828,8 +913,8 @@ slave_sg_fromdev_fill_desc:
828 desc->lli.sar = reg; 913 desc->lli.sar = reg;
829 desc->lli.dar = mem; 914 desc->lli.dar = mem;
830 desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); 915 desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
831 if ((len >> reg_width) > DWC_MAX_COUNT) { 916 if ((len >> reg_width) > dwc->block_size) {
832 dlen = DWC_MAX_COUNT << reg_width; 917 dlen = dwc->block_size << reg_width;
833 mem += dlen; 918 mem += dlen;
834 len -= dlen; 919 len -= dlen;
835 } else { 920 } else {
@@ -945,6 +1030,8 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
945 } else if (cmd == DMA_TERMINATE_ALL) { 1030 } else if (cmd == DMA_TERMINATE_ALL) {
946 spin_lock_irqsave(&dwc->lock, flags); 1031 spin_lock_irqsave(&dwc->lock, flags);
947 1032
1033 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
1034
948 dwc_chan_disable(dw, dwc); 1035 dwc_chan_disable(dw, dwc);
949 1036
950 dwc->paused = false; 1037 dwc->paused = false;
@@ -1187,6 +1274,13 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1187 unsigned long flags; 1274 unsigned long flags;
1188 1275
1189 spin_lock_irqsave(&dwc->lock, flags); 1276 spin_lock_irqsave(&dwc->lock, flags);
1277 if (dwc->nollp) {
1278 spin_unlock_irqrestore(&dwc->lock, flags);
1279 dev_dbg(chan2dev(&dwc->chan),
1280 "channel doesn't support LLP transfers\n");
1281 return ERR_PTR(-EINVAL);
1282 }
1283
1190 if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { 1284 if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
1191 spin_unlock_irqrestore(&dwc->lock, flags); 1285 spin_unlock_irqrestore(&dwc->lock, flags);
1192 dev_dbg(chan2dev(&dwc->chan), 1286 dev_dbg(chan2dev(&dwc->chan),
@@ -1212,7 +1306,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1212 periods = buf_len / period_len; 1306 periods = buf_len / period_len;
1213 1307
1214 /* Check for too big/unaligned periods and unaligned DMA buffer. */ 1308 /* Check for too big/unaligned periods and unaligned DMA buffer. */
1215 if (period_len > (DWC_MAX_COUNT << reg_width)) 1309 if (period_len > (dwc->block_size << reg_width))
1216 goto out_err; 1310 goto out_err;
1217 if (unlikely(period_len & ((1 << reg_width) - 1))) 1311 if (unlikely(period_len & ((1 << reg_width) - 1)))
1218 goto out_err; 1312 goto out_err;
@@ -1374,6 +1468,11 @@ static int __devinit dw_probe(struct platform_device *pdev)
1374 struct resource *io; 1468 struct resource *io;
1375 struct dw_dma *dw; 1469 struct dw_dma *dw;
1376 size_t size; 1470 size_t size;
1471 void __iomem *regs;
1472 bool autocfg;
1473 unsigned int dw_params;
1474 unsigned int nr_channels;
1475 unsigned int max_blk_size = 0;
1377 int irq; 1476 int irq;
1378 int err; 1477 int err;
1379 int i; 1478 int i;
@@ -1390,32 +1489,46 @@ static int __devinit dw_probe(struct platform_device *pdev)
1390 if (irq < 0) 1489 if (irq < 0)
1391 return irq; 1490 return irq;
1392 1491
1393 size = sizeof(struct dw_dma); 1492 regs = devm_request_and_ioremap(&pdev->dev, io);
1394 size += pdata->nr_channels * sizeof(struct dw_dma_chan); 1493 if (!regs)
1395 dw = kzalloc(size, GFP_KERNEL); 1494 return -EBUSY;
1495
1496 dw_params = dma_read_byaddr(regs, DW_PARAMS);
1497 autocfg = dw_params >> DW_PARAMS_EN & 0x1;
1498
1499 if (autocfg)
1500 nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
1501 else
1502 nr_channels = pdata->nr_channels;
1503
1504 size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
1505 dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
1396 if (!dw) 1506 if (!dw)
1397 return -ENOMEM; 1507 return -ENOMEM;
1398 1508
1399 if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) { 1509 dw->clk = devm_clk_get(&pdev->dev, "hclk");
1400 err = -EBUSY; 1510 if (IS_ERR(dw->clk))
1401 goto err_kfree; 1511 return PTR_ERR(dw->clk);
1402 } 1512 clk_prepare_enable(dw->clk);
1403 1513
1404 dw->regs = ioremap(io->start, DW_REGLEN); 1514 dw->regs = regs;
1405 if (!dw->regs) { 1515
1406 err = -ENOMEM; 1516 /* get hardware configuration parameters */
1407 goto err_release_r; 1517 if (autocfg) {
1408 } 1518 max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
1409 1519
1410 dw->clk = clk_get(&pdev->dev, "hclk"); 1520 dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
1411 if (IS_ERR(dw->clk)) { 1521 for (i = 0; i < dw->nr_masters; i++) {
1412 err = PTR_ERR(dw->clk); 1522 dw->data_width[i] =
1413 goto err_clk; 1523 (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
1524 }
1525 } else {
1526 dw->nr_masters = pdata->nr_masters;
1527 memcpy(dw->data_width, pdata->data_width, 4);
1414 } 1528 }
1415 clk_prepare_enable(dw->clk);
1416 1529
1417 /* Calculate all channel mask before DMA setup */ 1530 /* Calculate all channel mask before DMA setup */
1418 dw->all_chan_mask = (1 << pdata->nr_channels) - 1; 1531 dw->all_chan_mask = (1 << nr_channels) - 1;
1419 1532
1420 /* force dma off, just in case */ 1533 /* force dma off, just in case */
1421 dw_dma_off(dw); 1534 dw_dma_off(dw);
@@ -1423,17 +1536,19 @@ static int __devinit dw_probe(struct platform_device *pdev)
1423 /* disable BLOCK interrupts as well */ 1536 /* disable BLOCK interrupts as well */
1424 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); 1537 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1425 1538
1426 err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw); 1539 err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
1540 "dw_dmac", dw);
1427 if (err) 1541 if (err)
1428 goto err_irq; 1542 return err;
1429 1543
1430 platform_set_drvdata(pdev, dw); 1544 platform_set_drvdata(pdev, dw);
1431 1545
1432 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); 1546 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
1433 1547
1434 INIT_LIST_HEAD(&dw->dma.channels); 1548 INIT_LIST_HEAD(&dw->dma.channels);
1435 for (i = 0; i < pdata->nr_channels; i++) { 1549 for (i = 0; i < nr_channels; i++) {
1436 struct dw_dma_chan *dwc = &dw->chan[i]; 1550 struct dw_dma_chan *dwc = &dw->chan[i];
1551 int r = nr_channels - i - 1;
1437 1552
1438 dwc->chan.device = &dw->dma; 1553 dwc->chan.device = &dw->dma;
1439 dma_cookie_init(&dwc->chan); 1554 dma_cookie_init(&dwc->chan);
@@ -1445,7 +1560,7 @@ static int __devinit dw_probe(struct platform_device *pdev)
1445 1560
1446 /* 7 is highest priority & 0 is lowest. */ 1561 /* 7 is highest priority & 0 is lowest. */
1447 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) 1562 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
1448 dwc->priority = pdata->nr_channels - i - 1; 1563 dwc->priority = r;
1449 else 1564 else
1450 dwc->priority = i; 1565 dwc->priority = i;
1451 1566
@@ -1458,6 +1573,32 @@ static int __devinit dw_probe(struct platform_device *pdev)
1458 INIT_LIST_HEAD(&dwc->free_list); 1573 INIT_LIST_HEAD(&dwc->free_list);
1459 1574
1460 channel_clear_bit(dw, CH_EN, dwc->mask); 1575 channel_clear_bit(dw, CH_EN, dwc->mask);
1576
1577 dwc->dw = dw;
1578
1579 /* hardware configuration */
1580 if (autocfg) {
1581 unsigned int dwc_params;
1582
1583 dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
1584 DWC_PARAMS);
1585
1586 /* Decode maximum block size for given channel. The
1587 * stored 4 bit value represents blocks from 0x00 for 3
1588 * up to 0x0a for 4095. */
1589 dwc->block_size =
1590 (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
1591 dwc->nollp =
1592 (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
1593 } else {
1594 dwc->block_size = pdata->block_size;
1595
1596 /* Check if channel supports multi block transfer */
1597 channel_writel(dwc, LLP, 0xfffffffc);
1598 dwc->nollp =
1599 (channel_readl(dwc, LLP) & 0xfffffffc) == 0;
1600 channel_writel(dwc, LLP, 0);
1601 }
1461 } 1602 }
1462 1603
1463 /* Clear all interrupts on all channels. */ 1604 /* Clear all interrupts on all channels. */
@@ -1486,35 +1627,21 @@ static int __devinit dw_probe(struct platform_device *pdev)
1486 dma_writel(dw, CFG, DW_CFG_DMA_EN); 1627 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1487 1628
1488 printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", 1629 printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
1489 dev_name(&pdev->dev), pdata->nr_channels); 1630 dev_name(&pdev->dev), nr_channels);
1490 1631
1491 dma_async_device_register(&dw->dma); 1632 dma_async_device_register(&dw->dma);
1492 1633
1493 return 0; 1634 return 0;
1494
1495err_irq:
1496 clk_disable_unprepare(dw->clk);
1497 clk_put(dw->clk);
1498err_clk:
1499 iounmap(dw->regs);
1500 dw->regs = NULL;
1501err_release_r:
1502 release_resource(io);
1503err_kfree:
1504 kfree(dw);
1505 return err;
1506} 1635}
1507 1636
1508static int __devexit dw_remove(struct platform_device *pdev) 1637static int __devexit dw_remove(struct platform_device *pdev)
1509{ 1638{
1510 struct dw_dma *dw = platform_get_drvdata(pdev); 1639 struct dw_dma *dw = platform_get_drvdata(pdev);
1511 struct dw_dma_chan *dwc, *_dwc; 1640 struct dw_dma_chan *dwc, *_dwc;
1512 struct resource *io;
1513 1641
1514 dw_dma_off(dw); 1642 dw_dma_off(dw);
1515 dma_async_device_unregister(&dw->dma); 1643 dma_async_device_unregister(&dw->dma);
1516 1644
1517 free_irq(platform_get_irq(pdev, 0), dw);
1518 tasklet_kill(&dw->tasklet); 1645 tasklet_kill(&dw->tasklet);
1519 1646
1520 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, 1647 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
@@ -1523,17 +1650,6 @@ static int __devexit dw_remove(struct platform_device *pdev)
1523 channel_clear_bit(dw, CH_EN, dwc->mask); 1650 channel_clear_bit(dw, CH_EN, dwc->mask);
1524 } 1651 }
1525 1652
1526 clk_disable_unprepare(dw->clk);
1527 clk_put(dw->clk);
1528
1529 iounmap(dw->regs);
1530 dw->regs = NULL;
1531
1532 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1533 release_mem_region(io->start, DW_REGLEN);
1534
1535 kfree(dw);
1536
1537 return 0; 1653 return 0;
1538} 1654}
1539 1655
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 50830bee087a..88965597b7d0 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -82,9 +82,47 @@ struct dw_dma_regs {
82 DW_REG(ID); 82 DW_REG(ID);
83 DW_REG(TEST); 83 DW_REG(TEST);
84 84
85 /* reserved */
86 DW_REG(__reserved0);
87 DW_REG(__reserved1);
88
85 /* optional encoded params, 0x3c8..0x3f7 */ 89 /* optional encoded params, 0x3c8..0x3f7 */
90 u32 __reserved;
91
92 /* per-channel configuration registers */
93 u32 DWC_PARAMS[DW_DMA_MAX_NR_CHANNELS];
94 u32 MULTI_BLK_TYPE;
95 u32 MAX_BLK_SIZE;
96
97 /* top-level parameters */
98 u32 DW_PARAMS;
86}; 99};
87 100
101#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
102#define dma_readl_native ioread32be
103#define dma_writel_native iowrite32be
104#else
105#define dma_readl_native readl
106#define dma_writel_native writel
107#endif
108
109/* To access the registers in early stage of probe */
110#define dma_read_byaddr(addr, name) \
111 dma_readl_native((addr) + offsetof(struct dw_dma_regs, name))
112
113/* Bitfields in DW_PARAMS */
114#define DW_PARAMS_NR_CHAN 8 /* number of channels */
115#define DW_PARAMS_NR_MASTER 11 /* number of AHB masters */
116#define DW_PARAMS_DATA_WIDTH(n) (15 + 2 * (n))
117#define DW_PARAMS_DATA_WIDTH1 15 /* master 1 data width */
118#define DW_PARAMS_DATA_WIDTH2 17 /* master 2 data width */
119#define DW_PARAMS_DATA_WIDTH3 19 /* master 3 data width */
120#define DW_PARAMS_DATA_WIDTH4 21 /* master 4 data width */
121#define DW_PARAMS_EN 28 /* encoded parameters */
122
123/* Bitfields in DWC_PARAMS */
124#define DWC_PARAMS_MBLK_EN 11 /* multi block transfer */
125
88/* Bitfields in CTL_LO */ 126/* Bitfields in CTL_LO */
89#define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */ 127#define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */
90#define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */ 128#define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */
@@ -140,10 +178,9 @@ struct dw_dma_regs {
140/* Bitfields in CFG */ 178/* Bitfields in CFG */
141#define DW_CFG_DMA_EN (1 << 0) 179#define DW_CFG_DMA_EN (1 << 0)
142 180
143#define DW_REGLEN 0x400
144
145enum dw_dmac_flags { 181enum dw_dmac_flags {
146 DW_DMA_IS_CYCLIC = 0, 182 DW_DMA_IS_CYCLIC = 0,
183 DW_DMA_IS_SOFT_LLP = 1,
147}; 184};
148 185
149struct dw_dma_chan { 186struct dw_dma_chan {
@@ -154,6 +191,10 @@ struct dw_dma_chan {
154 bool paused; 191 bool paused;
155 bool initialized; 192 bool initialized;
156 193
194 /* software emulation of the LLP transfers */
195 struct list_head *tx_list;
196 struct list_head *tx_node_active;
197
157 spinlock_t lock; 198 spinlock_t lock;
158 199
159 /* these other elements are all protected by lock */ 200 /* these other elements are all protected by lock */
@@ -165,8 +206,15 @@ struct dw_dma_chan {
165 206
166 unsigned int descs_allocated; 207 unsigned int descs_allocated;
167 208
209 /* hardware configuration */
210 unsigned int block_size;
211 bool nollp;
212
168 /* configuration passed via DMA_SLAVE_CONFIG */ 213 /* configuration passed via DMA_SLAVE_CONFIG */
169 struct dma_slave_config dma_sconfig; 214 struct dma_slave_config dma_sconfig;
215
216 /* backlink to dw_dma */
217 struct dw_dma *dw;
170}; 218};
171 219
172static inline struct dw_dma_chan_regs __iomem * 220static inline struct dw_dma_chan_regs __iomem *
@@ -176,9 +224,9 @@ __dwc_regs(struct dw_dma_chan *dwc)
176} 224}
177 225
178#define channel_readl(dwc, name) \ 226#define channel_readl(dwc, name) \
179 readl(&(__dwc_regs(dwc)->name)) 227 dma_readl_native(&(__dwc_regs(dwc)->name))
180#define channel_writel(dwc, name, val) \ 228#define channel_writel(dwc, name, val) \
181 writel((val), &(__dwc_regs(dwc)->name)) 229 dma_writel_native((val), &(__dwc_regs(dwc)->name))
182 230
183static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) 231static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
184{ 232{
@@ -193,6 +241,10 @@ struct dw_dma {
193 241
194 u8 all_chan_mask; 242 u8 all_chan_mask;
195 243
244 /* hardware configuration */
245 unsigned char nr_masters;
246 unsigned char data_width[4];
247
196 struct dw_dma_chan chan[0]; 248 struct dw_dma_chan chan[0];
197}; 249};
198 250
@@ -202,9 +254,9 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
202} 254}
203 255
204#define dma_readl(dw, name) \ 256#define dma_readl(dw, name) \
205 readl(&(__dw_regs(dw)->name)) 257 dma_readl_native(&(__dw_regs(dw)->name))
206#define dma_writel(dw, name, val) \ 258#define dma_writel(dw, name, val) \
207 writel((val), &(__dw_regs(dw)->name)) 259 dma_writel_native((val), &(__dw_regs(dw)->name))
208 260
209#define channel_set_bit(dw, reg, mask) \ 261#define channel_set_bit(dw, reg, mask) \
210 dma_writel(dw, reg, ((mask) << 8) | (mask)) 262 dma_writel(dw, reg, ((mask) << 8) | (mask))
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
new file mode 100644
index 000000000000..05aea3ce8506
--- /dev/null
+++ b/drivers/dma/edma.c
@@ -0,0 +1,671 @@
1/*
2 * TI EDMA DMA engine driver
3 *
4 * Copyright 2012 Texas Instruments
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation version 2.
9 *
10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11 * kind, whether express or implied; without even the implied warranty
12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/dmaengine.h>
17#include <linux/dma-mapping.h>
18#include <linux/err.h>
19#include <linux/init.h>
20#include <linux/interrupt.h>
21#include <linux/list.h>
22#include <linux/module.h>
23#include <linux/platform_device.h>
24#include <linux/slab.h>
25#include <linux/spinlock.h>
26
27#include <mach/edma.h>
28
29#include "dmaengine.h"
30#include "virt-dma.h"
31
32/*
33 * This will go away when the private EDMA API is folded
34 * into this driver and the platform device(s) are
35 * instantiated in the arch code. We can only get away
36 * with this simplification because DA8XX may not be built
37 * in the same kernel image with other DaVinci parts. This
38 * avoids having to sprinkle dmaengine driver platform devices
39 * and data throughout all the existing board files.
40 */
41#ifdef CONFIG_ARCH_DAVINCI_DA8XX
42#define EDMA_CTLRS 2
43#define EDMA_CHANS 32
44#else
45#define EDMA_CTLRS 1
46#define EDMA_CHANS 64
47#endif /* CONFIG_ARCH_DAVINCI_DA8XX */
48
49/* Max of 16 segments per channel to conserve PaRAM slots */
50#define MAX_NR_SG 16
51#define EDMA_MAX_SLOTS MAX_NR_SG
52#define EDMA_DESCRIPTORS 16
53
54struct edma_desc {
55 struct virt_dma_desc vdesc;
56 struct list_head node;
57 int absync;
58 int pset_nr;
59 struct edmacc_param pset[0];
60};
61
62struct edma_cc;
63
64struct edma_chan {
65 struct virt_dma_chan vchan;
66 struct list_head node;
67 struct edma_desc *edesc;
68 struct edma_cc *ecc;
69 int ch_num;
70 bool alloced;
71 int slot[EDMA_MAX_SLOTS];
72 dma_addr_t addr;
73 int addr_width;
74 int maxburst;
75};
76
77struct edma_cc {
78 int ctlr;
79 struct dma_device dma_slave;
80 struct edma_chan slave_chans[EDMA_CHANS];
81 int num_slave_chans;
82 int dummy_slot;
83};
84
85static inline struct edma_cc *to_edma_cc(struct dma_device *d)
86{
87 return container_of(d, struct edma_cc, dma_slave);
88}
89
90static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
91{
92 return container_of(c, struct edma_chan, vchan.chan);
93}
94
95static inline struct edma_desc
96*to_edma_desc(struct dma_async_tx_descriptor *tx)
97{
98 return container_of(tx, struct edma_desc, vdesc.tx);
99}
100
101static void edma_desc_free(struct virt_dma_desc *vdesc)
102{
103 kfree(container_of(vdesc, struct edma_desc, vdesc));
104}
105
106/* Dispatch a queued descriptor to the controller (caller holds lock) */
107static void edma_execute(struct edma_chan *echan)
108{
109 struct virt_dma_desc *vdesc = vchan_next_desc(&echan->vchan);
110 struct edma_desc *edesc;
111 int i;
112
113 if (!vdesc) {
114 echan->edesc = NULL;
115 return;
116 }
117
118 list_del(&vdesc->node);
119
120 echan->edesc = edesc = to_edma_desc(&vdesc->tx);
121
122 /* Write descriptor PaRAM set(s) */
123 for (i = 0; i < edesc->pset_nr; i++) {
124 edma_write_slot(echan->slot[i], &edesc->pset[i]);
125 dev_dbg(echan->vchan.chan.device->dev,
126 "\n pset[%d]:\n"
127 " chnum\t%d\n"
128 " slot\t%d\n"
129 " opt\t%08x\n"
130 " src\t%08x\n"
131 " dst\t%08x\n"
132 " abcnt\t%08x\n"
133 " ccnt\t%08x\n"
134 " bidx\t%08x\n"
135 " cidx\t%08x\n"
136 " lkrld\t%08x\n",
137 i, echan->ch_num, echan->slot[i],
138 edesc->pset[i].opt,
139 edesc->pset[i].src,
140 edesc->pset[i].dst,
141 edesc->pset[i].a_b_cnt,
142 edesc->pset[i].ccnt,
143 edesc->pset[i].src_dst_bidx,
144 edesc->pset[i].src_dst_cidx,
145 edesc->pset[i].link_bcntrld);
146 /* Link to the previous slot if not the last set */
147 if (i != (edesc->pset_nr - 1))
148 edma_link(echan->slot[i], echan->slot[i+1]);
149 /* Final pset links to the dummy pset */
150 else
151 edma_link(echan->slot[i], echan->ecc->dummy_slot);
152 }
153
154 edma_start(echan->ch_num);
155}
156
157static int edma_terminate_all(struct edma_chan *echan)
158{
159 unsigned long flags;
160 LIST_HEAD(head);
161
162 spin_lock_irqsave(&echan->vchan.lock, flags);
163
164 /*
165 * Stop DMA activity: we assume the callback will not be called
166 * after edma_dma() returns (even if it does, it will see
167 * echan->edesc is NULL and exit.)
168 */
169 if (echan->edesc) {
170 echan->edesc = NULL;
171 edma_stop(echan->ch_num);
172 }
173
174 vchan_get_all_descriptors(&echan->vchan, &head);
175 spin_unlock_irqrestore(&echan->vchan.lock, flags);
176 vchan_dma_desc_free_list(&echan->vchan, &head);
177
178 return 0;
179}
180
181
182static int edma_slave_config(struct edma_chan *echan,
183 struct dma_slave_config *config)
184{
185 if ((config->src_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES) ||
186 (config->dst_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES))
187 return -EINVAL;
188
189 if (config->direction == DMA_MEM_TO_DEV) {
190 if (config->dst_addr)
191 echan->addr = config->dst_addr;
192 if (config->dst_addr_width)
193 echan->addr_width = config->dst_addr_width;
194 if (config->dst_maxburst)
195 echan->maxburst = config->dst_maxburst;
196 } else if (config->direction == DMA_DEV_TO_MEM) {
197 if (config->src_addr)
198 echan->addr = config->src_addr;
199 if (config->src_addr_width)
200 echan->addr_width = config->src_addr_width;
201 if (config->src_maxburst)
202 echan->maxburst = config->src_maxburst;
203 }
204
205 return 0;
206}
207
208static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
209 unsigned long arg)
210{
211 int ret = 0;
212 struct dma_slave_config *config;
213 struct edma_chan *echan = to_edma_chan(chan);
214
215 switch (cmd) {
216 case DMA_TERMINATE_ALL:
217 edma_terminate_all(echan);
218 break;
219 case DMA_SLAVE_CONFIG:
220 config = (struct dma_slave_config *)arg;
221 ret = edma_slave_config(echan, config);
222 break;
223 default:
224 ret = -ENOSYS;
225 }
226
227 return ret;
228}
229
230static struct dma_async_tx_descriptor *edma_prep_slave_sg(
231 struct dma_chan *chan, struct scatterlist *sgl,
232 unsigned int sg_len, enum dma_transfer_direction direction,
233 unsigned long tx_flags, void *context)
234{
235 struct edma_chan *echan = to_edma_chan(chan);
236 struct device *dev = chan->device->dev;
237 struct edma_desc *edesc;
238 struct scatterlist *sg;
239 int i;
240 int acnt, bcnt, ccnt, src, dst, cidx;
241 int src_bidx, dst_bidx, src_cidx, dst_cidx;
242
243 if (unlikely(!echan || !sgl || !sg_len))
244 return NULL;
245
246 if (echan->addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
247 dev_err(dev, "Undefined slave buswidth\n");
248 return NULL;
249 }
250
251 if (sg_len > MAX_NR_SG) {
252 dev_err(dev, "Exceeded max SG segments %d > %d\n",
253 sg_len, MAX_NR_SG);
254 return NULL;
255 }
256
257 edesc = kzalloc(sizeof(*edesc) + sg_len *
258 sizeof(edesc->pset[0]), GFP_ATOMIC);
259 if (!edesc) {
260 dev_dbg(dev, "Failed to allocate a descriptor\n");
261 return NULL;
262 }
263
264 edesc->pset_nr = sg_len;
265
266 for_each_sg(sgl, sg, sg_len, i) {
267 /* Allocate a PaRAM slot, if needed */
268 if (echan->slot[i] < 0) {
269 echan->slot[i] =
270 edma_alloc_slot(EDMA_CTLR(echan->ch_num),
271 EDMA_SLOT_ANY);
272 if (echan->slot[i] < 0) {
273 dev_err(dev, "Failed to allocate slot\n");
274 return NULL;
275 }
276 }
277
278 acnt = echan->addr_width;
279
280 /*
281 * If the maxburst is equal to the fifo width, use
282 * A-synced transfers. This allows for large contiguous
283 * buffer transfers using only one PaRAM set.
284 */
285 if (echan->maxburst == 1) {
286 edesc->absync = false;
287 ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
288 bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
289 if (bcnt)
290 ccnt++;
291 else
292 bcnt = SZ_64K - 1;
293 cidx = acnt;
294 /*
295 * If maxburst is greater than the fifo address_width,
296 * use AB-synced transfers where A count is the fifo
297 * address_width and B count is the maxburst. In this
298 * case, we are limited to transfers of C count frames
299 * of (address_width * maxburst) where C count is limited
300 * to SZ_64K-1. This places an upper bound on the length
301 * of an SG segment that can be handled.
302 */
303 } else {
304 edesc->absync = true;
305 bcnt = echan->maxburst;
306 ccnt = sg_dma_len(sg) / (acnt * bcnt);
307 if (ccnt > (SZ_64K - 1)) {
308 dev_err(dev, "Exceeded max SG segment size\n");
309 return NULL;
310 }
311 cidx = acnt * bcnt;
312 }
313
314 if (direction == DMA_MEM_TO_DEV) {
315 src = sg_dma_address(sg);
316 dst = echan->addr;
317 src_bidx = acnt;
318 src_cidx = cidx;
319 dst_bidx = 0;
320 dst_cidx = 0;
321 } else {
322 src = echan->addr;
323 dst = sg_dma_address(sg);
324 src_bidx = 0;
325 src_cidx = 0;
326 dst_bidx = acnt;
327 dst_cidx = cidx;
328 }
329
330 edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
331 /* Configure A or AB synchronized transfers */
332 if (edesc->absync)
333 edesc->pset[i].opt |= SYNCDIM;
334 /* If this is the last set, enable completion interrupt flag */
335 if (i == sg_len - 1)
336 edesc->pset[i].opt |= TCINTEN;
337
338 edesc->pset[i].src = src;
339 edesc->pset[i].dst = dst;
340
341 edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx;
342 edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx;
343
344 edesc->pset[i].a_b_cnt = bcnt << 16 | acnt;
345 edesc->pset[i].ccnt = ccnt;
346 edesc->pset[i].link_bcntrld = 0xffffffff;
347
348 }
349
350 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
351}
352
353static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
354{
355 struct edma_chan *echan = data;
356 struct device *dev = echan->vchan.chan.device->dev;
357 struct edma_desc *edesc;
358 unsigned long flags;
359
360 /* Stop the channel */
361 edma_stop(echan->ch_num);
362
363 switch (ch_status) {
364 case DMA_COMPLETE:
365 dev_dbg(dev, "transfer complete on channel %d\n", ch_num);
366
367 spin_lock_irqsave(&echan->vchan.lock, flags);
368
369 edesc = echan->edesc;
370 if (edesc) {
371 edma_execute(echan);
372 vchan_cookie_complete(&edesc->vdesc);
373 }
374
375 spin_unlock_irqrestore(&echan->vchan.lock, flags);
376
377 break;
378 case DMA_CC_ERROR:
379 dev_dbg(dev, "transfer error on channel %d\n", ch_num);
380 break;
381 default:
382 break;
383 }
384}
385
386/* Alloc channel resources */
387static int edma_alloc_chan_resources(struct dma_chan *chan)
388{
389 struct edma_chan *echan = to_edma_chan(chan);
390 struct device *dev = chan->device->dev;
391 int ret;
392 int a_ch_num;
393 LIST_HEAD(descs);
394
395 a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
396 chan, EVENTQ_DEFAULT);
397
398 if (a_ch_num < 0) {
399 ret = -ENODEV;
400 goto err_no_chan;
401 }
402
403 if (a_ch_num != echan->ch_num) {
404 dev_err(dev, "failed to allocate requested channel %u:%u\n",
405 EDMA_CTLR(echan->ch_num),
406 EDMA_CHAN_SLOT(echan->ch_num));
407 ret = -ENODEV;
408 goto err_wrong_chan;
409 }
410
411 echan->alloced = true;
412 echan->slot[0] = echan->ch_num;
413
414 dev_info(dev, "allocated channel for %u:%u\n",
415 EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
416
417 return 0;
418
419err_wrong_chan:
420 edma_free_channel(a_ch_num);
421err_no_chan:
422 return ret;
423}
424
425/* Free channel resources */
426static void edma_free_chan_resources(struct dma_chan *chan)
427{
428 struct edma_chan *echan = to_edma_chan(chan);
429 struct device *dev = chan->device->dev;
430 int i;
431
432 /* Terminate transfers */
433 edma_stop(echan->ch_num);
434
435 vchan_free_chan_resources(&echan->vchan);
436
437 /* Free EDMA PaRAM slots */
438 for (i = 1; i < EDMA_MAX_SLOTS; i++) {
439 if (echan->slot[i] >= 0) {
440 edma_free_slot(echan->slot[i]);
441 echan->slot[i] = -1;
442 }
443 }
444
445 /* Free EDMA channel */
446 if (echan->alloced) {
447 edma_free_channel(echan->ch_num);
448 echan->alloced = false;
449 }
450
451 dev_info(dev, "freeing channel for %u\n", echan->ch_num);
452}
453
454/* Send pending descriptor to hardware */
455static void edma_issue_pending(struct dma_chan *chan)
456{
457 struct edma_chan *echan = to_edma_chan(chan);
458 unsigned long flags;
459
460 spin_lock_irqsave(&echan->vchan.lock, flags);
461 if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
462 edma_execute(echan);
463 spin_unlock_irqrestore(&echan->vchan.lock, flags);
464}
465
466static size_t edma_desc_size(struct edma_desc *edesc)
467{
468 int i;
469 size_t size;
470
471 if (edesc->absync)
472 for (size = i = 0; i < edesc->pset_nr; i++)
473 size += (edesc->pset[i].a_b_cnt & 0xffff) *
474 (edesc->pset[i].a_b_cnt >> 16) *
475 edesc->pset[i].ccnt;
476 else
477 size = (edesc->pset[0].a_b_cnt & 0xffff) *
478 (edesc->pset[0].a_b_cnt >> 16) +
479 (edesc->pset[0].a_b_cnt & 0xffff) *
480 (SZ_64K - 1) * edesc->pset[0].ccnt;
481
482 return size;
483}
484
485/* Check request completion status */
486static enum dma_status edma_tx_status(struct dma_chan *chan,
487 dma_cookie_t cookie,
488 struct dma_tx_state *txstate)
489{
490 struct edma_chan *echan = to_edma_chan(chan);
491 struct virt_dma_desc *vdesc;
492 enum dma_status ret;
493 unsigned long flags;
494
495 ret = dma_cookie_status(chan, cookie, txstate);
496 if (ret == DMA_SUCCESS || !txstate)
497 return ret;
498
499 spin_lock_irqsave(&echan->vchan.lock, flags);
500 vdesc = vchan_find_desc(&echan->vchan, cookie);
501 if (vdesc) {
502 txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx));
503 } else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
504 struct edma_desc *edesc = echan->edesc;
505 txstate->residue = edma_desc_size(edesc);
506 } else {
507 txstate->residue = 0;
508 }
509 spin_unlock_irqrestore(&echan->vchan.lock, flags);
510
511 return ret;
512}
513
514static void __init edma_chan_init(struct edma_cc *ecc,
515 struct dma_device *dma,
516 struct edma_chan *echans)
517{
518 int i, j;
519
520 for (i = 0; i < EDMA_CHANS; i++) {
521 struct edma_chan *echan = &echans[i];
522 echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
523 echan->ecc = ecc;
524 echan->vchan.desc_free = edma_desc_free;
525
526 vchan_init(&echan->vchan, dma);
527
528 INIT_LIST_HEAD(&echan->node);
529 for (j = 0; j < EDMA_MAX_SLOTS; j++)
530 echan->slot[j] = -1;
531 }
532}
533
534static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
535 struct device *dev)
536{
537 dma->device_prep_slave_sg = edma_prep_slave_sg;
538 dma->device_alloc_chan_resources = edma_alloc_chan_resources;
539 dma->device_free_chan_resources = edma_free_chan_resources;
540 dma->device_issue_pending = edma_issue_pending;
541 dma->device_tx_status = edma_tx_status;
542 dma->device_control = edma_control;
543 dma->dev = dev;
544
545 INIT_LIST_HEAD(&dma->channels);
546}
547
548static int __devinit edma_probe(struct platform_device *pdev)
549{
550 struct edma_cc *ecc;
551 int ret;
552
553 ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
554 if (!ecc) {
555 dev_err(&pdev->dev, "Can't allocate controller\n");
556 return -ENOMEM;
557 }
558
559 ecc->ctlr = pdev->id;
560 ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
561 if (ecc->dummy_slot < 0) {
562 dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
563 return -EIO;
564 }
565
566 dma_cap_zero(ecc->dma_slave.cap_mask);
567 dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
568
569 edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);
570
571 edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);
572
573 ret = dma_async_device_register(&ecc->dma_slave);
574 if (ret)
575 goto err_reg1;
576
577 platform_set_drvdata(pdev, ecc);
578
579 dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");
580
581 return 0;
582
583err_reg1:
584 edma_free_slot(ecc->dummy_slot);
585 return ret;
586}
587
588static int __devexit edma_remove(struct platform_device *pdev)
589{
590 struct device *dev = &pdev->dev;
591 struct edma_cc *ecc = dev_get_drvdata(dev);
592
593 dma_async_device_unregister(&ecc->dma_slave);
594 edma_free_slot(ecc->dummy_slot);
595
596 return 0;
597}
598
599static struct platform_driver edma_driver = {
600 .probe = edma_probe,
601 .remove = __devexit_p(edma_remove),
602 .driver = {
603 .name = "edma-dma-engine",
604 .owner = THIS_MODULE,
605 },
606};
607
608bool edma_filter_fn(struct dma_chan *chan, void *param)
609{
610 if (chan->device->dev->driver == &edma_driver.driver) {
611 struct edma_chan *echan = to_edma_chan(chan);
612 unsigned ch_req = *(unsigned *)param;
613 return ch_req == echan->ch_num;
614 }
615 return false;
616}
617EXPORT_SYMBOL(edma_filter_fn);
618
619static struct platform_device *pdev0, *pdev1;
620
621static const struct platform_device_info edma_dev_info0 = {
622 .name = "edma-dma-engine",
623 .id = 0,
624 .dma_mask = DMA_BIT_MASK(32),
625};
626
627static const struct platform_device_info edma_dev_info1 = {
628 .name = "edma-dma-engine",
629 .id = 1,
630 .dma_mask = DMA_BIT_MASK(32),
631};
632
633static int edma_init(void)
634{
635 int ret = platform_driver_register(&edma_driver);
636
637 if (ret == 0) {
638 pdev0 = platform_device_register_full(&edma_dev_info0);
639 if (IS_ERR(pdev0)) {
640 platform_driver_unregister(&edma_driver);
641 ret = PTR_ERR(pdev0);
642 goto out;
643 }
644 }
645
646 if (EDMA_CTLRS == 2) {
647 pdev1 = platform_device_register_full(&edma_dev_info1);
648 if (IS_ERR(pdev1)) {
649 platform_driver_unregister(&edma_driver);
650 platform_device_unregister(pdev0);
651 ret = PTR_ERR(pdev1);
652 }
653 }
654
655out:
656 return ret;
657}
658subsys_initcall(edma_init);
659
660static void __exit edma_exit(void)
661{
662 platform_device_unregister(pdev0);
663 if (pdev1)
664 platform_device_unregister(pdev1);
665 platform_driver_unregister(&edma_driver);
666}
667module_exit(edma_exit);
668
669MODULE_AUTHOR("Matt Porter <mporter@ti.com>");
670MODULE_DESCRIPTION("TI EDMA DMA engine driver");
671MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index f11b5b2b1a1c..7d9554cc4976 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -474,8 +474,10 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
474 slot = i; 474 slot = i;
475 break; 475 break;
476 } 476 }
477 if (slot < 0) 477 if (slot < 0) {
478 spin_unlock_irqrestore(&imxdma->lock, flags);
478 return -EBUSY; 479 return -EBUSY;
480 }
479 481
480 imxdma->slots_2d[slot].xsr = d->x; 482 imxdma->slots_2d[slot].xsr = d->x;
481 imxdma->slots_2d[slot].ysr = d->y; 483 imxdma->slots_2d[slot].ysr = d->y;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 86895760b598..b9d667851445 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -434,12 +434,11 @@ static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t f
434 return NULL; 434 return NULL;
435 memset(hw, 0, sizeof(*hw)); 435 memset(hw, 0, sizeof(*hw));
436 436
437 desc = kmem_cache_alloc(ioat2_cache, flags); 437 desc = kmem_cache_zalloc(ioat2_cache, flags);
438 if (!desc) { 438 if (!desc) {
439 pci_pool_free(dma->dma_pool, hw, phys); 439 pci_pool_free(dma->dma_pool, hw, phys);
440 return NULL; 440 return NULL;
441 } 441 }
442 memset(desc, 0, sizeof(*desc));
443 442
444 dma_async_tx_descriptor_init(&desc->txd, chan); 443 dma_async_tx_descriptor_init(&desc->txd, chan);
445 desc->txd.tx_submit = ioat2_tx_submit_unlock; 444 desc->txd.tx_submit = ioat2_tx_submit_unlock;
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 5e3a40f79945..c0573061b45d 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -40,6 +40,17 @@ MODULE_VERSION(IOAT_DMA_VERSION);
40MODULE_LICENSE("Dual BSD/GPL"); 40MODULE_LICENSE("Dual BSD/GPL");
41MODULE_AUTHOR("Intel Corporation"); 41MODULE_AUTHOR("Intel Corporation");
42 42
43#define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20
44#define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21
45#define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22
46#define PCI_DEVICE_ID_INTEL_IOAT_IVB3 0x0e23
47#define PCI_DEVICE_ID_INTEL_IOAT_IVB4 0x0e24
48#define PCI_DEVICE_ID_INTEL_IOAT_IVB5 0x0e25
49#define PCI_DEVICE_ID_INTEL_IOAT_IVB6 0x0e26
50#define PCI_DEVICE_ID_INTEL_IOAT_IVB7 0x0e27
51#define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e
52#define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f
53
43static struct pci_device_id ioat_pci_tbl[] = { 54static struct pci_device_id ioat_pci_tbl[] = {
44 /* I/OAT v1 platforms */ 55 /* I/OAT v1 platforms */
45 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) }, 56 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
@@ -83,6 +94,17 @@ static struct pci_device_id ioat_pci_tbl[] = {
83 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) }, 94 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
84 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) }, 95 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },
85 96
97 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
98 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
99 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
100 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
101 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
102 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
103 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
104 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
105 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
106 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },
107
86 { 0, } 108 { 0, }
87}; 109};
88MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); 110MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
new file mode 100644
index 000000000000..14da1f403edf
--- /dev/null
+++ b/drivers/dma/mmp_pdma.c
@@ -0,0 +1,875 @@
1/*
2 * Copyright 2012 Marvell International Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#include <linux/module.h>
9#include <linux/init.h>
10#include <linux/types.h>
11#include <linux/interrupt.h>
12#include <linux/dma-mapping.h>
13#include <linux/slab.h>
14#include <linux/dmaengine.h>
15#include <linux/platform_device.h>
16#include <linux/device.h>
17#include <linux/platform_data/mmp_dma.h>
18#include <linux/dmapool.h>
19#include <linux/of_device.h>
20#include <linux/of.h>
21
22#include "dmaengine.h"
23
24#define DCSR 0x0000
25#define DALGN 0x00a0
26#define DINT 0x00f0
27#define DDADR 0x0200
28#define DSADR 0x0204
29#define DTADR 0x0208
30#define DCMD 0x020c
31
32#define DCSR_RUN (1 << 31) /* Run Bit (read / write) */
33#define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch (read / write) */
34#define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable (read / write) */
35#define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */
36#define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */
37#define DCSR_ENDINTR (1 << 2) /* End Interrupt (read / write) */
38#define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */
39#define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */
40
41#define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable (R/W) */
42#define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */
43#define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */
44#define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */
45#define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */
46#define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */
47#define DCSR_EORINTR (1 << 9) /* The end of Receive */
48
49#define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */
50#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */
51
52#define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */
53#define DDADR_STOP (1 << 0) /* Stop (read / write) */
54
55#define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */
56#define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */
57#define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */
58#define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */
59#define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */
60#define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */
61#define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */
62#define DCMD_BURST8 (1 << 16) /* 8 byte burst */
63#define DCMD_BURST16 (2 << 16) /* 16 byte burst */
64#define DCMD_BURST32 (3 << 16) /* 32 byte burst */
65#define DCMD_WIDTH1 (1 << 14) /* 1 byte width */
66#define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */
67#define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */
68#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
69
70#define PDMA_ALIGNMENT 3
71#define PDMA_MAX_DESC_BYTES 0x1000
72
73struct mmp_pdma_desc_hw {
74 u32 ddadr; /* Points to the next descriptor + flags */
75 u32 dsadr; /* DSADR value for the current transfer */
76 u32 dtadr; /* DTADR value for the current transfer */
77 u32 dcmd; /* DCMD value for the current transfer */
78} __aligned(32);
79
80struct mmp_pdma_desc_sw {
81 struct mmp_pdma_desc_hw desc;
82 struct list_head node;
83 struct list_head tx_list;
84 struct dma_async_tx_descriptor async_tx;
85};
86
87struct mmp_pdma_phy;
88
89struct mmp_pdma_chan {
90 struct device *dev;
91 struct dma_chan chan;
92 struct dma_async_tx_descriptor desc;
93 struct mmp_pdma_phy *phy;
94 enum dma_transfer_direction dir;
95
96 /* channel's basic info */
97 struct tasklet_struct tasklet;
98 u32 dcmd;
99 u32 drcmr;
100 u32 dev_addr;
101
102 /* list for desc */
103 spinlock_t desc_lock; /* Descriptor list lock */
104 struct list_head chain_pending; /* Link descriptors queue for pending */
105 struct list_head chain_running; /* Link descriptors queue for running */
106 bool idle; /* channel statue machine */
107
108 struct dma_pool *desc_pool; /* Descriptors pool */
109};
110
111struct mmp_pdma_phy {
112 int idx;
113 void __iomem *base;
114 struct mmp_pdma_chan *vchan;
115};
116
117struct mmp_pdma_device {
118 int dma_channels;
119 void __iomem *base;
120 struct device *dev;
121 struct dma_device device;
122 struct mmp_pdma_phy *phy;
123};
124
125#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
126#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
127#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
128#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)
129
130static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
131{
132 u32 reg = (phy->idx << 4) + DDADR;
133
134 writel(addr, phy->base + reg);
135}
136
137static void enable_chan(struct mmp_pdma_phy *phy)
138{
139 u32 reg;
140
141 if (!phy->vchan)
142 return;
143
144 reg = phy->vchan->drcmr;
145 reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2);
146 writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
147
148 reg = (phy->idx << 2) + DCSR;
149 writel(readl(phy->base + reg) | DCSR_RUN,
150 phy->base + reg);
151}
152
153static void disable_chan(struct mmp_pdma_phy *phy)
154{
155 u32 reg;
156
157 if (phy) {
158 reg = (phy->idx << 2) + DCSR;
159 writel(readl(phy->base + reg) & ~DCSR_RUN,
160 phy->base + reg);
161 }
162}
163
164static int clear_chan_irq(struct mmp_pdma_phy *phy)
165{
166 u32 dcsr;
167 u32 dint = readl(phy->base + DINT);
168 u32 reg = (phy->idx << 2) + DCSR;
169
170 if (dint & BIT(phy->idx)) {
171 /* clear irq */
172 dcsr = readl(phy->base + reg);
173 writel(dcsr, phy->base + reg);
174 if ((dcsr & DCSR_BUSERR) && (phy->vchan))
175 dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
176 return 0;
177 }
178 return -EAGAIN;
179}
180
181static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
182{
183 struct mmp_pdma_phy *phy = dev_id;
184
185 if (clear_chan_irq(phy) == 0) {
186 tasklet_schedule(&phy->vchan->tasklet);
187 return IRQ_HANDLED;
188 } else
189 return IRQ_NONE;
190}
191
192static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
193{
194 struct mmp_pdma_device *pdev = dev_id;
195 struct mmp_pdma_phy *phy;
196 u32 dint = readl(pdev->base + DINT);
197 int i, ret;
198 int irq_num = 0;
199
200 while (dint) {
201 i = __ffs(dint);
202 dint &= (dint - 1);
203 phy = &pdev->phy[i];
204 ret = mmp_pdma_chan_handler(irq, phy);
205 if (ret == IRQ_HANDLED)
206 irq_num++;
207 }
208
209 if (irq_num)
210 return IRQ_HANDLED;
211 else
212 return IRQ_NONE;
213}
214
215/* lookup free phy channel as descending priority */
216static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
217{
218 int prio, i;
219 struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
220 struct mmp_pdma_phy *phy;
221
222 /*
223 * dma channel priorities
224 * ch 0 - 3, 16 - 19 <--> (0)
225 * ch 4 - 7, 20 - 23 <--> (1)
226 * ch 8 - 11, 24 - 27 <--> (2)
227 * ch 12 - 15, 28 - 31 <--> (3)
228 */
229 for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
230 for (i = 0; i < pdev->dma_channels; i++) {
231 if (prio != ((i & 0xf) >> 2))
232 continue;
233 phy = &pdev->phy[i];
234 if (!phy->vchan) {
235 phy->vchan = pchan;
236 return phy;
237 }
238 }
239 }
240
241 return NULL;
242}
243
244/* desc->tx_list ==> pending list */
245static void append_pending_queue(struct mmp_pdma_chan *chan,
246 struct mmp_pdma_desc_sw *desc)
247{
248 struct mmp_pdma_desc_sw *tail =
249 to_mmp_pdma_desc(chan->chain_pending.prev);
250
251 if (list_empty(&chan->chain_pending))
252 goto out_splice;
253
254 /* one irq per queue, even appended */
255 tail->desc.ddadr = desc->async_tx.phys;
256 tail->desc.dcmd &= ~DCMD_ENDIRQEN;
257
258 /* softly link to pending list */
259out_splice:
260 list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
261}
262
263/**
264 * start_pending_queue - transfer any pending transactions
265 * pending list ==> running list
266 */
267static void start_pending_queue(struct mmp_pdma_chan *chan)
268{
269 struct mmp_pdma_desc_sw *desc;
270
271 /* still in running, irq will start the pending list */
272 if (!chan->idle) {
273 dev_dbg(chan->dev, "DMA controller still busy\n");
274 return;
275 }
276
277 if (list_empty(&chan->chain_pending)) {
278 /* chance to re-fetch phy channel with higher prio */
279 if (chan->phy) {
280 chan->phy->vchan = NULL;
281 chan->phy = NULL;
282 }
283 dev_dbg(chan->dev, "no pending list\n");
284 return;
285 }
286
287 if (!chan->phy) {
288 chan->phy = lookup_phy(chan);
289 if (!chan->phy) {
290 dev_dbg(chan->dev, "no free dma channel\n");
291 return;
292 }
293 }
294
295 /*
296 * pending -> running
297 * reintilize pending list
298 */
299 desc = list_first_entry(&chan->chain_pending,
300 struct mmp_pdma_desc_sw, node);
301 list_splice_tail_init(&chan->chain_pending, &chan->chain_running);
302
303 /*
304 * Program the descriptor's address into the DMA controller,
305 * then start the DMA transaction
306 */
307 set_desc(chan->phy, desc->async_tx.phys);
308 enable_chan(chan->phy);
309 chan->idle = false;
310}
311
312
313/* desc->tx_list ==> pending list */
314static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
315{
316 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
317 struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
318 struct mmp_pdma_desc_sw *child;
319 unsigned long flags;
320 dma_cookie_t cookie = -EBUSY;
321
322 spin_lock_irqsave(&chan->desc_lock, flags);
323
324 list_for_each_entry(child, &desc->tx_list, node) {
325 cookie = dma_cookie_assign(&child->async_tx);
326 }
327
328 append_pending_queue(chan, desc);
329
330 spin_unlock_irqrestore(&chan->desc_lock, flags);
331
332 return cookie;
333}
334
335struct mmp_pdma_desc_sw *mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
336{
337 struct mmp_pdma_desc_sw *desc;
338 dma_addr_t pdesc;
339
340 desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
341 if (!desc) {
342 dev_err(chan->dev, "out of memory for link descriptor\n");
343 return NULL;
344 }
345
346 memset(desc, 0, sizeof(*desc));
347 INIT_LIST_HEAD(&desc->tx_list);
348 dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
349 /* each desc has submit */
350 desc->async_tx.tx_submit = mmp_pdma_tx_submit;
351 desc->async_tx.phys = pdesc;
352
353 return desc;
354}
355
356/**
357 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
358 *
359 * This function will create a dma pool for descriptor allocation.
360 * Request irq only when channel is requested
361 * Return - The number of allocated descriptors.
362 */
363
364static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
365{
366 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
367
368 if (chan->desc_pool)
369 return 1;
370
371 chan->desc_pool =
372 dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
373 sizeof(struct mmp_pdma_desc_sw),
374 __alignof__(struct mmp_pdma_desc_sw), 0);
375 if (!chan->desc_pool) {
376 dev_err(chan->dev, "unable to allocate descriptor pool\n");
377 return -ENOMEM;
378 }
379 if (chan->phy) {
380 chan->phy->vchan = NULL;
381 chan->phy = NULL;
382 }
383 chan->idle = true;
384 chan->dev_addr = 0;
385 return 1;
386}
387
388static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
389 struct list_head *list)
390{
391 struct mmp_pdma_desc_sw *desc, *_desc;
392
393 list_for_each_entry_safe(desc, _desc, list, node) {
394 list_del(&desc->node);
395 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
396 }
397}
398
399static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
400{
401 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
402 unsigned long flags;
403
404 spin_lock_irqsave(&chan->desc_lock, flags);
405 mmp_pdma_free_desc_list(chan, &chan->chain_pending);
406 mmp_pdma_free_desc_list(chan, &chan->chain_running);
407 spin_unlock_irqrestore(&chan->desc_lock, flags);
408
409 dma_pool_destroy(chan->desc_pool);
410 chan->desc_pool = NULL;
411 chan->idle = true;
412 chan->dev_addr = 0;
413 if (chan->phy) {
414 chan->phy->vchan = NULL;
415 chan->phy = NULL;
416 }
417 return;
418}
419
420static struct dma_async_tx_descriptor *
421mmp_pdma_prep_memcpy(struct dma_chan *dchan,
422 dma_addr_t dma_dst, dma_addr_t dma_src,
423 size_t len, unsigned long flags)
424{
425 struct mmp_pdma_chan *chan;
426 struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
427 size_t copy = 0;
428
429 if (!dchan)
430 return NULL;
431
432 if (!len)
433 return NULL;
434
435 chan = to_mmp_pdma_chan(dchan);
436
437 if (!chan->dir) {
438 chan->dir = DMA_MEM_TO_MEM;
439 chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
440 chan->dcmd |= DCMD_BURST32;
441 }
442
443 do {
444 /* Allocate the link descriptor from DMA pool */
445 new = mmp_pdma_alloc_descriptor(chan);
446 if (!new) {
447 dev_err(chan->dev, "no memory for desc\n");
448 goto fail;
449 }
450
451 copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
452
453 new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
454 new->desc.dsadr = dma_src;
455 new->desc.dtadr = dma_dst;
456
457 if (!first)
458 first = new;
459 else
460 prev->desc.ddadr = new->async_tx.phys;
461
462 new->async_tx.cookie = 0;
463 async_tx_ack(&new->async_tx);
464
465 prev = new;
466 len -= copy;
467
468 if (chan->dir == DMA_MEM_TO_DEV) {
469 dma_src += copy;
470 } else if (chan->dir == DMA_DEV_TO_MEM) {
471 dma_dst += copy;
472 } else if (chan->dir == DMA_MEM_TO_MEM) {
473 dma_src += copy;
474 dma_dst += copy;
475 }
476
477 /* Insert the link descriptor to the LD ring */
478 list_add_tail(&new->node, &first->tx_list);
479 } while (len);
480
481 first->async_tx.flags = flags; /* client is in control of this ack */
482 first->async_tx.cookie = -EBUSY;
483
484 /* last desc and fire IRQ */
485 new->desc.ddadr = DDADR_STOP;
486 new->desc.dcmd |= DCMD_ENDIRQEN;
487
488 return &first->async_tx;
489
490fail:
491 if (first)
492 mmp_pdma_free_desc_list(chan, &first->tx_list);
493 return NULL;
494}
495
496static struct dma_async_tx_descriptor *
497mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
498 unsigned int sg_len, enum dma_transfer_direction dir,
499 unsigned long flags, void *context)
500{
501 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
502 struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
503 size_t len, avail;
504 struct scatterlist *sg;
505 dma_addr_t addr;
506 int i;
507
508 if ((sgl == NULL) || (sg_len == 0))
509 return NULL;
510
511 for_each_sg(sgl, sg, sg_len, i) {
512 addr = sg_dma_address(sg);
513 avail = sg_dma_len(sgl);
514
515 do {
516 len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
517
518 /* allocate and populate the descriptor */
519 new = mmp_pdma_alloc_descriptor(chan);
520 if (!new) {
521 dev_err(chan->dev, "no memory for desc\n");
522 goto fail;
523 }
524
525 new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
526 if (dir == DMA_MEM_TO_DEV) {
527 new->desc.dsadr = addr;
528 new->desc.dtadr = chan->dev_addr;
529 } else {
530 new->desc.dsadr = chan->dev_addr;
531 new->desc.dtadr = addr;
532 }
533
534 if (!first)
535 first = new;
536 else
537 prev->desc.ddadr = new->async_tx.phys;
538
539 new->async_tx.cookie = 0;
540 async_tx_ack(&new->async_tx);
541 prev = new;
542
543 /* Insert the link descriptor to the LD ring */
544 list_add_tail(&new->node, &first->tx_list);
545
546 /* update metadata */
547 addr += len;
548 avail -= len;
549 } while (avail);
550 }
551
552 first->async_tx.cookie = -EBUSY;
553 first->async_tx.flags = flags;
554
555 /* last desc and fire IRQ */
556 new->desc.ddadr = DDADR_STOP;
557 new->desc.dcmd |= DCMD_ENDIRQEN;
558
559 return &first->async_tx;
560
561fail:
562 if (first)
563 mmp_pdma_free_desc_list(chan, &first->tx_list);
564 return NULL;
565}
566
567static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
568 unsigned long arg)
569{
570 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
571 struct dma_slave_config *cfg = (void *)arg;
572 unsigned long flags;
573 int ret = 0;
574 u32 maxburst = 0, addr = 0;
575 enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
576
577 if (!dchan)
578 return -EINVAL;
579
580 switch (cmd) {
581 case DMA_TERMINATE_ALL:
582 disable_chan(chan->phy);
583 if (chan->phy) {
584 chan->phy->vchan = NULL;
585 chan->phy = NULL;
586 }
587 spin_lock_irqsave(&chan->desc_lock, flags);
588 mmp_pdma_free_desc_list(chan, &chan->chain_pending);
589 mmp_pdma_free_desc_list(chan, &chan->chain_running);
590 spin_unlock_irqrestore(&chan->desc_lock, flags);
591 chan->idle = true;
592 break;
593 case DMA_SLAVE_CONFIG:
594 if (cfg->direction == DMA_DEV_TO_MEM) {
595 chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
596 maxburst = cfg->src_maxburst;
597 width = cfg->src_addr_width;
598 addr = cfg->src_addr;
599 } else if (cfg->direction == DMA_MEM_TO_DEV) {
600 chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
601 maxburst = cfg->dst_maxburst;
602 width = cfg->dst_addr_width;
603 addr = cfg->dst_addr;
604 }
605
606 if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
607 chan->dcmd |= DCMD_WIDTH1;
608 else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
609 chan->dcmd |= DCMD_WIDTH2;
610 else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
611 chan->dcmd |= DCMD_WIDTH4;
612
613 if (maxburst == 8)
614 chan->dcmd |= DCMD_BURST8;
615 else if (maxburst == 16)
616 chan->dcmd |= DCMD_BURST16;
617 else if (maxburst == 32)
618 chan->dcmd |= DCMD_BURST32;
619
620 if (cfg) {
621 chan->dir = cfg->direction;
622 chan->drcmr = cfg->slave_id;
623 }
624 chan->dev_addr = addr;
625 break;
626 default:
627 return -ENOSYS;
628 }
629
630 return ret;
631}
632
633static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
634 dma_cookie_t cookie, struct dma_tx_state *txstate)
635{
636 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
637 enum dma_status ret;
638 unsigned long flags;
639
640 spin_lock_irqsave(&chan->desc_lock, flags);
641 ret = dma_cookie_status(dchan, cookie, txstate);
642 spin_unlock_irqrestore(&chan->desc_lock, flags);
643
644 return ret;
645}
646
647/**
648 * mmp_pdma_issue_pending - Issue the DMA start command
649 * pending list ==> running list
650 */
651static void mmp_pdma_issue_pending(struct dma_chan *dchan)
652{
653 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
654 unsigned long flags;
655
656 spin_lock_irqsave(&chan->desc_lock, flags);
657 start_pending_queue(chan);
658 spin_unlock_irqrestore(&chan->desc_lock, flags);
659}
660
661/*
662 * dma_do_tasklet
663 * Do call back
664 * Start pending list
665 */
666static void dma_do_tasklet(unsigned long data)
667{
668 struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
669 struct mmp_pdma_desc_sw *desc, *_desc;
670 LIST_HEAD(chain_cleanup);
671 unsigned long flags;
672
673 /* submit pending list; callback for each desc; free desc */
674
675 spin_lock_irqsave(&chan->desc_lock, flags);
676
677 /* update the cookie if we have some descriptors to cleanup */
678 if (!list_empty(&chan->chain_running)) {
679 dma_cookie_t cookie;
680
681 desc = to_mmp_pdma_desc(chan->chain_running.prev);
682 cookie = desc->async_tx.cookie;
683 dma_cookie_complete(&desc->async_tx);
684
685 dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
686 }
687
688 /*
689 * move the descriptors to a temporary list so we can drop the lock
690 * during the entire cleanup operation
691 */
692 list_splice_tail_init(&chan->chain_running, &chain_cleanup);
693
694 /* the hardware is now idle and ready for more */
695 chan->idle = true;
696
697 /* Start any pending transactions automatically */
698 start_pending_queue(chan);
699 spin_unlock_irqrestore(&chan->desc_lock, flags);
700
701 /* Run the callback for each descriptor, in order */
702 list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
703 struct dma_async_tx_descriptor *txd = &desc->async_tx;
704
705 /* Remove from the list of transactions */
706 list_del(&desc->node);
707 /* Run the link descriptor callback function */
708 if (txd->callback)
709 txd->callback(txd->callback_param);
710
711 dma_pool_free(chan->desc_pool, desc, txd->phys);
712 }
713}
714
715static int __devexit mmp_pdma_remove(struct platform_device *op)
716{
717 struct mmp_pdma_device *pdev = platform_get_drvdata(op);
718
719 dma_async_device_unregister(&pdev->device);
720 return 0;
721}
722
723static int __devinit mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
724 int idx, int irq)
725{
726 struct mmp_pdma_phy *phy = &pdev->phy[idx];
727 struct mmp_pdma_chan *chan;
728 int ret;
729
730 chan = devm_kzalloc(pdev->dev,
731 sizeof(struct mmp_pdma_chan), GFP_KERNEL);
732 if (chan == NULL)
733 return -ENOMEM;
734
735 phy->idx = idx;
736 phy->base = pdev->base;
737
738 if (irq) {
739 ret = devm_request_irq(pdev->dev, irq,
740 mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy);
741 if (ret) {
742 dev_err(pdev->dev, "channel request irq fail!\n");
743 return ret;
744 }
745 }
746
747 spin_lock_init(&chan->desc_lock);
748 chan->dev = pdev->dev;
749 chan->chan.device = &pdev->device;
750 tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
751 INIT_LIST_HEAD(&chan->chain_pending);
752 INIT_LIST_HEAD(&chan->chain_running);
753
754 /* register virt channel to dma engine */
755 list_add_tail(&chan->chan.device_node,
756 &pdev->device.channels);
757
758 return 0;
759}
760
761static struct of_device_id mmp_pdma_dt_ids[] = {
762 { .compatible = "marvell,pdma-1.0", },
763 {}
764};
765MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
766
767static int __devinit mmp_pdma_probe(struct platform_device *op)
768{
769 struct mmp_pdma_device *pdev;
770 const struct of_device_id *of_id;
771 struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
772 struct resource *iores;
773 int i, ret, irq = 0;
774 int dma_channels = 0, irq_num = 0;
775
776 pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
777 if (!pdev)
778 return -ENOMEM;
779 pdev->dev = &op->dev;
780
781 iores = platform_get_resource(op, IORESOURCE_MEM, 0);
782 if (!iores)
783 return -EINVAL;
784
785 pdev->base = devm_request_and_ioremap(pdev->dev, iores);
786 if (!pdev->base)
787 return -EADDRNOTAVAIL;
788
789 of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
790 if (of_id)
791 of_property_read_u32(pdev->dev->of_node,
792 "#dma-channels", &dma_channels);
793 else if (pdata && pdata->dma_channels)
794 dma_channels = pdata->dma_channels;
795 else
796 dma_channels = 32; /* default 32 channel */
797 pdev->dma_channels = dma_channels;
798
799 for (i = 0; i < dma_channels; i++) {
800 if (platform_get_irq(op, i) > 0)
801 irq_num++;
802 }
803
804 pdev->phy = devm_kzalloc(pdev->dev,
805 dma_channels * sizeof(struct mmp_pdma_chan), GFP_KERNEL);
806 if (pdev->phy == NULL)
807 return -ENOMEM;
808
809 INIT_LIST_HEAD(&pdev->device.channels);
810
811 if (irq_num != dma_channels) {
812 /* all chan share one irq, demux inside */
813 irq = platform_get_irq(op, 0);
814 ret = devm_request_irq(pdev->dev, irq,
815 mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev);
816 if (ret)
817 return ret;
818 }
819
820 for (i = 0; i < dma_channels; i++) {
821 irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
822 ret = mmp_pdma_chan_init(pdev, i, irq);
823 if (ret)
824 return ret;
825 }
826
827 dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
828 dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
829 dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
830 pdev->device.dev = &op->dev;
831 pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
832 pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
833 pdev->device.device_tx_status = mmp_pdma_tx_status;
834 pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
835 pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
836 pdev->device.device_issue_pending = mmp_pdma_issue_pending;
837 pdev->device.device_control = mmp_pdma_control;
838 pdev->device.copy_align = PDMA_ALIGNMENT;
839
840 if (pdev->dev->coherent_dma_mask)
841 dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
842 else
843 dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
844
845 ret = dma_async_device_register(&pdev->device);
846 if (ret) {
847 dev_err(pdev->device.dev, "unable to register\n");
848 return ret;
849 }
850
851 dev_info(pdev->device.dev, "initialized\n");
852 return 0;
853}
854
/* Legacy (non-DT) platform-bus id table; matches the "mmp-pdma" name. */
static const struct platform_device_id mmp_pdma_id_table[] = {
	{ "mmp-pdma", },
	{ },
};
859
/*
 * Platform driver glue: matches either by DT compatible (mmp_pdma_dt_ids)
 * or by legacy platform-device name (mmp_pdma_id_table).
 */
static struct platform_driver mmp_pdma_driver = {
	.driver		= {
		.name	= "mmp-pdma",
		.owner  = THIS_MODULE,
		.of_match_table = mmp_pdma_dt_ids,
	},
	.id_table	= mmp_pdma_id_table,
	.probe		= mmp_pdma_probe,
	.remove		= __devexit_p(mmp_pdma_remove),
};
870
871module_platform_driver(mmp_pdma_driver);
872
873MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver");
874MODULE_AUTHOR("Marvell International Ltd.");
875MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 6d9c82e891d7..f3e8d71bcbc7 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -20,6 +20,7 @@
20#include <linux/device.h> 20#include <linux/device.h>
21#include <mach/regs-icu.h> 21#include <mach/regs-icu.h>
22#include <linux/platform_data/dma-mmp_tdma.h> 22#include <linux/platform_data/dma-mmp_tdma.h>
23#include <linux/of_device.h>
23 24
24#include "dmaengine.h" 25#include "dmaengine.h"
25 26
@@ -127,7 +128,6 @@ struct mmp_tdma_device {
127 void __iomem *base; 128 void __iomem *base;
128 struct dma_device device; 129 struct dma_device device;
129 struct mmp_tdma_chan *tdmac[TDMA_CHANNEL_NUM]; 130 struct mmp_tdma_chan *tdmac[TDMA_CHANNEL_NUM];
130 int irq;
131}; 131};
132 132
133#define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan) 133#define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan)
@@ -492,7 +492,7 @@ static int __devinit mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
492 return -ENOMEM; 492 return -ENOMEM;
493 } 493 }
494 if (irq) 494 if (irq)
495 tdmac->irq = irq + idx; 495 tdmac->irq = irq;
496 tdmac->dev = tdev->dev; 496 tdmac->dev = tdev->dev;
497 tdmac->chan.device = &tdev->device; 497 tdmac->chan.device = &tdev->device;
498 tdmac->idx = idx; 498 tdmac->idx = idx;
@@ -505,34 +505,43 @@ static int __devinit mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
505 /* add the channel to tdma_chan list */ 505 /* add the channel to tdma_chan list */
506 list_add_tail(&tdmac->chan.device_node, 506 list_add_tail(&tdmac->chan.device_node,
507 &tdev->device.channels); 507 &tdev->device.channels);
508
509 return 0; 508 return 0;
510} 509}
511 510
511static struct of_device_id mmp_tdma_dt_ids[] = {
512 { .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA},
513 { .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU},
514 {}
515};
516MODULE_DEVICE_TABLE(of, mmp_tdma_dt_ids);
517
512static int __devinit mmp_tdma_probe(struct platform_device *pdev) 518static int __devinit mmp_tdma_probe(struct platform_device *pdev)
513{ 519{
514 const struct platform_device_id *id = platform_get_device_id(pdev); 520 enum mmp_tdma_type type;
515 enum mmp_tdma_type type = id->driver_data; 521 const struct of_device_id *of_id;
516 struct mmp_tdma_device *tdev; 522 struct mmp_tdma_device *tdev;
517 struct resource *iores; 523 struct resource *iores;
518 int i, ret; 524 int i, ret;
519 int irq = 0; 525 int irq = 0, irq_num = 0;
520 int chan_num = TDMA_CHANNEL_NUM; 526 int chan_num = TDMA_CHANNEL_NUM;
521 527
528 of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
529 if (of_id)
530 type = (enum mmp_tdma_type) of_id->data;
531 else
532 type = platform_get_device_id(pdev)->driver_data;
533
522 /* always have couple channels */ 534 /* always have couple channels */
523 tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL); 535 tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
524 if (!tdev) 536 if (!tdev)
525 return -ENOMEM; 537 return -ENOMEM;
526 538
527 tdev->dev = &pdev->dev; 539 tdev->dev = &pdev->dev;
528 iores = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
529 if (!iores)
530 return -EINVAL;
531 540
532 if (resource_size(iores) != chan_num) 541 for (i = 0; i < chan_num; i++) {
533 tdev->irq = iores->start; 542 if (platform_get_irq(pdev, i) > 0)
534 else 543 irq_num++;
535 irq = iores->start; 544 }
536 545
537 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 546 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
538 if (!iores) 547 if (!iores)
@@ -542,25 +551,26 @@ static int __devinit mmp_tdma_probe(struct platform_device *pdev)
542 if (!tdev->base) 551 if (!tdev->base)
543 return -EADDRNOTAVAIL; 552 return -EADDRNOTAVAIL;
544 553
545 if (tdev->irq) { 554 INIT_LIST_HEAD(&tdev->device.channels);
546 ret = devm_request_irq(&pdev->dev, tdev->irq, 555
556 if (irq_num != chan_num) {
557 irq = platform_get_irq(pdev, 0);
558 ret = devm_request_irq(&pdev->dev, irq,
547 mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev); 559 mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev);
548 if (ret) 560 if (ret)
549 return ret; 561 return ret;
550 } 562 }
551 563
552 dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
553 dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);
554
555 INIT_LIST_HEAD(&tdev->device.channels);
556
557 /* initialize channel parameters */ 564 /* initialize channel parameters */
558 for (i = 0; i < chan_num; i++) { 565 for (i = 0; i < chan_num; i++) {
566 irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
559 ret = mmp_tdma_chan_init(tdev, i, irq, type); 567 ret = mmp_tdma_chan_init(tdev, i, irq, type);
560 if (ret) 568 if (ret)
561 return ret; 569 return ret;
562 } 570 }
563 571
572 dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
573 dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);
564 tdev->device.dev = &pdev->dev; 574 tdev->device.dev = &pdev->dev;
565 tdev->device.device_alloc_chan_resources = 575 tdev->device.device_alloc_chan_resources =
566 mmp_tdma_alloc_chan_resources; 576 mmp_tdma_alloc_chan_resources;
@@ -595,6 +605,7 @@ static struct platform_driver mmp_tdma_driver = {
595 .driver = { 605 .driver = {
596 .name = "mmp-tdma", 606 .name = "mmp-tdma",
597 .owner = THIS_MODULE, 607 .owner = THIS_MODULE,
608 .of_match_table = mmp_tdma_dt_ids,
598 }, 609 },
599 .id_table = mmp_tdma_id_table, 610 .id_table = mmp_tdma_id_table,
600 .probe = mmp_tdma_probe, 611 .probe = mmp_tdma_probe,
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 734a4eb84d65..9f02e794b12b 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -101,7 +101,8 @@ struct mxs_dma_ccw {
101 u32 pio_words[MXS_PIO_WORDS]; 101 u32 pio_words[MXS_PIO_WORDS];
102}; 102};
103 103
104#define NUM_CCW (int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw)) 104#define CCW_BLOCK_SIZE (4 * PAGE_SIZE)
105#define NUM_CCW (int)(CCW_BLOCK_SIZE / sizeof(struct mxs_dma_ccw))
105 106
106struct mxs_dma_chan { 107struct mxs_dma_chan {
107 struct mxs_dma_engine *mxs_dma; 108 struct mxs_dma_engine *mxs_dma;
@@ -354,14 +355,15 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
354 355
355 mxs_chan->chan_irq = data->chan_irq; 356 mxs_chan->chan_irq = data->chan_irq;
356 357
357 mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, 358 mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
358 &mxs_chan->ccw_phys, GFP_KERNEL); 359 CCW_BLOCK_SIZE, &mxs_chan->ccw_phys,
360 GFP_KERNEL);
359 if (!mxs_chan->ccw) { 361 if (!mxs_chan->ccw) {
360 ret = -ENOMEM; 362 ret = -ENOMEM;
361 goto err_alloc; 363 goto err_alloc;
362 } 364 }
363 365
364 memset(mxs_chan->ccw, 0, PAGE_SIZE); 366 memset(mxs_chan->ccw, 0, CCW_BLOCK_SIZE);
365 367
366 if (mxs_chan->chan_irq != NO_IRQ) { 368 if (mxs_chan->chan_irq != NO_IRQ) {
367 ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, 369 ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
@@ -387,7 +389,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
387err_clk: 389err_clk:
388 free_irq(mxs_chan->chan_irq, mxs_dma); 390 free_irq(mxs_chan->chan_irq, mxs_dma);
389err_irq: 391err_irq:
390 dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, 392 dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
391 mxs_chan->ccw, mxs_chan->ccw_phys); 393 mxs_chan->ccw, mxs_chan->ccw_phys);
392err_alloc: 394err_alloc:
393 return ret; 395 return ret;
@@ -402,7 +404,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
402 404
403 free_irq(mxs_chan->chan_irq, mxs_dma); 405 free_irq(mxs_chan->chan_irq, mxs_dma);
404 406
405 dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, 407 dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
406 mxs_chan->ccw, mxs_chan->ccw_phys); 408 mxs_chan->ccw, mxs_chan->ccw_phys);
407 409
408 clk_disable_unprepare(mxs_dma->clk); 410 clk_disable_unprepare(mxs_dma->clk);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 169c0dbd71ae..665668b6f2b1 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -23,7 +23,6 @@
23#include <linux/dmaengine.h> 23#include <linux/dmaengine.h>
24#include <linux/amba/bus.h> 24#include <linux/amba/bus.h>
25#include <linux/amba/pl330.h> 25#include <linux/amba/pl330.h>
26#include <linux/pm_runtime.h>
27#include <linux/scatterlist.h> 26#include <linux/scatterlist.h>
28#include <linux/of.h> 27#include <linux/of.h>
29 28
@@ -586,8 +585,6 @@ struct dma_pl330_dmac {
586 585
587 /* Peripheral channels connected to this DMAC */ 586 /* Peripheral channels connected to this DMAC */
588 struct dma_pl330_chan *peripherals; /* keep at end */ 587 struct dma_pl330_chan *peripherals; /* keep at end */
589
590 struct clk *clk;
591}; 588};
592 589
593struct dma_pl330_desc { 590struct dma_pl330_desc {
@@ -2395,7 +2392,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
2395 pch->pl330_chid = pl330_request_channel(&pdmac->pif); 2392 pch->pl330_chid = pl330_request_channel(&pdmac->pif);
2396 if (!pch->pl330_chid) { 2393 if (!pch->pl330_chid) {
2397 spin_unlock_irqrestore(&pch->lock, flags); 2394 spin_unlock_irqrestore(&pch->lock, flags);
2398 return 0; 2395 return -ENOMEM;
2399 } 2396 }
2400 2397
2401 tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch); 2398 tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
@@ -2889,29 +2886,17 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2889 goto probe_err1; 2886 goto probe_err1;
2890 } 2887 }
2891 2888
2892 pdmac->clk = clk_get(&adev->dev, "dma");
2893 if (IS_ERR(pdmac->clk)) {
2894 dev_err(&adev->dev, "Cannot get operation clock.\n");
2895 ret = -EINVAL;
2896 goto probe_err2;
2897 }
2898
2899 amba_set_drvdata(adev, pdmac); 2889 amba_set_drvdata(adev, pdmac);
2900 2890
2901#ifndef CONFIG_PM_RUNTIME
2902 /* enable dma clk */
2903 clk_enable(pdmac->clk);
2904#endif
2905
2906 irq = adev->irq[0]; 2891 irq = adev->irq[0];
2907 ret = request_irq(irq, pl330_irq_handler, 0, 2892 ret = request_irq(irq, pl330_irq_handler, 0,
2908 dev_name(&adev->dev), pi); 2893 dev_name(&adev->dev), pi);
2909 if (ret) 2894 if (ret)
2910 goto probe_err3; 2895 goto probe_err2;
2911 2896
2912 ret = pl330_add(pi); 2897 ret = pl330_add(pi);
2913 if (ret) 2898 if (ret)
2914 goto probe_err4; 2899 goto probe_err3;
2915 2900
2916 INIT_LIST_HEAD(&pdmac->desc_pool); 2901 INIT_LIST_HEAD(&pdmac->desc_pool);
2917 spin_lock_init(&pdmac->pool_lock); 2902 spin_lock_init(&pdmac->pool_lock);
@@ -2933,7 +2918,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2933 if (!pdmac->peripherals) { 2918 if (!pdmac->peripherals) {
2934 ret = -ENOMEM; 2919 ret = -ENOMEM;
2935 dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n"); 2920 dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
2936 goto probe_err5; 2921 goto probe_err4;
2937 } 2922 }
2938 2923
2939 for (i = 0; i < num_chan; i++) { 2924 for (i = 0; i < num_chan; i++) {
@@ -2961,6 +2946,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2961 if (pi->pcfg.num_peri) { 2946 if (pi->pcfg.num_peri) {
2962 dma_cap_set(DMA_SLAVE, pd->cap_mask); 2947 dma_cap_set(DMA_SLAVE, pd->cap_mask);
2963 dma_cap_set(DMA_CYCLIC, pd->cap_mask); 2948 dma_cap_set(DMA_CYCLIC, pd->cap_mask);
2949 dma_cap_set(DMA_PRIVATE, pd->cap_mask);
2964 } 2950 }
2965 } 2951 }
2966 2952
@@ -2976,7 +2962,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2976 ret = dma_async_device_register(pd); 2962 ret = dma_async_device_register(pd);
2977 if (ret) { 2963 if (ret) {
2978 dev_err(&adev->dev, "unable to register DMAC\n"); 2964 dev_err(&adev->dev, "unable to register DMAC\n");
2979 goto probe_err5; 2965 goto probe_err4;
2980 } 2966 }
2981 2967
2982 dev_info(&adev->dev, 2968 dev_info(&adev->dev,
@@ -2989,15 +2975,10 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2989 2975
2990 return 0; 2976 return 0;
2991 2977
2992probe_err5:
2993 pl330_del(pi);
2994probe_err4: 2978probe_err4:
2995 free_irq(irq, pi); 2979 pl330_del(pi);
2996probe_err3: 2980probe_err3:
2997#ifndef CONFIG_PM_RUNTIME 2981 free_irq(irq, pi);
2998 clk_disable(pdmac->clk);
2999#endif
3000 clk_put(pdmac->clk);
3001probe_err2: 2982probe_err2:
3002 iounmap(pi->base); 2983 iounmap(pi->base);
3003probe_err1: 2984probe_err1:
@@ -3044,10 +3025,6 @@ static int __devexit pl330_remove(struct amba_device *adev)
3044 res = &adev->res; 3025 res = &adev->res;
3045 release_mem_region(res->start, resource_size(res)); 3026 release_mem_region(res->start, resource_size(res));
3046 3027
3047#ifndef CONFIG_PM_RUNTIME
3048 clk_disable(pdmac->clk);
3049#endif
3050
3051 kfree(pdmac); 3028 kfree(pdmac);
3052 3029
3053 return 0; 3030 return 0;
@@ -3063,49 +3040,10 @@ static struct amba_id pl330_ids[] = {
3063 3040
3064MODULE_DEVICE_TABLE(amba, pl330_ids); 3041MODULE_DEVICE_TABLE(amba, pl330_ids);
3065 3042
3066#ifdef CONFIG_PM_RUNTIME
3067static int pl330_runtime_suspend(struct device *dev)
3068{
3069 struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
3070
3071 if (!pdmac) {
3072 dev_err(dev, "failed to get dmac\n");
3073 return -ENODEV;
3074 }
3075
3076 clk_disable(pdmac->clk);
3077
3078 return 0;
3079}
3080
3081static int pl330_runtime_resume(struct device *dev)
3082{
3083 struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
3084
3085 if (!pdmac) {
3086 dev_err(dev, "failed to get dmac\n");
3087 return -ENODEV;
3088 }
3089
3090 clk_enable(pdmac->clk);
3091
3092 return 0;
3093}
3094#else
3095#define pl330_runtime_suspend NULL
3096#define pl330_runtime_resume NULL
3097#endif /* CONFIG_PM_RUNTIME */
3098
3099static const struct dev_pm_ops pl330_pm_ops = {
3100 .runtime_suspend = pl330_runtime_suspend,
3101 .runtime_resume = pl330_runtime_resume,
3102};
3103
3104static struct amba_driver pl330_driver = { 3043static struct amba_driver pl330_driver = {
3105 .drv = { 3044 .drv = {
3106 .owner = THIS_MODULE, 3045 .owner = THIS_MODULE,
3107 .name = "dma-pl330", 3046 .name = "dma-pl330",
3108 .pm = &pl330_pm_ops,
3109 }, 3047 },
3110 .id_table = pl330_ids, 3048 .id_table = pl330_ids,
3111 .probe = pl330_probe, 3049 .probe = pl330_probe,
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 3eed8b35b0f1..d451caace806 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -109,7 +109,7 @@ static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
109 sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc, 109 sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
110 node); 110 node);
111 /* Move the first queued descriptor to active list */ 111 /* Move the first queued descriptor to active list */
112 list_move_tail(&schan->queued, &schan->active); 112 list_move_tail(&sdesc->node, &schan->active);
113 113
114 /* Start the DMA transfer */ 114 /* Start the DMA transfer */
115 writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 + 115 writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
@@ -428,7 +428,7 @@ static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
428 unsigned long iflags; 428 unsigned long iflags;
429 int ret; 429 int ret;
430 430
431 if ((xt->dir != DMA_MEM_TO_DEV) || (xt->dir != DMA_DEV_TO_MEM)) { 431 if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
432 ret = -EINVAL; 432 ret = -EINVAL;
433 goto err_dir; 433 goto err_dir;
434 } 434 }
@@ -570,21 +570,19 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op)
570 570
571 if (of_property_read_u32(dn, "cell-index", &id)) { 571 if (of_property_read_u32(dn, "cell-index", &id)) {
572 dev_err(dev, "Fail to get DMAC index\n"); 572 dev_err(dev, "Fail to get DMAC index\n");
573 ret = -ENODEV; 573 return -ENODEV;
574 goto free_mem;
575 } 574 }
576 575
577 sdma->irq = irq_of_parse_and_map(dn, 0); 576 sdma->irq = irq_of_parse_and_map(dn, 0);
578 if (sdma->irq == NO_IRQ) { 577 if (sdma->irq == NO_IRQ) {
579 dev_err(dev, "Error mapping IRQ!\n"); 578 dev_err(dev, "Error mapping IRQ!\n");
580 ret = -EINVAL; 579 return -EINVAL;
581 goto free_mem;
582 } 580 }
583 581
584 ret = of_address_to_resource(dn, 0, &res); 582 ret = of_address_to_resource(dn, 0, &res);
585 if (ret) { 583 if (ret) {
586 dev_err(dev, "Error parsing memory region!\n"); 584 dev_err(dev, "Error parsing memory region!\n");
587 goto free_mem; 585 goto irq_dispose;
588 } 586 }
589 587
590 regs_start = res.start; 588 regs_start = res.start;
@@ -597,12 +595,11 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op)
597 goto irq_dispose; 595 goto irq_dispose;
598 } 596 }
599 597
600 ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, 598 ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
601 sdma);
602 if (ret) { 599 if (ret) {
603 dev_err(dev, "Error requesting IRQ!\n"); 600 dev_err(dev, "Error requesting IRQ!\n");
604 ret = -EINVAL; 601 ret = -EINVAL;
605 goto unmap_mem; 602 goto irq_dispose;
606 } 603 }
607 604
608 dma = &sdma->dma; 605 dma = &sdma->dma;
@@ -652,13 +649,9 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op)
652 return 0; 649 return 0;
653 650
654free_irq: 651free_irq:
655 devm_free_irq(dev, sdma->irq, sdma); 652 free_irq(sdma->irq, sdma);
656irq_dispose: 653irq_dispose:
657 irq_dispose_mapping(sdma->irq); 654 irq_dispose_mapping(sdma->irq);
658unmap_mem:
659 iounmap(sdma->base);
660free_mem:
661 devm_kfree(dev, sdma);
662 return ret; 655 return ret;
663} 656}
664 657
@@ -668,10 +661,8 @@ static int __devexit sirfsoc_dma_remove(struct platform_device *op)
668 struct sirfsoc_dma *sdma = dev_get_drvdata(dev); 661 struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
669 662
670 dma_async_device_unregister(&sdma->dma); 663 dma_async_device_unregister(&sdma->dma);
671 devm_free_irq(dev, sdma->irq, sdma); 664 free_irq(sdma->irq, sdma);
672 irq_dispose_mapping(sdma->irq); 665 irq_dispose_mapping(sdma->irq);
673 iounmap(sdma->base);
674 devm_kfree(dev, sdma);
675 return 0; 666 return 0;
676} 667}
677 668
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index eee8d9b9a20b..ae55091c2272 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2921,19 +2921,23 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2921 struct d40_base *base = NULL; 2921 struct d40_base *base = NULL;
2922 int num_log_chans = 0; 2922 int num_log_chans = 0;
2923 int num_phy_chans; 2923 int num_phy_chans;
2924 int clk_ret = -EINVAL;
2924 int i; 2925 int i;
2925 u32 pid; 2926 u32 pid;
2926 u32 cid; 2927 u32 cid;
2927 u8 rev; 2928 u8 rev;
2928 2929
2929 clk = clk_get(&pdev->dev, NULL); 2930 clk = clk_get(&pdev->dev, NULL);
2930
2931 if (IS_ERR(clk)) { 2931 if (IS_ERR(clk)) {
2932 d40_err(&pdev->dev, "No matching clock found\n"); 2932 d40_err(&pdev->dev, "No matching clock found\n");
2933 goto failure; 2933 goto failure;
2934 } 2934 }
2935 2935
2936 clk_enable(clk); 2936 clk_ret = clk_prepare_enable(clk);
2937 if (clk_ret) {
2938 d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
2939 goto failure;
2940 }
2937 2941
2938 /* Get IO for DMAC base address */ 2942 /* Get IO for DMAC base address */
2939 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); 2943 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
@@ -3063,10 +3067,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3063 return base; 3067 return base;
3064 3068
3065failure: 3069failure:
3066 if (!IS_ERR(clk)) { 3070 if (!clk_ret)
3067 clk_disable(clk); 3071 clk_disable_unprepare(clk);
3072 if (!IS_ERR(clk))
3068 clk_put(clk); 3073 clk_put(clk);
3069 }
3070 if (virtbase) 3074 if (virtbase)
3071 iounmap(virtbase); 3075 iounmap(virtbase);
3072 if (res) 3076 if (res)
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 45fbeed1c1a5..528c62dd4b00 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -169,6 +169,7 @@ typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
169/* tegra_dma_channel: Channel specific information */ 169/* tegra_dma_channel: Channel specific information */
170struct tegra_dma_channel { 170struct tegra_dma_channel {
171 struct dma_chan dma_chan; 171 struct dma_chan dma_chan;
172 char name[30];
172 bool config_init; 173 bool config_init;
173 int id; 174 int id;
174 int irq; 175 int irq;
@@ -475,8 +476,7 @@ static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
475 while (!list_empty(&tdc->pending_sg_req)) { 476 while (!list_empty(&tdc->pending_sg_req)) {
476 sgreq = list_first_entry(&tdc->pending_sg_req, 477 sgreq = list_first_entry(&tdc->pending_sg_req,
477 typeof(*sgreq), node); 478 typeof(*sgreq), node);
478 list_del(&sgreq->node); 479 list_move_tail(&sgreq->node, &tdc->free_sg_req);
479 list_add_tail(&sgreq->node, &tdc->free_sg_req);
480 if (sgreq->last_sg) { 480 if (sgreq->last_sg) {
481 dma_desc = sgreq->dma_desc; 481 dma_desc = sgreq->dma_desc;
482 dma_desc->dma_status = DMA_ERROR; 482 dma_desc->dma_status = DMA_ERROR;
@@ -570,8 +570,7 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
570 570
571 /* If not last req then put at end of pending list */ 571 /* If not last req then put at end of pending list */
572 if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) { 572 if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
573 list_del(&sgreq->node); 573 list_move_tail(&sgreq->node, &tdc->pending_sg_req);
574 list_add_tail(&sgreq->node, &tdc->pending_sg_req);
575 sgreq->configured = false; 574 sgreq->configured = false;
576 st = handle_continuous_head_request(tdc, sgreq, to_terminate); 575 st = handle_continuous_head_request(tdc, sgreq, to_terminate);
577 if (!st) 576 if (!st)
@@ -1284,7 +1283,6 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev)
1284 INIT_LIST_HEAD(&tdma->dma_dev.channels); 1283 INIT_LIST_HEAD(&tdma->dma_dev.channels);
1285 for (i = 0; i < cdata->nr_channels; i++) { 1284 for (i = 0; i < cdata->nr_channels; i++) {
1286 struct tegra_dma_channel *tdc = &tdma->channels[i]; 1285 struct tegra_dma_channel *tdc = &tdma->channels[i];
1287 char irq_name[30];
1288 1286
1289 tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET + 1287 tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
1290 i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE; 1288 i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;
@@ -1296,9 +1294,9 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev)
1296 goto err_irq; 1294 goto err_irq;
1297 } 1295 }
1298 tdc->irq = res->start; 1296 tdc->irq = res->start;
1299 snprintf(irq_name, sizeof(irq_name), "apbdma.%d", i); 1297 snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
1300 ret = devm_request_irq(&pdev->dev, tdc->irq, 1298 ret = devm_request_irq(&pdev->dev, tdc->irq,
1301 tegra_dma_isr, 0, irq_name, tdc); 1299 tegra_dma_isr, 0, tdc->name, tdc);
1302 if (ret) { 1300 if (ret) {
1303 dev_err(&pdev->dev, 1301 dev_err(&pdev->dev,
1304 "request_irq failed with err %d channel %d\n", 1302 "request_irq failed with err %d channel %d\n",
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 5a297a26211d..cc8e7c78a23c 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -170,8 +170,11 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
170 * memory controller and apply to register. Search for the first 170 * memory controller and apply to register. Search for the first
171 * bandwidth entry that is greater or equal than the setting requested 171 * bandwidth entry that is greater or equal than the setting requested
172 * and program that. If at last entry, turn off DRAM scrubbing. 172 * and program that. If at last entry, turn off DRAM scrubbing.
173 *
174 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
175 * by falling back to the last element in scrubrates[].
173 */ 176 */
174 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { 177 for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
175 /* 178 /*
176 * skip scrub rates which aren't recommended 179 * skip scrub rates which aren't recommended
177 * (see F10 BKDG, F3x58) 180 * (see F10 BKDG, F3x58)
@@ -181,12 +184,6 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
181 184
182 if (scrubrates[i].bandwidth <= new_bw) 185 if (scrubrates[i].bandwidth <= new_bw)
183 break; 186 break;
184
185 /*
186 * if no suitable bandwidth found, turn off DRAM scrubbing
187 * entirely by falling back to the last element in the
188 * scrubrates array.
189 */
190 } 187 }
191 188
192 scrubval = scrubrates[i].scrubval; 189 scrubval = scrubrates[i].scrubval;
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index 725eb5aa8d8c..e87196f6d2d2 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -14,6 +14,7 @@
14 * 14 *
15 */ 15 */
16 16
17#include <linux/module.h>
17#include <linux/slab.h> 18#include <linux/slab.h>
18#include <linux/device.h> 19#include <linux/device.h>
19#include <linux/platform_device.h> 20#include <linux/platform_device.h>
@@ -161,13 +162,12 @@ static int __devinit adc_jack_probe(struct platform_device *pdev)
161 err = request_any_context_irq(data->irq, adc_jack_irq_thread, 162 err = request_any_context_irq(data->irq, adc_jack_irq_thread,
162 pdata->irq_flags, pdata->name, data); 163 pdata->irq_flags, pdata->name, data);
163 164
164 if (err) { 165 if (err < 0) {
165 dev_err(&pdev->dev, "error: irq %d\n", data->irq); 166 dev_err(&pdev->dev, "error: irq %d\n", data->irq);
166 err = -EINVAL;
167 goto err_irq; 167 goto err_irq;
168 } 168 }
169 169
170 goto out; 170 return 0;
171 171
172err_irq: 172err_irq:
173 extcon_dev_unregister(&data->edev); 173 extcon_dev_unregister(&data->edev);
@@ -196,3 +196,7 @@ static struct platform_driver adc_jack_driver = {
196}; 196};
197 197
198module_platform_driver(adc_jack_driver); 198module_platform_driver(adc_jack_driver);
199
200MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
201MODULE_DESCRIPTION("ADC Jack extcon driver");
202MODULE_LICENSE("GPL v2");
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c
index 946a3188b2b7..d398821097f3 100644
--- a/drivers/extcon/extcon-class.c
+++ b/drivers/extcon/extcon-class.c
@@ -41,7 +41,7 @@
41 * every single port-type of the following cable names. Please choose cable 41 * every single port-type of the following cable names. Please choose cable
42 * names that are actually used in your extcon device. 42 * names that are actually used in your extcon device.
43 */ 43 */
44const char *extcon_cable_name[] = { 44const char extcon_cable_name[][CABLE_NAME_MAX + 1] = {
45 [EXTCON_USB] = "USB", 45 [EXTCON_USB] = "USB",
46 [EXTCON_USB_HOST] = "USB-Host", 46 [EXTCON_USB_HOST] = "USB-Host",
47 [EXTCON_TA] = "TA", 47 [EXTCON_TA] = "TA",
@@ -62,8 +62,6 @@ const char *extcon_cable_name[] = {
62 [EXTCON_VIDEO_IN] = "Video-in", 62 [EXTCON_VIDEO_IN] = "Video-in",
63 [EXTCON_VIDEO_OUT] = "Video-out", 63 [EXTCON_VIDEO_OUT] = "Video-out",
64 [EXTCON_MECHANICAL] = "Mechanical", 64 [EXTCON_MECHANICAL] = "Mechanical",
65
66 NULL,
67}; 65};
68 66
69static struct class *extcon_class; 67static struct class *extcon_class;
@@ -91,17 +89,13 @@ static int check_mutually_exclusive(struct extcon_dev *edev, u32 new_state)
91 return 0; 89 return 0;
92 90
93 for (i = 0; edev->mutually_exclusive[i]; i++) { 91 for (i = 0; edev->mutually_exclusive[i]; i++) {
94 int count = 0, j; 92 int weight;
95 u32 correspondants = new_state & edev->mutually_exclusive[i]; 93 u32 correspondants = new_state & edev->mutually_exclusive[i];
96 u32 exp = 1; 94
97 95 /* calculate the total number of bits set */
98 for (j = 0; j < 32; j++) { 96 weight = hweight32(correspondants);
99 if (exp & correspondants) 97 if (weight > 1)
100 count++; 98 return i + 1;
101 if (count > 1)
102 return i + 1;
103 exp <<= 1;
104 }
105 } 99 }
106 100
107 return 0; 101 return 0;
@@ -362,7 +356,7 @@ int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name)
362EXPORT_SYMBOL_GPL(extcon_get_cable_state); 356EXPORT_SYMBOL_GPL(extcon_get_cable_state);
363 357
364/** 358/**
365 * extcon_get_cable_state_() - Set the status of a specific cable. 359 * extcon_set_cable_state_() - Set the status of a specific cable.
366 * @edev: the extcon device that has the cable. 360 * @edev: the extcon device that has the cable.
367 * @index: cable index that can be retrieved by extcon_find_cable_index(). 361 * @index: cable index that can be retrieved by extcon_find_cable_index().
368 * @cable_state: the new cable status. The default semantics is 362 * @cable_state: the new cable status. The default semantics is
@@ -382,7 +376,7 @@ int extcon_set_cable_state_(struct extcon_dev *edev,
382EXPORT_SYMBOL_GPL(extcon_set_cable_state_); 376EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
383 377
384/** 378/**
385 * extcon_get_cable_state() - Set the status of a specific cable. 379 * extcon_set_cable_state() - Set the status of a specific cable.
386 * @edev: the extcon device that has the cable. 380 * @edev: the extcon device that has the cable.
387 * @cable_name: cable name. 381 * @cable_name: cable name.
388 * @cable_state: the new cable status. The default semantics is 382 * @cable_state: the new cable status. The default semantics is
@@ -447,6 +441,8 @@ static int _call_per_cable(struct notifier_block *nb, unsigned long val,
447 * extcon device. 441 * extcon device.
448 * @obj: an empty extcon_specific_cable_nb object to be returned. 442 * @obj: an empty extcon_specific_cable_nb object to be returned.
449 * @extcon_name: the name of extcon device. 443 * @extcon_name: the name of extcon device.
444 * if NULL, extcon_register_interest will register
445 * every cable with the target cable_name given.
450 * @cable_name: the target cable name. 446 * @cable_name: the target cable name.
451 * @nb: the notifier block to get notified. 447 * @nb: the notifier block to get notified.
452 * 448 *
@@ -466,22 +462,44 @@ int extcon_register_interest(struct extcon_specific_cable_nb *obj,
466 const char *extcon_name, const char *cable_name, 462 const char *extcon_name, const char *cable_name,
467 struct notifier_block *nb) 463 struct notifier_block *nb)
468{ 464{
469 if (!obj || !extcon_name || !cable_name || !nb) 465 if (!obj || !cable_name || !nb)
470 return -EINVAL; 466 return -EINVAL;
471 467
472 obj->edev = extcon_get_extcon_dev(extcon_name); 468 if (extcon_name) {
473 if (!obj->edev) 469 obj->edev = extcon_get_extcon_dev(extcon_name);
474 return -ENODEV; 470 if (!obj->edev)
471 return -ENODEV;
475 472
476 obj->cable_index = extcon_find_cable_index(obj->edev, cable_name); 473 obj->cable_index = extcon_find_cable_index(obj->edev, cable_name);
477 if (obj->cable_index < 0) 474 if (obj->cable_index < 0)
478 return -ENODEV; 475 return -ENODEV;
476
477 obj->user_nb = nb;
479 478
480 obj->user_nb = nb; 479 obj->internal_nb.notifier_call = _call_per_cable;
481 480
482 obj->internal_nb.notifier_call = _call_per_cable; 481 return raw_notifier_chain_register(&obj->edev->nh, &obj->internal_nb);
482 } else {
483 struct class_dev_iter iter;
484 struct extcon_dev *extd;
485 struct device *dev;
486
487 if (!extcon_class)
488 return -ENODEV;
489 class_dev_iter_init(&iter, extcon_class, NULL, NULL);
490 while ((dev = class_dev_iter_next(&iter))) {
491 extd = (struct extcon_dev *)dev_get_drvdata(dev);
492
493 if (extcon_find_cable_index(extd, cable_name) < 0)
494 continue;
495
496 class_dev_iter_exit(&iter);
497 return extcon_register_interest(obj, extd->name,
498 cable_name, nb);
499 }
483 500
484 return raw_notifier_chain_register(&obj->edev->nh, &obj->internal_nb); 501 return -ENODEV;
502 }
485} 503}
486 504
487/** 505/**
@@ -551,43 +569,9 @@ static int create_extcon_class(void)
551 return 0; 569 return 0;
552} 570}
553 571
554static void extcon_cleanup(struct extcon_dev *edev, bool skip)
555{
556 mutex_lock(&extcon_dev_list_lock);
557 list_del(&edev->entry);
558 mutex_unlock(&extcon_dev_list_lock);
559
560 if (!skip && get_device(edev->dev)) {
561 int index;
562
563 if (edev->mutually_exclusive && edev->max_supported) {
564 for (index = 0; edev->mutually_exclusive[index];
565 index++)
566 kfree(edev->d_attrs_muex[index].attr.name);
567 kfree(edev->d_attrs_muex);
568 kfree(edev->attrs_muex);
569 }
570
571 for (index = 0; index < edev->max_supported; index++)
572 kfree(edev->cables[index].attr_g.name);
573
574 if (edev->max_supported) {
575 kfree(edev->extcon_dev_type.groups);
576 kfree(edev->cables);
577 }
578
579 device_unregister(edev->dev);
580 put_device(edev->dev);
581 }
582
583 kfree(edev->dev);
584}
585
586static void extcon_dev_release(struct device *dev) 572static void extcon_dev_release(struct device *dev)
587{ 573{
588 struct extcon_dev *edev = (struct extcon_dev *) dev_get_drvdata(dev); 574 kfree(dev);
589
590 extcon_cleanup(edev, true);
591} 575}
592 576
593static const char *muex_name = "mutually_exclusive"; 577static const char *muex_name = "mutually_exclusive";
@@ -813,7 +797,40 @@ EXPORT_SYMBOL_GPL(extcon_dev_register);
813 */ 797 */
814void extcon_dev_unregister(struct extcon_dev *edev) 798void extcon_dev_unregister(struct extcon_dev *edev)
815{ 799{
816 extcon_cleanup(edev, false); 800 int index;
801
802 mutex_lock(&extcon_dev_list_lock);
803 list_del(&edev->entry);
804 mutex_unlock(&extcon_dev_list_lock);
805
806 if (IS_ERR_OR_NULL(get_device(edev->dev))) {
807 dev_err(edev->dev, "Failed to unregister extcon_dev (%s)\n",
808 dev_name(edev->dev));
809 return;
810 }
811
812 if (edev->mutually_exclusive && edev->max_supported) {
813 for (index = 0; edev->mutually_exclusive[index];
814 index++)
815 kfree(edev->d_attrs_muex[index].attr.name);
816 kfree(edev->d_attrs_muex);
817 kfree(edev->attrs_muex);
818 }
819
820 for (index = 0; index < edev->max_supported; index++)
821 kfree(edev->cables[index].attr_g.name);
822
823 if (edev->max_supported) {
824 kfree(edev->extcon_dev_type.groups);
825 kfree(edev->cables);
826 }
827
828#if defined(CONFIG_ANDROID)
829 if (switch_class)
830 class_compat_remove_link(switch_class, edev->dev, NULL);
831#endif
832 device_unregister(edev->dev);
833 put_device(edev->dev);
817} 834}
818EXPORT_SYMBOL_GPL(extcon_dev_unregister); 835EXPORT_SYMBOL_GPL(extcon_dev_unregister);
819 836
@@ -825,6 +842,9 @@ module_init(extcon_class_init);
825 842
826static void __exit extcon_class_exit(void) 843static void __exit extcon_class_exit(void)
827{ 844{
845#if defined(CONFIG_ANDROID)
846 class_compat_unregister(switch_class);
847#endif
828 class_destroy(extcon_class); 848 class_destroy(extcon_class);
829} 849}
830module_exit(extcon_class_exit); 850module_exit(extcon_class_exit);
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 3cc152e690b0..71d3ab7b3d8d 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -26,7 +26,6 @@
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/extcon.h>
30#include <linux/workqueue.h> 29#include <linux/workqueue.h>
31#include <linux/gpio.h> 30#include <linux/gpio.h>
32#include <linux/extcon.h> 31#include <linux/extcon.h>
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index e21387e2da5c..a17d0d91ada2 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -239,25 +239,19 @@ const char *max77693_extcon_cable[] = {
239static int max77693_muic_set_debounce_time(struct max77693_muic_info *info, 239static int max77693_muic_set_debounce_time(struct max77693_muic_info *info,
240 enum max77693_muic_adc_debounce_time time) 240 enum max77693_muic_adc_debounce_time time)
241{ 241{
242 int ret = 0; 242 int ret;
243 u8 ctrl3;
244 243
245 switch (time) { 244 switch (time) {
246 case ADC_DEBOUNCE_TIME_5MS: 245 case ADC_DEBOUNCE_TIME_5MS:
247 case ADC_DEBOUNCE_TIME_10MS: 246 case ADC_DEBOUNCE_TIME_10MS:
248 case ADC_DEBOUNCE_TIME_25MS: 247 case ADC_DEBOUNCE_TIME_25MS:
249 case ADC_DEBOUNCE_TIME_38_62MS: 248 case ADC_DEBOUNCE_TIME_38_62MS:
250 ret = max77693_read_reg(info->max77693->regmap_muic, 249 ret = max77693_update_reg(info->max77693->regmap_muic,
251 MAX77693_MUIC_REG_CTRL3, &ctrl3); 250 MAX77693_MUIC_REG_CTRL3,
252 ctrl3 &= ~CONTROL3_ADCDBSET_MASK; 251 time << CONTROL3_ADCDBSET_SHIFT,
253 ctrl3 |= (time << CONTROL3_ADCDBSET_SHIFT); 252 CONTROL3_ADCDBSET_MASK);
254 253 if (ret)
255 ret = max77693_write_reg(info->max77693->regmap_muic,
256 MAX77693_MUIC_REG_CTRL3, ctrl3);
257 if (ret) {
258 dev_err(info->dev, "failed to set ADC debounce time\n"); 254 dev_err(info->dev, "failed to set ADC debounce time\n");
259 ret = -EINVAL;
260 }
261 break; 255 break;
262 default: 256 default:
263 dev_err(info->dev, "invalid ADC debounce time\n"); 257 dev_err(info->dev, "invalid ADC debounce time\n");
@@ -657,6 +651,8 @@ out:
657static int __devinit max77693_muic_probe(struct platform_device *pdev) 651static int __devinit max77693_muic_probe(struct platform_device *pdev)
658{ 652{
659 struct max77693_dev *max77693 = dev_get_drvdata(pdev->dev.parent); 653 struct max77693_dev *max77693 = dev_get_drvdata(pdev->dev.parent);
654 struct max77693_platform_data *pdata = dev_get_platdata(max77693->dev);
655 struct max77693_muic_platform_data *muic_pdata = pdata->muic_data;
660 struct max77693_muic_info *info; 656 struct max77693_muic_info *info;
661 int ret, i; 657 int ret, i;
662 u8 id; 658 u8 id;
@@ -727,6 +723,31 @@ static int __devinit max77693_muic_probe(struct platform_device *pdev)
727 goto err_extcon; 723 goto err_extcon;
728 } 724 }
729 725
726 /* Initialize MUIC register by using platform data */
727 for (i = 0 ; i < muic_pdata->num_init_data ; i++) {
728 enum max77693_irq_source irq_src = MAX77693_IRQ_GROUP_NR;
729
730 max77693_write_reg(info->max77693->regmap_muic,
731 muic_pdata->init_data[i].addr,
732 muic_pdata->init_data[i].data);
733
734 switch (muic_pdata->init_data[i].addr) {
735 case MAX77693_MUIC_REG_INTMASK1:
736 irq_src = MUIC_INT1;
737 break;
738 case MAX77693_MUIC_REG_INTMASK2:
739 irq_src = MUIC_INT2;
740 break;
741 case MAX77693_MUIC_REG_INTMASK3:
742 irq_src = MUIC_INT3;
743 break;
744 }
745
746 if (irq_src < MAX77693_IRQ_GROUP_NR)
747 info->max77693->irq_masks_cur[irq_src]
748 = muic_pdata->init_data[i].data;
749 }
750
730 /* Check revision number of MUIC device*/ 751 /* Check revision number of MUIC device*/
731 ret = max77693_read_reg(info->max77693->regmap_muic, 752 ret = max77693_read_reg(info->max77693->regmap_muic,
732 MAX77693_MUIC_REG_ID, &id); 753 MAX77693_MUIC_REG_ID, &id);
@@ -762,6 +783,7 @@ static int __devexit max77693_muic_remove(struct platform_device *pdev)
762 free_irq(muic_irqs[i].virq, info); 783 free_irq(muic_irqs[i].virq, info);
763 cancel_work_sync(&info->irq_work); 784 cancel_work_sync(&info->irq_work);
764 extcon_dev_unregister(info->edev); 785 extcon_dev_unregister(info->edev);
786 kfree(info->edev);
765 kfree(info); 787 kfree(info);
766 788
767 return 0; 789 return 0;
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index ef9090a4271d..77b66b0cc8f5 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -271,8 +271,6 @@ out:
271static int max8997_muic_handle_charger_type_detach( 271static int max8997_muic_handle_charger_type_detach(
272 struct max8997_muic_info *info) 272 struct max8997_muic_info *info)
273{ 273{
274 int ret = 0;
275
276 switch (info->pre_charger_type) { 274 switch (info->pre_charger_type) {
277 case MAX8997_CHARGER_TYPE_USB: 275 case MAX8997_CHARGER_TYPE_USB:
278 extcon_set_cable_state(info->edev, "USB", false); 276 extcon_set_cable_state(info->edev, "USB", false);
@@ -290,11 +288,11 @@ static int max8997_muic_handle_charger_type_detach(
290 extcon_set_cable_state(info->edev, "Fast-charger", false); 288 extcon_set_cable_state(info->edev, "Fast-charger", false);
291 break; 289 break;
292 default: 290 default:
293 ret = -EINVAL; 291 return -EINVAL;
294 break; 292 break;
295 } 293 }
296 294
297 return ret; 295 return 0;
298} 296}
299 297
300static int max8997_muic_handle_charger_type(struct max8997_muic_info *info, 298static int max8997_muic_handle_charger_type(struct max8997_muic_info *info,
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 2783f69dada6..f8d22872d753 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -473,8 +473,8 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
473 client->bus_reset_closure = a->bus_reset_closure; 473 client->bus_reset_closure = a->bus_reset_closure;
474 if (a->bus_reset != 0) { 474 if (a->bus_reset != 0) {
475 fill_bus_reset_event(&bus_reset, client); 475 fill_bus_reset_event(&bus_reset, client);
476 ret = copy_to_user(u64_to_uptr(a->bus_reset), 476 /* unaligned size of bus_reset is 36 bytes */
477 &bus_reset, sizeof(bus_reset)); 477 ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
478 } 478 }
479 if (ret == 0 && list_empty(&client->link)) 479 if (ret == 0 && list_empty(&client->link))
480 list_add_tail(&client->link, &client->device->client_list); 480 list_add_tail(&client->link, &client->device->client_list);
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index c1cdc9236666..90723e65b081 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -237,7 +237,7 @@ static ssize_t memmap_attr_show(struct kobject *kobj,
237 * firmware_map_add() or firmware_map_add_early() afterwards, the entries 237 * firmware_map_add() or firmware_map_add_early() afterwards, the entries
238 * are not added to sysfs. 238 * are not added to sysfs.
239 */ 239 */
240static int __init memmap_init(void) 240static int __init firmware_memmap_init(void)
241{ 241{
242 struct firmware_map_entry *entry; 242 struct firmware_map_entry *entry;
243 243
@@ -246,5 +246,5 @@ static int __init memmap_init(void)
246 246
247 return 0; 247 return 0;
248} 248}
249late_initcall(memmap_init); 249late_initcall(firmware_memmap_init);
250 250
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index ed3e55161bdc..f05e54258ffb 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -153,7 +153,7 @@ static int __devinit gen_74x164_probe(struct spi_device *spi)
153 } 153 }
154 154
155 chip->gpio_chip.ngpio = GEN_74X164_NUMBER_GPIOS * chip->registers; 155 chip->gpio_chip.ngpio = GEN_74X164_NUMBER_GPIOS * chip->registers;
156 chip->buffer = devm_kzalloc(&spi->dev, chip->gpio_chip.ngpio, GFP_KERNEL); 156 chip->buffer = devm_kzalloc(&spi->dev, chip->registers, GFP_KERNEL);
157 if (!chip->buffer) { 157 if (!chip->buffer) {
158 ret = -ENOMEM; 158 ret = -ENOMEM;
159 goto exit_destroy; 159 goto exit_destroy;
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 902af437eaf2..cf7afb9eb61a 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -244,6 +244,8 @@ static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned pin,
244 if (ret) 244 if (ret)
245 return ret; 245 return ret;
246 246
247 mvebu_gpio_set(chip, pin, value);
248
247 spin_lock_irqsave(&mvchip->lock, flags); 249 spin_lock_irqsave(&mvchip->lock, flags);
248 u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip)); 250 u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip));
249 u &= ~(1 << pin); 251 u &= ~(1 << pin);
@@ -381,11 +383,13 @@ static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type)
381 u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip)); 383 u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
382 u &= ~(1 << pin); 384 u &= ~(1 << pin);
383 writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip)); 385 writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip));
386 break;
384 case IRQ_TYPE_EDGE_FALLING: 387 case IRQ_TYPE_EDGE_FALLING:
385 case IRQ_TYPE_LEVEL_LOW: 388 case IRQ_TYPE_LEVEL_LOW:
386 u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip)); 389 u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
387 u |= 1 << pin; 390 u |= 1 << pin;
388 writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip)); 391 writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip));
392 break;
389 case IRQ_TYPE_EDGE_BOTH: { 393 case IRQ_TYPE_EDGE_BOTH: {
390 u32 v; 394 u32 v;
391 395
@@ -401,6 +405,7 @@ static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type)
401 else 405 else
402 u &= ~(1 << pin); /* rising */ 406 u &= ~(1 << pin); /* rising */
403 writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip)); 407 writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip));
408 break;
404 } 409 }
405 } 410 }
406 return 0; 411 return 0;
@@ -641,7 +646,7 @@ static int __devinit mvebu_gpio_probe(struct platform_device *pdev)
641 ct->handler = handle_edge_irq; 646 ct->handler = handle_edge_irq;
642 ct->chip.name = mvchip->chip.label; 647 ct->chip.name = mvchip->chip.label;
643 648
644 irq_setup_generic_chip(gc, IRQ_MSK(ngpios), IRQ_GC_INIT_MASK_CACHE, 649 irq_setup_generic_chip(gc, IRQ_MSK(ngpios), 0,
645 IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE); 650 IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
646 651
647 /* Setup irq domain on top of the generic chip. */ 652 /* Setup irq domain on top of the generic chip. */
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 94cbc842fbc3..d335af1d4d85 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -251,6 +251,40 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
251 } 251 }
252} 252}
253 253
254/**
255 * _clear_gpio_debounce - clear debounce settings for a gpio
256 * @bank: the gpio bank we're acting upon
257 * @gpio: the gpio number on this @gpio
258 *
259 * If a gpio is using debounce, then clear the debounce enable bit and if
260 * this is the only gpio in this bank using debounce, then clear the debounce
261 * time too. The debounce clock will also be disabled when calling this function
262 * if this is the only gpio in the bank using debounce.
263 */
264static void _clear_gpio_debounce(struct gpio_bank *bank, unsigned gpio)
265{
266 u32 gpio_bit = GPIO_BIT(bank, gpio);
267
268 if (!bank->dbck_flag)
269 return;
270
271 if (!(bank->dbck_enable_mask & gpio_bit))
272 return;
273
274 bank->dbck_enable_mask &= ~gpio_bit;
275 bank->context.debounce_en &= ~gpio_bit;
276 __raw_writel(bank->context.debounce_en,
277 bank->base + bank->regs->debounce_en);
278
279 if (!bank->dbck_enable_mask) {
280 bank->context.debounce = 0;
281 __raw_writel(bank->context.debounce, bank->base +
282 bank->regs->debounce);
283 clk_disable(bank->dbck);
284 bank->dbck_enabled = false;
285 }
286}
287
254static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio, 288static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
255 unsigned trigger) 289 unsigned trigger)
256{ 290{
@@ -539,6 +573,7 @@ static void _reset_gpio(struct gpio_bank *bank, int gpio)
539 _set_gpio_irqenable(bank, gpio, 0); 573 _set_gpio_irqenable(bank, gpio, 0);
540 _clear_gpio_irqstatus(bank, gpio); 574 _clear_gpio_irqstatus(bank, gpio);
541 _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE); 575 _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
576 _clear_gpio_debounce(bank, gpio);
542} 577}
543 578
544/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */ 579/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index 031c6adf5b65..1a3e2b9b4772 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -116,7 +116,7 @@ static void timbgpio_irq_disable(struct irq_data *d)
116 unsigned long flags; 116 unsigned long flags;
117 117
118 spin_lock_irqsave(&tgpio->lock, flags); 118 spin_lock_irqsave(&tgpio->lock, flags);
119 tgpio->last_ier &= ~(1 << offset); 119 tgpio->last_ier &= ~(1UL << offset);
120 iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER); 120 iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
121 spin_unlock_irqrestore(&tgpio->lock, flags); 121 spin_unlock_irqrestore(&tgpio->lock, flags);
122} 122}
@@ -128,7 +128,7 @@ static void timbgpio_irq_enable(struct irq_data *d)
128 unsigned long flags; 128 unsigned long flags;
129 129
130 spin_lock_irqsave(&tgpio->lock, flags); 130 spin_lock_irqsave(&tgpio->lock, flags);
131 tgpio->last_ier |= 1 << offset; 131 tgpio->last_ier |= 1UL << offset;
132 iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER); 132 iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
133 spin_unlock_irqrestore(&tgpio->lock, flags); 133 spin_unlock_irqrestore(&tgpio->lock, flags);
134} 134}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 5d6c71edc739..1c8d9e3380e1 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -623,9 +623,11 @@ static ssize_t export_store(struct class *class,
623 */ 623 */
624 624
625 status = gpio_request(gpio, "sysfs"); 625 status = gpio_request(gpio, "sysfs");
626 if (status < 0) 626 if (status < 0) {
627 if (status == -EPROBE_DEFER)
628 status = -ENODEV;
627 goto done; 629 goto done;
628 630 }
629 status = gpio_export(gpio, true); 631 status = gpio_export(gpio, true);
630 if (status < 0) 632 if (status < 0)
631 gpio_free(gpio); 633 gpio_free(gpio);
@@ -1191,8 +1193,10 @@ int gpio_request(unsigned gpio, const char *label)
1191 1193
1192 spin_lock_irqsave(&gpio_lock, flags); 1194 spin_lock_irqsave(&gpio_lock, flags);
1193 1195
1194 if (!gpio_is_valid(gpio)) 1196 if (!gpio_is_valid(gpio)) {
1197 status = -EINVAL;
1195 goto done; 1198 goto done;
1199 }
1196 desc = &gpio_desc[gpio]; 1200 desc = &gpio_desc[gpio];
1197 chip = desc->chip; 1201 chip = desc->chip;
1198 if (chip == NULL) 1202 if (chip == NULL)
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 09e11a5d921a..fd9d0af4d536 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -206,7 +206,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
206 size_t size; 206 size_t size;
207 int ret; 207 int ret;
208 208
209 DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n", 209 DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
210 sizes->surface_width, sizes->surface_height, 210 sizes->surface_width, sizes->surface_height,
211 sizes->surface_bpp); 211 sizes->surface_bpp);
212 212
@@ -220,7 +220,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
220 220
221 size = mode_cmd.pitches[0] * mode_cmd.height; 221 size = mode_cmd.pitches[0] * mode_cmd.height;
222 obj = drm_gem_cma_create(dev, size); 222 obj = drm_gem_cma_create(dev, size);
223 if (!obj) 223 if (IS_ERR(obj))
224 return -ENOMEM; 224 return -ENOMEM;
225 225
226 fbi = framebuffer_alloc(0, dev->dev); 226 fbi = framebuffer_alloc(0, dev->dev);
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index cdf8b1e7602d..d4b20ceda3fb 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -205,8 +205,6 @@ static int drm_gem_one_name_info(int id, void *ptr, void *data)
205 struct drm_gem_object *obj = ptr; 205 struct drm_gem_object *obj = ptr;
206 struct seq_file *m = data; 206 struct seq_file *m = data;
207 207
208 seq_printf(m, "name %d size %zd\n", obj->name, obj->size);
209
210 seq_printf(m, "%6d %8zd %7d %8d\n", 208 seq_printf(m, "%6d %8zd %7d %8d\n",
211 obj->name, obj->size, 209 obj->name, obj->size,
212 atomic_read(&obj->handle_count), 210 atomic_read(&obj->handle_count),
@@ -239,7 +237,7 @@ int drm_vma_info(struct seq_file *m, void *data)
239 mutex_lock(&dev->struct_mutex); 237 mutex_lock(&dev->struct_mutex);
240 seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n", 238 seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n",
241 atomic_read(&dev->vma_count), 239 atomic_read(&dev->vma_count),
242 high_memory, (void *)virt_to_phys(high_memory)); 240 high_memory, (void *)(unsigned long)virt_to_phys(high_memory));
243 241
244 list_for_each_entry(pt, &dev->vmalist, head) { 242 list_for_each_entry(pt, &dev->vmalist, head) {
245 vma = pt->vma; 243 vma = pt->vma;
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index aaeb6f8d69ce..b8a282ea8751 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -64,7 +64,6 @@ int drm_get_platform_dev(struct platform_device *platdev,
64 } 64 }
65 65
66 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 66 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
67 dev_set_drvdata(&platdev->dev, dev);
68 ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL); 67 ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
69 if (ret) 68 if (ret)
70 goto err_g1; 69 goto err_g1;
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 59a26e577b57..fc345d4ebb03 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,6 +1,6 @@
1config DRM_EXYNOS 1config DRM_EXYNOS
2 tristate "DRM Support for Samsung SoC EXYNOS Series" 2 tristate "DRM Support for Samsung SoC EXYNOS Series"
3 depends on DRM && PLAT_SAMSUNG 3 depends on DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select FB_CFB_FILLRECT 5 select FB_CFB_FILLRECT
6 select FB_CFB_COPYAREA 6 select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 18c271862ca8..0f68a2872673 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -374,6 +374,7 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
374 exynos_connector->encoder_id = encoder->base.id; 374 exynos_connector->encoder_id = encoder->base.id;
375 exynos_connector->manager = manager; 375 exynos_connector->manager = manager;
376 exynos_connector->dpms = DRM_MODE_DPMS_OFF; 376 exynos_connector->dpms = DRM_MODE_DPMS_OFF;
377 connector->dpms = DRM_MODE_DPMS_OFF;
377 connector->encoder = encoder; 378 connector->encoder = encoder;
378 379
379 err = drm_mode_connector_attach_encoder(connector, encoder); 380 err = drm_mode_connector_attach_encoder(connector, encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index e51503fbaf2b..241ad1eeec64 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -43,12 +43,14 @@
43 * @manager: specific encoder has its own manager to control a hardware 43 * @manager: specific encoder has its own manager to control a hardware
44 * appropriately and we can access a hardware drawing on this manager. 44 * appropriately and we can access a hardware drawing on this manager.
45 * @dpms: store the encoder dpms value. 45 * @dpms: store the encoder dpms value.
46 * @updated: indicate whether overlay data updating is needed or not.
46 */ 47 */
47struct exynos_drm_encoder { 48struct exynos_drm_encoder {
48 struct drm_crtc *old_crtc; 49 struct drm_crtc *old_crtc;
49 struct drm_encoder drm_encoder; 50 struct drm_encoder drm_encoder;
50 struct exynos_drm_manager *manager; 51 struct exynos_drm_manager *manager;
51 int dpms; 52 int dpms;
53 bool updated;
52}; 54};
53 55
54static void exynos_drm_connector_power(struct drm_encoder *encoder, int mode) 56static void exynos_drm_connector_power(struct drm_encoder *encoder, int mode)
@@ -85,7 +87,9 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
85 switch (mode) { 87 switch (mode) {
86 case DRM_MODE_DPMS_ON: 88 case DRM_MODE_DPMS_ON:
87 if (manager_ops && manager_ops->apply) 89 if (manager_ops && manager_ops->apply)
88 manager_ops->apply(manager->dev); 90 if (!exynos_encoder->updated)
91 manager_ops->apply(manager->dev);
92
89 exynos_drm_connector_power(encoder, mode); 93 exynos_drm_connector_power(encoder, mode);
90 exynos_encoder->dpms = mode; 94 exynos_encoder->dpms = mode;
91 break; 95 break;
@@ -94,6 +98,7 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
94 case DRM_MODE_DPMS_OFF: 98 case DRM_MODE_DPMS_OFF:
95 exynos_drm_connector_power(encoder, mode); 99 exynos_drm_connector_power(encoder, mode);
96 exynos_encoder->dpms = mode; 100 exynos_encoder->dpms = mode;
101 exynos_encoder->updated = false;
97 break; 102 break;
98 default: 103 default:
99 DRM_ERROR("unspecified mode %d\n", mode); 104 DRM_ERROR("unspecified mode %d\n", mode);
@@ -205,13 +210,22 @@ static void exynos_drm_encoder_prepare(struct drm_encoder *encoder)
205 210
206static void exynos_drm_encoder_commit(struct drm_encoder *encoder) 211static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
207{ 212{
208 struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); 213 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
214 struct exynos_drm_manager *manager = exynos_encoder->manager;
209 struct exynos_drm_manager_ops *manager_ops = manager->ops; 215 struct exynos_drm_manager_ops *manager_ops = manager->ops;
210 216
211 DRM_DEBUG_KMS("%s\n", __FILE__); 217 DRM_DEBUG_KMS("%s\n", __FILE__);
212 218
213 if (manager_ops && manager_ops->commit) 219 if (manager_ops && manager_ops->commit)
214 manager_ops->commit(manager->dev); 220 manager_ops->commit(manager->dev);
221
222 /*
223 * this will avoid one issue that overlay data is updated to
224 * real hardware two times.
225 * And this variable will be used to check if the data was
226 * already updated or not by exynos_drm_encoder_dpms function.
227 */
228 exynos_encoder->updated = true;
215} 229}
216 230
217static void exynos_drm_encoder_disable(struct drm_encoder *encoder) 231static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
@@ -401,19 +415,6 @@ void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
401 manager_ops->dpms(manager->dev, mode); 415 manager_ops->dpms(manager->dev, mode);
402 416
403 /* 417 /*
404 * set current mode to new one so that data aren't updated into
405 * registers by drm_helper_connector_dpms two times.
406 *
407 * in case that drm_crtc_helper_set_mode() is called,
408 * overlay_ops->commit() and manager_ops->commit() callbacks
409 * can be called two times, first at drm_crtc_helper_set_mode()
410 * and second at drm_helper_connector_dpms().
411 * so with this setting, when drm_helper_connector_dpms() is called
412 * encoder->funcs->dpms() will be ignored.
413 */
414 exynos_encoder->dpms = mode;
415
416 /*
417 * if this condition is ok then it means that the crtc is already 418 * if this condition is ok then it means that the crtc is already
418 * detached from encoder and last function for detaching is properly 419 * detached from encoder and last function for detaching is properly
419 * done, so clear pipe from manager to prevent repeated call. 420 * done, so clear pipe from manager to prevent repeated call.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index a32837951dd2..130a2b510d4a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -19,8 +19,8 @@
19#include <linux/clk.h> 19#include <linux/clk.h>
20#include <linux/pm_runtime.h> 20#include <linux/pm_runtime.h>
21 21
22#include <video/samsung_fimd.h>
22#include <drm/exynos_drm.h> 23#include <drm/exynos_drm.h>
23#include <plat/regs-fb-v4.h>
24 24
25#include "exynos_drm_drv.h" 25#include "exynos_drm_drv.h"
26#include "exynos_drm_fbdev.h" 26#include "exynos_drm_fbdev.h"
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 614b2e9ac462..e7fbb823fd8e 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1142,7 +1142,7 @@ static int __devinit mixer_probe(struct platform_device *pdev)
1142 const struct of_device_id *match; 1142 const struct of_device_id *match;
1143 match = of_match_node(of_match_ptr(mixer_match_types), 1143 match = of_match_node(of_match_ptr(mixer_match_types),
1144 pdev->dev.of_node); 1144 pdev->dev.of_node);
1145 drv = match->data; 1145 drv = (struct mixer_drv_data *)match->data;
1146 } else { 1146 } else {
1147 drv = (struct mixer_drv_data *) 1147 drv = (struct mixer_drv_data *)
1148 platform_get_device_id(pdev)->driver_data; 1148 platform_get_device_id(pdev)->driver_data;
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 38f3a6cb8c7d..3edd981e0770 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -303,10 +303,10 @@ static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
303 303
304 ch7xxx_readb(dvo, CH7xxx_PM, &val); 304 ch7xxx_readb(dvo, CH7xxx_PM, &val);
305 305
306 if (val & CH7xxx_PM_FPD) 306 if (val & (CH7xxx_PM_DVIL | CH7xxx_PM_DVIP))
307 return false;
308 else
309 return true; 307 return true;
308 else
309 return false;
310} 310}
311 311
312static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) 312static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index c9bfd83dde64..61ae104dca8c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1505,7 +1505,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1505 goto put_gmch; 1505 goto put_gmch;
1506 } 1506 }
1507 1507
1508 i915_kick_out_firmware_fb(dev_priv); 1508 if (drm_core_check_feature(dev, DRIVER_MODESET))
1509 i915_kick_out_firmware_fb(dev_priv);
1509 1510
1510 pci_set_master(dev->pdev); 1511 pci_set_master(dev->pdev);
1511 1512
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index aac4e5e1a5b9..6770ee6084b4 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -118,6 +118,13 @@ module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
118MODULE_PARM_DESC(i915_enable_ppgtt, 118MODULE_PARM_DESC(i915_enable_ppgtt,
119 "Enable PPGTT (default: true)"); 119 "Enable PPGTT (default: true)");
120 120
121unsigned int i915_preliminary_hw_support __read_mostly = 0;
122module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
123MODULE_PARM_DESC(preliminary_hw_support,
124 "Enable preliminary hardware support. "
125 "Enable Haswell and ValleyView Support. "
126 "(default: false)");
127
121static struct drm_driver driver; 128static struct drm_driver driver;
122extern int intel_agp_enabled; 129extern int intel_agp_enabled;
123 130
@@ -826,6 +833,12 @@ i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
826 struct intel_device_info *intel_info = 833 struct intel_device_info *intel_info =
827 (struct intel_device_info *) ent->driver_data; 834 (struct intel_device_info *) ent->driver_data;
828 835
836 if (intel_info->is_haswell || intel_info->is_valleyview)
837 if(!i915_preliminary_hw_support) {
838 DRM_ERROR("Preliminary hardware support disabled\n");
839 return -ENODEV;
840 }
841
829 /* Only bind to function 0 of the device. Early generations 842 /* Only bind to function 0 of the device. Early generations
830 * used function 1 as a placeholder for multi-head. This causes 843 * used function 1 as a placeholder for multi-head. This causes
831 * us confusion instead, especially on the systems where both 844 * us confusion instead, especially on the systems where both
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4f2831aa5fed..f511fa2f4168 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1217,6 +1217,7 @@ extern int i915_enable_rc6 __read_mostly;
1217extern int i915_enable_fbc __read_mostly; 1217extern int i915_enable_fbc __read_mostly;
1218extern bool i915_enable_hangcheck __read_mostly; 1218extern bool i915_enable_hangcheck __read_mostly;
1219extern int i915_enable_ppgtt __read_mostly; 1219extern int i915_enable_ppgtt __read_mostly;
1220extern unsigned int i915_preliminary_hw_support __read_mostly;
1220 1221
1221extern int i915_suspend(struct drm_device *dev, pm_message_t state); 1222extern int i915_suspend(struct drm_device *dev, pm_message_t state);
1222extern int i915_resume(struct drm_device *dev); 1223extern int i915_resume(struct drm_device *dev);
@@ -1341,9 +1342,14 @@ int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
1341static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) 1342static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
1342{ 1343{
1343 struct scatterlist *sg = obj->pages->sgl; 1344 struct scatterlist *sg = obj->pages->sgl;
1344 while (n >= SG_MAX_SINGLE_ALLOC) { 1345 int nents = obj->pages->nents;
1346 while (nents > SG_MAX_SINGLE_ALLOC) {
1347 if (n < SG_MAX_SINGLE_ALLOC - 1)
1348 break;
1349
1345 sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1); 1350 sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
1346 n -= SG_MAX_SINGLE_ALLOC - 1; 1351 n -= SG_MAX_SINGLE_ALLOC - 1;
1352 nents -= SG_MAX_SINGLE_ALLOC - 1;
1347 } 1353 }
1348 return sg_page(sg+n); 1354 return sg_page(sg+n);
1349} 1355}
@@ -1427,7 +1433,7 @@ int __must_check i915_gpu_idle(struct drm_device *dev);
1427int __must_check i915_gem_idle(struct drm_device *dev); 1433int __must_check i915_gem_idle(struct drm_device *dev);
1428int i915_add_request(struct intel_ring_buffer *ring, 1434int i915_add_request(struct intel_ring_buffer *ring,
1429 struct drm_file *file, 1435 struct drm_file *file,
1430 struct drm_i915_gem_request *request); 1436 u32 *seqno);
1431int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, 1437int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
1432 uint32_t seqno); 1438 uint32_t seqno);
1433int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 1439int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 19dbdd7dd564..107f09befe92 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1407,8 +1407,10 @@ out:
1407 return VM_FAULT_NOPAGE; 1407 return VM_FAULT_NOPAGE;
1408 case -ENOMEM: 1408 case -ENOMEM:
1409 return VM_FAULT_OOM; 1409 return VM_FAULT_OOM;
1410 case -ENOSPC:
1411 return VM_FAULT_SIGBUS;
1410 default: 1412 default:
1411 WARN_ON_ONCE(ret); 1413 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1412 return VM_FAULT_SIGBUS; 1414 return VM_FAULT_SIGBUS;
1413 } 1415 }
1414} 1416}
@@ -1822,10 +1824,11 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1822 sg_set_page(sg, page, PAGE_SIZE, 0); 1824 sg_set_page(sg, page, PAGE_SIZE, 0);
1823 } 1825 }
1824 1826
1827 obj->pages = st;
1828
1825 if (i915_gem_object_needs_bit17_swizzle(obj)) 1829 if (i915_gem_object_needs_bit17_swizzle(obj))
1826 i915_gem_object_do_bit_17_swizzle(obj); 1830 i915_gem_object_do_bit_17_swizzle(obj);
1827 1831
1828 obj->pages = st;
1829 return 0; 1832 return 0;
1830 1833
1831err_pages: 1834err_pages:
@@ -1955,11 +1958,12 @@ i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
1955int 1958int
1956i915_add_request(struct intel_ring_buffer *ring, 1959i915_add_request(struct intel_ring_buffer *ring,
1957 struct drm_file *file, 1960 struct drm_file *file,
1958 struct drm_i915_gem_request *request) 1961 u32 *out_seqno)
1959{ 1962{
1960 drm_i915_private_t *dev_priv = ring->dev->dev_private; 1963 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1961 uint32_t seqno; 1964 struct drm_i915_gem_request *request;
1962 u32 request_ring_position; 1965 u32 request_ring_position;
1966 u32 seqno;
1963 int was_empty; 1967 int was_empty;
1964 int ret; 1968 int ret;
1965 1969
@@ -1974,11 +1978,9 @@ i915_add_request(struct intel_ring_buffer *ring,
1974 if (ret) 1978 if (ret)
1975 return ret; 1979 return ret;
1976 1980
1977 if (request == NULL) { 1981 request = kmalloc(sizeof(*request), GFP_KERNEL);
1978 request = kmalloc(sizeof(*request), GFP_KERNEL); 1982 if (request == NULL)
1979 if (request == NULL) 1983 return -ENOMEM;
1980 return -ENOMEM;
1981 }
1982 1984
1983 seqno = i915_gem_next_request_seqno(ring); 1985 seqno = i915_gem_next_request_seqno(ring);
1984 1986
@@ -2030,6 +2032,8 @@ i915_add_request(struct intel_ring_buffer *ring,
2030 } 2032 }
2031 } 2033 }
2032 2034
2035 if (out_seqno)
2036 *out_seqno = seqno;
2033 return 0; 2037 return 0;
2034} 2038}
2035 2039
@@ -3959,6 +3963,9 @@ i915_gem_init_hw(struct drm_device *dev)
3959 if (!intel_enable_gtt()) 3963 if (!intel_enable_gtt())
3960 return -EIO; 3964 return -EIO;
3961 3965
3966 if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
3967 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
3968
3962 i915_gem_l3_remap(dev); 3969 i915_gem_l3_remap(dev);
3963 3970
3964 i915_gem_init_swizzling(dev); 3971 i915_gem_init_swizzling(dev);
@@ -4098,7 +4105,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4098 } 4105 }
4099 4106
4100 BUG_ON(!list_empty(&dev_priv->mm.active_list)); 4107 BUG_ON(!list_empty(&dev_priv->mm.active_list));
4101 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4102 mutex_unlock(&dev->struct_mutex); 4108 mutex_unlock(&dev->struct_mutex);
4103 4109
4104 ret = drm_irq_install(dev); 4110 ret = drm_irq_install(dev);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 64c1be0a9cfd..a4162ddff6c5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -521,7 +521,7 @@
521 */ 521 */
522# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) 522# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
523#define _3D_CHICKEN3 0x02090 523#define _3D_CHICKEN3 0x02090
524#define _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL (1 << 5) 524#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
525 525
526#define MI_MODE 0x0209c 526#define MI_MODE 0x0209c
527# define VS_TIMER_DISPATCH (1 << 6) 527# define VS_TIMER_DISPATCH (1 << 6)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 893f30164b7e..b726b478a4f5 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -219,20 +219,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
219 intel_encoder_to_crt(to_intel_encoder(encoder)); 219 intel_encoder_to_crt(to_intel_encoder(encoder));
220 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 220 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
221 struct drm_i915_private *dev_priv = dev->dev_private; 221 struct drm_i915_private *dev_priv = dev->dev_private;
222 int dpll_md_reg; 222 u32 adpa;
223 u32 adpa, dpll_md;
224
225 dpll_md_reg = DPLL_MD(intel_crtc->pipe);
226
227 /*
228 * Disable separate mode multiplier used when cloning SDVO to CRT
229 * XXX this needs to be adjusted when we really are cloning
230 */
231 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
232 dpll_md = I915_READ(dpll_md_reg);
233 I915_WRITE(dpll_md_reg,
234 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
235 }
236 223
237 adpa = ADPA_HOTPLUG_BITS; 224 adpa = ADPA_HOTPLUG_BITS;
238 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 225 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -742,7 +729,7 @@ void intel_crt_init(struct drm_device *dev)
742 729
743 crt->base.type = INTEL_OUTPUT_ANALOG; 730 crt->base.type = INTEL_OUTPUT_ANALOG;
744 crt->base.cloneable = true; 731 crt->base.cloneable = true;
745 if (IS_HASWELL(dev)) 732 if (IS_HASWELL(dev) || IS_I830(dev))
746 crt->base.crtc_mask = (1 << 0); 733 crt->base.crtc_mask = (1 << 0);
747 else 734 else
748 crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 735 crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2b6ce9b2674a..461a637f1ef7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3253,6 +3253,16 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3253 3253
3254 if (HAS_PCH_CPT(dev)) 3254 if (HAS_PCH_CPT(dev))
3255 intel_cpt_verify_modeset(dev, intel_crtc->pipe); 3255 intel_cpt_verify_modeset(dev, intel_crtc->pipe);
3256
3257 /*
3258 * There seems to be a race in PCH platform hw (at least on some
3259 * outputs) where an enabled pipe still completes any pageflip right
3260 * away (as if the pipe is off) instead of waiting for vblank. As soon
3261 * as the first vblank happend, everything works as expected. Hence just
3262 * wait for one vblank before returning to avoid strange things
3263 * happening.
3264 */
3265 intel_wait_for_vblank(dev, intel_crtc->pipe);
3256} 3266}
3257 3267
3258static void ironlake_crtc_disable(struct drm_crtc *crtc) 3268static void ironlake_crtc_disable(struct drm_crtc *crtc)
@@ -7882,6 +7892,34 @@ struct intel_quirk {
7882 void (*hook)(struct drm_device *dev); 7892 void (*hook)(struct drm_device *dev);
7883}; 7893};
7884 7894
7895/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
7896struct intel_dmi_quirk {
7897 void (*hook)(struct drm_device *dev);
7898 const struct dmi_system_id (*dmi_id_list)[];
7899};
7900
7901static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
7902{
7903 DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
7904 return 1;
7905}
7906
7907static const struct intel_dmi_quirk intel_dmi_quirks[] = {
7908 {
7909 .dmi_id_list = &(const struct dmi_system_id[]) {
7910 {
7911 .callback = intel_dmi_reverse_brightness,
7912 .ident = "NCR Corporation",
7913 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
7914 DMI_MATCH(DMI_PRODUCT_NAME, ""),
7915 },
7916 },
7917 { } /* terminating entry */
7918 },
7919 .hook = quirk_invert_brightness,
7920 },
7921};
7922
7885static struct intel_quirk intel_quirks[] = { 7923static struct intel_quirk intel_quirks[] = {
7886 /* HP Mini needs pipe A force quirk (LP: #322104) */ 7924 /* HP Mini needs pipe A force quirk (LP: #322104) */
7887 { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, 7925 { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
@@ -7892,8 +7930,7 @@ static struct intel_quirk intel_quirks[] = {
7892 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ 7930 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
7893 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, 7931 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
7894 7932
7895 /* 855 & before need to leave pipe A & dpll A up */ 7933 /* 830/845 need to leave pipe A & dpll A up */
7896 { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
7897 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, 7934 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
7898 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, 7935 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
7899 7936
@@ -7922,6 +7959,10 @@ static void intel_init_quirks(struct drm_device *dev)
7922 q->subsystem_device == PCI_ANY_ID)) 7959 q->subsystem_device == PCI_ANY_ID))
7923 q->hook(dev); 7960 q->hook(dev);
7924 } 7961 }
7962 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
7963 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
7964 intel_dmi_quirks[i].hook(dev);
7965 }
7925} 7966}
7926 7967
7927/* Disable the VGA plane that we never use */ 7968/* Disable the VGA plane that we never use */
@@ -8049,29 +8090,42 @@ static void intel_enable_pipe_a(struct drm_device *dev)
8049 8090
8050} 8091}
8051 8092
8093static bool
8094intel_check_plane_mapping(struct intel_crtc *crtc)
8095{
8096 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
8097 u32 reg, val;
8098
8099 if (dev_priv->num_pipe == 1)
8100 return true;
8101
8102 reg = DSPCNTR(!crtc->plane);
8103 val = I915_READ(reg);
8104
8105 if ((val & DISPLAY_PLANE_ENABLE) &&
8106 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
8107 return false;
8108
8109 return true;
8110}
8111
8052static void intel_sanitize_crtc(struct intel_crtc *crtc) 8112static void intel_sanitize_crtc(struct intel_crtc *crtc)
8053{ 8113{
8054 struct drm_device *dev = crtc->base.dev; 8114 struct drm_device *dev = crtc->base.dev;
8055 struct drm_i915_private *dev_priv = dev->dev_private; 8115 struct drm_i915_private *dev_priv = dev->dev_private;
8056 u32 reg, val; 8116 u32 reg;
8057 8117
8058 /* Clear any frame start delays used for debugging left by the BIOS */ 8118 /* Clear any frame start delays used for debugging left by the BIOS */
8059 reg = PIPECONF(crtc->pipe); 8119 reg = PIPECONF(crtc->pipe);
8060 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 8120 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
8061 8121
8062 /* We need to sanitize the plane -> pipe mapping first because this will 8122 /* We need to sanitize the plane -> pipe mapping first because this will
8063 * disable the crtc (and hence change the state) if it is wrong. */ 8123 * disable the crtc (and hence change the state) if it is wrong. Note
8064 if (!HAS_PCH_SPLIT(dev)) { 8124 * that gen4+ has a fixed plane -> pipe mapping. */
8125 if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
8065 struct intel_connector *connector; 8126 struct intel_connector *connector;
8066 bool plane; 8127 bool plane;
8067 8128
8068 reg = DSPCNTR(crtc->plane);
8069 val = I915_READ(reg);
8070
8071 if ((val & DISPLAY_PLANE_ENABLE) == 0 &&
8072 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
8073 goto ok;
8074
8075 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", 8129 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
8076 crtc->base.base.id); 8130 crtc->base.base.id);
8077 8131
@@ -8095,7 +8149,6 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
8095 WARN_ON(crtc->active); 8149 WARN_ON(crtc->active);
8096 crtc->base.enabled = false; 8150 crtc->base.enabled = false;
8097 } 8151 }
8098ok:
8099 8152
8100 if (dev_priv->quirks & QUIRK_PIPEA_FORCE && 8153 if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
8101 crtc->pipe == PIPE_A && !crtc->active) { 8154 crtc->pipe == PIPE_A && !crtc->active) {
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d1e8ddb2d6c0..368ed8ef1600 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1797,7 +1797,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1797 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 1797 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
1798 break; 1798 break;
1799 if (i == intel_dp->lane_count && voltage_tries == 5) { 1799 if (i == intel_dp->lane_count && voltage_tries == 5) {
1800 if (++loop_tries == 5) { 1800 ++loop_tries;
1801 if (loop_tries == 5) {
1801 DRM_DEBUG_KMS("too many full retries, give up\n"); 1802 DRM_DEBUG_KMS("too many full retries, give up\n");
1802 break; 1803 break;
1803 } 1804 }
@@ -1807,11 +1808,15 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1807 } 1808 }
1808 1809
1809 /* Check to see if we've tried the same voltage 5 times */ 1810 /* Check to see if we've tried the same voltage 5 times */
1810 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) { 1811 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
1811 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
1812 voltage_tries = 0;
1813 } else
1814 ++voltage_tries; 1812 ++voltage_tries;
1813 if (voltage_tries == 5) {
1814 DRM_DEBUG_KMS("too many voltage retries, give up\n");
1815 break;
1816 }
1817 } else
1818 voltage_tries = 0;
1819 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
1815 1820
1816 /* Compute new intel_dp->train_set as requested by target */ 1821 /* Compute new intel_dp->train_set as requested by target */
1817 intel_get_adjust_train(intel_dp, link_status); 1822 intel_get_adjust_train(intel_dp, link_status);
@@ -2369,8 +2374,9 @@ static void
2369intel_dp_destroy(struct drm_connector *connector) 2374intel_dp_destroy(struct drm_connector *connector)
2370{ 2375{
2371 struct drm_device *dev = connector->dev; 2376 struct drm_device *dev = connector->dev;
2377 struct intel_dp *intel_dp = intel_attached_dp(connector);
2372 2378
2373 if (intel_dpd_is_edp(dev)) 2379 if (is_edp(intel_dp))
2374 intel_panel_destroy_backlight(dev); 2380 intel_panel_destroy_backlight(dev);
2375 2381
2376 drm_sysfs_connector_remove(connector); 2382 drm_sysfs_connector_remove(connector);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e3166df55daa..edba93b3474b 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -777,6 +777,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
777 DMI_MATCH(DMI_BOARD_NAME, "D525TUD"), 777 DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
778 }, 778 },
779 }, 779 },
780 {
781 .callback = intel_no_lvds_dmi_callback,
782 .ident = "Supermicro X7SPA-H",
783 .matches = {
784 DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
785 DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
786 },
787 },
780 788
781 { } /* terminating entry */ 789 { } /* terminating entry */
782}; 790};
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index ebff850a9ab6..d7bc817f51a0 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -209,7 +209,6 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
209} 209}
210 210
211static int intel_overlay_do_wait_request(struct intel_overlay *overlay, 211static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
212 struct drm_i915_gem_request *request,
213 void (*tail)(struct intel_overlay *)) 212 void (*tail)(struct intel_overlay *))
214{ 213{
215 struct drm_device *dev = overlay->dev; 214 struct drm_device *dev = overlay->dev;
@@ -218,12 +217,10 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
218 int ret; 217 int ret;
219 218
220 BUG_ON(overlay->last_flip_req); 219 BUG_ON(overlay->last_flip_req);
221 ret = i915_add_request(ring, NULL, request); 220 ret = i915_add_request(ring, NULL, &overlay->last_flip_req);
222 if (ret) { 221 if (ret)
223 kfree(request); 222 return ret;
224 return ret; 223
225 }
226 overlay->last_flip_req = request->seqno;
227 overlay->flip_tail = tail; 224 overlay->flip_tail = tail;
228 ret = i915_wait_seqno(ring, overlay->last_flip_req); 225 ret = i915_wait_seqno(ring, overlay->last_flip_req);
229 if (ret) 226 if (ret)
@@ -240,7 +237,6 @@ static int intel_overlay_on(struct intel_overlay *overlay)
240 struct drm_device *dev = overlay->dev; 237 struct drm_device *dev = overlay->dev;
241 struct drm_i915_private *dev_priv = dev->dev_private; 238 struct drm_i915_private *dev_priv = dev->dev_private;
242 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 239 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
243 struct drm_i915_gem_request *request;
244 int ret; 240 int ret;
245 241
246 BUG_ON(overlay->active); 242 BUG_ON(overlay->active);
@@ -248,17 +244,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
248 244
249 WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); 245 WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
250 246
251 request = kzalloc(sizeof(*request), GFP_KERNEL);
252 if (request == NULL) {
253 ret = -ENOMEM;
254 goto out;
255 }
256
257 ret = intel_ring_begin(ring, 4); 247 ret = intel_ring_begin(ring, 4);
258 if (ret) { 248 if (ret)
259 kfree(request); 249 return ret;
260 goto out;
261 }
262 250
263 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON); 251 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
264 intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE); 252 intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
@@ -266,9 +254,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
266 intel_ring_emit(ring, MI_NOOP); 254 intel_ring_emit(ring, MI_NOOP);
267 intel_ring_advance(ring); 255 intel_ring_advance(ring);
268 256
269 ret = intel_overlay_do_wait_request(overlay, request, NULL); 257 return intel_overlay_do_wait_request(overlay, NULL);
270out:
271 return ret;
272} 258}
273 259
274/* overlay needs to be enabled in OCMD reg */ 260/* overlay needs to be enabled in OCMD reg */
@@ -278,17 +264,12 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
278 struct drm_device *dev = overlay->dev; 264 struct drm_device *dev = overlay->dev;
279 drm_i915_private_t *dev_priv = dev->dev_private; 265 drm_i915_private_t *dev_priv = dev->dev_private;
280 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 266 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
281 struct drm_i915_gem_request *request;
282 u32 flip_addr = overlay->flip_addr; 267 u32 flip_addr = overlay->flip_addr;
283 u32 tmp; 268 u32 tmp;
284 int ret; 269 int ret;
285 270
286 BUG_ON(!overlay->active); 271 BUG_ON(!overlay->active);
287 272
288 request = kzalloc(sizeof(*request), GFP_KERNEL);
289 if (request == NULL)
290 return -ENOMEM;
291
292 if (load_polyphase_filter) 273 if (load_polyphase_filter)
293 flip_addr |= OFC_UPDATE; 274 flip_addr |= OFC_UPDATE;
294 275
@@ -298,22 +279,14 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
298 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); 279 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
299 280
300 ret = intel_ring_begin(ring, 2); 281 ret = intel_ring_begin(ring, 2);
301 if (ret) { 282 if (ret)
302 kfree(request);
303 return ret; 283 return ret;
304 } 284
305 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); 285 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
306 intel_ring_emit(ring, flip_addr); 286 intel_ring_emit(ring, flip_addr);
307 intel_ring_advance(ring); 287 intel_ring_advance(ring);
308 288
309 ret = i915_add_request(ring, NULL, request); 289 return i915_add_request(ring, NULL, &overlay->last_flip_req);
310 if (ret) {
311 kfree(request);
312 return ret;
313 }
314
315 overlay->last_flip_req = request->seqno;
316 return 0;
317} 290}
318 291
319static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) 292static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
@@ -349,15 +322,10 @@ static int intel_overlay_off(struct intel_overlay *overlay)
349 struct drm_i915_private *dev_priv = dev->dev_private; 322 struct drm_i915_private *dev_priv = dev->dev_private;
350 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 323 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
351 u32 flip_addr = overlay->flip_addr; 324 u32 flip_addr = overlay->flip_addr;
352 struct drm_i915_gem_request *request;
353 int ret; 325 int ret;
354 326
355 BUG_ON(!overlay->active); 327 BUG_ON(!overlay->active);
356 328
357 request = kzalloc(sizeof(*request), GFP_KERNEL);
358 if (request == NULL)
359 return -ENOMEM;
360
361 /* According to intel docs the overlay hw may hang (when switching 329 /* According to intel docs the overlay hw may hang (when switching
362 * off) without loading the filter coeffs. It is however unclear whether 330 * off) without loading the filter coeffs. It is however unclear whether
363 * this applies to the disabling of the overlay or to the switching off 331 * this applies to the disabling of the overlay or to the switching off
@@ -365,22 +333,28 @@ static int intel_overlay_off(struct intel_overlay *overlay)
365 flip_addr |= OFC_UPDATE; 333 flip_addr |= OFC_UPDATE;
366 334
367 ret = intel_ring_begin(ring, 6); 335 ret = intel_ring_begin(ring, 6);
368 if (ret) { 336 if (ret)
369 kfree(request);
370 return ret; 337 return ret;
371 } 338
372 /* wait for overlay to go idle */ 339 /* wait for overlay to go idle */
373 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); 340 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
374 intel_ring_emit(ring, flip_addr); 341 intel_ring_emit(ring, flip_addr);
375 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 342 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
376 /* turn overlay off */ 343 /* turn overlay off */
377 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF); 344 if (IS_I830(dev)) {
378 intel_ring_emit(ring, flip_addr); 345 /* Workaround: Don't disable the overlay fully, since otherwise
379 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 346 * it dies on the next OVERLAY_ON cmd. */
347 intel_ring_emit(ring, MI_NOOP);
348 intel_ring_emit(ring, MI_NOOP);
349 intel_ring_emit(ring, MI_NOOP);
350 } else {
351 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
352 intel_ring_emit(ring, flip_addr);
353 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
354 }
380 intel_ring_advance(ring); 355 intel_ring_advance(ring);
381 356
382 return intel_overlay_do_wait_request(overlay, request, 357 return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
383 intel_overlay_off_tail);
384} 358}
385 359
386/* recover from an interruption due to a signal 360/* recover from an interruption due to a signal
@@ -425,24 +399,16 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
425 return 0; 399 return 0;
426 400
427 if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) { 401 if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
428 struct drm_i915_gem_request *request;
429
430 /* synchronous slowpath */ 402 /* synchronous slowpath */
431 request = kzalloc(sizeof(*request), GFP_KERNEL);
432 if (request == NULL)
433 return -ENOMEM;
434
435 ret = intel_ring_begin(ring, 2); 403 ret = intel_ring_begin(ring, 2);
436 if (ret) { 404 if (ret)
437 kfree(request);
438 return ret; 405 return ret;
439 }
440 406
441 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 407 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
442 intel_ring_emit(ring, MI_NOOP); 408 intel_ring_emit(ring, MI_NOOP);
443 intel_ring_advance(ring); 409 intel_ring_advance(ring);
444 410
445 ret = intel_overlay_do_wait_request(overlay, request, 411 ret = intel_overlay_do_wait_request(overlay,
446 intel_overlay_release_old_vid_tail); 412 intel_overlay_release_old_vid_tail);
447 if (ret) 413 if (ret)
448 return ret; 414 return ret;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e019b2369861..e2aacd329545 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -435,7 +435,7 @@ int intel_panel_setup_backlight(struct drm_device *dev)
435 props.type = BACKLIGHT_RAW; 435 props.type = BACKLIGHT_RAW;
436 props.max_brightness = _intel_panel_get_max_backlight(dev); 436 props.max_brightness = _intel_panel_get_max_backlight(dev);
437 if (props.max_brightness == 0) { 437 if (props.max_brightness == 0) {
438 DRM_ERROR("Failed to get maximum backlight value\n"); 438 DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
439 return -ENODEV; 439 return -ENODEV;
440 } 440 }
441 dev_priv->backlight = 441 dev_priv->backlight =
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b3b4b6cea8b0..72f41aaa71ff 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3442,8 +3442,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
3442 GEN6_RCCUNIT_CLOCK_GATE_DISABLE); 3442 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
3443 3443
3444 /* Bspec says we need to always set all mask bits. */ 3444 /* Bspec says we need to always set all mask bits. */
3445 I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) | 3445 I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
3446 _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL); 3446 _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
3447 3447
3448 /* 3448 /*
3449 * According to the spec the following bits should be 3449 * According to the spec the following bits should be
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 0007a4d9bf6e..79d308da29ff 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -139,6 +139,11 @@ struct intel_sdvo {
139 139
140 /* DDC bus used by this SDVO encoder */ 140 /* DDC bus used by this SDVO encoder */
141 uint8_t ddc_bus; 141 uint8_t ddc_bus;
142
143 /*
144 * the sdvo flag gets lost in round trip: dtd->adjusted_mode->dtd
145 */
146 uint8_t dtd_sdvo_flags;
142}; 147};
143 148
144struct intel_sdvo_connector { 149struct intel_sdvo_connector {
@@ -889,6 +894,45 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
889} 894}
890#endif 895#endif
891 896
897static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
898 unsigned if_index, uint8_t tx_rate,
899 uint8_t *data, unsigned length)
900{
901 uint8_t set_buf_index[2] = { if_index, 0 };
902 uint8_t hbuf_size, tmp[8];
903 int i;
904
905 if (!intel_sdvo_set_value(intel_sdvo,
906 SDVO_CMD_SET_HBUF_INDEX,
907 set_buf_index, 2))
908 return false;
909
910 if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
911 &hbuf_size, 1))
912 return false;
913
914 /* Buffer size is 0 based, hooray! */
915 hbuf_size++;
916
917 DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
918 if_index, length, hbuf_size);
919
920 for (i = 0; i < hbuf_size; i += 8) {
921 memset(tmp, 0, 8);
922 if (i < length)
923 memcpy(tmp, data + i, min_t(unsigned, 8, length - i));
924
925 if (!intel_sdvo_set_value(intel_sdvo,
926 SDVO_CMD_SET_HBUF_DATA,
927 tmp, 8))
928 return false;
929 }
930
931 return intel_sdvo_set_value(intel_sdvo,
932 SDVO_CMD_SET_HBUF_TXRATE,
933 &tx_rate, 1);
934}
935
892static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) 936static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
893{ 937{
894 struct dip_infoframe avi_if = { 938 struct dip_infoframe avi_if = {
@@ -896,11 +940,7 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
896 .ver = DIP_VERSION_AVI, 940 .ver = DIP_VERSION_AVI,
897 .len = DIP_LEN_AVI, 941 .len = DIP_LEN_AVI,
898 }; 942 };
899 uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
900 uint8_t set_buf_index[2] = { 1, 0 };
901 uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)]; 943 uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
902 uint64_t *data = (uint64_t *)sdvo_data;
903 unsigned i;
904 944
905 intel_dip_infoframe_csum(&avi_if); 945 intel_dip_infoframe_csum(&avi_if);
906 946
@@ -910,22 +950,9 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
910 sdvo_data[3] = avi_if.checksum; 950 sdvo_data[3] = avi_if.checksum;
911 memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi)); 951 memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
912 952
913 if (!intel_sdvo_set_value(intel_sdvo, 953 return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
914 SDVO_CMD_SET_HBUF_INDEX, 954 SDVO_HBUF_TX_VSYNC,
915 set_buf_index, 2)) 955 sdvo_data, sizeof(sdvo_data));
916 return false;
917
918 for (i = 0; i < sizeof(sdvo_data); i += 8) {
919 if (!intel_sdvo_set_value(intel_sdvo,
920 SDVO_CMD_SET_HBUF_DATA,
921 data, 8))
922 return false;
923 data++;
924 }
925
926 return intel_sdvo_set_value(intel_sdvo,
927 SDVO_CMD_SET_HBUF_TXRATE,
928 &tx_rate, 1);
929} 956}
930 957
931static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo) 958static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
@@ -984,6 +1011,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
984 return false; 1011 return false;
985 1012
986 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); 1013 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
1014 intel_sdvo->dtd_sdvo_flags = input_dtd.part2.sdvo_flags;
987 1015
988 return true; 1016 return true;
989} 1017}
@@ -1092,6 +1120,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1092 * adjusted_mode. 1120 * adjusted_mode.
1093 */ 1121 */
1094 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); 1122 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1123 if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
1124 input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
1095 if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) 1125 if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
1096 DRM_INFO("Setting input timings on %s failed\n", 1126 DRM_INFO("Setting input timings on %s failed\n",
1097 SDVO_NAME(intel_sdvo)); 1127 SDVO_NAME(intel_sdvo));
@@ -2277,10 +2307,8 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2277 intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; 2307 intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
2278 } 2308 }
2279 2309
2280 /* SDVO LVDS is cloneable because the SDVO encoder does the upscaling, 2310 /* SDVO LVDS is not cloneable because the input mode gets adjusted by the encoder */
2281 * as opposed to native LVDS, where we upscale with the panel-fitter 2311 intel_sdvo->base.cloneable = false;
2282 * (and hence only the native LVDS resolution could be cloned). */
2283 intel_sdvo->base.cloneable = true;
2284 2312
2285 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2313 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
2286 if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) 2314 if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 9d030142ee43..770bdd6ecd9f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -708,6 +708,8 @@ struct intel_sdvo_enhancements_arg {
708#define SDVO_CMD_SET_AUDIO_STAT 0x91 708#define SDVO_CMD_SET_AUDIO_STAT 0x91
709#define SDVO_CMD_GET_AUDIO_STAT 0x92 709#define SDVO_CMD_GET_AUDIO_STAT 0x92
710#define SDVO_CMD_SET_HBUF_INDEX 0x93 710#define SDVO_CMD_SET_HBUF_INDEX 0x93
711 #define SDVO_HBUF_INDEX_ELD 0
712 #define SDVO_HBUF_INDEX_AVI_IF 1
711#define SDVO_CMD_GET_HBUF_INDEX 0x94 713#define SDVO_CMD_GET_HBUF_INDEX 0x94
712#define SDVO_CMD_GET_HBUF_INFO 0x95 714#define SDVO_CMD_GET_HBUF_INFO 0x95
713#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96 715#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
diff --git a/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
index 1f34549aff18..70586fde69cf 100644
--- a/drivers/gpu/drm/nouveau/core/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
@@ -39,6 +39,11 @@ nouveau_gpuobj_destroy(struct nouveau_gpuobj *gpuobj)
39 nv_wo32(gpuobj, i, 0x00000000); 39 nv_wo32(gpuobj, i, 0x00000000);
40 } 40 }
41 41
42 if (gpuobj->node) {
43 nouveau_mm_free(&nv_gpuobj(gpuobj->parent)->heap,
44 &gpuobj->node);
45 }
46
42 if (gpuobj->heap.block_size) 47 if (gpuobj->heap.block_size)
43 nouveau_mm_fini(&gpuobj->heap); 48 nouveau_mm_fini(&gpuobj->heap);
44 49
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index bfddf87926dd..a6d3cd6490f7 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -218,13 +218,16 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
218 node = kzalloc(sizeof(*node), GFP_KERNEL); 218 node = kzalloc(sizeof(*node), GFP_KERNEL);
219 if (!node) 219 if (!node)
220 return -ENOMEM; 220 return -ENOMEM;
221 node->offset = roundup(offset, mm->block_size); 221
222 node->length = rounddown(offset + length, mm->block_size) - node->offset; 222 if (length) {
223 node->offset = roundup(offset, mm->block_size);
224 node->length = rounddown(offset + length, mm->block_size);
225 node->length -= node->offset;
226 }
223 227
224 list_add_tail(&node->nl_entry, &mm->nodes); 228 list_add_tail(&node->nl_entry, &mm->nodes);
225 list_add_tail(&node->fl_entry, &mm->free); 229 list_add_tail(&node->fl_entry, &mm->free);
226 mm->heap_nodes++; 230 mm->heap_nodes++;
227 mm->heap_size += length;
228 return 0; 231 return 0;
229} 232}
230 233
@@ -236,7 +239,7 @@ nouveau_mm_fini(struct nouveau_mm *mm)
236 int nodes = 0; 239 int nodes = 0;
237 240
238 list_for_each_entry(node, &mm->nodes, nl_entry) { 241 list_for_each_entry(node, &mm->nodes, nl_entry) {
239 if (nodes++ == mm->heap_nodes) 242 if (WARN_ON(nodes++ == mm->heap_nodes))
240 return -EBUSY; 243 return -EBUSY;
241 } 244 }
242 245
diff --git a/drivers/gpu/drm/nouveau/core/core/parent.c b/drivers/gpu/drm/nouveau/core/core/parent.c
index a1ea034611d5..db7c54943102 100644
--- a/drivers/gpu/drm/nouveau/core/core/parent.c
+++ b/drivers/gpu/drm/nouveau/core/core/parent.c
@@ -101,23 +101,6 @@ nouveau_parent_create_(struct nouveau_object *parent,
101 return 0; 101 return 0;
102} 102}
103 103
104int
105_nouveau_parent_ctor(struct nouveau_object *parent,
106 struct nouveau_object *engine,
107 struct nouveau_oclass *oclass, void *data, u32 size,
108 struct nouveau_object **pobject)
109{
110 struct nouveau_parent *object;
111 int ret;
112
113 ret = nouveau_parent_create(parent, engine, oclass, 0, NULL, 0, &object);
114 *pobject = nv_object(object);
115 if (ret)
116 return ret;
117
118 return 0;
119}
120
121void 104void
122nouveau_parent_destroy(struct nouveau_parent *parent) 105nouveau_parent_destroy(struct nouveau_parent *parent)
123{ 106{
diff --git a/drivers/gpu/drm/nouveau/core/include/core/mm.h b/drivers/gpu/drm/nouveau/core/include/core/mm.h
index 9ee9bf4028ca..975137ba34a6 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/mm.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/mm.h
@@ -19,7 +19,6 @@ struct nouveau_mm {
19 19
20 u32 block_size; 20 u32 block_size;
21 int heap_nodes; 21 int heap_nodes;
22 u32 heap_size;
23}; 22};
24 23
25int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block); 24int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/parent.h b/drivers/gpu/drm/nouveau/core/include/core/parent.h
index d3aa251a5eb6..3c2e940eb0f8 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/parent.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/parent.h
@@ -50,9 +50,6 @@ int nouveau_parent_create_(struct nouveau_object *, struct nouveau_object *,
50 int size, void **); 50 int size, void **);
51void nouveau_parent_destroy(struct nouveau_parent *); 51void nouveau_parent_destroy(struct nouveau_parent *);
52 52
53int _nouveau_parent_ctor(struct nouveau_object *, struct nouveau_object *,
54 struct nouveau_oclass *, void *, u32,
55 struct nouveau_object **);
56void _nouveau_parent_dtor(struct nouveau_object *); 53void _nouveau_parent_dtor(struct nouveau_object *);
57#define _nouveau_parent_init _nouveau_object_init 54#define _nouveau_parent_init _nouveau_object_init
58#define _nouveau_parent_fini _nouveau_object_fini 55#define _nouveau_parent_fini _nouveau_object_fini
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
index 49bff901544c..c24ec8ab3db4 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
@@ -26,7 +26,7 @@ void nouveau_timer_alarm(void *, u32 nsec, struct nouveau_alarm *);
26struct nouveau_timer { 26struct nouveau_timer {
27 struct nouveau_subdev base; 27 struct nouveau_subdev base;
28 u64 (*read)(struct nouveau_timer *); 28 u64 (*read)(struct nouveau_timer *);
29 void (*alarm)(struct nouveau_timer *, u32 time, struct nouveau_alarm *); 29 void (*alarm)(struct nouveau_timer *, u64 time, struct nouveau_alarm *);
30}; 30};
31 31
32static inline struct nouveau_timer * 32static inline struct nouveau_timer *
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index 2fbb6df697cd..70ca7d5a1aa1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -72,7 +72,7 @@ nouveau_bios_shadow_of(struct nouveau_bios *bios)
72 } 72 }
73 73
74 data = of_get_property(dn, "NVDA,BMP", &size); 74 data = of_get_property(dn, "NVDA,BMP", &size);
75 if (data) { 75 if (data && size) {
76 bios->size = size; 76 bios->size = size;
77 bios->data = kmalloc(bios->size, GFP_KERNEL); 77 bios->data = kmalloc(bios->size, GFP_KERNEL);
78 if (bios->data) 78 if (bios->data)
@@ -104,6 +104,9 @@ nouveau_bios_shadow_pramin(struct nouveau_bios *bios)
104 goto out; 104 goto out;
105 105
106 bios->size = nv_rd08(bios, 0x700002) * 512; 106 bios->size = nv_rd08(bios, 0x700002) * 512;
107 if (!bios->size)
108 goto out;
109
107 bios->data = kmalloc(bios->size, GFP_KERNEL); 110 bios->data = kmalloc(bios->size, GFP_KERNEL);
108 if (bios->data) { 111 if (bios->data) {
109 for (i = 0; i < bios->size; i++) 112 for (i = 0; i < bios->size; i++)
@@ -155,6 +158,9 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
155 158
156 /* read entire bios image to system memory */ 159 /* read entire bios image to system memory */
157 bios->size = nv_rd08(bios, 0x300002) * 512; 160 bios->size = nv_rd08(bios, 0x300002) * 512;
161 if (!bios->size)
162 goto out;
163
158 bios->data = kmalloc(bios->size, GFP_KERNEL); 164 bios->data = kmalloc(bios->size, GFP_KERNEL);
159 if (bios->data) { 165 if (bios->data) {
160 for (i = 0; i < bios->size; i++) 166 for (i = 0; i < bios->size; i++)
@@ -185,23 +191,30 @@ static void
185nouveau_bios_shadow_acpi(struct nouveau_bios *bios) 191nouveau_bios_shadow_acpi(struct nouveau_bios *bios)
186{ 192{
187 struct pci_dev *pdev = nv_device(bios)->pdev; 193 struct pci_dev *pdev = nv_device(bios)->pdev;
188 int cnt = 65536 / 4096; 194 int ret, cnt, i;
189 int ret;
190 195
191 if (!nouveau_acpi_rom_supported(pdev)) 196 if (!nouveau_acpi_rom_supported(pdev)) {
197 bios->data = NULL;
192 return; 198 return;
199 }
193 200
194 bios->data = kmalloc(65536, GFP_KERNEL);
195 bios->size = 0; 201 bios->size = 0;
196 if (!bios->data) 202 bios->data = kmalloc(4096, GFP_KERNEL);
197 return; 203 if (bios->data) {
204 if (nouveau_acpi_get_bios_chunk(bios->data, 0, 4096) == 4096)
205 bios->size = bios->data[2] * 512;
206 kfree(bios->data);
207 }
198 208
199 while (cnt--) { 209 if (!bios->size)
200 ret = nouveau_acpi_get_bios_chunk(bios->data, bios->size, 4096); 210 return;
201 if (ret != 4096)
202 return;
203 211
204 bios->size += 4096; 212 bios->data = kmalloc(bios->size, GFP_KERNEL);
213 for (i = 0; bios->data && i < bios->size; i += cnt) {
214 cnt = min((bios->size - i), (u32)4096);
215 ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
216 if (ret != cnt)
217 break;
205 } 218 }
206} 219}
207 220
@@ -230,12 +243,14 @@ nouveau_bios_shadow_pci(struct nouveau_bios *bios)
230static int 243static int
231nouveau_bios_score(struct nouveau_bios *bios, const bool writeable) 244nouveau_bios_score(struct nouveau_bios *bios, const bool writeable)
232{ 245{
233 if (!bios->data || bios->data[0] != 0x55 || bios->data[1] != 0xAA) { 246 if (bios->size < 3 || !bios->data || bios->data[0] != 0x55 ||
247 bios->data[1] != 0xAA) {
234 nv_info(bios, "... signature not found\n"); 248 nv_info(bios, "... signature not found\n");
235 return 0; 249 return 0;
236 } 250 }
237 251
238 if (nvbios_checksum(bios->data, bios->data[2] * 512)) { 252 if (nvbios_checksum(bios->data,
253 min_t(u32, bios->data[2] * 512, bios->size))) {
239 nv_info(bios, "... checksum invalid\n"); 254 nv_info(bios, "... checksum invalid\n");
240 /* if a ro image is somewhat bad, it's probably all rubbish */ 255 /* if a ro image is somewhat bad, it's probably all rubbish */
241 return writeable ? 2 : 1; 256 return writeable ? 2 : 1;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index 9ed6e728a94c..7d750382a833 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -43,7 +43,7 @@ dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
43 *ver = nv_ro08(bios, dcb); 43 *ver = nv_ro08(bios, dcb);
44 44
45 if (*ver >= 0x41) { 45 if (*ver >= 0x41) {
46 nv_warn(bios, "DCB *ver 0x%02x unknown\n", *ver); 46 nv_warn(bios, "DCB version 0x%02x unknown\n", *ver);
47 return 0x0000; 47 return 0x0000;
48 } else 48 } else
49 if (*ver >= 0x30) { 49 if (*ver >= 0x30) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
index 5e5f4cddae3c..f835501203e5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
@@ -157,11 +157,10 @@ pll_map_reg(struct nouveau_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
157 while (map->reg) { 157 while (map->reg) {
158 if (map->reg == reg && *ver >= 0x20) { 158 if (map->reg == reg && *ver >= 0x20) {
159 u16 addr = (data += hdr); 159 u16 addr = (data += hdr);
160 *type = map->type;
160 while (cnt--) { 161 while (cnt--) {
161 if (nv_ro32(bios, data) == map->reg) { 162 if (nv_ro32(bios, data) == map->reg)
162 *type = map->type;
163 return data; 163 return data;
164 }
165 data += *len; 164 data += *len;
166 } 165 }
167 return addr; 166 return addr;
@@ -200,11 +199,10 @@ pll_map_type(struct nouveau_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
200 while (map->reg) { 199 while (map->reg) {
201 if (map->type == type && *ver >= 0x20) { 200 if (map->type == type && *ver >= 0x20) {
202 u16 addr = (data += hdr); 201 u16 addr = (data += hdr);
202 *reg = map->reg;
203 while (cnt--) { 203 while (cnt--) {
204 if (nv_ro32(bios, data) == map->reg) { 204 if (nv_ro32(bios, data) == map->reg)
205 *reg = map->reg;
206 return data; 205 return data;
207 }
208 data += *len; 206 data += *len;
209 } 207 }
210 return addr; 208 return addr;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
index fd181fbceddb..f4147f67eda6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
@@ -90,6 +90,7 @@ nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
90 return ret; 90 return ret;
91 91
92 priv->base.pll_set = nv50_clock_pll_set; 92 priv->base.pll_set = nv50_clock_pll_set;
93 priv->base.pll_calc = nv04_clock_pll_calc;
93 return 0; 94 return 0;
94} 95}
95 96
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index 436e9efe7ef5..5f570806143a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -219,13 +219,11 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
219 ((priv->base.ram.size & 0x000000ff) << 32); 219 ((priv->base.ram.size & 0x000000ff) << 32);
220 220
221 tags = nv_rd32(priv, 0x100320); 221 tags = nv_rd32(priv, 0x100320);
222 if (tags) { 222 ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1);
223 ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1); 223 if (ret)
224 if (ret) 224 return ret;
225 return ret;
226 225
227 nv_debug(priv, "%d compression tags\n", tags); 226 nv_debug(priv, "%d compression tags\n", tags);
228 }
229 227
230 size = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail; 228 size = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
231 switch (device->chipset) { 229 switch (device->chipset) {
@@ -237,6 +235,7 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
237 return ret; 235 return ret;
238 236
239 priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12; 237 priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12;
238 priv->base.ram.type = NV_MEM_TYPE_STOLEN;
240 break; 239 break;
241 default: 240 default:
242 ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size, 241 ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size,
@@ -277,7 +276,6 @@ nv50_fb_dtor(struct nouveau_object *object)
277 __free_page(priv->r100c08_page); 276 __free_page(priv->r100c08_page);
278 } 277 }
279 278
280 nouveau_mm_fini(&priv->base.vram);
281 nouveau_fb_destroy(&priv->base); 279 nouveau_fb_destroy(&priv->base);
282} 280}
283 281
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 3d2c88310f98..dbfc2abf0cfe 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -292,7 +292,7 @@ nouveau_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
292 case DCB_I2C_NVIO_BIT: 292 case DCB_I2C_NVIO_BIT:
293 port->drive = info.drive & 0x0f; 293 port->drive = info.drive & 0x0f;
294 if (device->card_type < NV_D0) { 294 if (device->card_type < NV_D0) {
295 if (info.drive >= ARRAY_SIZE(nv50_i2c_port)) 295 if (port->drive >= ARRAY_SIZE(nv50_i2c_port))
296 break; 296 break;
297 port->drive = nv50_i2c_port[port->drive]; 297 port->drive = nv50_i2c_port[port->drive];
298 port->sense = port->drive; 298 port->sense = port->drive;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
index b29237970fa0..523178685180 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -134,7 +134,7 @@ nouveau_therm_fan_sense(struct nouveau_therm *therm)
134 end = ptimer->read(ptimer); 134 end = ptimer->read(ptimer);
135 135
136 if (cycles == 5) { 136 if (cycles == 5) {
137 tach = (u64)60000000000; 137 tach = (u64)60000000000ULL;
138 do_div(tach, (end - start)); 138 do_div(tach, (end - start));
139 return tach; 139 return tach;
140 } else 140 } else
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
index f87a7a3eb4e7..9360ddd469e7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
@@ -92,7 +92,7 @@ nv50_fan_pwm_clock(struct nouveau_therm *therm)
92 if (nv_rd32(therm, 0xc040) & 0x800000) { 92 if (nv_rd32(therm, 0xc040) & 0x800000) {
93 /* Use the HOST clock (100 MHz) 93 /* Use the HOST clock (100 MHz)
94 * Where does this constant(2.4) comes from? */ 94 * Where does this constant(2.4) comes from? */
95 pwm_clock = (100000000 >> pwm_div) / 10 / 24; 95 pwm_clock = (100000000 >> pwm_div) * 10 / 24;
96 } else { 96 } else {
97 /* Where does this constant(20) comes from? */ 97 /* Where does this constant(20) comes from? */
98 pwm_clock = (crystal * 1000) >> pwm_div; 98 pwm_clock = (crystal * 1000) >> pwm_div;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
index 49976be4d73b..c26ca9bef671 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -85,7 +85,7 @@ nv04_timer_alarm_trigger(struct nouveau_timer *ptimer)
85} 85}
86 86
87static void 87static void
88nv04_timer_alarm(struct nouveau_timer *ptimer, u32 time, 88nv04_timer_alarm(struct nouveau_timer *ptimer, u64 time,
89 struct nouveau_alarm *alarm) 89 struct nouveau_alarm *alarm)
90{ 90{
91 struct nv04_timer_priv *priv = (void *)ptimer; 91 struct nv04_timer_priv *priv = (void *)ptimer;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
index 0203e1e12caa..49050d991e75 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
@@ -92,7 +92,8 @@ nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
92 struct nv04_vmmgr_priv *priv; 92 struct nv04_vmmgr_priv *priv;
93 int ret; 93 int ret;
94 94
95 if (!nouveau_boolopt(device->cfgopt, "NvPCIE", true)) { 95 if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
96 !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
96 return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass, 97 return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass,
97 data, size, pobject); 98 data, size, pobject);
98 } 99 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
index 0ac18d05a146..aa8131436e3d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
@@ -163,7 +163,8 @@ nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
163 struct nv04_vmmgr_priv *priv; 163 struct nv04_vmmgr_priv *priv;
164 int ret; 164 int ret;
165 165
166 if (!nouveau_boolopt(device->cfgopt, "NvPCIE", true)) { 166 if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
167 !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
167 return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass, 168 return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass,
168 data, size, pobject); 169 data, size, pobject);
169 } 170 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 259e5f1adf47..35ac57f0aab6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -456,6 +456,7 @@ static struct ttm_tt *
456nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size, 456nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
457 uint32_t page_flags, struct page *dummy_read) 457 uint32_t page_flags, struct page *dummy_read)
458{ 458{
459#if __OS_HAS_AGP
459 struct nouveau_drm *drm = nouveau_bdev(bdev); 460 struct nouveau_drm *drm = nouveau_bdev(bdev);
460 struct drm_device *dev = drm->dev; 461 struct drm_device *dev = drm->dev;
461 462
@@ -463,6 +464,7 @@ nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
463 return ttm_agp_tt_create(bdev, dev->agp->bridge, size, 464 return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
464 page_flags, dummy_read); 465 page_flags, dummy_read);
465 } 466 }
467#endif
466 468
467 return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read); 469 return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
468} 470}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 8f98e5a8c488..86124b131f4f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -290,6 +290,7 @@ nouveau_display_create(struct drm_device *dev)
290 struct nouveau_drm *drm = nouveau_drm(dev); 290 struct nouveau_drm *drm = nouveau_drm(dev);
291 struct nouveau_disp *pdisp = nouveau_disp(drm->device); 291 struct nouveau_disp *pdisp = nouveau_disp(drm->device);
292 struct nouveau_display *disp; 292 struct nouveau_display *disp;
293 u32 pclass = dev->pdev->class >> 8;
293 int ret, gen; 294 int ret, gen;
294 295
295 disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL); 296 disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
@@ -360,23 +361,27 @@ nouveau_display_create(struct drm_device *dev)
360 drm_kms_helper_poll_init(dev); 361 drm_kms_helper_poll_init(dev);
361 drm_kms_helper_poll_disable(dev); 362 drm_kms_helper_poll_disable(dev);
362 363
363 if (nv_device(drm->device)->card_type < NV_50) 364 if (nouveau_modeset == 1 ||
364 ret = nv04_display_create(dev); 365 (nouveau_modeset < 0 && pclass == PCI_CLASS_DISPLAY_VGA)) {
365 else 366 if (nv_device(drm->device)->card_type < NV_50)
366 if (nv_device(drm->device)->card_type < NV_D0) 367 ret = nv04_display_create(dev);
367 ret = nv50_display_create(dev); 368 else
368 else 369 if (nv_device(drm->device)->card_type < NV_D0)
369 ret = nvd0_display_create(dev); 370 ret = nv50_display_create(dev);
370 if (ret) 371 else
371 goto disp_create_err; 372 ret = nvd0_display_create(dev);
372
373 if (dev->mode_config.num_crtc) {
374 ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
375 if (ret) 373 if (ret)
376 goto vblank_err; 374 goto disp_create_err;
375
376 if (dev->mode_config.num_crtc) {
377 ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
378 if (ret)
379 goto vblank_err;
380 }
381
382 nouveau_backlight_init(dev);
377 } 383 }
378 384
379 nouveau_backlight_init(dev);
380 return 0; 385 return 0;
381 386
382vblank_err: 387vblank_err:
@@ -395,7 +400,8 @@ nouveau_display_destroy(struct drm_device *dev)
395 nouveau_backlight_exit(dev); 400 nouveau_backlight_exit(dev);
396 drm_vblank_cleanup(dev); 401 drm_vblank_cleanup(dev);
397 402
398 disp->dtor(dev); 403 if (disp->dtor)
404 disp->dtor(dev);
399 405
400 drm_kms_helper_poll_fini(dev); 406 drm_kms_helper_poll_fini(dev);
401 drm_mode_config_cleanup(dev); 407 drm_mode_config_cleanup(dev);
@@ -530,9 +536,11 @@ nouveau_page_flip_reserve(struct nouveau_bo *old_bo,
530 if (ret) 536 if (ret)
531 goto fail; 537 goto fail;
532 538
533 ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0); 539 if (likely(old_bo != new_bo)) {
534 if (ret) 540 ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
535 goto fail_unreserve; 541 if (ret)
542 goto fail_unreserve;
543 }
536 544
537 return 0; 545 return 0;
538 546
@@ -551,8 +559,10 @@ nouveau_page_flip_unreserve(struct nouveau_bo *old_bo,
551 nouveau_bo_fence(new_bo, fence); 559 nouveau_bo_fence(new_bo, fence);
552 ttm_bo_unreserve(&new_bo->bo); 560 ttm_bo_unreserve(&new_bo->bo);
553 561
554 nouveau_bo_fence(old_bo, fence); 562 if (likely(old_bo != new_bo)) {
555 ttm_bo_unreserve(&old_bo->bo); 563 nouveau_bo_fence(old_bo, fence);
564 ttm_bo_unreserve(&old_bo->bo);
565 }
556 566
557 nouveau_bo_unpin(old_bo); 567 nouveau_bo_unpin(old_bo);
558} 568}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index ccae8c26ae2b..0910125cbbc3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -63,8 +63,9 @@ MODULE_PARM_DESC(noaccel, "disable kernel/abi16 acceleration");
63static int nouveau_noaccel = 0; 63static int nouveau_noaccel = 0;
64module_param_named(noaccel, nouveau_noaccel, int, 0400); 64module_param_named(noaccel, nouveau_noaccel, int, 0400);
65 65
66MODULE_PARM_DESC(modeset, "enable driver"); 66MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
67static int nouveau_modeset = -1; 67 "0 = disabled, 1 = enabled, 2 = headless)");
68int nouveau_modeset = -1;
68module_param_named(modeset, nouveau_modeset, int, 0400); 69module_param_named(modeset, nouveau_modeset, int, 0400);
69 70
70static struct drm_driver driver; 71static struct drm_driver driver;
@@ -363,7 +364,8 @@ nouveau_drm_unload(struct drm_device *dev)
363 364
364 nouveau_pm_fini(dev); 365 nouveau_pm_fini(dev);
365 366
366 nouveau_display_fini(dev); 367 if (dev->mode_config.num_crtc)
368 nouveau_display_fini(dev);
367 nouveau_display_destroy(dev); 369 nouveau_display_destroy(dev);
368 370
369 nouveau_irq_fini(dev); 371 nouveau_irq_fini(dev);
@@ -403,13 +405,15 @@ nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
403 pm_state.event == PM_EVENT_PRETHAW) 405 pm_state.event == PM_EVENT_PRETHAW)
404 return 0; 406 return 0;
405 407
406 NV_INFO(drm, "suspending fbcon...\n"); 408 if (dev->mode_config.num_crtc) {
407 nouveau_fbcon_set_suspend(dev, 1); 409 NV_INFO(drm, "suspending fbcon...\n");
410 nouveau_fbcon_set_suspend(dev, 1);
408 411
409 NV_INFO(drm, "suspending display...\n"); 412 NV_INFO(drm, "suspending display...\n");
410 ret = nouveau_display_suspend(dev); 413 ret = nouveau_display_suspend(dev);
411 if (ret) 414 if (ret)
412 return ret; 415 return ret;
416 }
413 417
414 NV_INFO(drm, "evicting buffers...\n"); 418 NV_INFO(drm, "evicting buffers...\n");
415 ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM); 419 ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
@@ -445,8 +449,10 @@ fail_client:
445 nouveau_client_init(&cli->base); 449 nouveau_client_init(&cli->base);
446 } 450 }
447 451
448 NV_INFO(drm, "resuming display...\n"); 452 if (dev->mode_config.num_crtc) {
449 nouveau_display_resume(dev); 453 NV_INFO(drm, "resuming display...\n");
454 nouveau_display_resume(dev);
455 }
450 return ret; 456 return ret;
451} 457}
452 458
@@ -486,8 +492,10 @@ nouveau_drm_resume(struct pci_dev *pdev)
486 nouveau_irq_postinstall(dev); 492 nouveau_irq_postinstall(dev);
487 nouveau_pm_resume(dev); 493 nouveau_pm_resume(dev);
488 494
489 NV_INFO(drm, "resuming display...\n"); 495 if (dev->mode_config.num_crtc) {
490 nouveau_display_resume(dev); 496 NV_INFO(drm, "resuming display...\n");
497 nouveau_display_resume(dev);
498 }
491 return 0; 499 return 0;
492} 500}
493 501
@@ -662,9 +670,7 @@ nouveau_drm_init(void)
662#ifdef CONFIG_VGA_CONSOLE 670#ifdef CONFIG_VGA_CONSOLE
663 if (vgacon_text_force()) 671 if (vgacon_text_force())
664 nouveau_modeset = 0; 672 nouveau_modeset = 0;
665 else
666#endif 673#endif
667 nouveau_modeset = 1;
668 } 674 }
669 675
670 if (!nouveau_modeset) 676 if (!nouveau_modeset)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 819471217546..a10169927086 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -141,4 +141,6 @@ int nouveau_drm_resume(struct pci_dev *);
141 nv_info((cli), fmt, ##args); \ 141 nv_info((cli), fmt, ##args); \
142} while (0) 142} while (0)
143 143
144extern int nouveau_modeset;
145
144#endif 146#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 9ca8afdb5549..1d8cb506a28a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -61,13 +61,15 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
61 61
62 nv_subdev(pmc)->intr(nv_subdev(pmc)); 62 nv_subdev(pmc)->intr(nv_subdev(pmc));
63 63
64 if (device->card_type >= NV_D0) { 64 if (dev->mode_config.num_crtc) {
65 if (nv_rd32(device, 0x000100) & 0x04000000) 65 if (device->card_type >= NV_D0) {
66 nvd0_display_intr(dev); 66 if (nv_rd32(device, 0x000100) & 0x04000000)
67 } else 67 nvd0_display_intr(dev);
68 if (device->card_type >= NV_50) { 68 } else
69 if (nv_rd32(device, 0x000100) & 0x04000000) 69 if (device->card_type >= NV_50) {
70 nv50_display_intr(dev); 70 if (nv_rd32(device, 0x000100) & 0x04000000)
71 nv50_display_intr(dev);
72 }
71 } 73 }
72 74
73 return IRQ_HANDLED; 75 return IRQ_HANDLED;
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 0bf64c90aa20..5566172774df 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -52,7 +52,7 @@ nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
52{ 52{
53 struct nouveau_drm *drm = nouveau_drm(dev); 53 struct nouveau_drm *drm = nouveau_drm(dev);
54 struct nouveau_pm *pm = nouveau_pm(dev); 54 struct nouveau_pm *pm = nouveau_pm(dev);
55 struct nouveau_therm *therm = nouveau_therm(drm); 55 struct nouveau_therm *therm = nouveau_therm(drm->device);
56 int ret; 56 int ret;
57 57
58 /*XXX: not on all boards, we should control based on temperature 58 /*XXX: not on all boards, we should control based on temperature
@@ -64,7 +64,6 @@ nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
64 ret = therm->fan_set(therm, perflvl->fanspeed); 64 ret = therm->fan_set(therm, perflvl->fanspeed);
65 if (ret && ret != -ENODEV) { 65 if (ret && ret != -ENODEV) {
66 NV_ERROR(drm, "fanspeed set failed: %d\n", ret); 66 NV_ERROR(drm, "fanspeed set failed: %d\n", ret);
67 return ret;
68 } 67 }
69 } 68 }
70 69
@@ -706,8 +705,7 @@ nouveau_hwmon_init(struct drm_device *dev)
706 struct device *hwmon_dev; 705 struct device *hwmon_dev;
707 int ret = 0; 706 int ret = 0;
708 707
709 if (!therm || !therm->temp_get || !therm->attr_get || 708 if (!therm || !therm->temp_get || !therm->attr_get || !therm->attr_set)
710 !therm->attr_set || therm->temp_get(therm) < 0)
711 return -ENODEV; 709 return -ENODEV;
712 710
713 hwmon_dev = hwmon_device_register(&dev->pdev->dev); 711 hwmon_dev = hwmon_device_register(&dev->pdev->dev);
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index 347a3bd78d04..64f7020fb605 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -220,7 +220,7 @@ out:
220 NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode); 220 NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
221 221
222 if (blue == 0x18) { 222 if (blue == 0x18) {
223 NV_INFO(drm, "Load detected on head A\n"); 223 NV_DEBUG(drm, "Load detected on head A\n");
224 return connector_status_connected; 224 return connector_status_connected;
225 } 225 }
226 226
@@ -338,8 +338,8 @@ nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
338 338
339 if (nv17_dac_sample_load(encoder) & 339 if (nv17_dac_sample_load(encoder) &
340 NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) { 340 NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
341 NV_INFO(drm, "Load detected on output %c\n", 341 NV_DEBUG(drm, "Load detected on output %c\n",
342 '@' + ffs(dcb->or)); 342 '@' + ffs(dcb->or));
343 return connector_status_connected; 343 return connector_status_connected;
344 } else { 344 } else {
345 return connector_status_disconnected; 345 return connector_status_disconnected;
@@ -413,9 +413,9 @@ static void nv04_dac_commit(struct drm_encoder *encoder)
413 413
414 helper->dpms(encoder, DRM_MODE_DPMS_ON); 414 helper->dpms(encoder, DRM_MODE_DPMS_ON);
415 415
416 NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n", 416 NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
417 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), 417 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
418 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); 418 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
419} 419}
420 420
421void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable) 421void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable)
@@ -461,8 +461,8 @@ static void nv04_dac_dpms(struct drm_encoder *encoder, int mode)
461 return; 461 return;
462 nv_encoder->last_dpms = mode; 462 nv_encoder->last_dpms = mode;
463 463
464 NV_INFO(drm, "Setting dpms mode %d on vga encoder (output %d)\n", 464 NV_DEBUG(drm, "Setting dpms mode %d on vga encoder (output %d)\n",
465 mode, nv_encoder->dcb->index); 465 mode, nv_encoder->dcb->index);
466 466
467 nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON); 467 nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
468} 468}
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index da55d7642c8c..184cdf806761 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -476,9 +476,9 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
476 476
477 helper->dpms(encoder, DRM_MODE_DPMS_ON); 477 helper->dpms(encoder, DRM_MODE_DPMS_ON);
478 478
479 NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n", 479 NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
480 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), 480 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
481 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); 481 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
482} 482}
483 483
484static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode) 484static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
@@ -520,8 +520,8 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
520 return; 520 return;
521 nv_encoder->last_dpms = mode; 521 nv_encoder->last_dpms = mode;
522 522
523 NV_INFO(drm, "Setting dpms mode %d on lvds encoder (output %d)\n", 523 NV_DEBUG(drm, "Setting dpms mode %d on lvds encoder (output %d)\n",
524 mode, nv_encoder->dcb->index); 524 mode, nv_encoder->dcb->index);
525 525
526 if (was_powersaving && is_powersaving_dpms(mode)) 526 if (was_powersaving && is_powersaving_dpms(mode))
527 return; 527 return;
@@ -565,8 +565,8 @@ static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
565 return; 565 return;
566 nv_encoder->last_dpms = mode; 566 nv_encoder->last_dpms = mode;
567 567
568 NV_INFO(drm, "Setting dpms mode %d on tmds encoder (output %d)\n", 568 NV_DEBUG(drm, "Setting dpms mode %d on tmds encoder (output %d)\n",
569 mode, nv_encoder->dcb->index); 569 mode, nv_encoder->dcb->index);
570 570
571 nv04_dfp_update_backlight(encoder, mode); 571 nv04_dfp_update_backlight(encoder, mode);
572 nv04_dfp_update_fp_control(encoder, mode); 572 nv04_dfp_update_fp_control(encoder, mode);
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
index 099fbeda6e2e..62e826a139b3 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -75,8 +75,8 @@ static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
75 struct nv04_mode_state *state = &nv04_display(dev)->mode_reg; 75 struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
76 uint8_t crtc1A; 76 uint8_t crtc1A;
77 77
78 NV_INFO(drm, "Setting dpms mode %d on TV encoder (output %d)\n", 78 NV_DEBUG(drm, "Setting dpms mode %d on TV encoder (output %d)\n",
79 mode, nv_encoder->dcb->index); 79 mode, nv_encoder->dcb->index);
80 80
81 state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK); 81 state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK);
82 82
@@ -167,9 +167,8 @@ static void nv04_tv_commit(struct drm_encoder *encoder)
167 167
168 helper->dpms(encoder, DRM_MODE_DPMS_ON); 168 helper->dpms(encoder, DRM_MODE_DPMS_ON);
169 169
170 NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n", 170 NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
171 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, 171 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
172 '@' + ffs(nv_encoder->dcb->or));
173} 172}
174 173
175static void nv04_tv_destroy(struct drm_encoder *encoder) 174static void nv04_tv_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 96184d02c8d9..2e566e123e9e 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1690,10 +1690,10 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1690 } 1690 }
1691 /* all other cases */ 1691 /* all other cases */
1692 pll_in_use = radeon_get_pll_use_mask(crtc); 1692 pll_in_use = radeon_get_pll_use_mask(crtc);
1693 if (!(pll_in_use & (1 << ATOM_PPLL2)))
1694 return ATOM_PPLL2;
1695 if (!(pll_in_use & (1 << ATOM_PPLL1))) 1693 if (!(pll_in_use & (1 << ATOM_PPLL1)))
1696 return ATOM_PPLL1; 1694 return ATOM_PPLL1;
1695 if (!(pll_in_use & (1 << ATOM_PPLL2)))
1696 return ATOM_PPLL2;
1697 DRM_ERROR("unable to allocate a PPLL\n"); 1697 DRM_ERROR("unable to allocate a PPLL\n");
1698 return ATOM_PPLL_INVALID; 1698 return ATOM_PPLL_INVALID;
1699 } else { 1699 } else {
@@ -1715,10 +1715,10 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1715 } 1715 }
1716 /* all other cases */ 1716 /* all other cases */
1717 pll_in_use = radeon_get_pll_use_mask(crtc); 1717 pll_in_use = radeon_get_pll_use_mask(crtc);
1718 if (!(pll_in_use & (1 << ATOM_PPLL2)))
1719 return ATOM_PPLL2;
1720 if (!(pll_in_use & (1 << ATOM_PPLL1))) 1718 if (!(pll_in_use & (1 << ATOM_PPLL1)))
1721 return ATOM_PPLL1; 1719 return ATOM_PPLL1;
1720 if (!(pll_in_use & (1 << ATOM_PPLL2)))
1721 return ATOM_PPLL2;
1722 DRM_ERROR("unable to allocate a PPLL\n"); 1722 DRM_ERROR("unable to allocate a PPLL\n");
1723 return ATOM_PPLL_INVALID; 1723 return ATOM_PPLL_INVALID;
1724 } else { 1724 } else {
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 49cbb3795a10..ba498f8e47a2 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -184,6 +184,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
184 struct radeon_backlight_privdata *pdata; 184 struct radeon_backlight_privdata *pdata;
185 struct radeon_encoder_atom_dig *dig; 185 struct radeon_encoder_atom_dig *dig;
186 u8 backlight_level; 186 u8 backlight_level;
187 char bl_name[16];
187 188
188 if (!radeon_encoder->enc_priv) 189 if (!radeon_encoder->enc_priv)
189 return; 190 return;
@@ -203,7 +204,9 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
203 memset(&props, 0, sizeof(props)); 204 memset(&props, 0, sizeof(props));
204 props.max_brightness = RADEON_MAX_BL_LEVEL; 205 props.max_brightness = RADEON_MAX_BL_LEVEL;
205 props.type = BACKLIGHT_RAW; 206 props.type = BACKLIGHT_RAW;
206 bd = backlight_device_register("radeon_bl", &drm_connector->kdev, 207 snprintf(bl_name, sizeof(bl_name),
208 "radeon_bl%d", dev->primary->index);
209 bd = backlight_device_register(bl_name, &drm_connector->kdev,
207 pdata, &radeon_atom_backlight_ops, &props); 210 pdata, &radeon_atom_backlight_ops, &props);
208 if (IS_ERR(bd)) { 211 if (IS_ERR(bd)) {
209 DRM_ERROR("Backlight registration failed\n"); 212 DRM_ERROR("Backlight registration failed\n");
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index a1f49c5fd74b..14313ad43b76 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3431,9 +3431,14 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
3431 if (!(mask & DRM_PCIE_SPEED_50)) 3431 if (!(mask & DRM_PCIE_SPEED_50))
3432 return; 3432 return;
3433 3433
3434 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3435 if (speed_cntl & LC_CURRENT_DATA_RATE) {
3436 DRM_INFO("PCIE gen 2 link speeds already enabled\n");
3437 return;
3438 }
3439
3434 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); 3440 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
3435 3441
3436 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3437 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) || 3442 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
3438 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { 3443 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
3439 3444
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 573ed1bc6cf7..95e6318b6268 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -264,7 +264,7 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
264 /* macro tile width & height */ 264 /* macro tile width & height */
265 palign = (8 * surf->bankw * track->npipes) * surf->mtilea; 265 palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
266 halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea; 266 halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
267 mtileb = (palign / 8) * (halign / 8) * tileb;; 267 mtileb = (palign / 8) * (halign / 8) * tileb;
268 mtile_pr = surf->nbx / palign; 268 mtile_pr = surf->nbx / palign;
269 mtile_ps = (mtile_pr * surf->nby) / halign; 269 mtile_ps = (mtile_pr * surf->nby) / halign;
270 surf->layer_size = mtile_ps * mtileb * slice_pt; 270 surf->layer_size = mtile_ps * mtileb * slice_pt;
@@ -2829,6 +2829,7 @@ static bool evergreen_vm_reg_valid(u32 reg)
2829 case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS: 2829 case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
2830 return true; 2830 return true;
2831 default: 2831 default:
2832 DRM_ERROR("Invalid register 0x%x in CS\n", reg);
2832 return false; 2833 return false;
2833 } 2834 }
2834} 2835}
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 8bcb554ea0c5..81e6a568c29d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -770,9 +770,13 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
770 WREG32(0x15DC, 0); 770 WREG32(0x15DC, 0);
771 771
772 /* empty context1-7 */ 772 /* empty context1-7 */
773 /* Assign the pt base to something valid for now; the pts used for
774 * the VMs are determined by the application and setup and assigned
775 * on the fly in the vm part of radeon_gart.c
776 */
773 for (i = 1; i < 8; i++) { 777 for (i = 1; i < 8; i++) {
774 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0); 778 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
775 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), 0); 779 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
776 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), 780 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
777 rdev->gart.table_addr >> 12); 781 rdev->gart.table_addr >> 12);
778 } 782 }
@@ -1534,26 +1538,31 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
1534{ 1538{
1535 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index]; 1539 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
1536 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); 1540 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
1537 int i;
1538 1541
1539 radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, 1 + count * 2)); 1542 while (count) {
1540 radeon_ring_write(ring, pe); 1543 unsigned ndw = 1 + count * 2;
1541 radeon_ring_write(ring, upper_32_bits(pe) & 0xff); 1544 if (ndw > 0x3FFF)
1542 for (i = 0; i < count; ++i) { 1545 ndw = 0x3FFF;
1543 uint64_t value = 0; 1546
1544 if (flags & RADEON_VM_PAGE_SYSTEM) { 1547 radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
1545 value = radeon_vm_map_gart(rdev, addr); 1548 radeon_ring_write(ring, pe);
1546 value &= 0xFFFFFFFFFFFFF000ULL; 1549 radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
1547 addr += incr; 1550 for (; ndw > 1; ndw -= 2, --count, pe += 8) {
1548 1551 uint64_t value = 0;
1549 } else if (flags & RADEON_VM_PAGE_VALID) { 1552 if (flags & RADEON_VM_PAGE_SYSTEM) {
1550 value = addr; 1553 value = radeon_vm_map_gart(rdev, addr);
1551 addr += incr; 1554 value &= 0xFFFFFFFFFFFFF000ULL;
1552 } 1555 addr += incr;
1556
1557 } else if (flags & RADEON_VM_PAGE_VALID) {
1558 value = addr;
1559 addr += incr;
1560 }
1553 1561
1554 value |= r600_flags; 1562 value |= r600_flags;
1555 radeon_ring_write(ring, value); 1563 radeon_ring_write(ring, value);
1556 radeon_ring_write(ring, upper_32_bits(value)); 1564 radeon_ring_write(ring, upper_32_bits(value));
1565 }
1557 } 1566 }
1558} 1567}
1559 1568
@@ -1572,12 +1581,6 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
1572 if (vm == NULL) 1581 if (vm == NULL)
1573 return; 1582 return;
1574 1583
1575 radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0));
1576 radeon_ring_write(ring, 0);
1577
1578 radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0));
1579 radeon_ring_write(ring, vm->last_pfn);
1580
1581 radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0)); 1584 radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
1582 radeon_ring_write(ring, vm->pd_gpu_addr >> 12); 1585 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
1583 1586
@@ -1588,4 +1591,8 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
1588 /* bits 0-7 are the VM contexts0-7 */ 1591 /* bits 0-7 are the VM contexts0-7 */
1589 radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0)); 1592 radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
1590 radeon_ring_write(ring, 1 << vm->id); 1593 radeon_ring_write(ring, 1 << vm->id);
1594
1595 /* sync PFP to ME, otherwise we might get invalid PFP reads */
1596 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
1597 radeon_ring_write(ring, 0x0);
1591} 1598}
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 2423d1b5d385..cbef6815907a 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -502,6 +502,7 @@
502#define PACKET3_MPEG_INDEX 0x3A 502#define PACKET3_MPEG_INDEX 0x3A
503#define PACKET3_WAIT_REG_MEM 0x3C 503#define PACKET3_WAIT_REG_MEM 0x3C
504#define PACKET3_MEM_WRITE 0x3D 504#define PACKET3_MEM_WRITE 0x3D
505#define PACKET3_PFP_SYNC_ME 0x42
505#define PACKET3_SURFACE_SYNC 0x43 506#define PACKET3_SURFACE_SYNC 0x43
506# define PACKET3_CB0_DEST_BASE_ENA (1 << 6) 507# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
507# define PACKET3_CB1_DEST_BASE_ENA (1 << 7) 508# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 70c800ff6190..cda280d157da 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3703,6 +3703,12 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
3703 if (!(mask & DRM_PCIE_SPEED_50)) 3703 if (!(mask & DRM_PCIE_SPEED_50))
3704 return; 3704 return;
3705 3705
3706 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3707 if (speed_cntl & LC_CURRENT_DATA_RATE) {
3708 DRM_INFO("PCIE gen 2 link speeds already enabled\n");
3709 return;
3710 }
3711
3706 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); 3712 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
3707 3713
3708 /* 55 nm r6xx asics */ 3714 /* 55 nm r6xx asics */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b04c06444d8b..8c42d54c2e26 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -663,9 +663,14 @@ struct radeon_vm {
663 struct list_head list; 663 struct list_head list;
664 struct list_head va; 664 struct list_head va;
665 unsigned id; 665 unsigned id;
666 unsigned last_pfn; 666
667 u64 pd_gpu_addr; 667 /* contains the page directory */
668 struct radeon_sa_bo *sa_bo; 668 struct radeon_sa_bo *page_directory;
669 uint64_t pd_gpu_addr;
670
671 /* array of page tables, one for each page directory entry */
672 struct radeon_sa_bo **page_tables;
673
669 struct mutex mutex; 674 struct mutex mutex;
670 /* last fence for cs using this vm */ 675 /* last fence for cs using this vm */
671 struct radeon_fence *fence; 676 struct radeon_fence *fence;
@@ -1843,9 +1848,10 @@ extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size
1843 */ 1848 */
1844int radeon_vm_manager_init(struct radeon_device *rdev); 1849int radeon_vm_manager_init(struct radeon_device *rdev);
1845void radeon_vm_manager_fini(struct radeon_device *rdev); 1850void radeon_vm_manager_fini(struct radeon_device *rdev);
1846int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm); 1851void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
1847void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm); 1852void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
1848int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm); 1853int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
1854void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
1849struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, 1855struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
1850 struct radeon_vm *vm, int ring); 1856 struct radeon_vm *vm, int ring);
1851void radeon_vm_fence(struct radeon_device *rdev, 1857void radeon_vm_fence(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index b0a5688c67f8..196d28d99570 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -201,7 +201,7 @@ static int radeon_atif_verify_interface(acpi_handle handle,
201 201
202 size = *(u16 *) info->buffer.pointer; 202 size = *(u16 *) info->buffer.pointer;
203 if (size < 12) { 203 if (size < 12) {
204 DRM_INFO("ATIF buffer is too small: %lu\n", size); 204 DRM_INFO("ATIF buffer is too small: %zu\n", size);
205 err = -EINVAL; 205 err = -EINVAL;
206 goto out; 206 goto out;
207 } 207 }
@@ -370,6 +370,7 @@ int radeon_atif_handler(struct radeon_device *rdev,
370 370
371 radeon_set_backlight_level(rdev, enc, req.backlight_level); 371 radeon_set_backlight_level(rdev, enc, req.backlight_level);
372 372
373#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
373 if (rdev->is_atom_bios) { 374 if (rdev->is_atom_bios) {
374 struct radeon_encoder_atom_dig *dig = enc->enc_priv; 375 struct radeon_encoder_atom_dig *dig = enc->enc_priv;
375 backlight_force_update(dig->bl_dev, 376 backlight_force_update(dig->bl_dev,
@@ -379,6 +380,7 @@ int radeon_atif_handler(struct radeon_device *rdev,
379 backlight_force_update(dig->bl_dev, 380 backlight_force_update(dig->bl_dev,
380 BACKLIGHT_UPDATE_HOTKEY); 381 BACKLIGHT_UPDATE_HOTKEY);
381 } 382 }
383#endif
382 } 384 }
383 } 385 }
384 /* TODO: check other events */ 386 /* TODO: check other events */
@@ -485,7 +487,7 @@ static int radeon_atcs_verify_interface(acpi_handle handle,
485 487
486 size = *(u16 *) info->buffer.pointer; 488 size = *(u16 *) info->buffer.pointer;
487 if (size < 8) { 489 if (size < 8) {
488 DRM_INFO("ATCS buffer is too small: %lu\n", size); 490 DRM_INFO("ATCS buffer is too small: %zu\n", size);
489 err = -EINVAL; 491 err = -EINVAL;
490 goto out; 492 goto out;
491 } 493 }
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 582e99449c12..15f5ded65e0c 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -87,7 +87,7 @@ static union acpi_object *radeon_atpx_call(acpi_handle handle, int function,
87 atpx_arg_elements[1].integer.value = 0; 87 atpx_arg_elements[1].integer.value = 0;
88 } 88 }
89 89
90 status = acpi_evaluate_object(handle, "ATPX", &atpx_arg, &buffer); 90 status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
91 91
92 /* Fail only if calling the method fails and ATPX is supported */ 92 /* Fail only if calling the method fails and ATPX is supported */
93 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { 93 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -148,7 +148,7 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
148 148
149 size = *(u16 *) info->buffer.pointer; 149 size = *(u16 *) info->buffer.pointer;
150 if (size < 8) { 150 if (size < 8) {
151 printk("ATPX buffer is too small: %lu\n", size); 151 printk("ATPX buffer is too small: %zu\n", size);
152 err = -EINVAL; 152 err = -EINVAL;
153 goto out; 153 goto out;
154 } 154 }
@@ -352,9 +352,9 @@ static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
352} 352}
353 353
354/** 354/**
355 * radeon_atpx_switchto - switch to the requested GPU 355 * radeon_atpx_power_state - power down/up the requested GPU
356 * 356 *
357 * @id: GPU to switch to 357 * @id: GPU to power down/up
358 * @state: requested power state (0 = off, 1 = on) 358 * @state: requested power state (0 = off, 1 = on)
359 * 359 *
360 * Execute the necessary ATPX function to power down/up the discrete GPU 360 * Execute the necessary ATPX function to power down/up the discrete GPU
@@ -373,11 +373,11 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
373} 373}
374 374
375/** 375/**
376 * radeon_atpx_pci_probe_handle - look up the ATRM and ATPX handles 376 * radeon_atpx_pci_probe_handle - look up the ATPX handle
377 * 377 *
378 * @pdev: pci device 378 * @pdev: pci device
379 * 379 *
380 * Look up the ATPX and ATRM handles (all asics). 380 * Look up the ATPX handles (all asics).
381 * Returns true if the handles are found, false if not. 381 * Returns true if the handles are found, false if not.
382 */ 382 */
383static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) 383static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 67cfc1795ecd..b884c362a8c2 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -941,7 +941,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
941 struct drm_mode_object *obj; 941 struct drm_mode_object *obj;
942 int i; 942 int i;
943 enum drm_connector_status ret = connector_status_disconnected; 943 enum drm_connector_status ret = connector_status_disconnected;
944 bool dret = false; 944 bool dret = false, broken_edid = false;
945 945
946 if (!force && radeon_check_hpd_status_unchanged(connector)) 946 if (!force && radeon_check_hpd_status_unchanged(connector))
947 return connector->status; 947 return connector->status;
@@ -965,6 +965,9 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
965 ret = connector_status_disconnected; 965 ret = connector_status_disconnected;
966 DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector)); 966 DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector));
967 radeon_connector->ddc_bus = NULL; 967 radeon_connector->ddc_bus = NULL;
968 } else {
969 ret = connector_status_connected;
970 broken_edid = true; /* defer use_digital to later */
968 } 971 }
969 } else { 972 } else {
970 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); 973 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
@@ -1047,13 +1050,24 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
1047 1050
1048 encoder_funcs = encoder->helper_private; 1051 encoder_funcs = encoder->helper_private;
1049 if (encoder_funcs->detect) { 1052 if (encoder_funcs->detect) {
1050 if (ret != connector_status_connected) { 1053 if (!broken_edid) {
1051 ret = encoder_funcs->detect(encoder, connector); 1054 if (ret != connector_status_connected) {
1052 if (ret == connector_status_connected) { 1055 /* deal with analog monitors without DDC */
1053 radeon_connector->use_digital = false; 1056 ret = encoder_funcs->detect(encoder, connector);
1057 if (ret == connector_status_connected) {
1058 radeon_connector->use_digital = false;
1059 }
1060 if (ret != connector_status_disconnected)
1061 radeon_connector->detected_by_load = true;
1054 } 1062 }
1055 if (ret != connector_status_disconnected) 1063 } else {
1056 radeon_connector->detected_by_load = true; 1064 enum drm_connector_status lret;
1065 /* assume digital unless load detected otherwise */
1066 radeon_connector->use_digital = true;
1067 lret = encoder_funcs->detect(encoder, connector);
1068 DRM_DEBUG_KMS("load_detect %x returned: %x\n",encoder->encoder_type,lret);
1069 if (lret == connector_status_connected)
1070 radeon_connector->use_digital = false;
1057 } 1071 }
1058 break; 1072 break;
1059 } 1073 }
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index cb7b7c062fef..41672cc563fb 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -478,6 +478,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
478 } 478 }
479 479
480out: 480out:
481 radeon_vm_add_to_lru(rdev, vm);
481 mutex_unlock(&vm->mutex); 482 mutex_unlock(&vm->mutex);
482 mutex_unlock(&rdev->vm_manager.lock); 483 mutex_unlock(&rdev->vm_manager.lock);
483 return r; 484 return r;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 64a42647f08a..e2f5f888c374 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -355,6 +355,8 @@ int radeon_wb_init(struct radeon_device *rdev)
355 */ 355 */
356void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base) 356void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
357{ 357{
358 uint64_t limit = (uint64_t)radeon_vram_limit << 20;
359
358 mc->vram_start = base; 360 mc->vram_start = base;
359 if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) { 361 if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
360 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); 362 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
@@ -368,8 +370,8 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
368 mc->mc_vram_size = mc->aper_size; 370 mc->mc_vram_size = mc->aper_size;
369 } 371 }
370 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 372 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
371 if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size) 373 if (limit && limit < mc->real_vram_size)
372 mc->real_vram_size = radeon_vram_limit; 374 mc->real_vram_size = limit;
373 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", 375 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
374 mc->mc_vram_size >> 20, mc->vram_start, 376 mc->mc_vram_size >> 20, mc->vram_start,
375 mc->vram_end, mc->real_vram_size >> 20); 377 mc->vram_end, mc->real_vram_size >> 20);
@@ -835,6 +837,19 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
835} 837}
836 838
837/** 839/**
840 * radeon_check_pot_argument - check that argument is a power of two
841 *
842 * @arg: value to check
843 *
844 * Validates that a certain argument is a power of two (all asics).
845 * Returns true if argument is valid.
846 */
847static bool radeon_check_pot_argument(int arg)
848{
849 return (arg & (arg - 1)) == 0;
850}
851
852/**
838 * radeon_check_arguments - validate module params 853 * radeon_check_arguments - validate module params
839 * 854 *
840 * @rdev: radeon_device pointer 855 * @rdev: radeon_device pointer
@@ -845,52 +860,25 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
845static void radeon_check_arguments(struct radeon_device *rdev) 860static void radeon_check_arguments(struct radeon_device *rdev)
846{ 861{
847 /* vramlimit must be a power of two */ 862 /* vramlimit must be a power of two */
848 switch (radeon_vram_limit) { 863 if (!radeon_check_pot_argument(radeon_vram_limit)) {
849 case 0:
850 case 4:
851 case 8:
852 case 16:
853 case 32:
854 case 64:
855 case 128:
856 case 256:
857 case 512:
858 case 1024:
859 case 2048:
860 case 4096:
861 break;
862 default:
863 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n", 864 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
864 radeon_vram_limit); 865 radeon_vram_limit);
865 radeon_vram_limit = 0; 866 radeon_vram_limit = 0;
866 break;
867 } 867 }
868 radeon_vram_limit = radeon_vram_limit << 20; 868
869 /* gtt size must be power of two and greater or equal to 32M */ 869 /* gtt size must be power of two and greater or equal to 32M */
870 switch (radeon_gart_size) { 870 if (radeon_gart_size < 32) {
871 case 4:
872 case 8:
873 case 16:
874 dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n", 871 dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
875 radeon_gart_size); 872 radeon_gart_size);
876 radeon_gart_size = 512; 873 radeon_gart_size = 512;
877 break; 874
878 case 32: 875 } else if (!radeon_check_pot_argument(radeon_gart_size)) {
879 case 64:
880 case 128:
881 case 256:
882 case 512:
883 case 1024:
884 case 2048:
885 case 4096:
886 break;
887 default:
888 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n", 876 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
889 radeon_gart_size); 877 radeon_gart_size);
890 radeon_gart_size = 512; 878 radeon_gart_size = 512;
891 break;
892 } 879 }
893 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; 880 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
881
894 /* AGP mode can only be -1, 1, 2, 4, 8 */ 882 /* AGP mode can only be -1, 1, 2, 4, 8 */
895 switch (radeon_agpmode) { 883 switch (radeon_agpmode) {
896 case -1: 884 case -1:
@@ -1018,6 +1006,10 @@ int radeon_device_init(struct radeon_device *rdev,
1018 return r; 1006 return r;
1019 /* initialize vm here */ 1007 /* initialize vm here */
1020 mutex_init(&rdev->vm_manager.lock); 1008 mutex_init(&rdev->vm_manager.lock);
1009 /* Adjust VM size here.
1010 * Currently set to 4GB ((1 << 20) 4k pages).
1011 * Max GPUVM size for cayman and SI is 40 bits.
1012 */
1021 rdev->vm_manager.max_pfn = 1 << 20; 1013 rdev->vm_manager.max_pfn = 1 << 20;
1022 INIT_LIST_HEAD(&rdev->vm_manager.lru_vm); 1014 INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
1023 1015
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index f0c06d196b75..4debd60e5aa6 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -355,14 +355,13 @@ int radeon_gart_init(struct radeon_device *rdev)
355 DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", 355 DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
356 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages); 356 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
357 /* Allocate pages table */ 357 /* Allocate pages table */
358 rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages, 358 rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
359 GFP_KERNEL);
360 if (rdev->gart.pages == NULL) { 359 if (rdev->gart.pages == NULL) {
361 radeon_gart_fini(rdev); 360 radeon_gart_fini(rdev);
362 return -ENOMEM; 361 return -ENOMEM;
363 } 362 }
364 rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) * 363 rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
365 rdev->gart.num_cpu_pages, GFP_KERNEL); 364 rdev->gart.num_cpu_pages);
366 if (rdev->gart.pages_addr == NULL) { 365 if (rdev->gart.pages_addr == NULL) {
367 radeon_gart_fini(rdev); 366 radeon_gart_fini(rdev);
368 return -ENOMEM; 367 return -ENOMEM;
@@ -388,8 +387,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
388 radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages); 387 radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
389 } 388 }
390 rdev->gart.ready = false; 389 rdev->gart.ready = false;
391 kfree(rdev->gart.pages); 390 vfree(rdev->gart.pages);
392 kfree(rdev->gart.pages_addr); 391 vfree(rdev->gart.pages_addr);
393 rdev->gart.pages = NULL; 392 rdev->gart.pages = NULL;
394 rdev->gart.pages_addr = NULL; 393 rdev->gart.pages_addr = NULL;
395 394
@@ -423,6 +422,18 @@ void radeon_gart_fini(struct radeon_device *rdev)
423 */ 422 */
424 423
425/** 424/**
425 * radeon_vm_num_pde - return the number of page directory entries
426 *
427 * @rdev: radeon_device pointer
428 *
429 * Calculate the number of page directory entries (cayman+).
430 */
431static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
432{
433 return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
434}
435
436/**
426 * radeon_vm_directory_size - returns the size of the page directory in bytes 437 * radeon_vm_directory_size - returns the size of the page directory in bytes
427 * 438 *
428 * @rdev: radeon_device pointer 439 * @rdev: radeon_device pointer
@@ -431,7 +442,7 @@ void radeon_gart_fini(struct radeon_device *rdev)
431 */ 442 */
432static unsigned radeon_vm_directory_size(struct radeon_device *rdev) 443static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
433{ 444{
434 return (rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE) * 8; 445 return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
435} 446}
436 447
437/** 448/**
@@ -451,11 +462,11 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
451 462
452 if (!rdev->vm_manager.enabled) { 463 if (!rdev->vm_manager.enabled) {
453 /* allocate enough for 2 full VM pts */ 464 /* allocate enough for 2 full VM pts */
454 size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev)); 465 size = radeon_vm_directory_size(rdev);
455 size += RADEON_GPU_PAGE_ALIGN(rdev->vm_manager.max_pfn * 8); 466 size += rdev->vm_manager.max_pfn * 8;
456 size *= 2; 467 size *= 2;
457 r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, 468 r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
458 size, 469 RADEON_GPU_PAGE_ALIGN(size),
459 RADEON_GEM_DOMAIN_VRAM); 470 RADEON_GEM_DOMAIN_VRAM);
460 if (r) { 471 if (r) {
461 dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n", 472 dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -476,7 +487,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
476 487
477 /* restore page table */ 488 /* restore page table */
478 list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) { 489 list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
479 if (vm->sa_bo == NULL) 490 if (vm->page_directory == NULL)
480 continue; 491 continue;
481 492
482 list_for_each_entry(bo_va, &vm->va, vm_list) { 493 list_for_each_entry(bo_va, &vm->va, vm_list) {
@@ -500,16 +511,25 @@ static void radeon_vm_free_pt(struct radeon_device *rdev,
500 struct radeon_vm *vm) 511 struct radeon_vm *vm)
501{ 512{
502 struct radeon_bo_va *bo_va; 513 struct radeon_bo_va *bo_va;
514 int i;
503 515
504 if (!vm->sa_bo) 516 if (!vm->page_directory)
505 return; 517 return;
506 518
507 list_del_init(&vm->list); 519 list_del_init(&vm->list);
508 radeon_sa_bo_free(rdev, &vm->sa_bo, vm->fence); 520 radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
509 521
510 list_for_each_entry(bo_va, &vm->va, vm_list) { 522 list_for_each_entry(bo_va, &vm->va, vm_list) {
511 bo_va->valid = false; 523 bo_va->valid = false;
512 } 524 }
525
526 if (vm->page_tables == NULL)
527 return;
528
529 for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
530 radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);
531
532 kfree(vm->page_tables);
513} 533}
514 534
515/** 535/**
@@ -546,63 +566,106 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
546} 566}
547 567
548/** 568/**
569 * radeon_vm_evict - evict page table to make room for new one
570 *
571 * @rdev: radeon_device pointer
572 * @vm: VM we want to allocate something for
573 *
574 * Evict a VM from the lru, making sure that it isn't @vm. (cayman+).
575 * Returns 0 for success, -ENOMEM for failure.
576 *
577 * Global and local mutex must be locked!
578 */
579static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
580{
581 struct radeon_vm *vm_evict;
582
583 if (list_empty(&rdev->vm_manager.lru_vm))
584 return -ENOMEM;
585
586 vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
587 struct radeon_vm, list);
588 if (vm_evict == vm)
589 return -ENOMEM;
590
591 mutex_lock(&vm_evict->mutex);
592 radeon_vm_free_pt(rdev, vm_evict);
593 mutex_unlock(&vm_evict->mutex);
594 return 0;
595}
596
597/**
549 * radeon_vm_alloc_pt - allocates a page table for a VM 598 * radeon_vm_alloc_pt - allocates a page table for a VM
550 * 599 *
551 * @rdev: radeon_device pointer 600 * @rdev: radeon_device pointer
552 * @vm: vm to bind 601 * @vm: vm to bind
553 * 602 *
554 * Allocate a page table for the requested vm (cayman+). 603 * Allocate a page table for the requested vm (cayman+).
555 * Also starts to populate the page table.
556 * Returns 0 for success, error for failure. 604 * Returns 0 for success, error for failure.
557 * 605 *
558 * Global and local mutex must be locked! 606 * Global and local mutex must be locked!
559 */ 607 */
560int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm) 608int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
561{ 609{
562 struct radeon_vm *vm_evict; 610 unsigned pd_size, pts_size;
563 int r;
564 u64 *pd_addr; 611 u64 *pd_addr;
565 int tables_size; 612 int r;
566 613
567 if (vm == NULL) { 614 if (vm == NULL) {
568 return -EINVAL; 615 return -EINVAL;
569 } 616 }
570 617
571 /* allocate enough to cover the current VM size */ 618 if (vm->page_directory != NULL) {
572 tables_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
573 tables_size += RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8);
574
575 if (vm->sa_bo != NULL) {
576 /* update lru */
577 list_del_init(&vm->list);
578 list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
579 return 0; 619 return 0;
580 } 620 }
581 621
582retry: 622retry:
583 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo, 623 pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
584 tables_size, RADEON_GPU_PAGE_SIZE, false); 624 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
625 &vm->page_directory, pd_size,
626 RADEON_GPU_PAGE_SIZE, false);
585 if (r == -ENOMEM) { 627 if (r == -ENOMEM) {
586 if (list_empty(&rdev->vm_manager.lru_vm)) { 628 r = radeon_vm_evict(rdev, vm);
629 if (r)
587 return r; 630 return r;
588 }
589 vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
590 mutex_lock(&vm_evict->mutex);
591 radeon_vm_free_pt(rdev, vm_evict);
592 mutex_unlock(&vm_evict->mutex);
593 goto retry; 631 goto retry;
594 632
595 } else if (r) { 633 } else if (r) {
596 return r; 634 return r;
597 } 635 }
598 636
599 pd_addr = radeon_sa_bo_cpu_addr(vm->sa_bo); 637 vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
600 vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo); 638
601 memset(pd_addr, 0, tables_size); 639 /* Initially clear the page directory */
640 pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
641 memset(pd_addr, 0, pd_size);
642
643 pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
644 vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
645
646 if (vm->page_tables == NULL) {
647 DRM_ERROR("Cannot allocate memory for page table array\n");
648 radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
649 return -ENOMEM;
650 }
651
652 return 0;
653}
602 654
655/**
656 * radeon_vm_add_to_lru - add VMs page table to LRU list
657 *
658 * @rdev: radeon_device pointer
659 * @vm: vm to add to LRU
660 *
661 * Add the allocated page table to the LRU list (cayman+).
662 *
663 * Global mutex must be locked!
664 */
665void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
666{
667 list_del_init(&vm->list);
603 list_add_tail(&vm->list, &rdev->vm_manager.lru_vm); 668 list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
604 return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
605 &rdev->ring_tmp_bo.bo->tbo.mem);
606} 669}
607 670
608/** 671/**
@@ -793,20 +856,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
793 } 856 }
794 857
795 mutex_lock(&vm->mutex); 858 mutex_lock(&vm->mutex);
796 if (last_pfn > vm->last_pfn) {
797 /* release mutex and lock in right order */
798 mutex_unlock(&vm->mutex);
799 mutex_lock(&rdev->vm_manager.lock);
800 mutex_lock(&vm->mutex);
801 /* and check again */
802 if (last_pfn > vm->last_pfn) {
803 /* grow va space 32M by 32M */
804 unsigned align = ((32 << 20) >> 12) - 1;
805 radeon_vm_free_pt(rdev, vm);
806 vm->last_pfn = (last_pfn + align) & ~align;
807 }
808 mutex_unlock(&rdev->vm_manager.lock);
809 }
810 head = &vm->va; 859 head = &vm->va;
811 last_offset = 0; 860 last_offset = 0;
812 list_for_each_entry(tmp, &vm->va, vm_list) { 861 list_for_each_entry(tmp, &vm->va, vm_list) {
@@ -865,6 +914,154 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
865} 914}
866 915
867/** 916/**
917 * radeon_vm_update_pdes - make sure that page directory is valid
918 *
919 * @rdev: radeon_device pointer
920 * @vm: requested vm
921 * @start: start of GPU address range
922 * @end: end of GPU address range
923 *
924 * Allocates new page tables if necessary
925 * and updates the page directory (cayman+).
926 * Returns 0 for success, error for failure.
927 *
928 * Global and local mutex must be locked!
929 */
930static int radeon_vm_update_pdes(struct radeon_device *rdev,
931 struct radeon_vm *vm,
932 uint64_t start, uint64_t end)
933{
934 static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
935
936 uint64_t last_pde = ~0, last_pt = ~0;
937 unsigned count = 0;
938 uint64_t pt_idx;
939 int r;
940
941 start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
942 end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
943
944 /* walk over the address space and update the page directory */
945 for (pt_idx = start; pt_idx <= end; ++pt_idx) {
946 uint64_t pde, pt;
947
948 if (vm->page_tables[pt_idx])
949 continue;
950
951retry:
952 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
953 &vm->page_tables[pt_idx],
954 RADEON_VM_PTE_COUNT * 8,
955 RADEON_GPU_PAGE_SIZE, false);
956
957 if (r == -ENOMEM) {
958 r = radeon_vm_evict(rdev, vm);
959 if (r)
960 return r;
961 goto retry;
962 } else if (r) {
963 return r;
964 }
965
966 pde = vm->pd_gpu_addr + pt_idx * 8;
967
968 pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
969
970 if (((last_pde + 8 * count) != pde) ||
971 ((last_pt + incr * count) != pt)) {
972
973 if (count) {
974 radeon_asic_vm_set_page(rdev, last_pde,
975 last_pt, count, incr,
976 RADEON_VM_PAGE_VALID);
977 }
978
979 count = 1;
980 last_pde = pde;
981 last_pt = pt;
982 } else {
983 ++count;
984 }
985 }
986
987 if (count) {
988 radeon_asic_vm_set_page(rdev, last_pde, last_pt, count,
989 incr, RADEON_VM_PAGE_VALID);
990
991 }
992
993 return 0;
994}
995
996/**
997 * radeon_vm_update_ptes - make sure that page tables are valid
998 *
999 * @rdev: radeon_device pointer
1000 * @vm: requested vm
1001 * @start: start of GPU address range
1002 * @end: end of GPU address range
1003 * @dst: destination address to map to
1004 * @flags: mapping flags
1005 *
1006 * Update the page tables in the range @start - @end (cayman+).
1007 *
1008 * Global and local mutex must be locked!
1009 */
1010static void radeon_vm_update_ptes(struct radeon_device *rdev,
1011 struct radeon_vm *vm,
1012 uint64_t start, uint64_t end,
1013 uint64_t dst, uint32_t flags)
1014{
1015 static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
1016
1017 uint64_t last_pte = ~0, last_dst = ~0;
1018 unsigned count = 0;
1019 uint64_t addr;
1020
1021 start = start / RADEON_GPU_PAGE_SIZE;
1022 end = end / RADEON_GPU_PAGE_SIZE;
1023
1024 /* walk over the address space and update the page tables */
1025 for (addr = start; addr < end; ) {
1026 uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
1027 unsigned nptes;
1028 uint64_t pte;
1029
1030 if ((addr & ~mask) == (end & ~mask))
1031 nptes = end - addr;
1032 else
1033 nptes = RADEON_VM_PTE_COUNT - (addr & mask);
1034
1035 pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
1036 pte += (addr & mask) * 8;
1037
1038 if ((last_pte + 8 * count) != pte) {
1039
1040 if (count) {
1041 radeon_asic_vm_set_page(rdev, last_pte,
1042 last_dst, count,
1043 RADEON_GPU_PAGE_SIZE,
1044 flags);
1045 }
1046
1047 count = nptes;
1048 last_pte = pte;
1049 last_dst = dst;
1050 } else {
1051 count += nptes;
1052 }
1053
1054 addr += nptes;
1055 dst += nptes * RADEON_GPU_PAGE_SIZE;
1056 }
1057
1058 if (count) {
1059 radeon_asic_vm_set_page(rdev, last_pte, last_dst, count,
1060 RADEON_GPU_PAGE_SIZE, flags);
1061 }
1062}
1063
1064/**
868 * radeon_vm_bo_update_pte - map a bo into the vm page table 1065 * radeon_vm_bo_update_pte - map a bo into the vm page table
869 * 1066 *
870 * @rdev: radeon_device pointer 1067 * @rdev: radeon_device pointer
@@ -887,12 +1084,11 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
887 struct radeon_semaphore *sem = NULL; 1084 struct radeon_semaphore *sem = NULL;
888 struct radeon_bo_va *bo_va; 1085 struct radeon_bo_va *bo_va;
889 unsigned nptes, npdes, ndw; 1086 unsigned nptes, npdes, ndw;
890 uint64_t pe, addr; 1087 uint64_t addr;
891 uint64_t pfn;
892 int r; 1088 int r;
893 1089
894 /* nothing to do if vm isn't bound */ 1090 /* nothing to do if vm isn't bound */
895 if (vm->sa_bo == NULL) 1091 if (vm->page_directory == NULL)
896 return 0; 1092 return 0;
897 1093
898 bo_va = radeon_vm_bo_find(vm, bo); 1094 bo_va = radeon_vm_bo_find(vm, bo);
@@ -939,25 +1135,29 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
939 } 1135 }
940 } 1136 }
941 1137
942 /* estimate number of dw needed */
943 /* reserve space for 32-bit padding */
944 ndw = 32;
945
946 nptes = radeon_bo_ngpu_pages(bo); 1138 nptes = radeon_bo_ngpu_pages(bo);
947 1139
948 pfn = (bo_va->soffset / RADEON_GPU_PAGE_SIZE); 1140 /* assume two extra pdes in case the mapping overlaps the borders */
1141 npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
949 1142
950 /* handle cases where a bo spans several pdes */ 1143 /* estimate number of dw needed */
951 npdes = (ALIGN(pfn + nptes, RADEON_VM_PTE_COUNT) - 1144 /* semaphore, fence and padding */
952 (pfn & ~(RADEON_VM_PTE_COUNT - 1))) >> RADEON_VM_BLOCK_SIZE; 1145 ndw = 32;
1146
1147 if (RADEON_VM_BLOCK_SIZE > 11)
1148 /* reserve space for one header for every 2k dwords */
1149 ndw += (nptes >> 11) * 4;
1150 else
1151 /* reserve space for one header for
1152 every (1 << BLOCK_SIZE) entries */
1153 ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
953 1154
954 /* reserve space for one header for every 2k dwords */
955 ndw += (nptes >> 11) * 3;
956 /* reserve space for pte addresses */ 1155 /* reserve space for pte addresses */
957 ndw += nptes * 2; 1156 ndw += nptes * 2;
958 1157
959 /* reserve space for one header for every 2k dwords */ 1158 /* reserve space for one header for every 2k dwords */
960 ndw += (npdes >> 11) * 3; 1159 ndw += (npdes >> 11) * 4;
1160
961 /* reserve space for pde addresses */ 1161 /* reserve space for pde addresses */
962 ndw += npdes * 2; 1162 ndw += npdes * 2;
963 1163
@@ -971,22 +1171,14 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
971 radeon_fence_note_sync(vm->fence, ridx); 1171 radeon_fence_note_sync(vm->fence, ridx);
972 } 1172 }
973 1173
974 /* update page table entries */ 1174 r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset);
975 pe = vm->pd_gpu_addr; 1175 if (r) {
976 pe += radeon_vm_directory_size(rdev); 1176 radeon_ring_unlock_undo(rdev, ring);
977 pe += (bo_va->soffset / RADEON_GPU_PAGE_SIZE) * 8; 1177 return r;
978 1178 }
979 radeon_asic_vm_set_page(rdev, pe, addr, nptes,
980 RADEON_GPU_PAGE_SIZE, bo_va->flags);
981
982 /* update page directory entries */
983 addr = pe;
984
985 pe = vm->pd_gpu_addr;
986 pe += ((bo_va->soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE) * 8;
987 1179
988 radeon_asic_vm_set_page(rdev, pe, addr, npdes, 1180 radeon_vm_update_ptes(rdev, vm, bo_va->soffset, bo_va->eoffset,
989 RADEON_VM_PTE_COUNT * 8, RADEON_VM_PAGE_VALID); 1181 addr, bo_va->flags);
990 1182
991 radeon_fence_unref(&vm->fence); 1183 radeon_fence_unref(&vm->fence);
992 r = radeon_fence_emit(rdev, &vm->fence, ridx); 1184 r = radeon_fence_emit(rdev, &vm->fence, ridx);
@@ -997,6 +1189,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
997 radeon_ring_unlock_commit(rdev, ring); 1189 radeon_ring_unlock_commit(rdev, ring);
998 radeon_semaphore_free(rdev, &sem, vm->fence); 1190 radeon_semaphore_free(rdev, &sem, vm->fence);
999 radeon_fence_unref(&vm->last_flush); 1191 radeon_fence_unref(&vm->last_flush);
1192
1000 return 0; 1193 return 0;
1001} 1194}
1002 1195
@@ -1056,31 +1249,15 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
1056 * @rdev: radeon_device pointer 1249 * @rdev: radeon_device pointer
1057 * @vm: requested vm 1250 * @vm: requested vm
1058 * 1251 *
1059 * Init @vm (cayman+). 1252 * Init @vm fields (cayman+).
1060 * Map the IB pool and any other shared objects into the VM
1061 * by default as it's used by all VMs.
1062 * Returns 0 for success, error for failure.
1063 */ 1253 */
1064int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) 1254void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
1065{ 1255{
1066 struct radeon_bo_va *bo_va;
1067 int r;
1068
1069 vm->id = 0; 1256 vm->id = 0;
1070 vm->fence = NULL; 1257 vm->fence = NULL;
1071 vm->last_pfn = 0;
1072 mutex_init(&vm->mutex); 1258 mutex_init(&vm->mutex);
1073 INIT_LIST_HEAD(&vm->list); 1259 INIT_LIST_HEAD(&vm->list);
1074 INIT_LIST_HEAD(&vm->va); 1260 INIT_LIST_HEAD(&vm->va);
1075
1076 /* map the ib pool buffer at 0 in virtual address space, set
1077 * read only
1078 */
1079 bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo);
1080 r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
1081 RADEON_VM_PAGE_READABLE |
1082 RADEON_VM_PAGE_SNOOPED);
1083 return r;
1084} 1261}
1085 1262
1086/** 1263/**
@@ -1102,17 +1279,6 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
1102 radeon_vm_free_pt(rdev, vm); 1279 radeon_vm_free_pt(rdev, vm);
1103 mutex_unlock(&rdev->vm_manager.lock); 1280 mutex_unlock(&rdev->vm_manager.lock);
1104 1281
1105 /* remove all bo at this point non are busy any more because unbind
1106 * waited for the last vm fence to signal
1107 */
1108 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
1109 if (!r) {
1110 bo_va = radeon_vm_bo_find(vm, rdev->ring_tmp_bo.bo);
1111 list_del_init(&bo_va->bo_list);
1112 list_del_init(&bo_va->vm_list);
1113 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
1114 kfree(bo_va);
1115 }
1116 if (!list_empty(&vm->va)) { 1282 if (!list_empty(&vm->va)) {
1117 dev_err(rdev->dev, "still active bo inside vm\n"); 1283 dev_err(rdev->dev, "still active bo inside vm\n");
1118 } 1284 }
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index f38fbcc46935..fe5c1f6b7957 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -53,6 +53,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
53 struct drm_gem_object **obj) 53 struct drm_gem_object **obj)
54{ 54{
55 struct radeon_bo *robj; 55 struct radeon_bo *robj;
56 unsigned long max_size;
56 int r; 57 int r;
57 58
58 *obj = NULL; 59 *obj = NULL;
@@ -60,11 +61,26 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
60 if (alignment < PAGE_SIZE) { 61 if (alignment < PAGE_SIZE) {
61 alignment = PAGE_SIZE; 62 alignment = PAGE_SIZE;
62 } 63 }
64
65 /* maximun bo size is the minimun btw visible vram and gtt size */
66 max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
67 if (size > max_size) {
68 printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
69 __func__, __LINE__, size >> 20, max_size >> 20);
70 return -ENOMEM;
71 }
72
73retry:
63 r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj); 74 r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
64 if (r) { 75 if (r) {
65 if (r != -ERESTARTSYS) 76 if (r != -ERESTARTSYS) {
77 if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
78 initial_domain |= RADEON_GEM_DOMAIN_GTT;
79 goto retry;
80 }
66 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", 81 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
67 size, initial_domain, alignment, r); 82 size, initial_domain, alignment, r);
83 }
68 return r; 84 return r;
69 } 85 }
70 *obj = &robj->gem_base; 86 *obj = &robj->gem_base;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 83b8d8aa71c0..dc781c49b96b 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -419,6 +419,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
419 /* new gpu have virtual address space support */ 419 /* new gpu have virtual address space support */
420 if (rdev->family >= CHIP_CAYMAN) { 420 if (rdev->family >= CHIP_CAYMAN) {
421 struct radeon_fpriv *fpriv; 421 struct radeon_fpriv *fpriv;
422 struct radeon_bo_va *bo_va;
422 int r; 423 int r;
423 424
424 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); 425 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -426,7 +427,15 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
426 return -ENOMEM; 427 return -ENOMEM;
427 } 428 }
428 429
429 r = radeon_vm_init(rdev, &fpriv->vm); 430 radeon_vm_init(rdev, &fpriv->vm);
431
432 /* map the ib pool buffer read only into
433 * virtual address space */
434 bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
435 rdev->ring_tmp_bo.bo);
436 r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
437 RADEON_VM_PAGE_READABLE |
438 RADEON_VM_PAGE_SNOOPED);
430 if (r) { 439 if (r) {
431 radeon_vm_fini(rdev, &fpriv->vm); 440 radeon_vm_fini(rdev, &fpriv->vm);
432 kfree(fpriv); 441 kfree(fpriv);
@@ -454,6 +463,17 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
454 /* new gpu have virtual address space support */ 463 /* new gpu have virtual address space support */
455 if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) { 464 if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
456 struct radeon_fpriv *fpriv = file_priv->driver_priv; 465 struct radeon_fpriv *fpriv = file_priv->driver_priv;
466 struct radeon_bo_va *bo_va;
467 int r;
468
469 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
470 if (!r) {
471 bo_va = radeon_vm_bo_find(&fpriv->vm,
472 rdev->ring_tmp_bo.bo);
473 if (bo_va)
474 radeon_vm_bo_rmv(rdev, bo_va);
475 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
476 }
457 477
458 radeon_vm_fini(rdev, &fpriv->vm); 478 radeon_vm_fini(rdev, &fpriv->vm);
459 kfree(fpriv); 479 kfree(fpriv);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 5677a424b585..6857cb4efb76 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -295,6 +295,7 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
295 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 295 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
296 struct drm_device *dev = crtc->dev; 296 struct drm_device *dev = crtc->dev;
297 struct radeon_device *rdev = dev->dev_private; 297 struct radeon_device *rdev = dev->dev_private;
298 uint32_t crtc_ext_cntl = 0;
298 uint32_t mask; 299 uint32_t mask;
299 300
300 if (radeon_crtc->crtc_id) 301 if (radeon_crtc->crtc_id)
@@ -307,6 +308,16 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
307 RADEON_CRTC_VSYNC_DIS | 308 RADEON_CRTC_VSYNC_DIS |
308 RADEON_CRTC_HSYNC_DIS); 309 RADEON_CRTC_HSYNC_DIS);
309 310
311 /*
312 * On all dual CRTC GPUs this bit controls the CRTC of the primary DAC.
313 * Therefore it is set in the DAC DMPS function.
314 * This is different for GPU's with a single CRTC but a primary and a
315 * TV DAC: here it controls the single CRTC no matter where it is
316 * routed. Therefore we set it here.
317 */
318 if (rdev->flags & RADEON_SINGLE_CRTC)
319 crtc_ext_cntl = RADEON_CRTC_CRT_ON;
320
310 switch (mode) { 321 switch (mode) {
311 case DRM_MODE_DPMS_ON: 322 case DRM_MODE_DPMS_ON:
312 radeon_crtc->enabled = true; 323 radeon_crtc->enabled = true;
@@ -317,7 +328,7 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
317 else { 328 else {
318 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN | 329 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
319 RADEON_CRTC_DISP_REQ_EN_B)); 330 RADEON_CRTC_DISP_REQ_EN_B));
320 WREG32_P(RADEON_CRTC_EXT_CNTL, 0, ~mask); 331 WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
321 } 332 }
322 drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); 333 drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
323 radeon_crtc_load_lut(crtc); 334 radeon_crtc_load_lut(crtc);
@@ -331,7 +342,7 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
331 else { 342 else {
332 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN | 343 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
333 RADEON_CRTC_DISP_REQ_EN_B)); 344 RADEON_CRTC_DISP_REQ_EN_B));
334 WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask); 345 WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~(mask | crtc_ext_cntl));
335 } 346 }
336 radeon_crtc->enabled = false; 347 radeon_crtc->enabled = false;
337 /* adjust pm to dpms changes AFTER disabling crtcs */ 348 /* adjust pm to dpms changes AFTER disabling crtcs */
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 92487e614778..f5ba2241dacc 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -269,27 +269,6 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
269 .disable = radeon_legacy_encoder_disable, 269 .disable = radeon_legacy_encoder_disable,
270}; 270};
271 271
272#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
273
274static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
275{
276 struct radeon_backlight_privdata *pdata = bl_get_data(bd);
277 uint8_t level;
278
279 /* Convert brightness to hardware level */
280 if (bd->props.brightness < 0)
281 level = 0;
282 else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
283 level = RADEON_MAX_BL_LEVEL;
284 else
285 level = bd->props.brightness;
286
287 if (pdata->negative)
288 level = RADEON_MAX_BL_LEVEL - level;
289
290 return level;
291}
292
293u8 272u8
294radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder) 273radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder)
295{ 274{
@@ -331,6 +310,27 @@ radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 leve
331 radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode); 310 radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
332} 311}
333 312
313#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
314
315static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
316{
317 struct radeon_backlight_privdata *pdata = bl_get_data(bd);
318 uint8_t level;
319
320 /* Convert brightness to hardware level */
321 if (bd->props.brightness < 0)
322 level = 0;
323 else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
324 level = RADEON_MAX_BL_LEVEL;
325 else
326 level = bd->props.brightness;
327
328 if (pdata->negative)
329 level = RADEON_MAX_BL_LEVEL - level;
330
331 return level;
332}
333
334static int radeon_legacy_backlight_update_status(struct backlight_device *bd) 334static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
335{ 335{
336 struct radeon_backlight_privdata *pdata = bl_get_data(bd); 336 struct radeon_backlight_privdata *pdata = bl_get_data(bd);
@@ -370,6 +370,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
370 struct backlight_properties props; 370 struct backlight_properties props;
371 struct radeon_backlight_privdata *pdata; 371 struct radeon_backlight_privdata *pdata;
372 uint8_t backlight_level; 372 uint8_t backlight_level;
373 char bl_name[16];
373 374
374 if (!radeon_encoder->enc_priv) 375 if (!radeon_encoder->enc_priv)
375 return; 376 return;
@@ -389,7 +390,9 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
389 memset(&props, 0, sizeof(props)); 390 memset(&props, 0, sizeof(props));
390 props.max_brightness = RADEON_MAX_BL_LEVEL; 391 props.max_brightness = RADEON_MAX_BL_LEVEL;
391 props.type = BACKLIGHT_RAW; 392 props.type = BACKLIGHT_RAW;
392 bd = backlight_device_register("radeon_bl", &drm_connector->kdev, 393 snprintf(bl_name, sizeof(bl_name),
394 "radeon_bl%d", dev->primary->index);
395 bd = backlight_device_register(bl_name, &drm_connector->kdev,
393 pdata, &radeon_backlight_ops, &props); 396 pdata, &radeon_backlight_ops, &props);
394 if (IS_ERR(bd)) { 397 if (IS_ERR(bd)) {
395 DRM_ERROR("Backlight registration failed\n"); 398 DRM_ERROR("Backlight registration failed\n");
@@ -534,7 +537,9 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode
534 break; 537 break;
535 } 538 }
536 539
537 WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); 540 /* handled in radeon_crtc_dpms() */
541 if (!(rdev->flags & RADEON_SINGLE_CRTC))
542 WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
538 WREG32(RADEON_DAC_CNTL, dac_cntl); 543 WREG32(RADEON_DAC_CNTL, dac_cntl);
539 WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl); 544 WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
540 545
@@ -659,6 +664,8 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
659 664
660 if (ASIC_IS_R300(rdev)) 665 if (ASIC_IS_R300(rdev))
661 tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT); 666 tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT);
667 else if (ASIC_IS_RV100(rdev))
668 tmp |= (0x1ac << RADEON_DAC_FORCE_DATA_SHIFT);
662 else 669 else
663 tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT); 670 tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT);
664 671
@@ -668,6 +675,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
668 tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN; 675 tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN;
669 WREG32(RADEON_DAC_CNTL, tmp); 676 WREG32(RADEON_DAC_CNTL, tmp);
670 677
678 tmp = dac_macro_cntl;
671 tmp &= ~(RADEON_DAC_PDWN_R | 679 tmp &= ~(RADEON_DAC_PDWN_R |
672 RADEON_DAC_PDWN_G | 680 RADEON_DAC_PDWN_G |
673 RADEON_DAC_PDWN_B); 681 RADEON_DAC_PDWN_B);
@@ -991,11 +999,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
991static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder) 999static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
992{ 1000{
993 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1001 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
994 struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; 1002 /* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */
995 if (tmds) {
996 if (tmds->i2c_bus)
997 radeon_i2c_destroy(tmds->i2c_bus);
998 }
999 kfree(radeon_encoder->enc_priv); 1003 kfree(radeon_encoder->enc_priv);
1000 drm_encoder_cleanup(encoder); 1004 drm_encoder_cleanup(encoder);
1001 kfree(radeon_encoder); 1005 kfree(radeon_encoder);
@@ -1093,7 +1097,8 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
1093 } else { 1097 } else {
1094 if (is_tv) 1098 if (is_tv)
1095 WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl); 1099 WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl);
1096 else 1100 /* handled in radeon_crtc_dpms() */
1101 else if (!(rdev->flags & RADEON_SINGLE_CRTC))
1097 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); 1102 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
1098 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); 1103 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
1099 } 1104 }
@@ -1417,13 +1422,104 @@ static bool radeon_legacy_tv_detect(struct drm_encoder *encoder,
1417 return found; 1422 return found;
1418} 1423}
1419 1424
1425static bool radeon_legacy_ext_dac_detect(struct drm_encoder *encoder,
1426 struct drm_connector *connector)
1427{
1428 struct drm_device *dev = encoder->dev;
1429 struct radeon_device *rdev = dev->dev_private;
1430 uint32_t gpio_monid, fp2_gen_cntl, disp_output_cntl, crtc2_gen_cntl;
1431 uint32_t disp_lin_trans_grph_a, disp_lin_trans_grph_b, disp_lin_trans_grph_c;
1432 uint32_t disp_lin_trans_grph_d, disp_lin_trans_grph_e, disp_lin_trans_grph_f;
1433 uint32_t tmp, crtc2_h_total_disp, crtc2_v_total_disp;
1434 uint32_t crtc2_h_sync_strt_wid, crtc2_v_sync_strt_wid;
1435 bool found = false;
1436 int i;
1437
1438 /* save the regs we need */
1439 gpio_monid = RREG32(RADEON_GPIO_MONID);
1440 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
1441 disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
1442 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
1443 disp_lin_trans_grph_a = RREG32(RADEON_DISP_LIN_TRANS_GRPH_A);
1444 disp_lin_trans_grph_b = RREG32(RADEON_DISP_LIN_TRANS_GRPH_B);
1445 disp_lin_trans_grph_c = RREG32(RADEON_DISP_LIN_TRANS_GRPH_C);
1446 disp_lin_trans_grph_d = RREG32(RADEON_DISP_LIN_TRANS_GRPH_D);
1447 disp_lin_trans_grph_e = RREG32(RADEON_DISP_LIN_TRANS_GRPH_E);
1448 disp_lin_trans_grph_f = RREG32(RADEON_DISP_LIN_TRANS_GRPH_F);
1449 crtc2_h_total_disp = RREG32(RADEON_CRTC2_H_TOTAL_DISP);
1450 crtc2_v_total_disp = RREG32(RADEON_CRTC2_V_TOTAL_DISP);
1451 crtc2_h_sync_strt_wid = RREG32(RADEON_CRTC2_H_SYNC_STRT_WID);
1452 crtc2_v_sync_strt_wid = RREG32(RADEON_CRTC2_V_SYNC_STRT_WID);
1453
1454 tmp = RREG32(RADEON_GPIO_MONID);
1455 tmp &= ~RADEON_GPIO_A_0;
1456 WREG32(RADEON_GPIO_MONID, tmp);
1457
1458 WREG32(RADEON_FP2_GEN_CNTL, (RADEON_FP2_ON |
1459 RADEON_FP2_PANEL_FORMAT |
1460 R200_FP2_SOURCE_SEL_TRANS_UNIT |
1461 RADEON_FP2_DVO_EN |
1462 R200_FP2_DVO_RATE_SEL_SDR));
1463
1464 WREG32(RADEON_DISP_OUTPUT_CNTL, (RADEON_DISP_DAC_SOURCE_RMX |
1465 RADEON_DISP_TRANS_MATRIX_GRAPHICS));
1466
1467 WREG32(RADEON_CRTC2_GEN_CNTL, (RADEON_CRTC2_EN |
1468 RADEON_CRTC2_DISP_REQ_EN_B));
1469
1470 WREG32(RADEON_DISP_LIN_TRANS_GRPH_A, 0x00000000);
1471 WREG32(RADEON_DISP_LIN_TRANS_GRPH_B, 0x000003f0);
1472 WREG32(RADEON_DISP_LIN_TRANS_GRPH_C, 0x00000000);
1473 WREG32(RADEON_DISP_LIN_TRANS_GRPH_D, 0x000003f0);
1474 WREG32(RADEON_DISP_LIN_TRANS_GRPH_E, 0x00000000);
1475 WREG32(RADEON_DISP_LIN_TRANS_GRPH_F, 0x000003f0);
1476
1477 WREG32(RADEON_CRTC2_H_TOTAL_DISP, 0x01000008);
1478 WREG32(RADEON_CRTC2_H_SYNC_STRT_WID, 0x00000800);
1479 WREG32(RADEON_CRTC2_V_TOTAL_DISP, 0x00080001);
1480 WREG32(RADEON_CRTC2_V_SYNC_STRT_WID, 0x00000080);
1481
1482 for (i = 0; i < 200; i++) {
1483 tmp = RREG32(RADEON_GPIO_MONID);
1484 if (tmp & RADEON_GPIO_Y_0)
1485 found = true;
1486
1487 if (found)
1488 break;
1489
1490 if (!drm_can_sleep())
1491 mdelay(1);
1492 else
1493 msleep(1);
1494 }
1495
1496 /* restore the regs we used */
1497 WREG32(RADEON_DISP_LIN_TRANS_GRPH_A, disp_lin_trans_grph_a);
1498 WREG32(RADEON_DISP_LIN_TRANS_GRPH_B, disp_lin_trans_grph_b);
1499 WREG32(RADEON_DISP_LIN_TRANS_GRPH_C, disp_lin_trans_grph_c);
1500 WREG32(RADEON_DISP_LIN_TRANS_GRPH_D, disp_lin_trans_grph_d);
1501 WREG32(RADEON_DISP_LIN_TRANS_GRPH_E, disp_lin_trans_grph_e);
1502 WREG32(RADEON_DISP_LIN_TRANS_GRPH_F, disp_lin_trans_grph_f);
1503 WREG32(RADEON_CRTC2_H_TOTAL_DISP, crtc2_h_total_disp);
1504 WREG32(RADEON_CRTC2_V_TOTAL_DISP, crtc2_v_total_disp);
1505 WREG32(RADEON_CRTC2_H_SYNC_STRT_WID, crtc2_h_sync_strt_wid);
1506 WREG32(RADEON_CRTC2_V_SYNC_STRT_WID, crtc2_v_sync_strt_wid);
1507 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
1508 WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
1509 WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
1510 WREG32(RADEON_GPIO_MONID, gpio_monid);
1511
1512 return found;
1513}
1514
1420static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder, 1515static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder,
1421 struct drm_connector *connector) 1516 struct drm_connector *connector)
1422{ 1517{
1423 struct drm_device *dev = encoder->dev; 1518 struct drm_device *dev = encoder->dev;
1424 struct radeon_device *rdev = dev->dev_private; 1519 struct radeon_device *rdev = dev->dev_private;
1425 uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, dac_ext_cntl; 1520 uint32_t crtc2_gen_cntl = 0, tv_dac_cntl, dac_cntl2, dac_ext_cntl;
1426 uint32_t disp_hw_debug, disp_output_cntl, gpiopad_a, pixclks_cntl, tmp; 1521 uint32_t gpiopad_a = 0, pixclks_cntl, tmp;
1522 uint32_t disp_output_cntl = 0, disp_hw_debug = 0, crtc_ext_cntl = 0;
1427 enum drm_connector_status found = connector_status_disconnected; 1523 enum drm_connector_status found = connector_status_disconnected;
1428 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1524 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1429 struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; 1525 struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
@@ -1460,12 +1556,27 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
1460 return connector_status_disconnected; 1556 return connector_status_disconnected;
1461 } 1557 }
1462 1558
1559 /* R200 uses an external DAC for secondary DAC */
1560 if (rdev->family == CHIP_R200) {
1561 if (radeon_legacy_ext_dac_detect(encoder, connector))
1562 found = connector_status_connected;
1563 return found;
1564 }
1565
1463 /* save the regs we need */ 1566 /* save the regs we need */
1464 pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL); 1567 pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
1465 gpiopad_a = ASIC_IS_R300(rdev) ? RREG32(RADEON_GPIOPAD_A) : 0; 1568
1466 disp_output_cntl = ASIC_IS_R300(rdev) ? RREG32(RADEON_DISP_OUTPUT_CNTL) : 0; 1569 if (rdev->flags & RADEON_SINGLE_CRTC) {
1467 disp_hw_debug = ASIC_IS_R300(rdev) ? 0 : RREG32(RADEON_DISP_HW_DEBUG); 1570 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
1468 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); 1571 } else {
1572 if (ASIC_IS_R300(rdev)) {
1573 gpiopad_a = RREG32(RADEON_GPIOPAD_A);
1574 disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
1575 } else {
1576 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
1577 }
1578 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
1579 }
1469 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); 1580 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
1470 dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL); 1581 dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
1471 dac_cntl2 = RREG32(RADEON_DAC_CNTL2); 1582 dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
@@ -1474,22 +1585,24 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
1474 | RADEON_PIX2CLK_DAC_ALWAYS_ONb); 1585 | RADEON_PIX2CLK_DAC_ALWAYS_ONb);
1475 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); 1586 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
1476 1587
1477 if (ASIC_IS_R300(rdev)) 1588 if (rdev->flags & RADEON_SINGLE_CRTC) {
1478 WREG32_P(RADEON_GPIOPAD_A, 1, ~1); 1589 tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON;
1479 1590 WREG32(RADEON_CRTC_EXT_CNTL, tmp);
1480 tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK;
1481 tmp |= RADEON_CRTC2_CRT2_ON |
1482 (2 << RADEON_CRTC2_PIX_WIDTH_SHIFT);
1483
1484 WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
1485
1486 if (ASIC_IS_R300(rdev)) {
1487 tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
1488 tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
1489 WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);
1490 } else { 1591 } else {
1491 tmp = disp_hw_debug & ~RADEON_CRT2_DISP1_SEL; 1592 tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK;
1492 WREG32(RADEON_DISP_HW_DEBUG, tmp); 1593 tmp |= RADEON_CRTC2_CRT2_ON |
1594 (2 << RADEON_CRTC2_PIX_WIDTH_SHIFT);
1595 WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
1596
1597 if (ASIC_IS_R300(rdev)) {
1598 WREG32_P(RADEON_GPIOPAD_A, 1, ~1);
1599 tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
1600 tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
1601 WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);
1602 } else {
1603 tmp = disp_hw_debug & ~RADEON_CRT2_DISP1_SEL;
1604 WREG32(RADEON_DISP_HW_DEBUG, tmp);
1605 }
1493 } 1606 }
1494 1607
1495 tmp = RADEON_TV_DAC_NBLANK | 1608 tmp = RADEON_TV_DAC_NBLANK |
@@ -1531,14 +1644,19 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
1531 WREG32(RADEON_DAC_CNTL2, dac_cntl2); 1644 WREG32(RADEON_DAC_CNTL2, dac_cntl2);
1532 WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl); 1645 WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
1533 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); 1646 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
1534 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
1535 1647
1536 if (ASIC_IS_R300(rdev)) { 1648 if (rdev->flags & RADEON_SINGLE_CRTC) {
1537 WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); 1649 WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
1538 WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
1539 } else { 1650 } else {
1540 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); 1651 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
1652 if (ASIC_IS_R300(rdev)) {
1653 WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
1654 WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
1655 } else {
1656 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
1657 }
1541 } 1658 }
1659
1542 WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); 1660 WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
1543 1661
1544 return found; 1662 return found;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 8b27dd6e3144..b91118ccef86 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -105,7 +105,6 @@ int radeon_bo_create(struct radeon_device *rdev,
105 struct radeon_bo *bo; 105 struct radeon_bo *bo;
106 enum ttm_bo_type type; 106 enum ttm_bo_type type;
107 unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; 107 unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
108 unsigned long max_size = 0;
109 size_t acc_size; 108 size_t acc_size;
110 int r; 109 int r;
111 110
@@ -121,18 +120,9 @@ int radeon_bo_create(struct radeon_device *rdev,
121 } 120 }
122 *bo_ptr = NULL; 121 *bo_ptr = NULL;
123 122
124 /* maximun bo size is the minimun btw visible vram and gtt size */
125 max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
126 if ((page_align << PAGE_SHIFT) >= max_size) {
127 printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n",
128 __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
129 return -ENOMEM;
130 }
131
132 acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size, 123 acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
133 sizeof(struct radeon_bo)); 124 sizeof(struct radeon_bo));
134 125
135retry:
136 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); 126 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
137 if (bo == NULL) 127 if (bo == NULL)
138 return -ENOMEM; 128 return -ENOMEM;
@@ -154,15 +144,6 @@ retry:
154 acc_size, sg, &radeon_ttm_bo_destroy); 144 acc_size, sg, &radeon_ttm_bo_destroy);
155 up_read(&rdev->pm.mclk_lock); 145 up_read(&rdev->pm.mclk_lock);
156 if (unlikely(r != 0)) { 146 if (unlikely(r != 0)) {
157 if (r != -ERESTARTSYS) {
158 if (domain == RADEON_GEM_DOMAIN_VRAM) {
159 domain |= RADEON_GEM_DOMAIN_GTT;
160 goto retry;
161 }
162 dev_err(rdev->dev,
163 "object_init failed for (%lu, 0x%08X)\n",
164 size, domain);
165 }
166 return r; 147 return r;
167 } 148 }
168 *bo_ptr = bo; 149 *bo_ptr = bo;
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index bba66902c83b..47634f27f2e5 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -305,7 +305,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
305{ 305{
306#if DRM_DEBUG_CODE 306#if DRM_DEBUG_CODE
307 if (ring->count_dw <= 0) { 307 if (ring->count_dw <= 0) {
308 DRM_ERROR("radeon: writting more dword to ring than expected !\n"); 308 DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
309 } 309 }
310#endif 310#endif
311 ring->ring[ring->wptr++] = v; 311 ring->ring[ring->wptr++] = v;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index f79633a036c3..b0db712060fb 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2407,12 +2407,13 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
2407 WREG32(0x15DC, 0); 2407 WREG32(0x15DC, 0);
2408 2408
2409 /* empty context1-15 */ 2409 /* empty context1-15 */
2410 /* FIXME start with 4G, once using 2 level pt switch to full
2411 * vm size space
2412 */
2413 /* set vm size, must be a multiple of 4 */ 2410 /* set vm size, must be a multiple of 4 */
2414 WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); 2411 WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
2415 WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn); 2412 WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
2413 /* Assign the pt base to something valid for now; the pts used for
2414 * the VMs are determined by the application and setup and assigned
2415 * on the fly in the vm part of radeon_gart.c
2416 */
2416 for (i = 1; i < 16; i++) { 2417 for (i = 1; i < 16; i++) {
2417 if (i < 8) 2418 if (i < 8)
2418 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), 2419 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
@@ -2807,26 +2808,31 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
2807{ 2808{
2808 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index]; 2809 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
2809 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); 2810 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
2810 int i;
2811 uint64_t value;
2812 2811
2813 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 2 + count * 2)); 2812 while (count) {
2814 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 2813 unsigned ndw = 2 + count * 2;
2815 WRITE_DATA_DST_SEL(1))); 2814 if (ndw > 0x3FFE)
2816 radeon_ring_write(ring, pe); 2815 ndw = 0x3FFE;
2817 radeon_ring_write(ring, upper_32_bits(pe)); 2816
2818 for (i = 0; i < count; ++i) { 2817 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
2819 if (flags & RADEON_VM_PAGE_SYSTEM) { 2818 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2820 value = radeon_vm_map_gart(rdev, addr); 2819 WRITE_DATA_DST_SEL(1)));
2821 value &= 0xFFFFFFFFFFFFF000ULL; 2820 radeon_ring_write(ring, pe);
2822 } else if (flags & RADEON_VM_PAGE_VALID) 2821 radeon_ring_write(ring, upper_32_bits(pe));
2823 value = addr; 2822 for (; ndw > 2; ndw -= 2, --count, pe += 8) {
2824 else 2823 uint64_t value;
2825 value = 0; 2824 if (flags & RADEON_VM_PAGE_SYSTEM) {
2826 addr += incr; 2825 value = radeon_vm_map_gart(rdev, addr);
2827 value |= r600_flags; 2826 value &= 0xFFFFFFFFFFFFF000ULL;
2828 radeon_ring_write(ring, value); 2827 } else if (flags & RADEON_VM_PAGE_VALID)
2829 radeon_ring_write(ring, upper_32_bits(value)); 2828 value = addr;
2829 else
2830 value = 0;
2831 addr += incr;
2832 value |= r600_flags;
2833 radeon_ring_write(ring, value);
2834 radeon_ring_write(ring, upper_32_bits(value));
2835 }
2830 } 2836 }
2831} 2837}
2832 2838
@@ -2867,6 +2873,10 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
2867 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); 2873 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
2868 radeon_ring_write(ring, 0); 2874 radeon_ring_write(ring, 0);
2869 radeon_ring_write(ring, 1 << vm->id); 2875 radeon_ring_write(ring, 1 << vm->id);
2876
2877 /* sync PFP to ME, otherwise we might get invalid PFP reads */
2878 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2879 radeon_ring_write(ring, 0x0);
2870} 2880}
2871 2881
2872/* 2882/*
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index c71d493fd0c5..1c350fc4e449 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -201,6 +201,8 @@ static int shmob_drm_load(struct drm_device *dev, unsigned long flags)
201 goto done; 201 goto done;
202 } 202 }
203 203
204 platform_set_drvdata(pdev, sdev);
205
204done: 206done:
205 if (ret) 207 if (ret)
206 shmob_drm_unload(dev); 208 shmob_drm_unload(dev);
@@ -299,11 +301,9 @@ static struct drm_driver shmob_drm_driver = {
299#if CONFIG_PM_SLEEP 301#if CONFIG_PM_SLEEP
300static int shmob_drm_pm_suspend(struct device *dev) 302static int shmob_drm_pm_suspend(struct device *dev)
301{ 303{
302 struct platform_device *pdev = to_platform_device(dev); 304 struct shmob_drm_device *sdev = dev_get_drvdata(dev);
303 struct drm_device *ddev = platform_get_drvdata(pdev);
304 struct shmob_drm_device *sdev = ddev->dev_private;
305 305
306 drm_kms_helper_poll_disable(ddev); 306 drm_kms_helper_poll_disable(sdev->ddev);
307 shmob_drm_crtc_suspend(&sdev->crtc); 307 shmob_drm_crtc_suspend(&sdev->crtc);
308 308
309 return 0; 309 return 0;
@@ -311,9 +311,7 @@ static int shmob_drm_pm_suspend(struct device *dev)
311 311
312static int shmob_drm_pm_resume(struct device *dev) 312static int shmob_drm_pm_resume(struct device *dev)
313{ 313{
314 struct platform_device *pdev = to_platform_device(dev); 314 struct shmob_drm_device *sdev = dev_get_drvdata(dev);
315 struct drm_device *ddev = platform_get_drvdata(pdev);
316 struct shmob_drm_device *sdev = ddev->dev_private;
317 315
318 mutex_lock(&sdev->ddev->mode_config.mutex); 316 mutex_lock(&sdev->ddev->mode_config.mutex);
319 shmob_drm_crtc_resume(&sdev->crtc); 317 shmob_drm_crtc_resume(&sdev->crtc);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 402ab69f9f99..bf6e4b5a73b5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -580,6 +580,7 @@ retry:
580 if (unlikely(ret != 0)) 580 if (unlikely(ret != 0))
581 return ret; 581 return ret;
582 582
583retry_reserve:
583 spin_lock(&glob->lru_lock); 584 spin_lock(&glob->lru_lock);
584 585
585 if (unlikely(list_empty(&bo->ddestroy))) { 586 if (unlikely(list_empty(&bo->ddestroy))) {
@@ -587,14 +588,20 @@ retry:
587 return 0; 588 return 0;
588 } 589 }
589 590
590 ret = ttm_bo_reserve_locked(bo, interruptible, 591 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
591 no_wait_reserve, false, 0);
592 592
593 if (unlikely(ret != 0)) { 593 if (unlikely(ret == -EBUSY)) {
594 spin_unlock(&glob->lru_lock); 594 spin_unlock(&glob->lru_lock);
595 return ret; 595 if (likely(!no_wait_reserve))
596 ret = ttm_bo_wait_unreserved(bo, interruptible);
597 if (unlikely(ret != 0))
598 return ret;
599
600 goto retry_reserve;
596 } 601 }
597 602
603 BUG_ON(ret != 0);
604
598 /** 605 /**
599 * We can re-check for sync object without taking 606 * We can re-check for sync object without taking
600 * the bo::lock since setting the sync object requires 607 * the bo::lock since setting the sync object requires
@@ -811,17 +818,14 @@ retry:
811 no_wait_reserve, no_wait_gpu); 818 no_wait_reserve, no_wait_gpu);
812 kref_put(&bo->list_kref, ttm_bo_release_list); 819 kref_put(&bo->list_kref, ttm_bo_release_list);
813 820
814 if (likely(ret == 0 || ret == -ERESTARTSYS)) 821 return ret;
815 return ret;
816
817 goto retry;
818 } 822 }
819 823
820 ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0); 824 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
821 825
822 if (unlikely(ret == -EBUSY)) { 826 if (unlikely(ret == -EBUSY)) {
823 spin_unlock(&glob->lru_lock); 827 spin_unlock(&glob->lru_lock);
824 if (likely(!no_wait_gpu)) 828 if (likely(!no_wait_reserve))
825 ret = ttm_bo_wait_unreserved(bo, interruptible); 829 ret = ttm_bo_wait_unreserved(bo, interruptible);
826 830
827 kref_put(&bo->list_kref, ttm_bo_release_list); 831 kref_put(&bo->list_kref, ttm_bo_release_list);
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index fccd361f7b50..87aa5f5d3c88 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -104,7 +104,7 @@ udl_fb_user_fb_create(struct drm_device *dev,
104 104
105int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr, 105int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
106 const char *front, char **urb_buf_ptr, 106 const char *front, char **urb_buf_ptr,
107 u32 byte_offset, u32 byte_width, 107 u32 byte_offset, u32 device_byte_offset, u32 byte_width,
108 int *ident_ptr, int *sent_ptr); 108 int *ident_ptr, int *sent_ptr);
109 109
110int udl_dumb_create(struct drm_file *file_priv, 110int udl_dumb_create(struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 69a2b16f42a6..d4ab3beaada0 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -114,9 +114,10 @@ static void udlfb_dpy_deferred_io(struct fb_info *info,
114 list_for_each_entry(cur, &fbdefio->pagelist, lru) { 114 list_for_each_entry(cur, &fbdefio->pagelist, lru) {
115 115
116 if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8), 116 if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8),
117 &urb, (char *) info->fix.smem_start, 117 &urb, (char *) info->fix.smem_start,
118 &cmd, cur->index << PAGE_SHIFT, 118 &cmd, cur->index << PAGE_SHIFT,
119 PAGE_SIZE, &bytes_identical, &bytes_sent)) 119 cur->index << PAGE_SHIFT,
120 PAGE_SIZE, &bytes_identical, &bytes_sent))
120 goto error; 121 goto error;
121 bytes_rendered += PAGE_SIZE; 122 bytes_rendered += PAGE_SIZE;
122 } 123 }
@@ -187,10 +188,11 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
187 for (i = y; i < y + height ; i++) { 188 for (i = y; i < y + height ; i++) {
188 const int line_offset = fb->base.pitches[0] * i; 189 const int line_offset = fb->base.pitches[0] * i;
189 const int byte_offset = line_offset + (x * bpp); 190 const int byte_offset = line_offset + (x * bpp);
190 191 const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
191 if (udl_render_hline(dev, bpp, &urb, 192 if (udl_render_hline(dev, bpp, &urb,
192 (char *) fb->obj->vmapping, 193 (char *) fb->obj->vmapping,
193 &cmd, byte_offset, width * bpp, 194 &cmd, byte_offset, dev_byte_offset,
195 width * bpp,
194 &bytes_identical, &bytes_sent)) 196 &bytes_identical, &bytes_sent))
195 goto error; 197 goto error;
196 } 198 }
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index dc095526ffb7..142fee5f983f 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -213,11 +213,12 @@ static void udl_compress_hline16(
213 */ 213 */
214int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr, 214int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
215 const char *front, char **urb_buf_ptr, 215 const char *front, char **urb_buf_ptr,
216 u32 byte_offset, u32 byte_width, 216 u32 byte_offset, u32 device_byte_offset,
217 u32 byte_width,
217 int *ident_ptr, int *sent_ptr) 218 int *ident_ptr, int *sent_ptr)
218{ 219{
219 const u8 *line_start, *line_end, *next_pixel; 220 const u8 *line_start, *line_end, *next_pixel;
220 u32 base16 = 0 + (byte_offset / bpp) * 2; 221 u32 base16 = 0 + (device_byte_offset / bpp) * 2;
221 struct urb *urb = *urb_ptr; 222 struct urb *urb = *urb_ptr;
222 u8 *cmd = *urb_buf_ptr; 223 u8 *cmd = *urb_buf_ptr;
223 u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length; 224 u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 06ebdbb6ea02..fd7722aecf77 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -522,6 +522,12 @@ static const struct hid_device_id apple_devices[] = {
522 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, 522 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
523 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS), 523 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
524 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, 524 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
525 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI),
526 .driver_data = APPLE_HAS_FN },
527 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO),
528 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
529 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS),
530 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
525 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI), 531 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
526 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 532 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
527 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO), 533 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index bd3971bf31bf..f4109fd657ff 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1532,6 +1532,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
1532 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) }, 1532 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
1533 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) }, 1533 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
1534 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) }, 1534 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
1535 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
1536 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
1537 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
1535 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, 1538 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
1536 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, 1539 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
1537 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, 1540 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -2139,6 +2142,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
2139 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) }, 2142 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
2140 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) }, 2143 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
2141 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) }, 2144 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
2145 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
2146 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
2147 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
2142 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, 2148 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
2143 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 2149 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
2144 { } 2150 { }
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 269b50912a4a..9d7a42857ea1 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -118,6 +118,9 @@
118#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252 118#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252
119#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253 119#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253
120#define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254 120#define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254
121#define USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI 0x0259
122#define USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO 0x025a
123#define USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS 0x025b
121#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249 124#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249
122#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a 125#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a
123#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b 126#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 3acdcfcc17df..f676c01bb471 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -28,22 +28,30 @@
28#define MS_RDESC 0x08 28#define MS_RDESC 0x08
29#define MS_NOGET 0x10 29#define MS_NOGET 0x10
30#define MS_DUPLICATE_USAGES 0x20 30#define MS_DUPLICATE_USAGES 0x20
31#define MS_RDESC_3K 0x40
31 32
32/*
33 * Microsoft Wireless Desktop Receiver (Model 1028) has
34 * 'Usage Min/Max' where it ought to have 'Physical Min/Max'
35 */
36static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc, 33static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
37 unsigned int *rsize) 34 unsigned int *rsize)
38{ 35{
39 unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); 36 unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
40 37
38 /*
39 * Microsoft Wireless Desktop Receiver (Model 1028) has
40 * 'Usage Min/Max' where it ought to have 'Physical Min/Max'
41 */
41 if ((quirks & MS_RDESC) && *rsize == 571 && rdesc[557] == 0x19 && 42 if ((quirks & MS_RDESC) && *rsize == 571 && rdesc[557] == 0x19 &&
42 rdesc[559] == 0x29) { 43 rdesc[559] == 0x29) {
43 hid_info(hdev, "fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n"); 44 hid_info(hdev, "fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n");
44 rdesc[557] = 0x35; 45 rdesc[557] = 0x35;
45 rdesc[559] = 0x45; 46 rdesc[559] = 0x45;
46 } 47 }
48 /* the same as above (s/usage/physical/) */
49 if ((quirks & MS_RDESC_3K) && *rsize == 106 &&
50 !memcmp((char []){ 0x19, 0x00, 0x29, 0xff },
51 &rdesc[94], 4)) {
52 rdesc[94] = 0x35;
53 rdesc[96] = 0x45;
54 }
47 return rdesc; 55 return rdesc;
48} 56}
49 57
@@ -192,7 +200,7 @@ static const struct hid_device_id ms_devices[] = {
192 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB), 200 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB),
193 .driver_data = MS_PRESENTER }, 201 .driver_data = MS_PRESENTER },
194 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K), 202 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K),
195 .driver_data = MS_ERGONOMY }, 203 .driver_data = MS_ERGONOMY | MS_RDESC_3K },
196 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0), 204 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0),
197 .driver_data = MS_NOGET }, 205 .driver_data = MS_NOGET },
198 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500), 206 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 3eb02b94fc87..7867d69f0efe 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -210,8 +210,7 @@ static struct mt_class mt_classes[] = {
210 }, 210 },
211 { .name = MT_CLS_GENERALTOUCH_PWT_TENFINGERS, 211 { .name = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
212 .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP | 212 .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
213 MT_QUIRK_SLOT_IS_CONTACTNUMBER, 213 MT_QUIRK_SLOT_IS_CONTACTNUMBER
214 .maxcontacts = 10
215 }, 214 },
216 215
217 { .name = MT_CLS_FLATFROG, 216 { .name = MT_CLS_FLATFROG,
@@ -421,11 +420,11 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
421 * contact max are global to the report */ 420 * contact max are global to the report */
422 td->last_field_index = field->index; 421 td->last_field_index = field->index;
423 return -1; 422 return -1;
424 }
425 case HID_DG_TOUCH: 423 case HID_DG_TOUCH:
426 /* Legacy devices use TIPSWITCH and not TOUCH. 424 /* Legacy devices use TIPSWITCH and not TOUCH.
427 * Let's just ignore this field. */ 425 * Let's just ignore this field. */
428 return -1; 426 return -1;
427 }
429 /* let hid-input decide for the others */ 428 /* let hid-input decide for the others */
430 return 0; 429 return 0;
431 430
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 406537420fff..f4c3d28cd1fc 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -146,14 +146,14 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
146 146
147 if (ret != 0) { 147 if (ret != 0) {
148 err = ret; 148 err = ret;
149 goto errorout; 149 goto error0;
150 } 150 }
151 151
152 ret = hv_ringbuffer_init( 152 ret = hv_ringbuffer_init(
153 &newchannel->inbound, in, recv_ringbuffer_size); 153 &newchannel->inbound, in, recv_ringbuffer_size);
154 if (ret != 0) { 154 if (ret != 0) {
155 err = ret; 155 err = ret;
156 goto errorout; 156 goto error0;
157 } 157 }
158 158
159 159
@@ -168,7 +168,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
168 168
169 if (ret != 0) { 169 if (ret != 0) {
170 err = ret; 170 err = ret;
171 goto errorout; 171 goto error0;
172 } 172 }
173 173
174 /* Create and init the channel open message */ 174 /* Create and init the channel open message */
@@ -177,7 +177,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
177 GFP_KERNEL); 177 GFP_KERNEL);
178 if (!open_info) { 178 if (!open_info) {
179 err = -ENOMEM; 179 err = -ENOMEM;
180 goto errorout; 180 goto error0;
181 } 181 }
182 182
183 init_completion(&open_info->waitevent); 183 init_completion(&open_info->waitevent);
@@ -193,7 +193,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
193 193
194 if (userdatalen > MAX_USER_DEFINED_BYTES) { 194 if (userdatalen > MAX_USER_DEFINED_BYTES) {
195 err = -EINVAL; 195 err = -EINVAL;
196 goto errorout; 196 goto error0;
197 } 197 }
198 198
199 if (userdatalen) 199 if (userdatalen)
@@ -208,19 +208,18 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
208 sizeof(struct vmbus_channel_open_channel)); 208 sizeof(struct vmbus_channel_open_channel));
209 209
210 if (ret != 0) 210 if (ret != 0)
211 goto cleanup; 211 goto error1;
212 212
213 t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ); 213 t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
214 if (t == 0) { 214 if (t == 0) {
215 err = -ETIMEDOUT; 215 err = -ETIMEDOUT;
216 goto errorout; 216 goto error1;
217 } 217 }
218 218
219 219
220 if (open_info->response.open_result.status) 220 if (open_info->response.open_result.status)
221 err = open_info->response.open_result.status; 221 err = open_info->response.open_result.status;
222 222
223cleanup:
224 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); 223 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
225 list_del(&open_info->msglistentry); 224 list_del(&open_info->msglistentry);
226 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); 225 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
@@ -228,9 +227,12 @@ cleanup:
228 kfree(open_info); 227 kfree(open_info);
229 return err; 228 return err;
230 229
231errorout: 230error1:
232 hv_ringbuffer_cleanup(&newchannel->outbound); 231 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
233 hv_ringbuffer_cleanup(&newchannel->inbound); 232 list_del(&open_info->msglistentry);
233 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
234
235error0:
234 free_pages((unsigned long)out, 236 free_pages((unsigned long)out,
235 get_order(send_ringbuffer_size + recv_ringbuffer_size)); 237 get_order(send_ringbuffer_size + recv_ringbuffer_size));
236 kfree(open_info); 238 kfree(open_info);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index c74e73b2069a..c4633de64465 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -334,16 +334,6 @@ config SENSORS_DA9052_ADC
334 This driver can also be built as module. If so, the module 334 This driver can also be built as module. If so, the module
335 will be called da9052-hwmon. 335 will be called da9052-hwmon.
336 336
337config SENSORS_EXYNOS4_TMU
338 tristate "Temperature sensor on Samsung EXYNOS4"
339 depends on ARCH_EXYNOS4
340 help
341 If you say yes here you get support for TMU (Thermal Management
342 Unit) on SAMSUNG EXYNOS4 series of SoC.
343
344 This driver can also be built as a module. If so, the module
345 will be called exynos4-tmu.
346
347config SENSORS_I5K_AMB 337config SENSORS_I5K_AMB
348 tristate "FB-DIMM AMB temperature sensor on Intel 5000 series chipsets" 338 tristate "FB-DIMM AMB temperature sensor on Intel 5000 series chipsets"
349 depends on PCI 339 depends on PCI
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index a62ce17ddbfc..8d5fcb5e8e9f 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -50,7 +50,6 @@ obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
50obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o 50obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o
51obj-$(CONFIG_SENSORS_EMC2103) += emc2103.o 51obj-$(CONFIG_SENSORS_EMC2103) += emc2103.o
52obj-$(CONFIG_SENSORS_EMC6W201) += emc6w201.o 52obj-$(CONFIG_SENSORS_EMC6W201) += emc6w201.o
53obj-$(CONFIG_SENSORS_EXYNOS4_TMU) += exynos4_tmu.o
54obj-$(CONFIG_SENSORS_F71805F) += f71805f.o 53obj-$(CONFIG_SENSORS_F71805F) += f71805f.o
55obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o 54obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o
56obj-$(CONFIG_SENSORS_F75375S) += f75375s.o 55obj-$(CONFIG_SENSORS_F75375S) += f75375s.o
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 23ab3c496b05..1672e2a5db46 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -29,6 +29,7 @@
29#include <linux/kdev_t.h> 29#include <linux/kdev_t.h>
30#include <linux/sched.h> 30#include <linux/sched.h>
31#include <linux/time.h> 31#include <linux/time.h>
32#include <linux/err.h>
32#include <acpi/acpi_drivers.h> 33#include <acpi/acpi_drivers.h>
33#include <acpi/acpi_bus.h> 34#include <acpi/acpi_bus.h>
34 35
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 8b24d1a4a2b4..dafa477715e3 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -50,6 +50,7 @@
50#include <linux/hwmon-vid.h> 50#include <linux/hwmon-vid.h>
51#include <linux/err.h> 51#include <linux/err.h>
52#include <linux/mutex.h> 52#include <linux/mutex.h>
53#include <linux/jiffies.h>
53 54
54/* Addresses to scan */ 55/* Addresses to scan */
55static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, 56static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index fe72c69a2d68..517f1856c706 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -15,7 +15,6 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/delay.h>
19#include <linux/mutex.h> 18#include <linux/mutex.h>
20#include <linux/jiffies.h> 19#include <linux/jiffies.h>
21#include <linux/i2c.h> 20#include <linux/i2c.h>
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index baee482aedfc..98a7d81e25c5 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -26,7 +26,6 @@
26#include <linux/hwmon-sysfs.h> 26#include <linux/hwmon-sysfs.h>
27#include <linux/err.h> 27#include <linux/err.h>
28#include <linux/mutex.h> 28#include <linux/mutex.h>
29#include <linux/delay.h>
30#include <linux/log2.h> 29#include <linux/log2.h>
31#include <linux/slab.h> 30#include <linux/slab.h>
32 31
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 861c756e9536..989e54c39252 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -20,6 +20,7 @@
20#include <linux/hwmon-sysfs.h> 20#include <linux/hwmon-sysfs.h>
21#include <linux/hwmon-vid.h> 21#include <linux/hwmon-vid.h>
22#include <linux/err.h> 22#include <linux/err.h>
23#include <linux/jiffies.h>
23 24
24/* Indexes for the sysfs hooks */ 25/* Indexes for the sysfs hooks */
25 26
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 8f3f6f2c45fd..b41baffa20f0 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -43,6 +43,7 @@
43#include <linux/leds.h> 43#include <linux/leds.h>
44#include <linux/hwmon.h> 44#include <linux/hwmon.h>
45#include <linux/workqueue.h> 45#include <linux/workqueue.h>
46#include <linux/err.h>
46 47
47/* data port used by Apple SMC */ 48/* data port used by Apple SMC */
48#define APPLESMC_DATA_PORT 0x300 49#define APPLESMC_DATA_PORT 0x300
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index cccb0e9d45b4..56dbcfb3e301 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -14,6 +14,8 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/dmi.h> 16#include <linux/dmi.h>
17#include <linux/jiffies.h>
18#include <linux/err.h>
17 19
18#include <acpi/acpi.h> 20#include <acpi/acpi.h>
19#include <acpi/acpixf.h> 21#include <acpi/acpixf.h>
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 984a3f13923b..47b8d84b489d 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -205,8 +205,11 @@ static const struct tjmax __cpuinitconst tjmax_table[] = {
205 { "CPU N455", 100000 }, 205 { "CPU N455", 100000 },
206 { "CPU N470", 100000 }, 206 { "CPU N470", 100000 },
207 { "CPU N475", 100000 }, 207 { "CPU N475", 100000 },
208 { "CPU 230", 100000 }, 208 { "CPU 230", 100000 }, /* Model 0x1c, stepping 2 */
209 { "CPU 330", 125000 }, 209 { "CPU 330", 125000 }, /* Model 0x1c, stepping 2 */
210 { "CPU CE4110", 110000 }, /* Model 0x1c, stepping 10 */
211 { "CPU CE4150", 110000 }, /* Model 0x1c, stepping 10 */
212 { "CPU CE4170", 110000 }, /* Model 0x1c, stepping 10 */
210}; 213};
211 214
212static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, 215static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index fc65f2d3ec91..b8d01c5f5713 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -12,7 +12,6 @@
12 * 12 *
13 */ 13 */
14 14
15#include <linux/delay.h>
16#include <linux/err.h> 15#include <linux/err.h>
17#include <linux/hwmon.h> 16#include <linux/hwmon.h>
18#include <linux/hwmon-sysfs.h> 17#include <linux/hwmon-sysfs.h>
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 68ab94bde3f1..142e1cb8dea7 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -33,6 +33,7 @@
33#include <linux/err.h> 33#include <linux/err.h>
34#include <linux/sysfs.h> 34#include <linux/sysfs.h>
35#include <linux/mutex.h> 35#include <linux/mutex.h>
36#include <linux/jiffies.h>
36 37
37#define THERMAL_PID_REG 0xfd 38#define THERMAL_PID_REG 0xfd
38#define THERMAL_SMSC_ID_REG 0xfe 39#define THERMAL_SMSC_ID_REG 0xfe
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index ada12a98a97c..a98c917b5888 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -18,7 +18,6 @@
18 */ 18 */
19 19
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/delay.h>
22#include <linux/init.h> 21#include <linux/init.h>
23#include <linux/slab.h> 22#include <linux/slab.h>
24#include <linux/jiffies.h> 23#include <linux/jiffies.h>
diff --git a/drivers/hwmon/exynos4_tmu.c b/drivers/hwmon/exynos4_tmu.c
deleted file mode 100644
index e912059140cd..000000000000
--- a/drivers/hwmon/exynos4_tmu.c
+++ /dev/null
@@ -1,518 +0,0 @@
1/*
2 * exynos4_tmu.c - Samsung EXYNOS4 TMU (Thermal Management Unit)
3 *
4 * Copyright (C) 2011 Samsung Electronics
5 * Donggeun Kim <dg77.kim@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/module.h>
24#include <linux/err.h>
25#include <linux/kernel.h>
26#include <linux/slab.h>
27#include <linux/platform_device.h>
28#include <linux/interrupt.h>
29#include <linux/clk.h>
30#include <linux/workqueue.h>
31#include <linux/sysfs.h>
32#include <linux/kobject.h>
33#include <linux/io.h>
34#include <linux/mutex.h>
35
36#include <linux/hwmon.h>
37#include <linux/hwmon-sysfs.h>
38
39#include <linux/platform_data/exynos4_tmu.h>
40
41#define EXYNOS4_TMU_REG_TRIMINFO 0x0
42#define EXYNOS4_TMU_REG_CONTROL 0x20
43#define EXYNOS4_TMU_REG_STATUS 0x28
44#define EXYNOS4_TMU_REG_CURRENT_TEMP 0x40
45#define EXYNOS4_TMU_REG_THRESHOLD_TEMP 0x44
46#define EXYNOS4_TMU_REG_TRIG_LEVEL0 0x50
47#define EXYNOS4_TMU_REG_TRIG_LEVEL1 0x54
48#define EXYNOS4_TMU_REG_TRIG_LEVEL2 0x58
49#define EXYNOS4_TMU_REG_TRIG_LEVEL3 0x5C
50#define EXYNOS4_TMU_REG_PAST_TEMP0 0x60
51#define EXYNOS4_TMU_REG_PAST_TEMP1 0x64
52#define EXYNOS4_TMU_REG_PAST_TEMP2 0x68
53#define EXYNOS4_TMU_REG_PAST_TEMP3 0x6C
54#define EXYNOS4_TMU_REG_INTEN 0x70
55#define EXYNOS4_TMU_REG_INTSTAT 0x74
56#define EXYNOS4_TMU_REG_INTCLEAR 0x78
57
58#define EXYNOS4_TMU_GAIN_SHIFT 8
59#define EXYNOS4_TMU_REF_VOLTAGE_SHIFT 24
60
61#define EXYNOS4_TMU_TRIM_TEMP_MASK 0xff
62#define EXYNOS4_TMU_CORE_ON 3
63#define EXYNOS4_TMU_CORE_OFF 2
64#define EXYNOS4_TMU_DEF_CODE_TO_TEMP_OFFSET 50
65#define EXYNOS4_TMU_TRIG_LEVEL0_MASK 0x1
66#define EXYNOS4_TMU_TRIG_LEVEL1_MASK 0x10
67#define EXYNOS4_TMU_TRIG_LEVEL2_MASK 0x100
68#define EXYNOS4_TMU_TRIG_LEVEL3_MASK 0x1000
69#define EXYNOS4_TMU_INTCLEAR_VAL 0x1111
70
71struct exynos4_tmu_data {
72 struct exynos4_tmu_platform_data *pdata;
73 struct device *hwmon_dev;
74 struct resource *mem;
75 void __iomem *base;
76 int irq;
77 struct work_struct irq_work;
78 struct mutex lock;
79 struct clk *clk;
80 u8 temp_error1, temp_error2;
81};
82
83/*
84 * TMU treats temperature as a mapped temperature code.
85 * The temperature is converted differently depending on the calibration type.
86 */
87static int temp_to_code(struct exynos4_tmu_data *data, u8 temp)
88{
89 struct exynos4_tmu_platform_data *pdata = data->pdata;
90 int temp_code;
91
92 /* temp should range between 25 and 125 */
93 if (temp < 25 || temp > 125) {
94 temp_code = -EINVAL;
95 goto out;
96 }
97
98 switch (pdata->cal_type) {
99 case TYPE_TWO_POINT_TRIMMING:
100 temp_code = (temp - 25) *
101 (data->temp_error2 - data->temp_error1) /
102 (85 - 25) + data->temp_error1;
103 break;
104 case TYPE_ONE_POINT_TRIMMING:
105 temp_code = temp + data->temp_error1 - 25;
106 break;
107 default:
108 temp_code = temp + EXYNOS4_TMU_DEF_CODE_TO_TEMP_OFFSET;
109 break;
110 }
111out:
112 return temp_code;
113}
114
115/*
116 * Calculate a temperature value from a temperature code.
117 * The unit of the temperature is degree Celsius.
118 */
119static int code_to_temp(struct exynos4_tmu_data *data, u8 temp_code)
120{
121 struct exynos4_tmu_platform_data *pdata = data->pdata;
122 int temp;
123
124 /* temp_code should range between 75 and 175 */
125 if (temp_code < 75 || temp_code > 175) {
126 temp = -ENODATA;
127 goto out;
128 }
129
130 switch (pdata->cal_type) {
131 case TYPE_TWO_POINT_TRIMMING:
132 temp = (temp_code - data->temp_error1) * (85 - 25) /
133 (data->temp_error2 - data->temp_error1) + 25;
134 break;
135 case TYPE_ONE_POINT_TRIMMING:
136 temp = temp_code - data->temp_error1 + 25;
137 break;
138 default:
139 temp = temp_code - EXYNOS4_TMU_DEF_CODE_TO_TEMP_OFFSET;
140 break;
141 }
142out:
143 return temp;
144}
145
146static int exynos4_tmu_initialize(struct platform_device *pdev)
147{
148 struct exynos4_tmu_data *data = platform_get_drvdata(pdev);
149 struct exynos4_tmu_platform_data *pdata = data->pdata;
150 unsigned int status, trim_info;
151 int ret = 0, threshold_code;
152
153 mutex_lock(&data->lock);
154 clk_enable(data->clk);
155
156 status = readb(data->base + EXYNOS4_TMU_REG_STATUS);
157 if (!status) {
158 ret = -EBUSY;
159 goto out;
160 }
161
162 /* Save trimming info in order to perform calibration */
163 trim_info = readl(data->base + EXYNOS4_TMU_REG_TRIMINFO);
164 data->temp_error1 = trim_info & EXYNOS4_TMU_TRIM_TEMP_MASK;
165 data->temp_error2 = ((trim_info >> 8) & EXYNOS4_TMU_TRIM_TEMP_MASK);
166
167 /* Write temperature code for threshold */
168 threshold_code = temp_to_code(data, pdata->threshold);
169 if (threshold_code < 0) {
170 ret = threshold_code;
171 goto out;
172 }
173 writeb(threshold_code,
174 data->base + EXYNOS4_TMU_REG_THRESHOLD_TEMP);
175
176 writeb(pdata->trigger_levels[0],
177 data->base + EXYNOS4_TMU_REG_TRIG_LEVEL0);
178 writeb(pdata->trigger_levels[1],
179 data->base + EXYNOS4_TMU_REG_TRIG_LEVEL1);
180 writeb(pdata->trigger_levels[2],
181 data->base + EXYNOS4_TMU_REG_TRIG_LEVEL2);
182 writeb(pdata->trigger_levels[3],
183 data->base + EXYNOS4_TMU_REG_TRIG_LEVEL3);
184
185 writel(EXYNOS4_TMU_INTCLEAR_VAL,
186 data->base + EXYNOS4_TMU_REG_INTCLEAR);
187out:
188 clk_disable(data->clk);
189 mutex_unlock(&data->lock);
190
191 return ret;
192}
193
194static void exynos4_tmu_control(struct platform_device *pdev, bool on)
195{
196 struct exynos4_tmu_data *data = platform_get_drvdata(pdev);
197 struct exynos4_tmu_platform_data *pdata = data->pdata;
198 unsigned int con, interrupt_en;
199
200 mutex_lock(&data->lock);
201 clk_enable(data->clk);
202
203 con = pdata->reference_voltage << EXYNOS4_TMU_REF_VOLTAGE_SHIFT |
204 pdata->gain << EXYNOS4_TMU_GAIN_SHIFT;
205 if (on) {
206 con |= EXYNOS4_TMU_CORE_ON;
207 interrupt_en = pdata->trigger_level3_en << 12 |
208 pdata->trigger_level2_en << 8 |
209 pdata->trigger_level1_en << 4 |
210 pdata->trigger_level0_en;
211 } else {
212 con |= EXYNOS4_TMU_CORE_OFF;
213 interrupt_en = 0; /* Disable all interrupts */
214 }
215 writel(interrupt_en, data->base + EXYNOS4_TMU_REG_INTEN);
216 writel(con, data->base + EXYNOS4_TMU_REG_CONTROL);
217
218 clk_disable(data->clk);
219 mutex_unlock(&data->lock);
220}
221
222static int exynos4_tmu_read(struct exynos4_tmu_data *data)
223{
224 u8 temp_code;
225 int temp;
226
227 mutex_lock(&data->lock);
228 clk_enable(data->clk);
229
230 temp_code = readb(data->base + EXYNOS4_TMU_REG_CURRENT_TEMP);
231 temp = code_to_temp(data, temp_code);
232
233 clk_disable(data->clk);
234 mutex_unlock(&data->lock);
235
236 return temp;
237}
238
239static void exynos4_tmu_work(struct work_struct *work)
240{
241 struct exynos4_tmu_data *data = container_of(work,
242 struct exynos4_tmu_data, irq_work);
243
244 mutex_lock(&data->lock);
245 clk_enable(data->clk);
246
247 writel(EXYNOS4_TMU_INTCLEAR_VAL, data->base + EXYNOS4_TMU_REG_INTCLEAR);
248
249 kobject_uevent(&data->hwmon_dev->kobj, KOBJ_CHANGE);
250
251 enable_irq(data->irq);
252
253 clk_disable(data->clk);
254 mutex_unlock(&data->lock);
255}
256
257static irqreturn_t exynos4_tmu_irq(int irq, void *id)
258{
259 struct exynos4_tmu_data *data = id;
260
261 disable_irq_nosync(irq);
262 schedule_work(&data->irq_work);
263
264 return IRQ_HANDLED;
265}
266
267static ssize_t exynos4_tmu_show_name(struct device *dev,
268 struct device_attribute *attr, char *buf)
269{
270 return sprintf(buf, "exynos4-tmu\n");
271}
272
273static ssize_t exynos4_tmu_show_temp(struct device *dev,
274 struct device_attribute *attr, char *buf)
275{
276 struct exynos4_tmu_data *data = dev_get_drvdata(dev);
277 int ret;
278
279 ret = exynos4_tmu_read(data);
280 if (ret < 0)
281 return ret;
282
283 /* convert from degree Celsius to millidegree Celsius */
284 return sprintf(buf, "%d\n", ret * 1000);
285}
286
287static ssize_t exynos4_tmu_show_alarm(struct device *dev,
288 struct device_attribute *devattr, char *buf)
289{
290 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
291 struct exynos4_tmu_data *data = dev_get_drvdata(dev);
292 struct exynos4_tmu_platform_data *pdata = data->pdata;
293 int temp;
294 unsigned int trigger_level;
295
296 temp = exynos4_tmu_read(data);
297 if (temp < 0)
298 return temp;
299
300 trigger_level = pdata->threshold + pdata->trigger_levels[attr->index];
301
302 return sprintf(buf, "%d\n", !!(temp > trigger_level));
303}
304
305static ssize_t exynos4_tmu_show_level(struct device *dev,
306 struct device_attribute *devattr, char *buf)
307{
308 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
309 struct exynos4_tmu_data *data = dev_get_drvdata(dev);
310 struct exynos4_tmu_platform_data *pdata = data->pdata;
311 unsigned int temp = pdata->threshold +
312 pdata->trigger_levels[attr->index];
313
314 return sprintf(buf, "%u\n", temp * 1000);
315}
316
317static DEVICE_ATTR(name, S_IRUGO, exynos4_tmu_show_name, NULL);
318static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, exynos4_tmu_show_temp, NULL, 0);
319
320static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
321 exynos4_tmu_show_alarm, NULL, 1);
322static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO,
323 exynos4_tmu_show_alarm, NULL, 2);
324static SENSOR_DEVICE_ATTR(temp1_emergency_alarm, S_IRUGO,
325 exynos4_tmu_show_alarm, NULL, 3);
326
327static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, exynos4_tmu_show_level, NULL, 1);
328static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, exynos4_tmu_show_level, NULL, 2);
329static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO,
330 exynos4_tmu_show_level, NULL, 3);
331
332static struct attribute *exynos4_tmu_attributes[] = {
333 &dev_attr_name.attr,
334 &sensor_dev_attr_temp1_input.dev_attr.attr,
335 &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
336 &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
337 &sensor_dev_attr_temp1_emergency_alarm.dev_attr.attr,
338 &sensor_dev_attr_temp1_max.dev_attr.attr,
339 &sensor_dev_attr_temp1_crit.dev_attr.attr,
340 &sensor_dev_attr_temp1_emergency.dev_attr.attr,
341 NULL,
342};
343
344static const struct attribute_group exynos4_tmu_attr_group = {
345 .attrs = exynos4_tmu_attributes,
346};
347
348static int __devinit exynos4_tmu_probe(struct platform_device *pdev)
349{
350 struct exynos4_tmu_data *data;
351 struct exynos4_tmu_platform_data *pdata = pdev->dev.platform_data;
352 int ret;
353
354 if (!pdata) {
355 dev_err(&pdev->dev, "No platform init data supplied.\n");
356 return -ENODEV;
357 }
358
359 data = kzalloc(sizeof(struct exynos4_tmu_data), GFP_KERNEL);
360 if (!data) {
361 dev_err(&pdev->dev, "Failed to allocate driver structure\n");
362 return -ENOMEM;
363 }
364
365 data->irq = platform_get_irq(pdev, 0);
366 if (data->irq < 0) {
367 ret = data->irq;
368 dev_err(&pdev->dev, "Failed to get platform irq\n");
369 goto err_free;
370 }
371
372 INIT_WORK(&data->irq_work, exynos4_tmu_work);
373
374 data->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
375 if (!data->mem) {
376 ret = -ENOENT;
377 dev_err(&pdev->dev, "Failed to get platform resource\n");
378 goto err_free;
379 }
380
381 data->mem = request_mem_region(data->mem->start,
382 resource_size(data->mem), pdev->name);
383 if (!data->mem) {
384 ret = -ENODEV;
385 dev_err(&pdev->dev, "Failed to request memory region\n");
386 goto err_free;
387 }
388
389 data->base = ioremap(data->mem->start, resource_size(data->mem));
390 if (!data->base) {
391 ret = -ENODEV;
392 dev_err(&pdev->dev, "Failed to ioremap memory\n");
393 goto err_mem_region;
394 }
395
396 ret = request_irq(data->irq, exynos4_tmu_irq,
397 IRQF_TRIGGER_RISING,
398 "exynos4-tmu", data);
399 if (ret) {
400 dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
401 goto err_io_remap;
402 }
403
404 data->clk = clk_get(NULL, "tmu_apbif");
405 if (IS_ERR(data->clk)) {
406 ret = PTR_ERR(data->clk);
407 dev_err(&pdev->dev, "Failed to get clock\n");
408 goto err_irq;
409 }
410
411 data->pdata = pdata;
412 platform_set_drvdata(pdev, data);
413 mutex_init(&data->lock);
414
415 ret = exynos4_tmu_initialize(pdev);
416 if (ret) {
417 dev_err(&pdev->dev, "Failed to initialize TMU\n");
418 goto err_clk;
419 }
420
421 ret = sysfs_create_group(&pdev->dev.kobj, &exynos4_tmu_attr_group);
422 if (ret) {
423 dev_err(&pdev->dev, "Failed to create sysfs group\n");
424 goto err_clk;
425 }
426
427 data->hwmon_dev = hwmon_device_register(&pdev->dev);
428 if (IS_ERR(data->hwmon_dev)) {
429 ret = PTR_ERR(data->hwmon_dev);
430 dev_err(&pdev->dev, "Failed to register hwmon device\n");
431 goto err_create_group;
432 }
433
434 exynos4_tmu_control(pdev, true);
435
436 return 0;
437
438err_create_group:
439 sysfs_remove_group(&pdev->dev.kobj, &exynos4_tmu_attr_group);
440err_clk:
441 platform_set_drvdata(pdev, NULL);
442 clk_put(data->clk);
443err_irq:
444 free_irq(data->irq, data);
445err_io_remap:
446 iounmap(data->base);
447err_mem_region:
448 release_mem_region(data->mem->start, resource_size(data->mem));
449err_free:
450 kfree(data);
451
452 return ret;
453}
454
455static int __devexit exynos4_tmu_remove(struct platform_device *pdev)
456{
457 struct exynos4_tmu_data *data = platform_get_drvdata(pdev);
458
459 exynos4_tmu_control(pdev, false);
460
461 hwmon_device_unregister(data->hwmon_dev);
462 sysfs_remove_group(&pdev->dev.kobj, &exynos4_tmu_attr_group);
463
464 clk_put(data->clk);
465
466 free_irq(data->irq, data);
467
468 iounmap(data->base);
469 release_mem_region(data->mem->start, resource_size(data->mem));
470
471 platform_set_drvdata(pdev, NULL);
472
473 kfree(data);
474
475 return 0;
476}
477
478#ifdef CONFIG_PM_SLEEP
479static int exynos4_tmu_suspend(struct device *dev)
480{
481 exynos4_tmu_control(to_platform_device(dev), false);
482
483 return 0;
484}
485
486static int exynos4_tmu_resume(struct device *dev)
487{
488 struct platform_device *pdev = to_platform_device(dev);
489
490 exynos4_tmu_initialize(pdev);
491 exynos4_tmu_control(pdev, true);
492
493 return 0;
494}
495
496static SIMPLE_DEV_PM_OPS(exynos4_tmu_pm,
497 exynos4_tmu_suspend, exynos4_tmu_resume);
498#define EXYNOS4_TMU_PM &exynos4_tmu_pm
499#else
500#define EXYNOS4_TMU_PM NULL
501#endif
502
503static struct platform_driver exynos4_tmu_driver = {
504 .driver = {
505 .name = "exynos4-tmu",
506 .owner = THIS_MODULE,
507 .pm = EXYNOS4_TMU_PM,
508 },
509 .probe = exynos4_tmu_probe,
510 .remove = __devexit_p(exynos4_tmu_remove),
511};
512
513module_platform_driver(exynos4_tmu_driver);
514
515MODULE_DESCRIPTION("EXYNOS4 TMU Driver");
516MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
517MODULE_LICENSE("GPL");
518MODULE_ALIAS("platform:exynos4-tmu");
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index 68ad7d255512..4f4110407387 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -2,7 +2,7 @@
2 * fam15h_power.c - AMD Family 15h processor power monitoring 2 * fam15h_power.c - AMD Family 15h processor power monitoring
3 * 3 *
4 * Copyright (c) 2011 Advanced Micro Devices, Inc. 4 * Copyright (c) 2011 Advanced Micro Devices, Inc.
5 * Author: Andreas Herrmann <andreas.herrmann3@amd.com> 5 * Author: Andreas Herrmann <herrmann.der.user@googlemail.com>
6 * 6 *
7 * 7 *
8 * This driver is free software; you can redistribute it and/or 8 * This driver is free software; you can redistribute it and/or
@@ -28,7 +28,7 @@
28#include <asm/processor.h> 28#include <asm/processor.h>
29 29
30MODULE_DESCRIPTION("AMD Family 15h CPU processor power monitor"); 30MODULE_DESCRIPTION("AMD Family 15h CPU processor power monitor");
31MODULE_AUTHOR("Andreas Herrmann <andreas.herrmann3@amd.com>"); 31MODULE_AUTHOR("Andreas Herrmann <herrmann.der.user@googlemail.com>");
32MODULE_LICENSE("GPL"); 32MODULE_LICENSE("GPL");
33 33
34/* D18F3 */ 34/* D18F3 */
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 36509ae32083..1381a2e3bbd4 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -630,7 +630,9 @@ static struct platform_driver gpio_fan_driver = {
630 .driver = { 630 .driver = {
631 .name = "gpio-fan", 631 .name = "gpio-fan",
632 .pm = GPIO_FAN_PM, 632 .pm = GPIO_FAN_PM,
633#ifdef CONFIG_OF_GPIO
633 .of_match_table = of_match_ptr(of_gpio_fan_match), 634 .of_match_table = of_match_ptr(of_gpio_fan_match),
635#endif
634 }, 636 },
635}; 637};
636 638
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
index e8ee75f55472..9a675efaa78d 100644
--- a/drivers/hwmon/hih6130.c
+++ b/drivers/hwmon/hih6130.c
@@ -33,6 +33,7 @@
33#include <linux/mutex.h> 33#include <linux/mutex.h>
34#include <linux/device.h> 34#include <linux/device.h>
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/jiffies.h>
36 37
37/** 38/**
38 * struct hih6130 - HIH-6130 device specific data 39 * struct hih6130 - HIH-6130 device specific data
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
index a18882cc073d..46141abaafba 100644
--- a/drivers/hwmon/i5k_amb.c
+++ b/drivers/hwmon/i5k_amb.c
@@ -21,12 +21,10 @@
21 */ 21 */
22 22
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/jiffies.h>
25#include <linux/hwmon.h> 24#include <linux/hwmon.h>
26#include <linux/hwmon-sysfs.h> 25#include <linux/hwmon-sysfs.h>
27#include <linux/err.h> 26#include <linux/err.h>
28#include <linux/mutex.h> 27#include <linux/mutex.h>
29#include <linux/delay.h>
30#include <linux/log2.h> 28#include <linux/log2.h>
31#include <linux/pci.h> 29#include <linux/pci.h>
32#include <linux/platform_device.h> 30#include <linux/platform_device.h>
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 37f17e0d9d5d..a14f634248e7 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -36,6 +36,7 @@
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/math64.h> 37#include <linux/math64.h>
38#include <linux/time.h> 38#include <linux/time.h>
39#include <linux/err.h>
39 40
40#define REFRESH_INTERVAL (HZ) 41#define REFRESH_INTERVAL (HZ)
41#define IPMI_TIMEOUT (30 * HZ) 42#define IPMI_TIMEOUT (30 * HZ)
diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
index 41dbf8161ed7..b622a93ec32c 100644
--- a/drivers/hwmon/ibmpex.c
+++ b/drivers/hwmon/ibmpex.c
@@ -26,6 +26,7 @@
26#include <linux/jiffies.h> 26#include <linux/jiffies.h>
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/err.h>
29 30
30#define REFRESH_INTERVAL (2 * HZ) 31#define REFRESH_INTERVAL (2 * HZ)
31#define DRVNAME "ibmpex" 32#define DRVNAME "ibmpex"
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 70717d4a5e89..2b726346f8fa 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -33,6 +33,7 @@
33#include <linux/i2c.h> 33#include <linux/i2c.h>
34#include <linux/hwmon.h> 34#include <linux/hwmon.h>
35#include <linux/hwmon-sysfs.h> 35#include <linux/hwmon-sysfs.h>
36#include <linux/jiffies.h>
36 37
37#include <linux/platform_data/ina2xx.h> 38#include <linux/platform_data/ina2xx.h>
38 39
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index 49a69c5b3b8d..e8c7fb0bbf95 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -22,7 +22,6 @@
22 */ 22 */
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/delay.h>
26#include <linux/init.h> 25#include <linux/init.h>
27#include <linux/slab.h> 26#include <linux/slab.h>
28#include <linux/jiffies.h> 27#include <linux/jiffies.h>
diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c
index bd75d2415432..41df29f59b0e 100644
--- a/drivers/hwmon/lineage-pem.c
+++ b/drivers/hwmon/lineage-pem.c
@@ -29,6 +29,7 @@
29#include <linux/i2c.h> 29#include <linux/i2c.h>
30#include <linux/hwmon.h> 30#include <linux/hwmon.h>
31#include <linux/hwmon-sysfs.h> 31#include <linux/hwmon-sysfs.h>
32#include <linux/jiffies.h>
32 33
33/* 34/*
34 * This driver supports various Lineage Compact Power Line DC/DC and AC/DC 35 * This driver supports various Lineage Compact Power Line DC/DC and AC/DC
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index 2282d77e83e8..71626f3c8742 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -48,6 +48,7 @@
48#include <linux/hwmon-sysfs.h> 48#include <linux/hwmon-sysfs.h>
49#include <linux/err.h> 49#include <linux/err.h>
50#include <linux/mutex.h> 50#include <linux/mutex.h>
51#include <linux/jiffies.h>
51 52
52/* 53/*
53 * The LM92 and MAX6635 have 2 two-state pins for address selection, 54 * The LM92 and MAX6635 have 2 two-state pins for address selection,
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index c3d4255ed154..1a003f73e4e4 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -47,6 +47,7 @@
47#include <linux/hwmon-vid.h> 47#include <linux/hwmon-vid.h>
48#include <linux/err.h> 48#include <linux/err.h>
49#include <linux/delay.h> 49#include <linux/delay.h>
50#include <linux/jiffies.h>
50 51
51/* LM93 REGISTER ADDRESSES */ 52/* LM93 REGISTER ADDRESSES */
52 53
diff --git a/drivers/hwmon/ltc4151.c b/drivers/hwmon/ltc4151.c
index 8496baa08bc8..4319a94f549d 100644
--- a/drivers/hwmon/ltc4151.c
+++ b/drivers/hwmon/ltc4151.c
@@ -36,6 +36,7 @@
36#include <linux/i2c.h> 36#include <linux/i2c.h>
37#include <linux/hwmon.h> 37#include <linux/hwmon.h>
38#include <linux/hwmon-sysfs.h> 38#include <linux/hwmon-sysfs.h>
39#include <linux/jiffies.h>
39 40
40/* chip registers */ 41/* chip registers */
41#define LTC4151_SENSE_H 0x00 42#define LTC4151_SENSE_H 0x00
diff --git a/drivers/hwmon/ltc4215.c b/drivers/hwmon/ltc4215.c
index 98b3d04f98b7..e8876108a6b3 100644
--- a/drivers/hwmon/ltc4215.c
+++ b/drivers/hwmon/ltc4215.c
@@ -19,6 +19,7 @@
19#include <linux/i2c.h> 19#include <linux/i2c.h>
20#include <linux/hwmon.h> 20#include <linux/hwmon.h>
21#include <linux/hwmon-sysfs.h> 21#include <linux/hwmon-sysfs.h>
22#include <linux/jiffies.h>
22 23
23/* Here are names of the chip's registers (a.k.a. commands) */ 24/* Here are names of the chip's registers (a.k.a. commands) */
24enum ltc4215_cmd { 25enum ltc4215_cmd {
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index 52075914eb0b..3653f79dc2de 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -21,6 +21,7 @@
21#include <linux/i2c.h> 21#include <linux/i2c.h>
22#include <linux/hwmon.h> 22#include <linux/hwmon.h>
23#include <linux/hwmon-sysfs.h> 23#include <linux/hwmon-sysfs.h>
24#include <linux/jiffies.h>
24#include <linux/i2c/ltc4245.h> 25#include <linux/i2c/ltc4245.h>
25 26
26/* Here are names of the chip's registers (a.k.a. commands) */ 27/* Here are names of the chip's registers (a.k.a. commands) */
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
index 77476a575c4e..84a2d2872b20 100644
--- a/drivers/hwmon/ltc4261.c
+++ b/drivers/hwmon/ltc4261.c
@@ -33,6 +33,7 @@
33#include <linux/i2c.h> 33#include <linux/i2c.h>
34#include <linux/hwmon.h> 34#include <linux/hwmon.h>
35#include <linux/hwmon-sysfs.h> 35#include <linux/hwmon-sysfs.h>
36#include <linux/jiffies.h>
36 37
37/* chip registers */ 38/* chip registers */
38#define LTC4261_STATUS 0x00 /* readonly */ 39#define LTC4261_STATUS 0x00 /* readonly */
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index 019427d7a5fd..e0019c69d1bb 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -22,7 +22,6 @@
22#include <linux/i2c.h> 22#include <linux/i2c.h>
23#include <linux/hwmon.h> 23#include <linux/hwmon.h>
24#include <linux/hwmon-sysfs.h> 24#include <linux/hwmon-sysfs.h>
25#include <linux/delay.h>
26#include <linux/jiffies.h> 25#include <linux/jiffies.h>
27 26
28enum chips { max16065, max16066, max16067, max16068, max16070, max16071 }; 27enum chips { max16065, max16066, max16067, max16068, max16070, max16071 };
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index 6c11ec214071..445e5d40ac82 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * max1619.c - Part of lm_sensors, Linux kernel modules for hardware 2 * max1619.c - Part of lm_sensors, Linux kernel modules for hardware
3 * monitoring 3 * monitoring
4 * Copyright (C) 2003-2004 Alexey Fisher <fishor@mail.ru> 4 * Copyright (C) 2003-2004 Oleksij Rempel <bug-track@fisher-privat.net>
5 * Jean Delvare <khali@linux-fr.org> 5 * Jean Delvare <khali@linux-fr.org>
6 * 6 *
7 * Based on the lm90 driver. The MAX1619 is a sensor chip made by Maxim. 7 * Based on the lm90 driver. The MAX1619 is a sensor chip made by Maxim.
@@ -357,7 +357,7 @@ static struct max1619_data *max1619_update_device(struct device *dev)
357 357
358module_i2c_driver(max1619_driver); 358module_i2c_driver(max1619_driver);
359 359
360MODULE_AUTHOR("Alexey Fisher <fishor@mail.ru> and " 360MODULE_AUTHOR("Oleksij Rempel <bug-track@fisher-privat.net> and "
361 "Jean Delvare <khali@linux-fr.org>"); 361 "Jean Delvare <khali@linux-fr.org>");
362MODULE_DESCRIPTION("MAX1619 sensor driver"); 362MODULE_DESCRIPTION("MAX1619 sensor driver");
363MODULE_LICENSE("GPL"); 363MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index bf236c0782b7..223461a6d70f 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -7,7 +7,7 @@
7 * Derived from: 7 * Derived from:
8 * 8 *
9 * Based on the max1619 driver. 9 * Based on the max1619 driver.
10 * Copyright (C) 2003-2004 Alexey Fisher <fishor@mail.ru> 10 * Copyright (C) 2003-2004 Oleksij Rempel <bug-track@fisher-privat.net>
11 * Jean Delvare <khali@linux-fr.org> 11 * Jean Delvare <khali@linux-fr.org>
12 * 12 *
13 * The MAX6642 is a sensor chip made by Maxim. 13 * The MAX6642 is a sensor chip made by Maxim.
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 2ca6a5a4f5a7..60745a535821 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -4,7 +4,7 @@
4 4
5menuconfig PMBUS 5menuconfig PMBUS
6 tristate "PMBus support" 6 tristate "PMBus support"
7 depends on I2C && EXPERIMENTAL 7 depends on I2C
8 default n 8 default n
9 help 9 help
10 Say yes here if you want to enable PMBus support. 10 Say yes here if you want to enable PMBus support.
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 29b319db573e..7d19b1bb9ce6 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -26,7 +26,7 @@
26#include <linux/i2c.h> 26#include <linux/i2c.h>
27#include <linux/hwmon.h> 27#include <linux/hwmon.h>
28#include <linux/hwmon-sysfs.h> 28#include <linux/hwmon-sysfs.h>
29#include <linux/delay.h> 29#include <linux/jiffies.h>
30#include <linux/i2c/pmbus.h> 30#include <linux/i2c/pmbus.h>
31#include "pmbus.h" 31#include "pmbus.h"
32 32
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index fe11b95670bd..bcecd025fcc4 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -22,7 +22,6 @@
22 22
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/delay.h>
26#include <linux/io.h> 25#include <linux/io.h>
27#include <linux/init.h> 26#include <linux/init.h>
28#include <linux/err.h> 27#include <linux/err.h>
diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c
index c2565d04cd4a..5f67546950b1 100644
--- a/drivers/hwmon/sht21.c
+++ b/drivers/hwmon/sht21.c
@@ -29,6 +29,7 @@
29#include <linux/err.h> 29#include <linux/err.h>
30#include <linux/mutex.h> 30#include <linux/mutex.h>
31#include <linux/device.h> 31#include <linux/device.h>
32#include <linux/jiffies.h>
32 33
33/* I2C command bytes */ 34/* I2C command bytes */
34#define SHT21_TRIG_T_MEASUREMENT_HM 0xe3 35#define SHT21_TRIG_T_MEASUREMENT_HM 0xe3
diff --git a/drivers/hwmon/smm665.c b/drivers/hwmon/smm665.c
index cbc51fb30dba..d9e1b7de78da 100644
--- a/drivers/hwmon/smm665.c
+++ b/drivers/hwmon/smm665.c
@@ -24,6 +24,7 @@
24#include <linux/hwmon.h> 24#include <linux/hwmon.h>
25#include <linux/hwmon-sysfs.h> 25#include <linux/hwmon-sysfs.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/jiffies.h>
27 28
28/* Internal reference voltage (VREF, x 1000 */ 29/* Internal reference voltage (VREF, x 1000 */
29#define SMM665_VREF_ADC_X1000 1250 30#define SMM665_VREF_ADC_X1000 1250
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index 080c26370480..3c2c48d904e6 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -28,6 +28,7 @@
28#include <linux/hwmon-sysfs.h> 28#include <linux/hwmon-sysfs.h>
29#include <linux/err.h> 29#include <linux/err.h>
30#include <linux/mutex.h> 30#include <linux/mutex.h>
31#include <linux/jiffies.h>
31 32
32MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
33 34
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 4e1ff82c63e0..b8777e54190a 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -26,6 +26,7 @@
26#include <linux/err.h> 26#include <linux/err.h>
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/device.h> 28#include <linux/device.h>
29#include <linux/jiffies.h>
29 30
30#define DRIVER_NAME "tmp102" 31#define DRIVER_NAME "tmp102"
31 32
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index c315c59f61fe..44136bb6d045 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -12,6 +12,7 @@
12#include <linux/io.h> 12#include <linux/io.h>
13#include <linux/hwmon.h> 13#include <linux/hwmon.h>
14#include <linux/hwmon-sysfs.h> 14#include <linux/hwmon-sysfs.h>
15#include <linux/err.h>
15 16
16#define DRV_MODULE_VERSION "0.1" 17#define DRV_MODULE_VERSION "0.1"
17 18
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 93ea81a4bf35..39ab7bcc616e 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -41,6 +41,7 @@
41#include <linux/hwmon-sysfs.h> 41#include <linux/hwmon-sysfs.h>
42#include <linux/err.h> 42#include <linux/err.h>
43#include <linux/mutex.h> 43#include <linux/mutex.h>
44#include <linux/jiffies.h>
44 45
45#define NUMBER_OF_VIN 10 46#define NUMBER_OF_VIN 10
46#define NUMBER_OF_FANIN 5 47#define NUMBER_OF_FANIN 5
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 06d6f56d4f69..053645279f38 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -44,6 +44,7 @@
44#include <linux/err.h> 44#include <linux/err.h>
45#include <linux/mutex.h> 45#include <linux/mutex.h>
46#include <linux/sysfs.h> 46#include <linux/sysfs.h>
47#include <linux/jiffies.h>
47 48
48/* Addresses to scan */ 49/* Addresses to scan */
49static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, 50static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 4fc47e062071..99799fd1d917 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -46,6 +46,7 @@
46#include <linux/kref.h> 46#include <linux/kref.h>
47#include <linux/notifier.h> 47#include <linux/notifier.h>
48#include <linux/reboot.h> 48#include <linux/reboot.h>
49#include <linux/jiffies.h>
49 50
50/* Default values */ 51/* Default values */
51#define WATCHDOG_TIMEOUT 2 /* 2 minute default timeout */ 52#define WATCHDOG_TIMEOUT 2 /* 2 minute default timeout */
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index b813c646c7ca..55a4f4894531 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -34,7 +34,7 @@
34#include <linux/hwmon-sysfs.h> 34#include <linux/hwmon-sysfs.h>
35#include <linux/err.h> 35#include <linux/err.h>
36#include <linux/mutex.h> 36#include <linux/mutex.h>
37#include <linux/delay.h> 37#include <linux/jiffies.h>
38 38
39/* Addresses to scan */ 39/* Addresses to scan */
40static const unsigned short normal_i2c[] = { 40static const unsigned short normal_i2c[] = {
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index c99c8a0473cf..f0e8286c3c70 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -33,6 +33,7 @@
33#include <linux/hwmon-sysfs.h> 33#include <linux/hwmon-sysfs.h>
34#include <linux/err.h> 34#include <linux/err.h>
35#include <linux/mutex.h> 35#include <linux/mutex.h>
36#include <linux/jiffies.h>
36 37
37/* Addresses to scan */ 38/* Addresses to scan */
38static const unsigned short normal_i2c[] = { 0x2e, 0x2f, I2C_CLIENT_END }; 39static const unsigned short normal_i2c[] = { 0x2e, 0x2f, I2C_CLIENT_END };
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index beee6b2d361d..1722f50f2473 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o
8obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o 8obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o
9obj-$(CONFIG_I2C_MUX) += i2c-mux.o 9obj-$(CONFIG_I2C_MUX) += i2c-mux.o
10obj-y += algos/ busses/ muxes/ 10obj-y += algos/ busses/ muxes/
11obj-$(CONFIG_I2C_STUB) += i2c-stub.o
11 12
12ccflags-$(CONFIG_I2C_DEBUG_CORE) := -DDEBUG 13ccflags-$(CONFIG_I2C_DEBUG_CORE) := -DDEBUG
13CFLAGS_i2c-core.o := -Wno-deprecated-declarations 14CFLAGS_i2c-core.o := -Wno-deprecated-declarations
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 6f5f98d69af7..f892a424009b 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -46,14 +46,19 @@ static int i2c_debug;
46#define pca_set_con(adap, val) pca_outw(adap, I2C_PCA_CON, val) 46#define pca_set_con(adap, val) pca_outw(adap, I2C_PCA_CON, val)
47#define pca_get_con(adap) pca_inw(adap, I2C_PCA_CON) 47#define pca_get_con(adap) pca_inw(adap, I2C_PCA_CON)
48#define pca_wait(adap) adap->wait_for_completion(adap->data) 48#define pca_wait(adap) adap->wait_for_completion(adap->data)
49#define pca_reset(adap) adap->reset_chip(adap->data)
50 49
51static void pca9665_reset(void *pd) 50static void pca_reset(struct i2c_algo_pca_data *adap)
52{ 51{
53 struct i2c_algo_pca_data *adap = pd; 52 if (adap->chip == I2C_PCA_CHIP_9665) {
54 pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IPRESET); 53 /* Ignore the reset function from the module,
55 pca_outw(adap, I2C_PCA_IND, 0xA5); 54 * we can use the parallel bus reset.
56 pca_outw(adap, I2C_PCA_IND, 0x5A); 55 */
56 pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IPRESET);
57 pca_outw(adap, I2C_PCA_IND, 0xA5);
58 pca_outw(adap, I2C_PCA_IND, 0x5A);
59 } else {
60 adap->reset_chip(adap->data);
61 }
57} 62}
58 63
59/* 64/*
@@ -378,11 +383,12 @@ static unsigned int pca_probe_chip(struct i2c_adapter *adap)
378 pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_IADR); 383 pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_IADR);
379 if (pca_inw(pca_data, I2C_PCA_IND) == 0xAA) { 384 if (pca_inw(pca_data, I2C_PCA_IND) == 0xAA) {
380 printk(KERN_INFO "%s: PCA9665 detected.\n", adap->name); 385 printk(KERN_INFO "%s: PCA9665 detected.\n", adap->name);
381 return I2C_PCA_CHIP_9665; 386 pca_data->chip = I2C_PCA_CHIP_9665;
382 } else { 387 } else {
383 printk(KERN_INFO "%s: PCA9564 detected.\n", adap->name); 388 printk(KERN_INFO "%s: PCA9564 detected.\n", adap->name);
384 return I2C_PCA_CHIP_9564; 389 pca_data->chip = I2C_PCA_CHIP_9564;
385 } 390 }
391 return pca_data->chip;
386} 392}
387 393
388static int pca_init(struct i2c_adapter *adap) 394static int pca_init(struct i2c_adapter *adap)
@@ -456,11 +462,6 @@ static int pca_init(struct i2c_adapter *adap)
456 */ 462 */
457 int raise_fall_time; 463 int raise_fall_time;
458 464
459 /* Ignore the reset function from the module,
460 * we can use the parallel bus reset
461 */
462 pca_data->reset_chip = pca9665_reset;
463
464 if (pca_data->i2c_clock > 1265800) { 465 if (pca_data->i2c_clock > 1265800) {
465 printk(KERN_WARNING "%s: I2C clock speed too high." 466 printk(KERN_WARNING "%s: I2C clock speed too high."
466 " Using 1265.8kHz.\n", adap->name); 467 " Using 1265.8kHz.\n", adap->name);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index ff01c389e2da..e9df4612b7eb 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -81,7 +81,6 @@ config I2C_I801
81 tristate "Intel 82801 (ICH/PCH)" 81 tristate "Intel 82801 (ICH/PCH)"
82 depends on PCI 82 depends on PCI
83 select CHECK_SIGNATURE if X86 && DMI 83 select CHECK_SIGNATURE if X86 && DMI
84 select GPIOLIB if I2C_MUX
85 help 84 help
86 If you say yes to this option, support will be included for the Intel 85 If you say yes to this option, support will be included for the Intel
87 801 family of mainboard I2C interfaces. Specifically, the following 86 801 family of mainboard I2C interfaces. Specifically, the following
@@ -294,18 +293,21 @@ comment "I2C system bus drivers (mostly embedded / system-on-chip)"
294 293
295config I2C_AT91 294config I2C_AT91
296 tristate "Atmel AT91 I2C Two-Wire interface (TWI)" 295 tristate "Atmel AT91 I2C Two-Wire interface (TWI)"
297 depends on ARCH_AT91 && EXPERIMENTAL && BROKEN 296 depends on ARCH_AT91 && EXPERIMENTAL
298 help 297 help
299 This supports the use of the I2C interface on Atmel AT91 298 This supports the use of the I2C interface on Atmel AT91
300 processors. 299 processors.
301 300
302 This driver is BROKEN because the controller which it uses 301 A serious problem is that there is no documented way to issue
303 will easily trigger RX overrun and TX underrun errors. Using 302 repeated START conditions for more than two messages, as needed
304 low I2C clock rates may partially work around those issues
305 on some systems. Another serious problem is that there is no
306 documented way to issue repeated START conditions, as needed
307 to support combined I2C messages. Use the i2c-gpio driver 303 to support combined I2C messages. Use the i2c-gpio driver
308 unless your system can cope with those limitations. 304 unless your system can cope with this limitation.
305
306 Caution! at91rm9200, at91sam9261, at91sam9260, at91sam9263 devices
307 don't have clock stretching in transmission mode. For that reason,
308 you can encounter underrun issues causing premature stop sendings if
309 the latency to fill the transmission register is too long. If you
310 are facing this situation, use the i2c-gpio driver.
309 311
310config I2C_AU1550 312config I2C_AU1550
311 tristate "Au1550/Au1200/Au1300 SMBus interface" 313 tristate "Au1550/Au1200/Au1300 SMBus interface"
@@ -718,6 +720,16 @@ config I2C_XLR
718 This driver can also be built as a module. If so, the module 720 This driver can also be built as a module. If so, the module
719 will be called i2c-xlr. 721 will be called i2c-xlr.
720 722
723config I2C_RCAR
724 tristate "Renesas R-Car I2C Controller"
725 depends on ARCH_SHMOBILE && I2C
726 help
727 If you say yes to this option, support will be included for the
728 R-Car I2C controller.
729
730 This driver can also be built as a module. If so, the module
731 will be called i2c-rcar.
732
721comment "External I2C/SMBus adapter drivers" 733comment "External I2C/SMBus adapter drivers"
722 734
723config I2C_DIOLAN_U2C 735config I2C_DIOLAN_U2C
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 37c4182cc98b..395b516ffa08 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -71,6 +71,7 @@ obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
71obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o 71obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o
72obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o 72obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
73obj-$(CONFIG_I2C_XLR) += i2c-xlr.o 73obj-$(CONFIG_I2C_XLR) += i2c-xlr.o
74obj-$(CONFIG_I2C_RCAR) += i2c-rcar.o
74 75
75# External I2C/SMBus adapter drivers 76# External I2C/SMBus adapter drivers
76obj-$(CONFIG_I2C_DIOLAN_U2C) += i2c-diolan-u2c.o 77obj-$(CONFIG_I2C_DIOLAN_U2C) += i2c-diolan-u2c.o
@@ -84,7 +85,6 @@ obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o
84obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o 85obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o
85obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o 86obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o
86obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o 87obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
87obj-$(CONFIG_I2C_STUB) += i2c-stub.o
88obj-$(CONFIG_SCx200_ACB) += scx200_acb.o 88obj-$(CONFIG_SCx200_ACB) += scx200_acb.o
89obj-$(CONFIG_SCx200_I2C) += scx200_i2c.o 89obj-$(CONFIG_SCx200_I2C) += scx200_i2c.o
90 90
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index e24484beef07..aa59a254be2c 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -1,315 +1,554 @@
1/* 1/*
2 i2c Support for Atmel's AT91 Two-Wire Interface (TWI) 2 * i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
3 3 *
4 Copyright (C) 2004 Rick Bronson 4 * Copyright (C) 2011 Weinmann Medical GmbH
5 Converted to 2.6 by Andrew Victor <andrew@sanpeople.com> 5 * Author: Nikolaus Voss <n.voss@weinmann.de>
6 6 *
7 Borrowed heavily from original work by: 7 * Evolved from original work by:
8 Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com> 8 * Copyright (C) 2004 Rick Bronson
9 9 * Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
10 This program is free software; you can redistribute it and/or modify 10 *
11 it under the terms of the GNU General Public License as published by 11 * Borrowed heavily from original work by:
12 the Free Software Foundation; either version 2 of the License, or 12 * Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
13 (at your option) any later version. 13 *
14*/ 14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 */
15 19
16#include <linux/module.h> 20#include <linux/clk.h>
17#include <linux/kernel.h> 21#include <linux/completion.h>
18#include <linux/err.h> 22#include <linux/err.h>
19#include <linux/slab.h>
20#include <linux/types.h>
21#include <linux/delay.h>
22#include <linux/i2c.h> 23#include <linux/i2c.h>
23#include <linux/init.h> 24#include <linux/interrupt.h>
24#include <linux/clk.h>
25#include <linux/platform_device.h>
26#include <linux/io.h> 25#include <linux/io.h>
26#include <linux/module.h>
27#include <linux/of.h>
28#include <linux/of_device.h>
29#include <linux/of_i2c.h>
30#include <linux/platform_device.h>
31#include <linux/slab.h>
32
33#define TWI_CLK_HZ 100000 /* max 400 Kbits/s */
34#define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */
35
36/* AT91 TWI register definitions */
37#define AT91_TWI_CR 0x0000 /* Control Register */
38#define AT91_TWI_START 0x0001 /* Send a Start Condition */
39#define AT91_TWI_STOP 0x0002 /* Send a Stop Condition */
40#define AT91_TWI_MSEN 0x0004 /* Master Transfer Enable */
41#define AT91_TWI_SVDIS 0x0020 /* Slave Transfer Disable */
42#define AT91_TWI_SWRST 0x0080 /* Software Reset */
43
44#define AT91_TWI_MMR 0x0004 /* Master Mode Register */
45#define AT91_TWI_IADRSZ_1 0x0100 /* Internal Device Address Size */
46#define AT91_TWI_MREAD 0x1000 /* Master Read Direction */
47
48#define AT91_TWI_IADR 0x000c /* Internal Address Register */
49
50#define AT91_TWI_CWGR 0x0010 /* Clock Waveform Generator Reg */
51
52#define AT91_TWI_SR 0x0020 /* Status Register */
53#define AT91_TWI_TXCOMP 0x0001 /* Transmission Complete */
54#define AT91_TWI_RXRDY 0x0002 /* Receive Holding Register Ready */
55#define AT91_TWI_TXRDY 0x0004 /* Transmit Holding Register Ready */
27 56
28#include <mach/at91_twi.h> 57#define AT91_TWI_OVRE 0x0040 /* Overrun Error */
29#include <mach/board.h> 58#define AT91_TWI_UNRE 0x0080 /* Underrun Error */
30#include <mach/cpu.h> 59#define AT91_TWI_NACK 0x0100 /* Not Acknowledged */
31 60
32#define TWI_CLOCK 100000 /* Hz. max 400 Kbits/sec */ 61#define AT91_TWI_IER 0x0024 /* Interrupt Enable Register */
62#define AT91_TWI_IDR 0x0028 /* Interrupt Disable Register */
63#define AT91_TWI_IMR 0x002c /* Interrupt Mask Register */
64#define AT91_TWI_RHR 0x0030 /* Receive Holding Register */
65#define AT91_TWI_THR 0x0034 /* Transmit Holding Register */
33 66
67struct at91_twi_pdata {
68 unsigned clk_max_div;
69 unsigned clk_offset;
70 bool has_unre_flag;
71};
72
73struct at91_twi_dev {
74 struct device *dev;
75 void __iomem *base;
76 struct completion cmd_complete;
77 struct clk *clk;
78 u8 *buf;
79 size_t buf_len;
80 struct i2c_msg *msg;
81 int irq;
82 unsigned transfer_status;
83 struct i2c_adapter adapter;
84 unsigned twi_cwgr_reg;
85 struct at91_twi_pdata *pdata;
86};
34 87
35static struct clk *twi_clk; 88static unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg)
36static void __iomem *twi_base; 89{
90 return readl_relaxed(dev->base + reg);
91}
92
93static void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val)
94{
95 writel_relaxed(val, dev->base + reg);
96}
37 97
38#define at91_twi_read(reg) __raw_readl(twi_base + (reg)) 98static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
39#define at91_twi_write(reg, val) __raw_writel((val), twi_base + (reg)) 99{
100 at91_twi_write(dev, AT91_TWI_IDR,
101 AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY);
102}
40 103
104static void at91_init_twi_bus(struct at91_twi_dev *dev)
105{
106 at91_disable_twi_interrupts(dev);
107 at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SWRST);
108 at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN);
109 at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS);
110 at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg);
111}
41 112
42/* 113/*
43 * Initialize the TWI hardware registers. 114 * Calculate symmetric clock as stated in datasheet:
115 * twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
44 */ 116 */
45static void __devinit at91_twi_hwinit(void) 117static void __devinit at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
46{ 118{
47 unsigned long cdiv, ckdiv; 119 int ckdiv, cdiv, div;
48 120 struct at91_twi_pdata *pdata = dev->pdata;
49 at91_twi_write(AT91_TWI_IDR, 0xffffffff); /* Disable all interrupts */ 121 int offset = pdata->clk_offset;
50 at91_twi_write(AT91_TWI_CR, AT91_TWI_SWRST); /* Reset peripheral */ 122 int max_ckdiv = pdata->clk_max_div;
51 at91_twi_write(AT91_TWI_CR, AT91_TWI_MSEN); /* Set Master mode */ 123
52 124 div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk),
53 /* Calcuate clock dividers */ 125 2 * twi_clk) - offset);
54 cdiv = (clk_get_rate(twi_clk) / (2 * TWI_CLOCK)) - 3; 126 ckdiv = fls(div >> 8);
55 cdiv = cdiv + 1; /* round up */ 127 cdiv = div >> ckdiv;
56 ckdiv = 0; 128
57 while (cdiv > 255) { 129 if (ckdiv > max_ckdiv) {
58 ckdiv++; 130 dev_warn(dev->dev, "%d exceeds ckdiv max value which is %d.\n",
59 cdiv = cdiv >> 1; 131 ckdiv, max_ckdiv);
132 ckdiv = max_ckdiv;
133 cdiv = 255;
60 } 134 }
61 135
62 if (cpu_is_at91rm9200()) { /* AT91RM9200 Errata #22 */ 136 dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv;
63 if (ckdiv > 5) { 137 dev_dbg(dev->dev, "cdiv %d ckdiv %d\n", cdiv, ckdiv);
64 printk(KERN_ERR "AT91 I2C: Invalid TWI_CLOCK value!\n"); 138}
65 ckdiv = 5;
66 }
67 }
68 139
69 at91_twi_write(AT91_TWI_CWGR, (ckdiv << 16) | (cdiv << 8) | cdiv); 140static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
141{
142 if (dev->buf_len <= 0)
143 return;
144
145 at91_twi_write(dev, AT91_TWI_THR, *dev->buf);
146
147 /* send stop when last byte has been written */
148 if (--dev->buf_len == 0)
149 at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
150
151 dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len);
152
153 ++dev->buf;
70} 154}
71 155
72/* 156static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
73 * Poll the i2c status register until the specified bit is set.
74 * Returns 0 if timed out (100 msec).
75 */
76static short at91_poll_status(unsigned long bit)
77{ 157{
78 int loop_cntr = 10000; 158 if (dev->buf_len <= 0)
159 return;
160
161 *dev->buf = at91_twi_read(dev, AT91_TWI_RHR) & 0xff;
162 --dev->buf_len;
163
164 /* handle I2C_SMBUS_BLOCK_DATA */
165 if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
166 dev->msg->flags &= ~I2C_M_RECV_LEN;
167 dev->buf_len += *dev->buf;
168 dev->msg->len = dev->buf_len + 1;
169 dev_dbg(dev->dev, "received block length %d\n", dev->buf_len);
170 }
171
172 /* send stop if second but last byte has been read */
173 if (dev->buf_len == 1)
174 at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
79 175
80 do { 176 dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len);
81 udelay(10);
82 } while (!(at91_twi_read(AT91_TWI_SR) & bit) && (--loop_cntr > 0));
83 177
84 return (loop_cntr > 0); 178 ++dev->buf;
85} 179}
86 180
87static int xfer_read(struct i2c_adapter *adap, unsigned char *buf, int length) 181static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
88{ 182{
89 /* Send Start */ 183 struct at91_twi_dev *dev = dev_id;
90 at91_twi_write(AT91_TWI_CR, AT91_TWI_START); 184 const unsigned status = at91_twi_read(dev, AT91_TWI_SR);
91 185 const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR);
92 /* Read data */ 186
93 while (length--) { 187 if (!irqstatus)
94 if (!length) /* need to send Stop before reading last byte */ 188 return IRQ_NONE;
95 at91_twi_write(AT91_TWI_CR, AT91_TWI_STOP); 189 else if (irqstatus & AT91_TWI_RXRDY)
96 if (!at91_poll_status(AT91_TWI_RXRDY)) { 190 at91_twi_read_next_byte(dev);
97 dev_dbg(&adap->dev, "RXRDY timeout\n"); 191 else if (irqstatus & AT91_TWI_TXRDY)
98 return -ETIMEDOUT; 192 at91_twi_write_next_byte(dev);
99 } 193
100 *buf++ = (at91_twi_read(AT91_TWI_RHR) & 0xff); 194 /* catch error flags */
195 dev->transfer_status |= status;
196
197 if (irqstatus & AT91_TWI_TXCOMP) {
198 at91_disable_twi_interrupts(dev);
199 complete(&dev->cmd_complete);
101 } 200 }
102 201
103 return 0; 202 return IRQ_HANDLED;
104} 203}
105 204
106static int xfer_write(struct i2c_adapter *adap, unsigned char *buf, int length) 205static int at91_do_twi_transfer(struct at91_twi_dev *dev)
107{ 206{
108 /* Load first byte into transmitter */ 207 int ret;
109 at91_twi_write(AT91_TWI_THR, *buf++); 208 bool has_unre_flag = dev->pdata->has_unre_flag;
110 209
111 /* Send Start */ 210 dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
112 at91_twi_write(AT91_TWI_CR, AT91_TWI_START); 211 (dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);
113 212
114 do { 213 INIT_COMPLETION(dev->cmd_complete);
115 if (!at91_poll_status(AT91_TWI_TXRDY)) { 214 dev->transfer_status = 0;
116 dev_dbg(&adap->dev, "TXRDY timeout\n"); 215 if (dev->msg->flags & I2C_M_RD) {
117 return -ETIMEDOUT; 216 unsigned start_flags = AT91_TWI_START;
118 }
119 217
120 length--; /* byte was transmitted */ 218 if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) {
219 dev_err(dev->dev, "RXRDY still set!");
220 at91_twi_read(dev, AT91_TWI_RHR);
221 }
121 222
122 if (length > 0) /* more data to send? */ 223 /* if only one byte is to be read, immediately stop transfer */
123 at91_twi_write(AT91_TWI_THR, *buf++); 224 if (dev->buf_len <= 1 && !(dev->msg->flags & I2C_M_RECV_LEN))
124 } while (length); 225 start_flags |= AT91_TWI_STOP;
226 at91_twi_write(dev, AT91_TWI_CR, start_flags);
227 at91_twi_write(dev, AT91_TWI_IER,
228 AT91_TWI_TXCOMP | AT91_TWI_RXRDY);
229 } else {
230 at91_twi_write_next_byte(dev);
231 at91_twi_write(dev, AT91_TWI_IER,
232 AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
233 }
125 234
126 /* Send Stop */ 235 ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
127 at91_twi_write(AT91_TWI_CR, AT91_TWI_STOP); 236 dev->adapter.timeout);
237 if (ret == 0) {
238 dev_err(dev->dev, "controller timed out\n");
239 at91_init_twi_bus(dev);
240 return -ETIMEDOUT;
241 }
242 if (dev->transfer_status & AT91_TWI_NACK) {
243 dev_dbg(dev->dev, "received nack\n");
244 return -EREMOTEIO;
245 }
246 if (dev->transfer_status & AT91_TWI_OVRE) {
247 dev_err(dev->dev, "overrun while reading\n");
248 return -EIO;
249 }
250 if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
251 dev_err(dev->dev, "underrun while writing\n");
252 return -EIO;
253 }
254 dev_dbg(dev->dev, "transfer complete\n");
128 255
129 return 0; 256 return 0;
130} 257}
131 258
132/* 259static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
133 * Generic i2c master transfer entrypoint.
134 *
135 * Note: We do not use Atmel's feature of storing the "internal device address".
136 * Instead the "internal device address" has to be written using a separate
137 * i2c message.
138 * http://lists.arm.linux.org.uk/pipermail/linux-arm-kernel/2004-September/024411.html
139 */
140static int at91_xfer(struct i2c_adapter *adap, struct i2c_msg *pmsg, int num)
141{ 260{
142 int i, ret; 261 struct at91_twi_dev *dev = i2c_get_adapdata(adap);
262 int ret;
263 unsigned int_addr_flag = 0;
264 struct i2c_msg *m_start = msg;
143 265
144 dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num); 266 dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);
145 267
146 for (i = 0; i < num; i++) { 268 /*
147 dev_dbg(&adap->dev, " #%d: %sing %d byte%s %s 0x%02x\n", i, 269 * The hardware can handle at most two messages concatenated by a
148 pmsg->flags & I2C_M_RD ? "read" : "writ", 270 * repeated start via it's internal address feature.
149 pmsg->len, pmsg->len > 1 ? "s" : "", 271 */
150 pmsg->flags & I2C_M_RD ? "from" : "to", pmsg->addr); 272 if (num > 2) {
151 273 dev_err(dev->dev,
152 at91_twi_write(AT91_TWI_MMR, (pmsg->addr << 16) 274 "cannot handle more than two concatenated messages.\n");
153 | ((pmsg->flags & I2C_M_RD) ? AT91_TWI_MREAD : 0)); 275 return 0;
154 276 } else if (num == 2) {
155 if (pmsg->len && pmsg->buf) { /* sanity check */ 277 int internal_address = 0;
156 if (pmsg->flags & I2C_M_RD) 278 int i;
157 ret = xfer_read(adap, pmsg->buf, pmsg->len); 279
158 else 280 if (msg->flags & I2C_M_RD) {
159 ret = xfer_write(adap, pmsg->buf, pmsg->len); 281 dev_err(dev->dev, "first transfer must be write.\n");
160 282 return -EINVAL;
161 if (ret)
162 return ret;
163
164 /* Wait until transfer is finished */
165 if (!at91_poll_status(AT91_TWI_TXCOMP)) {
166 dev_dbg(&adap->dev, "TXCOMP timeout\n");
167 return -ETIMEDOUT;
168 }
169 } 283 }
170 dev_dbg(&adap->dev, "transfer complete\n"); 284 if (msg->len > 3) {
171 pmsg++; /* next message */ 285 dev_err(dev->dev, "first message size must be <= 3.\n");
286 return -EINVAL;
287 }
288
289 /* 1st msg is put into the internal address, start with 2nd */
290 m_start = &msg[1];
291 for (i = 0; i < msg->len; ++i) {
292 const unsigned addr = msg->buf[msg->len - 1 - i];
293
294 internal_address |= addr << (8 * i);
295 int_addr_flag += AT91_TWI_IADRSZ_1;
296 }
297 at91_twi_write(dev, AT91_TWI_IADR, internal_address);
172 } 298 }
173 return i; 299
300 at91_twi_write(dev, AT91_TWI_MMR, (m_start->addr << 16) | int_addr_flag
301 | ((m_start->flags & I2C_M_RD) ? AT91_TWI_MREAD : 0));
302
303 dev->buf_len = m_start->len;
304 dev->buf = m_start->buf;
305 dev->msg = m_start;
306
307 ret = at91_do_twi_transfer(dev);
308
309 return (ret < 0) ? ret : num;
174} 310}
175 311
176/* 312static u32 at91_twi_func(struct i2c_adapter *adapter)
177 * Return list of supported functionality.
178 */
179static u32 at91_func(struct i2c_adapter *adapter)
180{ 313{
181 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 314 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
315 | I2C_FUNC_SMBUS_READ_BLOCK_DATA;
182} 316}
183 317
184static struct i2c_algorithm at91_algorithm = { 318static struct i2c_algorithm at91_twi_algorithm = {
185 .master_xfer = at91_xfer, 319 .master_xfer = at91_twi_xfer,
186 .functionality = at91_func, 320 .functionality = at91_twi_func,
187}; 321};
188 322
189/* 323static struct at91_twi_pdata at91rm9200_config = {
190 * Main initialization routine. 324 .clk_max_div = 5,
191 */ 325 .clk_offset = 3,
192static int __devinit at91_i2c_probe(struct platform_device *pdev) 326 .has_unre_flag = true,
193{ 327};
194 struct i2c_adapter *adapter;
195 struct resource *res;
196 int rc;
197 328
198 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 329static struct at91_twi_pdata at91sam9261_config = {
199 if (!res) 330 .clk_max_div = 5,
200 return -ENXIO; 331 .clk_offset = 4,
332 .has_unre_flag = false,
333};
201 334
202 if (!request_mem_region(res->start, resource_size(res), "at91_i2c")) 335static struct at91_twi_pdata at91sam9260_config = {
203 return -EBUSY; 336 .clk_max_div = 7,
337 .clk_offset = 4,
338 .has_unre_flag = false,
339};
340
341static struct at91_twi_pdata at91sam9g20_config = {
342 .clk_max_div = 7,
343 .clk_offset = 4,
344 .has_unre_flag = false,
345};
346
347static struct at91_twi_pdata at91sam9g10_config = {
348 .clk_max_div = 7,
349 .clk_offset = 4,
350 .has_unre_flag = false,
351};
204 352
205 twi_base = ioremap(res->start, resource_size(res)); 353static struct at91_twi_pdata at91sam9x5_config = {
206 if (!twi_base) { 354 .clk_max_div = 7,
207 rc = -ENOMEM; 355 .clk_offset = 4,
208 goto fail0; 356 .has_unre_flag = false,
357};
358
359static const struct platform_device_id at91_twi_devtypes[] = {
360 {
361 .name = "i2c-at91rm9200",
362 .driver_data = (unsigned long) &at91rm9200_config,
363 }, {
364 .name = "i2c-at91sam9261",
365 .driver_data = (unsigned long) &at91sam9261_config,
366 }, {
367 .name = "i2c-at91sam9260",
368 .driver_data = (unsigned long) &at91sam9260_config,
369 }, {
370 .name = "i2c-at91sam9g20",
371 .driver_data = (unsigned long) &at91sam9g20_config,
372 }, {
373 .name = "i2c-at91sam9g10",
374 .driver_data = (unsigned long) &at91sam9g10_config,
375 }, {
376 /* sentinel */
209 } 377 }
378};
210 379
211 twi_clk = clk_get(NULL, "twi_clk"); 380#if defined(CONFIG_OF)
212 if (IS_ERR(twi_clk)) { 381static const struct of_device_id atmel_twi_dt_ids[] = {
213 dev_err(&pdev->dev, "no clock defined\n"); 382 {
214 rc = -ENODEV; 383 .compatible = "atmel,at91sam9260-i2c",
215 goto fail1; 384 .data = &at91sam9260_config,
385 } , {
386 .compatible = "atmel,at91sam9g20-i2c",
387 .data = &at91sam9g20_config,
388 } , {
389 .compatible = "atmel,at91sam9g10-i2c",
390 .data = &at91sam9g10_config,
391 }, {
392 .compatible = "atmel,at91sam9x5-i2c",
393 .data = &at91sam9x5_config,
394 }, {
395 /* sentinel */
216 } 396 }
397};
398MODULE_DEVICE_TABLE(of, atmel_twi_dt_ids);
399#else
400#define atmel_twi_dt_ids NULL
401#endif
217 402
218 adapter = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL); 403static struct at91_twi_pdata * __devinit at91_twi_get_driver_data(
219 if (adapter == NULL) { 404 struct platform_device *pdev)
220 dev_err(&pdev->dev, "can't allocate inteface!\n"); 405{
221 rc = -ENOMEM; 406 if (pdev->dev.of_node) {
222 goto fail2; 407 const struct of_device_id *match;
408 match = of_match_node(atmel_twi_dt_ids, pdev->dev.of_node);
409 if (!match)
410 return NULL;
411 return match->data;
223 } 412 }
224 snprintf(adapter->name, sizeof(adapter->name), "AT91"); 413 return (struct at91_twi_pdata *) platform_get_device_id(pdev)->driver_data;
225 adapter->algo = &at91_algorithm; 414}
226 adapter->class = I2C_CLASS_HWMON; 415
227 adapter->dev.parent = &pdev->dev; 416static int __devinit at91_twi_probe(struct platform_device *pdev)
228 /* adapter->id == 0 ... only one TWI controller for now */ 417{
418 struct at91_twi_dev *dev;
419 struct resource *mem;
420 int rc;
421
422 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
423 if (!dev)
424 return -ENOMEM;
425 init_completion(&dev->cmd_complete);
426 dev->dev = &pdev->dev;
427
428 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
429 if (!mem)
430 return -ENODEV;
431
432 dev->pdata = at91_twi_get_driver_data(pdev);
433 if (!dev->pdata)
434 return -ENODEV;
229 435
230 platform_set_drvdata(pdev, adapter); 436 dev->base = devm_request_and_ioremap(&pdev->dev, mem);
437 if (!dev->base)
438 return -EBUSY;
231 439
232 clk_enable(twi_clk); /* enable peripheral clock */ 440 dev->irq = platform_get_irq(pdev, 0);
233 at91_twi_hwinit(); /* initialize TWI controller */ 441 if (dev->irq < 0)
442 return dev->irq;
234 443
235 rc = i2c_add_numbered_adapter(adapter); 444 rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0,
445 dev_name(dev->dev), dev);
236 if (rc) { 446 if (rc) {
237 dev_err(&pdev->dev, "Adapter %s registration failed\n", 447 dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
238 adapter->name); 448 return rc;
239 goto fail3;
240 } 449 }
241 450
242 dev_info(&pdev->dev, "AT91 i2c bus driver.\n"); 451 platform_set_drvdata(pdev, dev);
243 return 0;
244 452
245fail3: 453 dev->clk = devm_clk_get(dev->dev, NULL);
246 platform_set_drvdata(pdev, NULL); 454 if (IS_ERR(dev->clk)) {
247 kfree(adapter); 455 dev_err(dev->dev, "no clock defined\n");
248 clk_disable(twi_clk); 456 return -ENODEV;
249fail2: 457 }
250 clk_put(twi_clk); 458 clk_prepare_enable(dev->clk);
251fail1: 459
252 iounmap(twi_base); 460 at91_calc_twi_clock(dev, TWI_CLK_HZ);
253fail0: 461 at91_init_twi_bus(dev);
254 release_mem_region(res->start, resource_size(res)); 462
463 snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91");
464 i2c_set_adapdata(&dev->adapter, dev);
465 dev->adapter.owner = THIS_MODULE;
466 dev->adapter.class = I2C_CLASS_HWMON;
467 dev->adapter.algo = &at91_twi_algorithm;
468 dev->adapter.dev.parent = dev->dev;
469 dev->adapter.nr = pdev->id;
470 dev->adapter.timeout = AT91_I2C_TIMEOUT;
471 dev->adapter.dev.of_node = pdev->dev.of_node;
472
473 rc = i2c_add_numbered_adapter(&dev->adapter);
474 if (rc) {
475 dev_err(dev->dev, "Adapter %s registration failed\n",
476 dev->adapter.name);
477 clk_disable_unprepare(dev->clk);
478 return rc;
479 }
255 480
256 return rc; 481 of_i2c_register_devices(&dev->adapter);
482
483 dev_info(dev->dev, "AT91 i2c bus driver.\n");
484 return 0;
257} 485}
258 486
259static int __devexit at91_i2c_remove(struct platform_device *pdev) 487static int __devexit at91_twi_remove(struct platform_device *pdev)
260{ 488{
261 struct i2c_adapter *adapter = platform_get_drvdata(pdev); 489 struct at91_twi_dev *dev = platform_get_drvdata(pdev);
262 struct resource *res;
263 int rc; 490 int rc;
264 491
265 rc = i2c_del_adapter(adapter); 492 rc = i2c_del_adapter(&dev->adapter);
266 platform_set_drvdata(pdev, NULL); 493 clk_disable_unprepare(dev->clk);
267
268 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
269 iounmap(twi_base);
270 release_mem_region(res->start, resource_size(res));
271
272 clk_disable(twi_clk); /* disable peripheral clock */
273 clk_put(twi_clk);
274 494
275 return rc; 495 return rc;
276} 496}
277 497
278#ifdef CONFIG_PM 498#ifdef CONFIG_PM
279 499
280/* NOTE: could save a few mA by keeping clock off outside of at91_xfer... */ 500static int at91_twi_runtime_suspend(struct device *dev)
281
282static int at91_i2c_suspend(struct device *dev)
283{ 501{
284 clk_disable(twi_clk); 502 struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
503
504 clk_disable(twi_dev->clk);
505
285 return 0; 506 return 0;
286} 507}
287 508
288static int at91_i2c_resume(struct device *dev) 509static int at91_twi_runtime_resume(struct device *dev)
289{ 510{
290 return clk_enable(twi_clk); 511 struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
512
513 return clk_enable(twi_dev->clk);
291} 514}
292 515
293static SIMPLE_DEV_PM_OPS(at91_i2c_pm, at91_i2c_suspend, at91_i2c_resume); 516static const struct dev_pm_ops at91_twi_pm = {
294#define AT91_I2C_PM (&at91_i2c_pm) 517 .runtime_suspend = at91_twi_runtime_suspend,
518 .runtime_resume = at91_twi_runtime_resume,
519};
295 520
521#define at91_twi_pm_ops (&at91_twi_pm)
296#else 522#else
297#define AT91_I2C_PM NULL 523#define at91_twi_pm_ops NULL
298#endif 524#endif
299 525
300static struct platform_driver at91_i2c_driver = { 526static struct platform_driver at91_twi_driver = {
301 .probe = at91_i2c_probe, 527 .probe = at91_twi_probe,
302 .remove = __devexit_p(at91_i2c_remove), 528 .remove = __devexit_p(at91_twi_remove),
529 .id_table = at91_twi_devtypes,
303 .driver = { 530 .driver = {
304 .name = "at91_i2c", 531 .name = "at91_i2c",
305 .owner = THIS_MODULE, 532 .owner = THIS_MODULE,
306 .pm = AT91_I2C_PM, 533 .of_match_table = atmel_twi_dt_ids,
534 .pm = at91_twi_pm_ops,
307 }, 535 },
308}; 536};
309 537
310module_platform_driver(at91_i2c_driver); 538static int __init at91_twi_init(void)
539{
540 return platform_driver_register(&at91_twi_driver);
541}
542
543static void __exit at91_twi_exit(void)
544{
545 platform_driver_unregister(&at91_twi_driver);
546}
547
548subsys_initcall(at91_twi_init);
549module_exit(at91_twi_exit);
311 550
312MODULE_AUTHOR("Rick Bronson"); 551MODULE_AUTHOR("Nikolaus Voss <n.voss@weinmann.de>");
313MODULE_DESCRIPTION("I2C (TWI) driver for Atmel AT91"); 552MODULE_DESCRIPTION("I2C (TWI) driver for Atmel AT91");
314MODULE_LICENSE("GPL"); 553MODULE_LICENSE("GPL");
315MODULE_ALIAS("platform:at91_i2c"); 554MODULE_ALIAS("platform:at91_i2c");
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 79a2542d8c41..6a0a55319449 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -38,6 +38,8 @@
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/cpufreq.h> 39#include <linux/cpufreq.h>
40#include <linux/gpio.h> 40#include <linux/gpio.h>
41#include <linux/of_i2c.h>
42#include <linux/of_device.h>
41 43
42#include <mach/hardware.h> 44#include <mach/hardware.h>
43#include <linux/platform_data/i2c-davinci.h> 45#include <linux/platform_data/i2c-davinci.h>
@@ -114,6 +116,7 @@ struct davinci_i2c_dev {
114 struct completion xfr_complete; 116 struct completion xfr_complete;
115 struct notifier_block freq_transition; 117 struct notifier_block freq_transition;
116#endif 118#endif
119 struct davinci_i2c_platform_data *pdata;
117}; 120};
118 121
119/* default platform data to use if not supplied in the platform_device */ 122/* default platform data to use if not supplied in the platform_device */
@@ -155,7 +158,7 @@ static void generic_i2c_clock_pulse(unsigned int scl_pin)
155static void i2c_recover_bus(struct davinci_i2c_dev *dev) 158static void i2c_recover_bus(struct davinci_i2c_dev *dev)
156{ 159{
157 u32 flag = 0; 160 u32 flag = 0;
158 struct davinci_i2c_platform_data *pdata = dev->dev->platform_data; 161 struct davinci_i2c_platform_data *pdata = dev->pdata;
159 162
160 dev_err(dev->dev, "initiating i2c bus recovery\n"); 163 dev_err(dev->dev, "initiating i2c bus recovery\n");
161 /* Send NACK to the slave */ 164 /* Send NACK to the slave */
@@ -163,8 +166,7 @@ static void i2c_recover_bus(struct davinci_i2c_dev *dev)
163 flag |= DAVINCI_I2C_MDR_NACK; 166 flag |= DAVINCI_I2C_MDR_NACK;
164 /* write the data into mode register */ 167 /* write the data into mode register */
165 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag); 168 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
166 if (pdata) 169 generic_i2c_clock_pulse(pdata->scl_pin);
167 generic_i2c_clock_pulse(pdata->scl_pin);
168 /* Send STOP */ 170 /* Send STOP */
169 flag = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG); 171 flag = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
170 flag |= DAVINCI_I2C_MDR_STP; 172 flag |= DAVINCI_I2C_MDR_STP;
@@ -187,7 +189,7 @@ static inline void davinci_i2c_reset_ctrl(struct davinci_i2c_dev *i2c_dev,
187 189
188static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev) 190static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
189{ 191{
190 struct davinci_i2c_platform_data *pdata = dev->dev->platform_data; 192 struct davinci_i2c_platform_data *pdata = dev->pdata;
191 u16 psc; 193 u16 psc;
192 u32 clk; 194 u32 clk;
193 u32 d; 195 u32 d;
@@ -235,10 +237,7 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
235 */ 237 */
236static int i2c_davinci_init(struct davinci_i2c_dev *dev) 238static int i2c_davinci_init(struct davinci_i2c_dev *dev)
237{ 239{
238 struct davinci_i2c_platform_data *pdata = dev->dev->platform_data; 240 struct davinci_i2c_platform_data *pdata = dev->pdata;
239
240 if (!pdata)
241 pdata = &davinci_i2c_platform_data_default;
242 241
243 /* put I2C into reset */ 242 /* put I2C into reset */
244 davinci_i2c_reset_ctrl(dev, 0); 243 davinci_i2c_reset_ctrl(dev, 0);
@@ -260,6 +259,7 @@ static int i2c_davinci_init(struct davinci_i2c_dev *dev)
260 dev_dbg(dev->dev, "bus_freq = %dkHz, bus_delay = %d\n", 259 dev_dbg(dev->dev, "bus_freq = %dkHz, bus_delay = %d\n",
261 pdata->bus_freq, pdata->bus_delay); 260 pdata->bus_freq, pdata->bus_delay);
262 261
262
263 /* Take the I2C module out of reset: */ 263 /* Take the I2C module out of reset: */
264 davinci_i2c_reset_ctrl(dev, 1); 264 davinci_i2c_reset_ctrl(dev, 1);
265 265
@@ -308,13 +308,11 @@ static int
308i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) 308i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
309{ 309{
310 struct davinci_i2c_dev *dev = i2c_get_adapdata(adap); 310 struct davinci_i2c_dev *dev = i2c_get_adapdata(adap);
311 struct davinci_i2c_platform_data *pdata = dev->dev->platform_data; 311 struct davinci_i2c_platform_data *pdata = dev->pdata;
312 u32 flag; 312 u32 flag;
313 u16 w; 313 u16 w;
314 int r; 314 int r;
315 315
316 if (!pdata)
317 pdata = &davinci_i2c_platform_data_default;
318 /* Introduce a delay, required for some boards (e.g Davinci EVM) */ 316 /* Introduce a delay, required for some boards (e.g Davinci EVM) */
319 if (pdata->bus_delay) 317 if (pdata->bus_delay)
320 udelay(pdata->bus_delay); 318 udelay(pdata->bus_delay);
@@ -635,6 +633,12 @@ static struct i2c_algorithm i2c_davinci_algo = {
635 .functionality = i2c_davinci_func, 633 .functionality = i2c_davinci_func,
636}; 634};
637 635
636static const struct of_device_id davinci_i2c_of_match[] = {
637 {.compatible = "ti,davinci-i2c", },
638 {},
639};
640MODULE_DEVICE_TABLE(of, davinci_i2c_of_match);
641
638static int davinci_i2c_probe(struct platform_device *pdev) 642static int davinci_i2c_probe(struct platform_device *pdev)
639{ 643{
640 struct davinci_i2c_dev *dev; 644 struct davinci_i2c_dev *dev;
@@ -674,14 +678,33 @@ static int davinci_i2c_probe(struct platform_device *pdev)
674#endif 678#endif
675 dev->dev = get_device(&pdev->dev); 679 dev->dev = get_device(&pdev->dev);
676 dev->irq = irq->start; 680 dev->irq = irq->start;
681 dev->pdata = dev->dev->platform_data;
677 platform_set_drvdata(pdev, dev); 682 platform_set_drvdata(pdev, dev);
678 683
684 if (!dev->pdata && pdev->dev.of_node) {
685 u32 prop;
686
687 dev->pdata = devm_kzalloc(&pdev->dev,
688 sizeof(struct davinci_i2c_platform_data), GFP_KERNEL);
689 if (!dev->pdata) {
690 r = -ENOMEM;
691 goto err_free_mem;
692 }
693 memcpy(dev->pdata, &davinci_i2c_platform_data_default,
694 sizeof(struct davinci_i2c_platform_data));
695 if (!of_property_read_u32(pdev->dev.of_node, "clock-frequency",
696 &prop))
697 dev->pdata->bus_freq = prop / 1000;
698 } else if (!dev->pdata) {
699 dev->pdata = &davinci_i2c_platform_data_default;
700 }
701
679 dev->clk = clk_get(&pdev->dev, NULL); 702 dev->clk = clk_get(&pdev->dev, NULL);
680 if (IS_ERR(dev->clk)) { 703 if (IS_ERR(dev->clk)) {
681 r = -ENODEV; 704 r = -ENODEV;
682 goto err_free_mem; 705 goto err_free_mem;
683 } 706 }
684 clk_enable(dev->clk); 707 clk_prepare_enable(dev->clk);
685 708
686 dev->base = ioremap(mem->start, resource_size(mem)); 709 dev->base = ioremap(mem->start, resource_size(mem));
687 if (!dev->base) { 710 if (!dev->base) {
@@ -711,6 +734,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
711 adap->algo = &i2c_davinci_algo; 734 adap->algo = &i2c_davinci_algo;
712 adap->dev.parent = &pdev->dev; 735 adap->dev.parent = &pdev->dev;
713 adap->timeout = DAVINCI_I2C_TIMEOUT; 736 adap->timeout = DAVINCI_I2C_TIMEOUT;
737 adap->dev.of_node = pdev->dev.of_node;
714 738
715 adap->nr = pdev->id; 739 adap->nr = pdev->id;
716 r = i2c_add_numbered_adapter(adap); 740 r = i2c_add_numbered_adapter(adap);
@@ -718,6 +742,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
718 dev_err(&pdev->dev, "failure adding adapter\n"); 742 dev_err(&pdev->dev, "failure adding adapter\n");
719 goto err_free_irq; 743 goto err_free_irq;
720 } 744 }
745 of_i2c_register_devices(adap);
721 746
722 return 0; 747 return 0;
723 748
@@ -726,7 +751,7 @@ err_free_irq:
726err_unuse_clocks: 751err_unuse_clocks:
727 iounmap(dev->base); 752 iounmap(dev->base);
728err_mem_ioremap: 753err_mem_ioremap:
729 clk_disable(dev->clk); 754 clk_disable_unprepare(dev->clk);
730 clk_put(dev->clk); 755 clk_put(dev->clk);
731 dev->clk = NULL; 756 dev->clk = NULL;
732err_free_mem: 757err_free_mem:
@@ -750,7 +775,7 @@ static int davinci_i2c_remove(struct platform_device *pdev)
750 i2c_del_adapter(&dev->adapter); 775 i2c_del_adapter(&dev->adapter);
751 put_device(&pdev->dev); 776 put_device(&pdev->dev);
752 777
753 clk_disable(dev->clk); 778 clk_disable_unprepare(dev->clk);
754 clk_put(dev->clk); 779 clk_put(dev->clk);
755 dev->clk = NULL; 780 dev->clk = NULL;
756 781
@@ -772,7 +797,7 @@ static int davinci_i2c_suspend(struct device *dev)
772 797
773 /* put I2C into reset */ 798 /* put I2C into reset */
774 davinci_i2c_reset_ctrl(i2c_dev, 0); 799 davinci_i2c_reset_ctrl(i2c_dev, 0);
775 clk_disable(i2c_dev->clk); 800 clk_disable_unprepare(i2c_dev->clk);
776 801
777 return 0; 802 return 0;
778} 803}
@@ -782,7 +807,7 @@ static int davinci_i2c_resume(struct device *dev)
782 struct platform_device *pdev = to_platform_device(dev); 807 struct platform_device *pdev = to_platform_device(dev);
783 struct davinci_i2c_dev *i2c_dev = platform_get_drvdata(pdev); 808 struct davinci_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
784 809
785 clk_enable(i2c_dev->clk); 810 clk_prepare_enable(i2c_dev->clk);
786 /* take I2C out of reset */ 811 /* take I2C out of reset */
787 davinci_i2c_reset_ctrl(i2c_dev, 1); 812 davinci_i2c_reset_ctrl(i2c_dev, 1);
788 813
@@ -809,6 +834,7 @@ static struct platform_driver davinci_i2c_driver = {
809 .name = "i2c_davinci", 834 .name = "i2c_davinci",
810 .owner = THIS_MODULE, 835 .owner = THIS_MODULE,
811 .pm = davinci_i2c_pm_ops, 836 .pm = davinci_i2c_pm_ops,
837 .of_match_table = of_match_ptr(davinci_i2c_of_match),
812 }, 838 },
813}; 839};
814 840
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 37793156bd93..6abc00d59881 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -82,7 +82,8 @@
82#include <linux/wait.h> 82#include <linux/wait.h>
83#include <linux/err.h> 83#include <linux/err.h>
84 84
85#if defined CONFIG_I2C_MUX || defined CONFIG_I2C_MUX_MODULE 85#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
86 defined CONFIG_DMI
86#include <linux/gpio.h> 87#include <linux/gpio.h>
87#include <linux/i2c-mux-gpio.h> 88#include <linux/i2c-mux-gpio.h>
88#include <linux/platform_device.h> 89#include <linux/platform_device.h>
@@ -192,7 +193,8 @@ struct i801_priv {
192 int len; 193 int len;
193 u8 *data; 194 u8 *data;
194 195
195#if defined CONFIG_I2C_MUX || defined CONFIG_I2C_MUX_MODULE 196#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
197 defined CONFIG_DMI
196 const struct i801_mux_config *mux_drvdata; 198 const struct i801_mux_config *mux_drvdata;
197 struct platform_device *mux_pdev; 199 struct platform_device *mux_pdev;
198#endif 200#endif
@@ -921,7 +923,8 @@ static void __init input_apanel_init(void) {}
921static void __devinit i801_probe_optional_slaves(struct i801_priv *priv) {} 923static void __devinit i801_probe_optional_slaves(struct i801_priv *priv) {}
922#endif /* CONFIG_X86 && CONFIG_DMI */ 924#endif /* CONFIG_X86 && CONFIG_DMI */
923 925
924#if defined CONFIG_I2C_MUX || defined CONFIG_I2C_MUX_MODULE 926#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
927 defined CONFIG_DMI
925static struct i801_mux_config i801_mux_config_asus_z8_d12 = { 928static struct i801_mux_config i801_mux_config_asus_z8_d12 = {
926 .gpio_chip = "gpio_ich", 929 .gpio_chip = "gpio_ich",
927 .values = { 0x02, 0x03 }, 930 .values = { 0x02, 0x03 },
@@ -1059,7 +1062,7 @@ static unsigned int __devinit i801_get_adapter_class(struct i801_priv *priv)
1059 1062
1060 id = dmi_first_match(mux_dmi_table); 1063 id = dmi_first_match(mux_dmi_table);
1061 if (id) { 1064 if (id) {
1062 /* Remove from branch classes from trunk */ 1065 /* Remove branch classes from trunk */
1063 mux_config = id->driver_data; 1066 mux_config = id->driver_data;
1064 for (i = 0; i < mux_config->n_values; i++) 1067 for (i = 0; i < mux_config->n_values; i++)
1065 class &= ~mux_config->classes[i]; 1068 class &= ~mux_config->classes[i];
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index b7907ba7448a..2ef162d148cb 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -272,9 +272,9 @@ static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
272 272
273 /* dev_dbg() can't be used, because adapter is not yet registered */ 273 /* dev_dbg() can't be used, because adapter is not yet registered */
274#ifdef CONFIG_I2C_DEBUG_BUS 274#ifdef CONFIG_I2C_DEBUG_BUS
275 printk(KERN_DEBUG "I2C: <%s> I2C_CLK=%d, REQ DIV=%d\n", 275 dev_dbg(&i2c_imx->adapter.dev, "<%s> I2C_CLK=%d, REQ DIV=%d\n",
276 __func__, i2c_clk_rate, div); 276 __func__, i2c_clk_rate, div);
277 printk(KERN_DEBUG "I2C: <%s> IFDR[IC]=0x%x, REAL DIV=%d\n", 277 dev_dbg(&i2c_imx->adapter.dev, "<%s> IFDR[IC]=0x%x, REAL DIV=%d\n",
278 __func__, i2c_clk_div[i][1], i2c_clk_div[i][0]); 278 __func__, i2c_clk_div[i][1], i2c_clk_div[i][0]);
279#endif 279#endif
280} 280}
@@ -564,7 +564,7 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
564 resource_size(res), res->start); 564 resource_size(res), res->start);
565 dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n", 565 dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n",
566 i2c_imx->adapter.name); 566 i2c_imx->adapter.name);
567 dev_dbg(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); 567 dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
568 568
569 return 0; /* Return OK */ 569 return 0; /* Return OK */
570} 570}
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 57f7703ce2e8..ca86430cb4a2 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -576,7 +576,23 @@ static int mpc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
576 mpc_write(i2c, pmsg->addr, pmsg->buf, pmsg->len, i); 576 mpc_write(i2c, pmsg->addr, pmsg->buf, pmsg->len, i);
577 } 577 }
578 } 578 }
579 mpc_i2c_stop(i2c); 579 mpc_i2c_stop(i2c); /* Initiate STOP */
580 orig_jiffies = jiffies;
581 /* Wait until STOP is seen, allow up to 1 s */
582 while (readb(i2c->base + MPC_I2C_SR) & CSR_MBB) {
583 if (time_after(jiffies, orig_jiffies + HZ)) {
584 u8 status = readb(i2c->base + MPC_I2C_SR);
585
586 dev_dbg(i2c->dev, "timeout\n");
587 if ((status & (CSR_MCF | CSR_MBB | CSR_RXAK)) != 0) {
588 writeb(status & ~CSR_MAL,
589 i2c->base + MPC_I2C_SR);
590 mpc_i2c_fixup(i2c);
591 }
592 return -EIO;
593 }
594 cond_resched();
595 }
580 return (ret < 0) ? ret : num; 596 return (ret < 0) ? ret : num;
581} 597}
582 598
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 51f05b8520ed..286ca1917820 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -1,14 +1,12 @@
1/* 1/*
2 * Freescale MXS I2C bus driver 2 * Freescale MXS I2C bus driver
3 * 3 *
4 * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K. 4 * Copyright (C) 2011-2012 Wolfram Sang, Pengutronix e.K.
5 * 5 *
6 * based on a (non-working) driver which was: 6 * based on a (non-working) driver which was:
7 * 7 *
8 * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved. 8 * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
9 * 9 *
10 * TODO: add dma-support if platform-support for it is available
11 *
12 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or 12 * the Free Software Foundation; either version 2 of the License, or
@@ -31,6 +29,9 @@
31#include <linux/of.h> 29#include <linux/of.h>
32#include <linux/of_device.h> 30#include <linux/of_device.h>
33#include <linux/of_i2c.h> 31#include <linux/of_i2c.h>
32#include <linux/dma-mapping.h>
33#include <linux/dmaengine.h>
34#include <linux/fsl/mxs-dma.h>
34 35
35#define DRIVER_NAME "mxs-i2c" 36#define DRIVER_NAME "mxs-i2c"
36 37
@@ -70,23 +71,6 @@
70 MXS_I2C_CTRL1_SLAVE_STOP_IRQ | \ 71 MXS_I2C_CTRL1_SLAVE_STOP_IRQ | \
71 MXS_I2C_CTRL1_SLAVE_IRQ) 72 MXS_I2C_CTRL1_SLAVE_IRQ)
72 73
73#define MXS_I2C_QUEUECTRL (0x60)
74#define MXS_I2C_QUEUECTRL_SET (0x64)
75#define MXS_I2C_QUEUECTRL_CLR (0x68)
76
77#define MXS_I2C_QUEUECTRL_QUEUE_RUN 0x20
78#define MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE 0x04
79
80#define MXS_I2C_QUEUESTAT (0x70)
81#define MXS_I2C_QUEUESTAT_RD_QUEUE_EMPTY 0x00002000
82#define MXS_I2C_QUEUESTAT_WRITE_QUEUE_CNT_MASK 0x0000001F
83
84#define MXS_I2C_QUEUECMD (0x80)
85
86#define MXS_I2C_QUEUEDATA (0x90)
87
88#define MXS_I2C_DATA (0xa0)
89
90 74
91#define MXS_CMD_I2C_SELECT (MXS_I2C_CTRL0_RETAIN_CLOCK | \ 75#define MXS_CMD_I2C_SELECT (MXS_I2C_CTRL0_RETAIN_CLOCK | \
92 MXS_I2C_CTRL0_PRE_SEND_START | \ 76 MXS_I2C_CTRL0_PRE_SEND_START | \
@@ -146,6 +130,15 @@ struct mxs_i2c_dev {
146 u32 cmd_err; 130 u32 cmd_err;
147 struct i2c_adapter adapter; 131 struct i2c_adapter adapter;
148 const struct mxs_i2c_speed_config *speed; 132 const struct mxs_i2c_speed_config *speed;
133
134 /* DMA support components */
135 int dma_channel;
136 struct dma_chan *dmach;
137 struct mxs_dma_data dma_data;
138 uint32_t pio_data[2];
139 uint32_t addr_data;
140 struct scatterlist sg_io[2];
141 bool dma_read;
149}; 142};
150 143
151static void mxs_i2c_reset(struct mxs_i2c_dev *i2c) 144static void mxs_i2c_reset(struct mxs_i2c_dev *i2c)
@@ -157,95 +150,150 @@ static void mxs_i2c_reset(struct mxs_i2c_dev *i2c)
157 writel(i2c->speed->timing2, i2c->regs + MXS_I2C_TIMING2); 150 writel(i2c->speed->timing2, i2c->regs + MXS_I2C_TIMING2);
158 151
159 writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET); 152 writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
160 writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE,
161 i2c->regs + MXS_I2C_QUEUECTRL_SET);
162} 153}
163 154
164static void mxs_i2c_pioq_setup_read(struct mxs_i2c_dev *i2c, u8 addr, int len, 155static void mxs_i2c_dma_finish(struct mxs_i2c_dev *i2c)
165 int flags)
166{ 156{
167 u32 data; 157 if (i2c->dma_read) {
168 158 dma_unmap_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE);
169 writel(MXS_CMD_I2C_SELECT, i2c->regs + MXS_I2C_QUEUECMD); 159 dma_unmap_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE);
170 160 } else {
171 data = (addr << 1) | I2C_SMBUS_READ; 161 dma_unmap_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
172 writel(data, i2c->regs + MXS_I2C_DATA); 162 }
173
174 data = MXS_CMD_I2C_READ | MXS_I2C_CTRL0_XFER_COUNT(len) | flags;
175 writel(data, i2c->regs + MXS_I2C_QUEUECMD);
176} 163}
177 164
178static void mxs_i2c_pioq_setup_write(struct mxs_i2c_dev *i2c, 165static void mxs_i2c_dma_irq_callback(void *param)
179 u8 addr, u8 *buf, int len, int flags)
180{ 166{
181 u32 data; 167 struct mxs_i2c_dev *i2c = param;
182 int i, shifts_left;
183
184 data = MXS_CMD_I2C_WRITE | MXS_I2C_CTRL0_XFER_COUNT(len + 1) | flags;
185 writel(data, i2c->regs + MXS_I2C_QUEUECMD);
186
187 /*
188 * We have to copy the slave address (u8) and buffer (arbitrary number
189 * of u8) into the data register (u32). To achieve that, the u8 are put
190 * into the MSBs of 'data' which is then shifted for the next u8. When
191 * appropriate, 'data' is written to MXS_I2C_DATA. So, the first u32
192 * looks like this:
193 *
194 * 3 2 1 0
195 * 10987654|32109876|54321098|76543210
196 * --------+--------+--------+--------
197 * buffer+2|buffer+1|buffer+0|slave_addr
198 */
199
200 data = ((addr << 1) | I2C_SMBUS_WRITE) << 24;
201 168
202 for (i = 0; i < len; i++) { 169 complete(&i2c->cmd_complete);
203 data >>= 8; 170 mxs_i2c_dma_finish(i2c);
204 data |= buf[i] << 24;
205 if ((i & 3) == 2)
206 writel(data, i2c->regs + MXS_I2C_DATA);
207 }
208
209 /* Write out the remaining bytes if any */
210 shifts_left = 24 - (i & 3) * 8;
211 if (shifts_left)
212 writel(data >> shifts_left, i2c->regs + MXS_I2C_DATA);
213} 171}
214 172
215/* 173static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
216 * TODO: should be replaceable with a waitqueue and RD_QUEUE_IRQ (setting the 174 struct i2c_msg *msg, uint32_t flags)
217 * rd_threshold to 1). Couldn't get this to work, though.
218 */
219static int mxs_i2c_wait_for_data(struct mxs_i2c_dev *i2c)
220{ 175{
221 unsigned long timeout = jiffies + msecs_to_jiffies(1000); 176 struct dma_async_tx_descriptor *desc;
177 struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
222 178
223 while (readl(i2c->regs + MXS_I2C_QUEUESTAT) 179 if (msg->flags & I2C_M_RD) {
224 & MXS_I2C_QUEUESTAT_RD_QUEUE_EMPTY) { 180 i2c->dma_read = 1;
225 if (time_after(jiffies, timeout)) 181 i2c->addr_data = (msg->addr << 1) | I2C_SMBUS_READ;
226 return -ETIMEDOUT; 182
227 cond_resched(); 183 /*
228 } 184 * SELECT command.
185 */
186
187 /* Queue the PIO register write transfer. */
188 i2c->pio_data[0] = MXS_CMD_I2C_SELECT;
189 desc = dmaengine_prep_slave_sg(i2c->dmach,
190 (struct scatterlist *)&i2c->pio_data[0],
191 1, DMA_TRANS_NONE, 0);
192 if (!desc) {
193 dev_err(i2c->dev,
194 "Failed to get PIO reg. write descriptor.\n");
195 goto select_init_pio_fail;
196 }
229 197
230 return 0; 198 /* Queue the DMA data transfer. */
231} 199 sg_init_one(&i2c->sg_io[0], &i2c->addr_data, 1);
200 dma_map_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE);
201 desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[0], 1,
202 DMA_MEM_TO_DEV,
203 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
204 if (!desc) {
205 dev_err(i2c->dev,
206 "Failed to get DMA data write descriptor.\n");
207 goto select_init_dma_fail;
208 }
232 209
233static int mxs_i2c_finish_read(struct mxs_i2c_dev *i2c, u8 *buf, int len) 210 /*
234{ 211 * READ command.
235 u32 uninitialized_var(data); 212 */
236 int i; 213
214 /* Queue the PIO register write transfer. */
215 i2c->pio_data[1] = flags | MXS_CMD_I2C_READ |
216 MXS_I2C_CTRL0_XFER_COUNT(msg->len);
217 desc = dmaengine_prep_slave_sg(i2c->dmach,
218 (struct scatterlist *)&i2c->pio_data[1],
219 1, DMA_TRANS_NONE, DMA_PREP_INTERRUPT);
220 if (!desc) {
221 dev_err(i2c->dev,
222 "Failed to get PIO reg. write descriptor.\n");
223 goto select_init_dma_fail;
224 }
225
226 /* Queue the DMA data transfer. */
227 sg_init_one(&i2c->sg_io[1], msg->buf, msg->len);
228 dma_map_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE);
229 desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[1], 1,
230 DMA_DEV_TO_MEM,
231 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
232 if (!desc) {
233 dev_err(i2c->dev,
234 "Failed to get DMA data write descriptor.\n");
235 goto read_init_dma_fail;
236 }
237 } else {
238 i2c->dma_read = 0;
239 i2c->addr_data = (msg->addr << 1) | I2C_SMBUS_WRITE;
240
241 /*
242 * WRITE command.
243 */
244
245 /* Queue the PIO register write transfer. */
246 i2c->pio_data[0] = flags | MXS_CMD_I2C_WRITE |
247 MXS_I2C_CTRL0_XFER_COUNT(msg->len + 1);
248 desc = dmaengine_prep_slave_sg(i2c->dmach,
249 (struct scatterlist *)&i2c->pio_data[0],
250 1, DMA_TRANS_NONE, 0);
251 if (!desc) {
252 dev_err(i2c->dev,
253 "Failed to get PIO reg. write descriptor.\n");
254 goto write_init_pio_fail;
255 }
237 256
238 for (i = 0; i < len; i++) { 257 /* Queue the DMA data transfer. */
239 if ((i & 3) == 0) { 258 sg_init_table(i2c->sg_io, 2);
240 if (mxs_i2c_wait_for_data(i2c)) 259 sg_set_buf(&i2c->sg_io[0], &i2c->addr_data, 1);
241 return -ETIMEDOUT; 260 sg_set_buf(&i2c->sg_io[1], msg->buf, msg->len);
242 data = readl(i2c->regs + MXS_I2C_QUEUEDATA); 261 dma_map_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
262 desc = dmaengine_prep_slave_sg(i2c->dmach, i2c->sg_io, 2,
263 DMA_MEM_TO_DEV,
264 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
265 if (!desc) {
266 dev_err(i2c->dev,
267 "Failed to get DMA data write descriptor.\n");
268 goto write_init_dma_fail;
243 } 269 }
244 buf[i] = data & 0xff;
245 data >>= 8;
246 } 270 }
247 271
272 /*
273 * The last descriptor must have this callback,
274 * to finish the DMA transaction.
275 */
276 desc->callback = mxs_i2c_dma_irq_callback;
277 desc->callback_param = i2c;
278
279 /* Start the transfer. */
280 dmaengine_submit(desc);
281 dma_async_issue_pending(i2c->dmach);
248 return 0; 282 return 0;
283
284/* Read failpath. */
285read_init_dma_fail:
286 dma_unmap_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE);
287select_init_dma_fail:
288 dma_unmap_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE);
289select_init_pio_fail:
290 return -EINVAL;
291
292/* Write failpath. */
293write_init_dma_fail:
294 dma_unmap_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
295write_init_pio_fail:
296 return -EINVAL;
249} 297}
250 298
251/* 299/*
@@ -258,6 +306,8 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
258 int ret; 306 int ret;
259 int flags; 307 int flags;
260 308
309 flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0;
310
261 dev_dbg(i2c->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n", 311 dev_dbg(i2c->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
262 msg->addr, msg->len, msg->flags, stop); 312 msg->addr, msg->len, msg->flags, stop);
263 313
@@ -267,33 +317,17 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
267 init_completion(&i2c->cmd_complete); 317 init_completion(&i2c->cmd_complete);
268 i2c->cmd_err = 0; 318 i2c->cmd_err = 0;
269 319
270 flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0; 320 ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
271 321 if (ret)
272 if (msg->flags & I2C_M_RD) 322 return ret;
273 mxs_i2c_pioq_setup_read(i2c, msg->addr, msg->len, flags);
274 else
275 mxs_i2c_pioq_setup_write(i2c, msg->addr, msg->buf, msg->len,
276 flags);
277
278 writel(MXS_I2C_QUEUECTRL_QUEUE_RUN,
279 i2c->regs + MXS_I2C_QUEUECTRL_SET);
280 323
281 ret = wait_for_completion_timeout(&i2c->cmd_complete, 324 ret = wait_for_completion_timeout(&i2c->cmd_complete,
282 msecs_to_jiffies(1000)); 325 msecs_to_jiffies(1000));
283 if (ret == 0) 326 if (ret == 0)
284 goto timeout; 327 goto timeout;
285 328
286 if ((!i2c->cmd_err) && (msg->flags & I2C_M_RD)) {
287 ret = mxs_i2c_finish_read(i2c, msg->buf, msg->len);
288 if (ret)
289 goto timeout;
290 }
291
292 if (i2c->cmd_err == -ENXIO) 329 if (i2c->cmd_err == -ENXIO)
293 mxs_i2c_reset(i2c); 330 mxs_i2c_reset(i2c);
294 else
295 writel(MXS_I2C_QUEUECTRL_QUEUE_RUN,
296 i2c->regs + MXS_I2C_QUEUECTRL_CLR);
297 331
298 dev_dbg(i2c->dev, "Done with err=%d\n", i2c->cmd_err); 332 dev_dbg(i2c->dev, "Done with err=%d\n", i2c->cmd_err);
299 333
@@ -301,6 +335,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
301 335
302timeout: 336timeout:
303 dev_dbg(i2c->dev, "Timeout!\n"); 337 dev_dbg(i2c->dev, "Timeout!\n");
338 mxs_i2c_dma_finish(i2c);
304 mxs_i2c_reset(i2c); 339 mxs_i2c_reset(i2c);
305 return -ETIMEDOUT; 340 return -ETIMEDOUT;
306} 341}
@@ -329,7 +364,6 @@ static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id)
329{ 364{
330 struct mxs_i2c_dev *i2c = dev_id; 365 struct mxs_i2c_dev *i2c = dev_id;
331 u32 stat = readl(i2c->regs + MXS_I2C_CTRL1) & MXS_I2C_IRQ_MASK; 366 u32 stat = readl(i2c->regs + MXS_I2C_CTRL1) & MXS_I2C_IRQ_MASK;
332 bool is_last_cmd;
333 367
334 if (!stat) 368 if (!stat)
335 return IRQ_NONE; 369 return IRQ_NONE;
@@ -342,12 +376,6 @@ static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id)
342 /* MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ is only for slaves */ 376 /* MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ is only for slaves */
343 i2c->cmd_err = -EIO; 377 i2c->cmd_err = -EIO;
344 378
345 is_last_cmd = (readl(i2c->regs + MXS_I2C_QUEUESTAT) &
346 MXS_I2C_QUEUESTAT_WRITE_QUEUE_CNT_MASK) == 0;
347
348 if (is_last_cmd || i2c->cmd_err)
349 complete(&i2c->cmd_complete);
350
351 writel(stat, i2c->regs + MXS_I2C_CTRL1_CLR); 379 writel(stat, i2c->regs + MXS_I2C_CTRL1_CLR);
352 380
353 return IRQ_HANDLED; 381 return IRQ_HANDLED;
@@ -358,6 +386,21 @@ static const struct i2c_algorithm mxs_i2c_algo = {
358 .functionality = mxs_i2c_func, 386 .functionality = mxs_i2c_func,
359}; 387};
360 388
389static bool mxs_i2c_dma_filter(struct dma_chan *chan, void *param)
390{
391 struct mxs_i2c_dev *i2c = param;
392
393 if (!mxs_dma_is_apbx(chan))
394 return false;
395
396 if (chan->chan_id != i2c->dma_channel)
397 return false;
398
399 chan->private = &i2c->dma_data;
400
401 return true;
402}
403
361static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c) 404static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c)
362{ 405{
363 uint32_t speed; 406 uint32_t speed;
@@ -365,6 +408,17 @@ static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c)
365 struct device_node *node = dev->of_node; 408 struct device_node *node = dev->of_node;
366 int ret; 409 int ret;
367 410
411 /*
412 * TODO: This is a temporary solution and should be changed
413 * to use generic DMA binding later when the helpers get in.
414 */
415 ret = of_property_read_u32(node, "fsl,i2c-dma-channel",
416 &i2c->dma_channel);
417 if (ret) {
418 dev_err(dev, "Failed to get DMA channel!\n");
419 return -ENODEV;
420 }
421
368 ret = of_property_read_u32(node, "clock-frequency", &speed); 422 ret = of_property_read_u32(node, "clock-frequency", &speed);
369 if (ret) 423 if (ret)
370 dev_warn(dev, "No I2C speed selected, using 100kHz\n"); 424 dev_warn(dev, "No I2C speed selected, using 100kHz\n");
@@ -384,7 +438,8 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
384 struct pinctrl *pinctrl; 438 struct pinctrl *pinctrl;
385 struct resource *res; 439 struct resource *res;
386 resource_size_t res_size; 440 resource_size_t res_size;
387 int err, irq; 441 int err, irq, dmairq;
442 dma_cap_mask_t mask;
388 443
389 pinctrl = devm_pinctrl_get_select_default(dev); 444 pinctrl = devm_pinctrl_get_select_default(dev);
390 if (IS_ERR(pinctrl)) 445 if (IS_ERR(pinctrl))
@@ -395,7 +450,10 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
395 return -ENOMEM; 450 return -ENOMEM;
396 451
397 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 452 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
398 if (!res) 453 irq = platform_get_irq(pdev, 0);
454 dmairq = platform_get_irq(pdev, 1);
455
456 if (!res || irq < 0 || dmairq < 0)
399 return -ENOENT; 457 return -ENOENT;
400 458
401 res_size = resource_size(res); 459 res_size = resource_size(res);
@@ -406,10 +464,6 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
406 if (!i2c->regs) 464 if (!i2c->regs)
407 return -EBUSY; 465 return -EBUSY;
408 466
409 irq = platform_get_irq(pdev, 0);
410 if (irq < 0)
411 return irq;
412
413 err = devm_request_irq(dev, irq, mxs_i2c_isr, 0, dev_name(dev), i2c); 467 err = devm_request_irq(dev, irq, mxs_i2c_isr, 0, dev_name(dev), i2c);
414 if (err) 468 if (err)
415 return err; 469 return err;
@@ -423,6 +477,16 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
423 return err; 477 return err;
424 } 478 }
425 479
480 /* Setup the DMA */
481 dma_cap_zero(mask);
482 dma_cap_set(DMA_SLAVE, mask);
483 i2c->dma_data.chan_irq = dmairq;
484 i2c->dmach = dma_request_channel(mask, mxs_i2c_dma_filter, i2c);
485 if (!i2c->dmach) {
486 dev_err(dev, "Failed to request dma\n");
487 return -ENODEV;
488 }
489
426 platform_set_drvdata(pdev, i2c); 490 platform_set_drvdata(pdev, i2c);
427 491
428 /* Do reset to enforce correct startup after pinmuxing */ 492 /* Do reset to enforce correct startup after pinmuxing */
@@ -458,6 +522,9 @@ static int __devexit mxs_i2c_remove(struct platform_device *pdev)
458 if (ret) 522 if (ret)
459 return -EBUSY; 523 return -EBUSY;
460 524
525 if (i2c->dmach)
526 dma_release_channel(i2c->dmach);
527
461 writel(MXS_I2C_CTRL0_SFTRST, i2c->regs + MXS_I2C_CTRL0_SET); 528 writel(MXS_I2C_CTRL0_SFTRST, i2c->regs + MXS_I2C_CTRL0_SET);
462 529
463 platform_set_drvdata(pdev, NULL); 530 platform_set_drvdata(pdev, NULL);
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 61b00edacb08..02c3115a2dfa 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -22,9 +22,10 @@
22#include <linux/err.h> 22#include <linux/err.h>
23#include <linux/clk.h> 23#include <linux/clk.h>
24#include <linux/io.h> 24#include <linux/io.h>
25#include <linux/regulator/consumer.h>
26#include <linux/pm_runtime.h> 25#include <linux/pm_runtime.h>
27#include <linux/platform_data/i2c-nomadik.h> 26#include <linux/platform_data/i2c-nomadik.h>
27#include <linux/of.h>
28#include <linux/of_i2c.h>
28 29
29#define DRIVER_NAME "nmk-i2c" 30#define DRIVER_NAME "nmk-i2c"
30 31
@@ -146,7 +147,6 @@ struct i2c_nmk_client {
146 * @stop: stop condition. 147 * @stop: stop condition.
147 * @xfer_complete: acknowledge completion for a I2C message. 148 * @xfer_complete: acknowledge completion for a I2C message.
148 * @result: controller propogated result. 149 * @result: controller propogated result.
149 * @regulator: pointer to i2c regulator.
150 * @busy: Busy doing transfer. 150 * @busy: Busy doing transfer.
151 */ 151 */
152struct nmk_i2c_dev { 152struct nmk_i2c_dev {
@@ -160,7 +160,6 @@ struct nmk_i2c_dev {
160 int stop; 160 int stop;
161 struct completion xfer_complete; 161 struct completion xfer_complete;
162 int result; 162 int result;
163 struct regulator *regulator;
164 bool busy; 163 bool busy;
165}; 164};
166 165
@@ -643,11 +642,13 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
643 642
644 dev->busy = true; 643 dev->busy = true;
645 644
646 if (dev->regulator)
647 regulator_enable(dev->regulator);
648 pm_runtime_get_sync(&dev->adev->dev); 645 pm_runtime_get_sync(&dev->adev->dev);
649 646
650 clk_enable(dev->clk); 647 status = clk_prepare_enable(dev->clk);
648 if (status) {
649 dev_err(&dev->adev->dev, "can't prepare_enable clock\n");
650 goto out_clk;
651 }
651 652
652 status = init_hw(dev); 653 status = init_hw(dev);
653 if (status) 654 if (status)
@@ -674,10 +675,9 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
674 } 675 }
675 676
676out: 677out:
677 clk_disable(dev->clk); 678 clk_disable_unprepare(dev->clk);
679out_clk:
678 pm_runtime_put_sync(&dev->adev->dev); 680 pm_runtime_put_sync(&dev->adev->dev);
679 if (dev->regulator)
680 regulator_disable(dev->regulator);
681 681
682 dev->busy = false; 682 dev->busy = false;
683 683
@@ -920,18 +920,42 @@ static struct nmk_i2c_controller u8500_i2c = {
920 .sm = I2C_FREQ_MODE_FAST, 920 .sm = I2C_FREQ_MODE_FAST,
921}; 921};
922 922
923static void nmk_i2c_of_probe(struct device_node *np,
924 struct nmk_i2c_controller *pdata)
925{
926 of_property_read_u32(np, "clock-frequency", &pdata->clk_freq);
927
928 /* This driver only supports 'standard' and 'fast' modes of operation. */
929 if (pdata->clk_freq <= 100000)
930 pdata->sm = I2C_FREQ_MODE_STANDARD;
931 else
932 pdata->sm = I2C_FREQ_MODE_FAST;
933}
934
923static atomic_t adapter_id = ATOMIC_INIT(0); 935static atomic_t adapter_id = ATOMIC_INIT(0);
924 936
925static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) 937static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
926{ 938{
927 int ret = 0; 939 int ret = 0;
928 struct nmk_i2c_controller *pdata = adev->dev.platform_data; 940 struct nmk_i2c_controller *pdata = adev->dev.platform_data;
941 struct device_node *np = adev->dev.of_node;
929 struct nmk_i2c_dev *dev; 942 struct nmk_i2c_dev *dev;
930 struct i2c_adapter *adap; 943 struct i2c_adapter *adap;
931 944
932 if (!pdata) 945 if (!pdata) {
933 /* No i2c configuration found, using the default. */ 946 if (np) {
934 pdata = &u8500_i2c; 947 pdata = devm_kzalloc(&adev->dev, sizeof(*pdata), GFP_KERNEL);
948 if (!pdata) {
949 ret = -ENOMEM;
950 goto err_no_mem;
951 }
952 /* Provide the default configuration as a base. */
953 memcpy(pdata, &u8500_i2c, sizeof(struct nmk_i2c_controller));
954 nmk_i2c_of_probe(np, pdata);
955 } else
956 /* No i2c configuration found, using the default. */
957 pdata = &u8500_i2c;
958 }
935 959
936 dev = kzalloc(sizeof(struct nmk_i2c_dev), GFP_KERNEL); 960 dev = kzalloc(sizeof(struct nmk_i2c_dev), GFP_KERNEL);
937 if (!dev) { 961 if (!dev) {
@@ -957,12 +981,6 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
957 goto err_irq; 981 goto err_irq;
958 } 982 }
959 983
960 dev->regulator = regulator_get(&adev->dev, "v-i2c");
961 if (IS_ERR(dev->regulator)) {
962 dev_warn(&adev->dev, "could not get i2c regulator\n");
963 dev->regulator = NULL;
964 }
965
966 pm_suspend_ignore_children(&adev->dev, true); 984 pm_suspend_ignore_children(&adev->dev, true);
967 985
968 dev->clk = clk_get(&adev->dev, NULL); 986 dev->clk = clk_get(&adev->dev, NULL);
@@ -973,6 +991,7 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
973 } 991 }
974 992
975 adap = &dev->adap; 993 adap = &dev->adap;
994 adap->dev.of_node = np;
976 adap->dev.parent = &adev->dev; 995 adap->dev.parent = &adev->dev;
977 adap->owner = THIS_MODULE; 996 adap->owner = THIS_MODULE;
978 adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 997 adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
@@ -1002,6 +1021,8 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
1002 goto err_add_adap; 1021 goto err_add_adap;
1003 } 1022 }
1004 1023
1024 of_i2c_register_devices(adap);
1025
1005 pm_runtime_put(&adev->dev); 1026 pm_runtime_put(&adev->dev);
1006 1027
1007 return 0; 1028 return 0;
@@ -1009,8 +1030,6 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
1009 err_add_adap: 1030 err_add_adap:
1010 clk_put(dev->clk); 1031 clk_put(dev->clk);
1011 err_no_clk: 1032 err_no_clk:
1012 if (dev->regulator)
1013 regulator_put(dev->regulator);
1014 free_irq(dev->irq, dev); 1033 free_irq(dev->irq, dev);
1015 err_irq: 1034 err_irq:
1016 iounmap(dev->virtbase); 1035 iounmap(dev->virtbase);
@@ -1038,8 +1057,6 @@ static int nmk_i2c_remove(struct amba_device *adev)
1038 if (res) 1057 if (res)
1039 release_mem_region(res->start, resource_size(res)); 1058 release_mem_region(res->start, resource_size(res));
1040 clk_put(dev->clk); 1059 clk_put(dev->clk);
1041 if (dev->regulator)
1042 regulator_put(dev->regulator);
1043 pm_runtime_disable(&adev->dev); 1060 pm_runtime_disable(&adev->dev);
1044 amba_set_drvdata(adev, NULL); 1061 amba_set_drvdata(adev, NULL);
1045 kfree(dev); 1062 kfree(dev);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index a0e49f6aaf96..db31eaed6ea5 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -43,6 +43,7 @@
43#include <linux/slab.h> 43#include <linux/slab.h>
44#include <linux/i2c-omap.h> 44#include <linux/i2c-omap.h>
45#include <linux/pm_runtime.h> 45#include <linux/pm_runtime.h>
46#include <linux/pm_qos.h>
46 47
47/* I2C controller revisions */ 48/* I2C controller revisions */
48#define OMAP_I2C_OMAP1_REV_2 0x20 49#define OMAP_I2C_OMAP1_REV_2 0x20
@@ -55,6 +56,9 @@
55/* timeout waiting for the controller to respond */ 56/* timeout waiting for the controller to respond */
56#define OMAP_I2C_TIMEOUT (msecs_to_jiffies(1000)) 57#define OMAP_I2C_TIMEOUT (msecs_to_jiffies(1000))
57 58
59/* timeout for pm runtime autosuspend */
60#define OMAP_I2C_PM_TIMEOUT 1000 /* ms */
61
58/* For OMAP3 I2C_IV has changed to I2C_WE (wakeup enable) */ 62/* For OMAP3 I2C_IV has changed to I2C_WE (wakeup enable) */
59enum { 63enum {
60 OMAP_I2C_REV_REG = 0, 64 OMAP_I2C_REV_REG = 0,
@@ -176,15 +180,15 @@ enum {
176#define I2C_OMAP_ERRATA_I462 (1 << 1) 180#define I2C_OMAP_ERRATA_I462 (1 << 1)
177 181
178struct omap_i2c_dev { 182struct omap_i2c_dev {
183 spinlock_t lock; /* IRQ synchronization */
179 struct device *dev; 184 struct device *dev;
180 void __iomem *base; /* virtual */ 185 void __iomem *base; /* virtual */
181 int irq; 186 int irq;
182 int reg_shift; /* bit shift for I2C register addresses */ 187 int reg_shift; /* bit shift for I2C register addresses */
183 struct completion cmd_complete; 188 struct completion cmd_complete;
184 struct resource *ioarea; 189 struct resource *ioarea;
185 u32 latency; /* maximum mpu wkup latency */ 190 u32 latency; /* maximum MPU wkup latency */
186 void (*set_mpu_wkup_lat)(struct device *dev, 191 struct pm_qos_request pm_qos_request;
187 long latency);
188 u32 speed; /* Speed of bus in kHz */ 192 u32 speed; /* Speed of bus in kHz */
189 u32 dtrev; /* extra revision from DT */ 193 u32 dtrev; /* extra revision from DT */
190 u32 flags; 194 u32 flags;
@@ -193,12 +197,14 @@ struct omap_i2c_dev {
193 u8 *regs; 197 u8 *regs;
194 size_t buf_len; 198 size_t buf_len;
195 struct i2c_adapter adapter; 199 struct i2c_adapter adapter;
200 u8 threshold;
196 u8 fifo_size; /* use as flag and value 201 u8 fifo_size; /* use as flag and value
197 * fifo_size==0 implies no fifo 202 * fifo_size==0 implies no fifo
198 * if set, should be trsh+1 203 * if set, should be trsh+1
199 */ 204 */
200 u8 rev; 205 u8 rev;
201 unsigned b_hw:1; /* bad h/w fixes */ 206 unsigned b_hw:1; /* bad h/w fixes */
207 unsigned receiver:1; /* true when we're in receiver mode */
202 u16 iestate; /* Saved interrupt register */ 208 u16 iestate; /* Saved interrupt register */
203 u16 pscstate; 209 u16 pscstate;
204 u16 scllstate; 210 u16 scllstate;
@@ -417,13 +423,6 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
417 omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, scll); 423 omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, scll);
418 omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, sclh); 424 omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, sclh);
419 425
420 if (dev->fifo_size) {
421 /* Note: setup required fifo size - 1. RTRSH and XTRSH */
422 buf = (dev->fifo_size - 1) << 8 | OMAP_I2C_BUF_RXFIF_CLR |
423 (dev->fifo_size - 1) | OMAP_I2C_BUF_TXFIF_CLR;
424 omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, buf);
425 }
426
427 /* Take the I2C module out of reset: */ 426 /* Take the I2C module out of reset: */
428 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN); 427 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
429 428
@@ -461,6 +460,43 @@ static int omap_i2c_wait_for_bb(struct omap_i2c_dev *dev)
461 return 0; 460 return 0;
462} 461}
463 462
463static void omap_i2c_resize_fifo(struct omap_i2c_dev *dev, u8 size, bool is_rx)
464{
465 u16 buf;
466
467 if (dev->flags & OMAP_I2C_FLAG_NO_FIFO)
468 return;
469
470 /*
471 * Set up notification threshold based on message size. We're doing
472 * this to try and avoid draining feature as much as possible. Whenever
473 * we have big messages to transfer (bigger than our total fifo size)
474 * then we might use draining feature to transfer the remaining bytes.
475 */
476
477 dev->threshold = clamp(size, (u8) 1, dev->fifo_size);
478
479 buf = omap_i2c_read_reg(dev, OMAP_I2C_BUF_REG);
480
481 if (is_rx) {
482 /* Clear RX Threshold */
483 buf &= ~(0x3f << 8);
484 buf |= ((dev->threshold - 1) << 8) | OMAP_I2C_BUF_RXFIF_CLR;
485 } else {
486 /* Clear TX Threshold */
487 buf &= ~0x3f;
488 buf |= (dev->threshold - 1) | OMAP_I2C_BUF_TXFIF_CLR;
489 }
490
491 omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, buf);
492
493 if (dev->rev < OMAP_I2C_REV_ON_3630_4430)
494 dev->b_hw = 1; /* Enable hardware fixes */
495
496 /* calculate wakeup latency constraint for MPU */
497 dev->latency = (1000000 * dev->threshold) / (1000 * dev->speed / 8);
498}
499
464/* 500/*
465 * Low level master read/write transaction. 501 * Low level master read/write transaction.
466 */ 502 */
@@ -477,6 +513,9 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
477 if (msg->len == 0) 513 if (msg->len == 0)
478 return -EINVAL; 514 return -EINVAL;
479 515
516 dev->receiver = !!(msg->flags & I2C_M_RD);
517 omap_i2c_resize_fifo(dev, msg->len, dev->receiver);
518
480 omap_i2c_write_reg(dev, OMAP_I2C_SA_REG, msg->addr); 519 omap_i2c_write_reg(dev, OMAP_I2C_SA_REG, msg->addr);
481 520
482 /* REVISIT: Could the STB bit of I2C_CON be used with probing? */ 521 /* REVISIT: Could the STB bit of I2C_CON be used with probing? */
@@ -590,8 +629,16 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
590 if (r < 0) 629 if (r < 0)
591 goto out; 630 goto out;
592 631
593 if (dev->set_mpu_wkup_lat != NULL) 632 /*
594 dev->set_mpu_wkup_lat(dev->dev, dev->latency); 633 * When waiting for completion of a i2c transfer, we need to
634 * set a wake up latency constraint for the MPU. This is to
635 * ensure quick enough wakeup from idle, when transfer
636 * completes.
637 */
638 if (dev->latency)
639 pm_qos_add_request(&dev->pm_qos_request,
640 PM_QOS_CPU_DMA_LATENCY,
641 dev->latency);
595 642
596 for (i = 0; i < num; i++) { 643 for (i = 0; i < num; i++) {
597 r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1))); 644 r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1)));
@@ -599,15 +646,16 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
599 break; 646 break;
600 } 647 }
601 648
602 if (dev->set_mpu_wkup_lat != NULL) 649 if (dev->latency)
603 dev->set_mpu_wkup_lat(dev->dev, -1); 650 pm_qos_remove_request(&dev->pm_qos_request);
604 651
605 if (r == 0) 652 if (r == 0)
606 r = num; 653 r = num;
607 654
608 omap_i2c_wait_for_bb(dev); 655 omap_i2c_wait_for_bb(dev);
609out: 656out:
610 pm_runtime_put(dev->dev); 657 pm_runtime_mark_last_busy(dev->dev);
658 pm_runtime_put_autosuspend(dev->dev);
611 return r; 659 return r;
612} 660}
613 661
@@ -725,186 +773,252 @@ omap_i2c_omap1_isr(int this_irq, void *dev_id)
725 * data to DATA_REG. Otherwise some data bytes can be lost while transferring 773 * data to DATA_REG. Otherwise some data bytes can be lost while transferring
726 * them from the memory to the I2C interface. 774 * them from the memory to the I2C interface.
727 */ 775 */
728static int errata_omap3_i462(struct omap_i2c_dev *dev, u16 *stat, int *err) 776static int errata_omap3_i462(struct omap_i2c_dev *dev)
729{ 777{
730 unsigned long timeout = 10000; 778 unsigned long timeout = 10000;
779 u16 stat;
731 780
732 while (--timeout && !(*stat & OMAP_I2C_STAT_XUDF)) { 781 do {
733 if (*stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) { 782 stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
734 omap_i2c_ack_stat(dev, *stat & (OMAP_I2C_STAT_XRDY | 783 if (stat & OMAP_I2C_STAT_XUDF)
784 break;
785
786 if (stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) {
787 omap_i2c_ack_stat(dev, (OMAP_I2C_STAT_XRDY |
735 OMAP_I2C_STAT_XDR)); 788 OMAP_I2C_STAT_XDR));
736 return -ETIMEDOUT; 789 if (stat & OMAP_I2C_STAT_NACK) {
790 dev->cmd_err |= OMAP_I2C_STAT_NACK;
791 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
792 }
793
794 if (stat & OMAP_I2C_STAT_AL) {
795 dev_err(dev->dev, "Arbitration lost\n");
796 dev->cmd_err |= OMAP_I2C_STAT_AL;
797 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
798 }
799
800 return -EIO;
737 } 801 }
738 802
739 cpu_relax(); 803 cpu_relax();
740 *stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG); 804 } while (--timeout);
741 }
742 805
743 if (!timeout) { 806 if (!timeout) {
744 dev_err(dev->dev, "timeout waiting on XUDF bit\n"); 807 dev_err(dev->dev, "timeout waiting on XUDF bit\n");
745 return 0; 808 return 0;
746 } 809 }
747 810
748 *err |= OMAP_I2C_STAT_XUDF;
749 return 0; 811 return 0;
750} 812}
751 813
814static void omap_i2c_receive_data(struct omap_i2c_dev *dev, u8 num_bytes,
815 bool is_rdr)
816{
817 u16 w;
818
819 while (num_bytes--) {
820 w = omap_i2c_read_reg(dev, OMAP_I2C_DATA_REG);
821 *dev->buf++ = w;
822 dev->buf_len--;
823
824 /*
825 * Data reg in 2430, omap3 and
826 * omap4 is 8 bit wide
827 */
828 if (dev->flags & OMAP_I2C_FLAG_16BIT_DATA_REG) {
829 *dev->buf++ = w >> 8;
830 dev->buf_len--;
831 }
832 }
833}
834
835static int omap_i2c_transmit_data(struct omap_i2c_dev *dev, u8 num_bytes,
836 bool is_xdr)
837{
838 u16 w;
839
840 while (num_bytes--) {
841 w = *dev->buf++;
842 dev->buf_len--;
843
844 /*
845 * Data reg in 2430, omap3 and
846 * omap4 is 8 bit wide
847 */
848 if (dev->flags & OMAP_I2C_FLAG_16BIT_DATA_REG) {
849 w |= *dev->buf++ << 8;
850 dev->buf_len--;
851 }
852
853 if (dev->errata & I2C_OMAP_ERRATA_I462) {
854 int ret;
855
856 ret = errata_omap3_i462(dev);
857 if (ret < 0)
858 return ret;
859 }
860
861 omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w);
862 }
863
864 return 0;
865}
866
867static irqreturn_t
868omap_i2c_isr(int irq, void *dev_id)
869{
870 struct omap_i2c_dev *dev = dev_id;
871 irqreturn_t ret = IRQ_HANDLED;
872 u16 mask;
873 u16 stat;
874
875 spin_lock(&dev->lock);
876 mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
877 stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
878
879 if (stat & mask)
880 ret = IRQ_WAKE_THREAD;
881
882 spin_unlock(&dev->lock);
883
884 return ret;
885}
886
752static irqreturn_t 887static irqreturn_t
753omap_i2c_isr(int this_irq, void *dev_id) 888omap_i2c_isr_thread(int this_irq, void *dev_id)
754{ 889{
755 struct omap_i2c_dev *dev = dev_id; 890 struct omap_i2c_dev *dev = dev_id;
891 unsigned long flags;
756 u16 bits; 892 u16 bits;
757 u16 stat, w; 893 u16 stat;
758 int err, count = 0; 894 int err = 0, count = 0;
895
896 spin_lock_irqsave(&dev->lock, flags);
897 do {
898 bits = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
899 stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
900 stat &= bits;
901
902 /* If we're in receiver mode, ignore XDR/XRDY */
903 if (dev->receiver)
904 stat &= ~(OMAP_I2C_STAT_XDR | OMAP_I2C_STAT_XRDY);
905 else
906 stat &= ~(OMAP_I2C_STAT_RDR | OMAP_I2C_STAT_RRDY);
759 907
760 if (pm_runtime_suspended(dev->dev)) 908 if (!stat) {
761 return IRQ_NONE; 909 /* my work here is done */
910 goto out;
911 }
762 912
763 bits = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
764 while ((stat = (omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG))) & bits) {
765 dev_dbg(dev->dev, "IRQ (ISR = 0x%04x)\n", stat); 913 dev_dbg(dev->dev, "IRQ (ISR = 0x%04x)\n", stat);
766 if (count++ == 100) { 914 if (count++ == 100) {
767 dev_warn(dev->dev, "Too much work in one IRQ\n"); 915 dev_warn(dev->dev, "Too much work in one IRQ\n");
768 break; 916 break;
769 } 917 }
770 918
771 err = 0; 919 if (stat & OMAP_I2C_STAT_NACK) {
772complete:
773 /*
774 * Ack the stat in one go, but [R/X]DR and [R/X]RDY should be
775 * acked after the data operation is complete.
776 * Ref: TRM SWPU114Q Figure 18-31
777 */
778 omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat &
779 ~(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
780 OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
781
782 if (stat & OMAP_I2C_STAT_NACK)
783 err |= OMAP_I2C_STAT_NACK; 920 err |= OMAP_I2C_STAT_NACK;
921 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
922 break;
923 }
784 924
785 if (stat & OMAP_I2C_STAT_AL) { 925 if (stat & OMAP_I2C_STAT_AL) {
786 dev_err(dev->dev, "Arbitration lost\n"); 926 dev_err(dev->dev, "Arbitration lost\n");
787 err |= OMAP_I2C_STAT_AL; 927 err |= OMAP_I2C_STAT_AL;
928 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
929 break;
788 } 930 }
931
789 /* 932 /*
790 * ProDB0017052: Clear ARDY bit twice 933 * ProDB0017052: Clear ARDY bit twice
791 */ 934 */
792 if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | 935 if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
793 OMAP_I2C_STAT_AL)) { 936 OMAP_I2C_STAT_AL)) {
794 omap_i2c_ack_stat(dev, stat & 937 omap_i2c_ack_stat(dev, (OMAP_I2C_STAT_RRDY |
795 (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR | 938 OMAP_I2C_STAT_RDR |
796 OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR | 939 OMAP_I2C_STAT_XRDY |
797 OMAP_I2C_STAT_ARDY)); 940 OMAP_I2C_STAT_XDR |
798 omap_i2c_complete_cmd(dev, err); 941 OMAP_I2C_STAT_ARDY));
799 return IRQ_HANDLED; 942 break;
800 } 943 }
801 if (stat & (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR)) { 944
945 if (stat & OMAP_I2C_STAT_RDR) {
802 u8 num_bytes = 1; 946 u8 num_bytes = 1;
803 947
948 if (dev->fifo_size)
949 num_bytes = dev->buf_len;
950
951 omap_i2c_receive_data(dev, num_bytes, true);
952
804 if (dev->errata & I2C_OMAP_ERRATA_I207) 953 if (dev->errata & I2C_OMAP_ERRATA_I207)
805 i2c_omap_errata_i207(dev, stat); 954 i2c_omap_errata_i207(dev, stat);
806 955
807 if (dev->fifo_size) { 956 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
808 if (stat & OMAP_I2C_STAT_RRDY) 957 break;
809 num_bytes = dev->fifo_size; 958 }
810 else /* read RXSTAT on RDR interrupt */ 959
811 num_bytes = (omap_i2c_read_reg(dev, 960 if (stat & OMAP_I2C_STAT_RRDY) {
812 OMAP_I2C_BUFSTAT_REG) 961 u8 num_bytes = 1;
813 >> 8) & 0x3F; 962
814 } 963 if (dev->threshold)
815 while (num_bytes) { 964 num_bytes = dev->threshold;
816 num_bytes--; 965
817 w = omap_i2c_read_reg(dev, OMAP_I2C_DATA_REG); 966 omap_i2c_receive_data(dev, num_bytes, false);
818 if (dev->buf_len) { 967 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RRDY);
819 *dev->buf++ = w;
820 dev->buf_len--;
821 /*
822 * Data reg in 2430, omap3 and
823 * omap4 is 8 bit wide
824 */
825 if (dev->flags &
826 OMAP_I2C_FLAG_16BIT_DATA_REG) {
827 if (dev->buf_len) {
828 *dev->buf++ = w >> 8;
829 dev->buf_len--;
830 }
831 }
832 } else {
833 if (stat & OMAP_I2C_STAT_RRDY)
834 dev_err(dev->dev,
835 "RRDY IRQ while no data"
836 " requested\n");
837 if (stat & OMAP_I2C_STAT_RDR)
838 dev_err(dev->dev,
839 "RDR IRQ while no data"
840 " requested\n");
841 break;
842 }
843 }
844 omap_i2c_ack_stat(dev,
845 stat & (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR));
846 continue; 968 continue;
847 } 969 }
848 if (stat & (OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)) { 970
971 if (stat & OMAP_I2C_STAT_XDR) {
849 u8 num_bytes = 1; 972 u8 num_bytes = 1;
850 if (dev->fifo_size) { 973 int ret;
851 if (stat & OMAP_I2C_STAT_XRDY) 974
852 num_bytes = dev->fifo_size; 975 if (dev->fifo_size)
853 else /* read TXSTAT on XDR interrupt */ 976 num_bytes = dev->buf_len;
854 num_bytes = omap_i2c_read_reg(dev, 977
855 OMAP_I2C_BUFSTAT_REG) 978 ret = omap_i2c_transmit_data(dev, num_bytes, true);
856 & 0x3F; 979 if (ret < 0)
857 } 980 break;
858 while (num_bytes) { 981
859 num_bytes--; 982 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_XDR);
860 w = 0; 983 break;
861 if (dev->buf_len) { 984 }
862 w = *dev->buf++; 985
863 dev->buf_len--; 986 if (stat & OMAP_I2C_STAT_XRDY) {
864 /* 987 u8 num_bytes = 1;
865 * Data reg in 2430, omap3 and 988 int ret;
866 * omap4 is 8 bit wide 989
867 */ 990 if (dev->threshold)
868 if (dev->flags & 991 num_bytes = dev->threshold;
869 OMAP_I2C_FLAG_16BIT_DATA_REG) { 992
870 if (dev->buf_len) { 993 ret = omap_i2c_transmit_data(dev, num_bytes, false);
871 w |= *dev->buf++ << 8; 994 if (ret < 0)
872 dev->buf_len--; 995 break;
873 } 996
874 } 997 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_XRDY);
875 } else {
876 if (stat & OMAP_I2C_STAT_XRDY)
877 dev_err(dev->dev,
878 "XRDY IRQ while no "
879 "data to send\n");
880 if (stat & OMAP_I2C_STAT_XDR)
881 dev_err(dev->dev,
882 "XDR IRQ while no "
883 "data to send\n");
884 break;
885 }
886
887 if ((dev->errata & I2C_OMAP_ERRATA_I462) &&
888 errata_omap3_i462(dev, &stat, &err))
889 goto complete;
890
891 omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w);
892 }
893 omap_i2c_ack_stat(dev,
894 stat & (OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
895 continue; 998 continue;
896 } 999 }
1000
897 if (stat & OMAP_I2C_STAT_ROVR) { 1001 if (stat & OMAP_I2C_STAT_ROVR) {
898 dev_err(dev->dev, "Receive overrun\n"); 1002 dev_err(dev->dev, "Receive overrun\n");
899 dev->cmd_err |= OMAP_I2C_STAT_ROVR; 1003 err |= OMAP_I2C_STAT_ROVR;
1004 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_ROVR);
1005 break;
900 } 1006 }
1007
901 if (stat & OMAP_I2C_STAT_XUDF) { 1008 if (stat & OMAP_I2C_STAT_XUDF) {
902 dev_err(dev->dev, "Transmit underflow\n"); 1009 dev_err(dev->dev, "Transmit underflow\n");
903 dev->cmd_err |= OMAP_I2C_STAT_XUDF; 1010 err |= OMAP_I2C_STAT_XUDF;
1011 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_XUDF);
1012 break;
904 } 1013 }
905 } 1014 } while (stat);
1015
1016 omap_i2c_complete_cmd(dev, err);
1017
1018out:
1019 spin_unlock_irqrestore(&dev->lock, flags);
906 1020
907 return count ? IRQ_HANDLED : IRQ_NONE; 1021 return IRQ_HANDLED;
908} 1022}
909 1023
910static const struct i2c_algorithm omap_i2c_algo = { 1024static const struct i2c_algorithm omap_i2c_algo = {
@@ -943,12 +1057,12 @@ omap_i2c_probe(struct platform_device *pdev)
943{ 1057{
944 struct omap_i2c_dev *dev; 1058 struct omap_i2c_dev *dev;
945 struct i2c_adapter *adap; 1059 struct i2c_adapter *adap;
946 struct resource *mem, *irq, *ioarea; 1060 struct resource *mem;
947 const struct omap_i2c_bus_platform_data *pdata = 1061 const struct omap_i2c_bus_platform_data *pdata =
948 pdev->dev.platform_data; 1062 pdev->dev.platform_data;
949 struct device_node *node = pdev->dev.of_node; 1063 struct device_node *node = pdev->dev.of_node;
950 const struct of_device_id *match; 1064 const struct of_device_id *match;
951 irq_handler_t isr; 1065 int irq;
952 int r; 1066 int r;
953 1067
954 /* NOTE: driver uses the static register mapping */ 1068 /* NOTE: driver uses the static register mapping */
@@ -957,23 +1071,23 @@ omap_i2c_probe(struct platform_device *pdev)
957 dev_err(&pdev->dev, "no mem resource?\n"); 1071 dev_err(&pdev->dev, "no mem resource?\n");
958 return -ENODEV; 1072 return -ENODEV;
959 } 1073 }
960 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1074
961 if (!irq) { 1075 irq = platform_get_irq(pdev, 0);
1076 if (irq < 0) {
962 dev_err(&pdev->dev, "no irq resource?\n"); 1077 dev_err(&pdev->dev, "no irq resource?\n");
963 return -ENODEV; 1078 return irq;
964 } 1079 }
965 1080
966 ioarea = request_mem_region(mem->start, resource_size(mem), 1081 dev = devm_kzalloc(&pdev->dev, sizeof(struct omap_i2c_dev), GFP_KERNEL);
967 pdev->name); 1082 if (!dev) {
968 if (!ioarea) { 1083 dev_err(&pdev->dev, "Menory allocation failed\n");
969 dev_err(&pdev->dev, "I2C region already claimed\n"); 1084 return -ENOMEM;
970 return -EBUSY;
971 } 1085 }
972 1086
973 dev = kzalloc(sizeof(struct omap_i2c_dev), GFP_KERNEL); 1087 dev->base = devm_request_and_ioremap(&pdev->dev, mem);
974 if (!dev) { 1088 if (!dev->base) {
975 r = -ENOMEM; 1089 dev_err(&pdev->dev, "I2C region already claimed\n");
976 goto err_release_region; 1090 return -ENOMEM;
977 } 1091 }
978 1092
979 match = of_match_device(of_match_ptr(omap_i2c_of_match), &pdev->dev); 1093 match = of_match_device(of_match_ptr(omap_i2c_of_match), &pdev->dev);
@@ -990,17 +1104,13 @@ omap_i2c_probe(struct platform_device *pdev)
990 } else if (pdata != NULL) { 1104 } else if (pdata != NULL) {
991 dev->speed = pdata->clkrate; 1105 dev->speed = pdata->clkrate;
992 dev->flags = pdata->flags; 1106 dev->flags = pdata->flags;
993 dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
994 dev->dtrev = pdata->rev; 1107 dev->dtrev = pdata->rev;
995 } 1108 }
996 1109
997 dev->dev = &pdev->dev; 1110 dev->dev = &pdev->dev;
998 dev->irq = irq->start; 1111 dev->irq = irq;
999 dev->base = ioremap(mem->start, resource_size(mem)); 1112
1000 if (!dev->base) { 1113 spin_lock_init(&dev->lock);
1001 r = -ENOMEM;
1002 goto err_free_mem;
1003 }
1004 1114
1005 platform_set_drvdata(pdev, dev); 1115 platform_set_drvdata(pdev, dev);
1006 init_completion(&dev->cmd_complete); 1116 init_completion(&dev->cmd_complete);
@@ -1013,6 +1123,9 @@ omap_i2c_probe(struct platform_device *pdev)
1013 dev->regs = (u8 *)reg_map_ip_v1; 1123 dev->regs = (u8 *)reg_map_ip_v1;
1014 1124
1015 pm_runtime_enable(dev->dev); 1125 pm_runtime_enable(dev->dev);
1126 pm_runtime_set_autosuspend_delay(dev->dev, OMAP_I2C_PM_TIMEOUT);
1127 pm_runtime_use_autosuspend(dev->dev);
1128
1016 r = pm_runtime_get_sync(dev->dev); 1129 r = pm_runtime_get_sync(dev->dev);
1017 if (IS_ERR_VALUE(r)) 1130 if (IS_ERR_VALUE(r))
1018 goto err_free_mem; 1131 goto err_free_mem;
@@ -1042,32 +1155,31 @@ omap_i2c_probe(struct platform_device *pdev)
1042 1155
1043 dev->fifo_size = (dev->fifo_size / 2); 1156 dev->fifo_size = (dev->fifo_size / 2);
1044 1157
1045 if (dev->rev >= OMAP_I2C_REV_ON_3630_4430) 1158 if (dev->rev < OMAP_I2C_REV_ON_3630_4430)
1046 dev->b_hw = 0; /* Disable hardware fixes */
1047 else
1048 dev->b_hw = 1; /* Enable hardware fixes */ 1159 dev->b_hw = 1; /* Enable hardware fixes */
1049 1160
1050 /* calculate wakeup latency constraint for MPU */ 1161 /* calculate wakeup latency constraint for MPU */
1051 if (dev->set_mpu_wkup_lat != NULL) 1162 dev->latency = (1000000 * dev->fifo_size) /
1052 dev->latency = (1000000 * dev->fifo_size) / 1163 (1000 * dev->speed / 8);
1053 (1000 * dev->speed / 8);
1054 } 1164 }
1055 1165
1056 /* reset ASAP, clearing any IRQs */ 1166 /* reset ASAP, clearing any IRQs */
1057 omap_i2c_init(dev); 1167 omap_i2c_init(dev);
1058 1168
1059 isr = (dev->rev < OMAP_I2C_OMAP1_REV_2) ? omap_i2c_omap1_isr : 1169 if (dev->rev < OMAP_I2C_OMAP1_REV_2)
1060 omap_i2c_isr; 1170 r = devm_request_irq(&pdev->dev, dev->irq, omap_i2c_omap1_isr,
1061 r = request_irq(dev->irq, isr, IRQF_NO_SUSPEND, pdev->name, dev); 1171 IRQF_NO_SUSPEND, pdev->name, dev);
1172 else
1173 r = devm_request_threaded_irq(&pdev->dev, dev->irq,
1174 omap_i2c_isr, omap_i2c_isr_thread,
1175 IRQF_NO_SUSPEND | IRQF_ONESHOT,
1176 pdev->name, dev);
1062 1177
1063 if (r) { 1178 if (r) {
1064 dev_err(dev->dev, "failure requesting irq %i\n", dev->irq); 1179 dev_err(dev->dev, "failure requesting irq %i\n", dev->irq);
1065 goto err_unuse_clocks; 1180 goto err_unuse_clocks;
1066 } 1181 }
1067 1182
1068 dev_info(dev->dev, "bus %d rev%d.%d.%d at %d kHz\n", pdev->id,
1069 dev->dtrev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
1070
1071 adap = &dev->adapter; 1183 adap = &dev->adapter;
1072 i2c_set_adapdata(adap, dev); 1184 i2c_set_adapdata(adap, dev);
1073 adap->owner = THIS_MODULE; 1185 adap->owner = THIS_MODULE;
@@ -1082,27 +1194,25 @@ omap_i2c_probe(struct platform_device *pdev)
1082 r = i2c_add_numbered_adapter(adap); 1194 r = i2c_add_numbered_adapter(adap);
1083 if (r) { 1195 if (r) {
1084 dev_err(dev->dev, "failure adding adapter\n"); 1196 dev_err(dev->dev, "failure adding adapter\n");
1085 goto err_free_irq; 1197 goto err_unuse_clocks;
1086 } 1198 }
1087 1199
1200 dev_info(dev->dev, "bus %d rev%d.%d.%d at %d kHz\n", adap->nr,
1201 dev->dtrev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
1202
1088 of_i2c_register_devices(adap); 1203 of_i2c_register_devices(adap);
1089 1204
1090 pm_runtime_put(dev->dev); 1205 pm_runtime_mark_last_busy(dev->dev);
1206 pm_runtime_put_autosuspend(dev->dev);
1091 1207
1092 return 0; 1208 return 0;
1093 1209
1094err_free_irq:
1095 free_irq(dev->irq, dev);
1096err_unuse_clocks: 1210err_unuse_clocks:
1097 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0); 1211 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
1098 pm_runtime_put(dev->dev); 1212 pm_runtime_put(dev->dev);
1099 iounmap(dev->base);
1100 pm_runtime_disable(&pdev->dev); 1213 pm_runtime_disable(&pdev->dev);
1101err_free_mem: 1214err_free_mem:
1102 platform_set_drvdata(pdev, NULL); 1215 platform_set_drvdata(pdev, NULL);
1103 kfree(dev);
1104err_release_region:
1105 release_mem_region(mem->start, resource_size(mem));
1106 1216
1107 return r; 1217 return r;
1108} 1218}
@@ -1110,12 +1220,10 @@ err_release_region:
1110static int __devexit omap_i2c_remove(struct platform_device *pdev) 1220static int __devexit omap_i2c_remove(struct platform_device *pdev)
1111{ 1221{
1112 struct omap_i2c_dev *dev = platform_get_drvdata(pdev); 1222 struct omap_i2c_dev *dev = platform_get_drvdata(pdev);
1113 struct resource *mem;
1114 int ret; 1223 int ret;
1115 1224
1116 platform_set_drvdata(pdev, NULL); 1225 platform_set_drvdata(pdev, NULL);
1117 1226
1118 free_irq(dev->irq, dev);
1119 i2c_del_adapter(&dev->adapter); 1227 i2c_del_adapter(&dev->adapter);
1120 ret = pm_runtime_get_sync(&pdev->dev); 1228 ret = pm_runtime_get_sync(&pdev->dev);
1121 if (IS_ERR_VALUE(ret)) 1229 if (IS_ERR_VALUE(ret))
@@ -1124,10 +1232,6 @@ static int __devexit omap_i2c_remove(struct platform_device *pdev)
1124 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0); 1232 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
1125 pm_runtime_put(&pdev->dev); 1233 pm_runtime_put(&pdev->dev);
1126 pm_runtime_disable(&pdev->dev); 1234 pm_runtime_disable(&pdev->dev);
1127 iounmap(dev->base);
1128 kfree(dev);
1129 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1130 release_mem_region(mem->start, resource_size(mem));
1131 return 0; 1235 return 0;
1132} 1236}
1133 1237
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
new file mode 100644
index 000000000000..f9399d163af2
--- /dev/null
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -0,0 +1,709 @@
1/*
2 * drivers/i2c/busses/i2c-rcar.c
3 *
4 * Copyright (C) 2012 Renesas Solutions Corp.
5 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
6 *
7 * This file is based on the drivers/i2c/busses/i2c-sh7760.c
8 * (c) 2005-2008 MSC Vertriebsges.m.b.H, Manuel Lauss <mlau@msc-ge.com>
9 *
10 * This file used out-of-tree driver i2c-rcar.c
11 * Copyright (C) 2011-2012 Renesas Electronics Corporation
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26#include <linux/clk.h>
27#include <linux/delay.h>
28#include <linux/err.h>
29#include <linux/init.h>
30#include <linux/interrupt.h>
31#include <linux/io.h>
32#include <linux/i2c.h>
33#include <linux/i2c/i2c-rcar.h>
34#include <linux/kernel.h>
35#include <linux/module.h>
36#include <linux/platform_device.h>
37#include <linux/pm_runtime.h>
38#include <linux/slab.h>
39#include <linux/spinlock.h>
40
41/* register offsets */
42#define ICSCR 0x00 /* slave ctrl */
43#define ICMCR 0x04 /* master ctrl */
44#define ICSSR 0x08 /* slave status */
45#define ICMSR 0x0C /* master status */
46#define ICSIER 0x10 /* slave irq enable */
47#define ICMIER 0x14 /* master irq enable */
48#define ICCCR 0x18 /* clock dividers */
49#define ICSAR 0x1C /* slave address */
50#define ICMAR 0x20 /* master address */
51#define ICRXTX 0x24 /* data port */
52
53/* ICMCR */
54#define MDBS (1 << 7) /* non-fifo mode switch */
55#define FSCL (1 << 6) /* override SCL pin */
56#define FSDA (1 << 5) /* override SDA pin */
57#define OBPC (1 << 4) /* override pins */
58#define MIE (1 << 3) /* master if enable */
59#define TSBE (1 << 2)
60#define FSB (1 << 1) /* force stop bit */
61#define ESG (1 << 0) /* en startbit gen */
62
63/* ICMSR */
64#define MNR (1 << 6) /* nack received */
65#define MAL (1 << 5) /* arbitration lost */
66#define MST (1 << 4) /* sent a stop */
67#define MDE (1 << 3)
68#define MDT (1 << 2)
69#define MDR (1 << 1)
70#define MAT (1 << 0) /* slave addr xfer done */
71
72/* ICMIE */
73#define MNRE (1 << 6) /* nack irq en */
74#define MALE (1 << 5) /* arblos irq en */
75#define MSTE (1 << 4) /* stop irq en */
76#define MDEE (1 << 3)
77#define MDTE (1 << 2)
78#define MDRE (1 << 1)
79#define MATE (1 << 0) /* address sent irq en */
80
81
82enum {
83 RCAR_BUS_PHASE_ADDR,
84 RCAR_BUS_PHASE_DATA,
85 RCAR_BUS_PHASE_STOP,
86};
87
88enum {
89 RCAR_IRQ_CLOSE,
90 RCAR_IRQ_OPEN_FOR_SEND,
91 RCAR_IRQ_OPEN_FOR_RECV,
92 RCAR_IRQ_OPEN_FOR_STOP,
93};
94
95/*
96 * flags
97 */
98#define ID_LAST_MSG (1 << 0)
99#define ID_IOERROR (1 << 1)
100#define ID_DONE (1 << 2)
101#define ID_ARBLOST (1 << 3)
102#define ID_NACK (1 << 4)
103
104struct rcar_i2c_priv {
105 void __iomem *io;
106 struct i2c_adapter adap;
107 struct i2c_msg *msg;
108
109 spinlock_t lock;
110 wait_queue_head_t wait;
111
112 int pos;
113 int irq;
114 u32 icccr;
115 u32 flags;
116};
117
118#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
119#define rcar_i2c_is_recv(p) ((p)->msg->flags & I2C_M_RD)
120
121#define rcar_i2c_flags_set(p, f) ((p)->flags |= (f))
122#define rcar_i2c_flags_has(p, f) ((p)->flags & (f))
123
124#define LOOP_TIMEOUT 1024
125
126/*
127 * basic functions
128 */
129static void rcar_i2c_write(struct rcar_i2c_priv *priv, int reg, u32 val)
130{
131 writel(val, priv->io + reg);
132}
133
134static u32 rcar_i2c_read(struct rcar_i2c_priv *priv, int reg)
135{
136 return readl(priv->io + reg);
137}
138
139static void rcar_i2c_init(struct rcar_i2c_priv *priv)
140{
141 /*
142 * reset slave mode.
143 * slave mode is not used on this driver
144 */
145 rcar_i2c_write(priv, ICSIER, 0);
146 rcar_i2c_write(priv, ICSAR, 0);
147 rcar_i2c_write(priv, ICSCR, 0);
148 rcar_i2c_write(priv, ICSSR, 0);
149
150 /* reset master mode */
151 rcar_i2c_write(priv, ICMIER, 0);
152 rcar_i2c_write(priv, ICMCR, 0);
153 rcar_i2c_write(priv, ICMSR, 0);
154 rcar_i2c_write(priv, ICMAR, 0);
155}
156
157static void rcar_i2c_irq_mask(struct rcar_i2c_priv *priv, int open)
158{
159 u32 val = MNRE | MALE | MSTE | MATE; /* default */
160
161 switch (open) {
162 case RCAR_IRQ_OPEN_FOR_SEND:
163 val |= MDEE; /* default + send */
164 break;
165 case RCAR_IRQ_OPEN_FOR_RECV:
166 val |= MDRE; /* default + read */
167 break;
168 case RCAR_IRQ_OPEN_FOR_STOP:
169 val = MSTE; /* stop irq only */
170 break;
171 case RCAR_IRQ_CLOSE:
172 default:
173 val = 0; /* all close */
174 break;
175 }
176 rcar_i2c_write(priv, ICMIER, val);
177}
178
179static void rcar_i2c_set_addr(struct rcar_i2c_priv *priv, u32 recv)
180{
181 rcar_i2c_write(priv, ICMAR, (priv->msg->addr << 1) | recv);
182}
183
184/*
185 * bus control functions
186 */
187static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv)
188{
189 int i;
190
191 for (i = 0; i < LOOP_TIMEOUT; i++) {
192 /* make sure that bus is not busy */
193 if (!(rcar_i2c_read(priv, ICMCR) & FSDA))
194 return 0;
195 udelay(1);
196 }
197
198 return -EBUSY;
199}
200
201static void rcar_i2c_bus_phase(struct rcar_i2c_priv *priv, int phase)
202{
203 switch (phase) {
204 case RCAR_BUS_PHASE_ADDR:
205 rcar_i2c_write(priv, ICMCR, MDBS | MIE | ESG);
206 break;
207 case RCAR_BUS_PHASE_DATA:
208 rcar_i2c_write(priv, ICMCR, MDBS | MIE);
209 break;
210 case RCAR_BUS_PHASE_STOP:
211 rcar_i2c_write(priv, ICMCR, MDBS | MIE | FSB);
212 break;
213 }
214}
215
216/*
217 * clock function
218 */
219static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv,
220 u32 bus_speed,
221 struct device *dev)
222{
223 struct clk *clkp = clk_get(NULL, "peripheral_clk");
224 u32 scgd, cdf;
225 u32 round, ick;
226 u32 scl;
227
228 if (!clkp) {
229 dev_err(dev, "there is no peripheral_clk\n");
230 return -EIO;
231 }
232
233 /*
234 * calculate SCL clock
235 * see
236 * ICCCR
237 *
238 * ick = clkp / (1 + CDF)
239 * SCL = ick / (20 + SCGD * 8 + F[(ticf + tr + intd) * ick])
240 *
241 * ick : I2C internal clock < 20 MHz
242 * ticf : I2C SCL falling time = 35 ns here
243 * tr : I2C SCL rising time = 200 ns here
244 * intd : LSI internal delay = 50 ns here
245 * clkp : peripheral_clk
246 * F[] : integer up-valuation
247 */
248 for (cdf = 0; cdf < 4; cdf++) {
249 ick = clk_get_rate(clkp) / (1 + cdf);
250 if (ick < 20000000)
251 goto ick_find;
252 }
253 dev_err(dev, "there is no best CDF\n");
254 return -EIO;
255
256ick_find:
257 /*
258 * it is impossible to calculate large scale
259 * number on u32. separate it
260 *
261 * F[(ticf + tr + intd) * ick]
262 * = F[(35 + 200 + 50)ns * ick]
263 * = F[285 * ick / 1000000000]
264 * = F[(ick / 1000000) * 285 / 1000]
265 */
266 round = (ick + 500000) / 1000000 * 285;
267 round = (round + 500) / 1000;
268
269 /*
270 * SCL = ick / (20 + SCGD * 8 + F[(ticf + tr + intd) * ick])
271 *
272 * Calculation result (= SCL) should be less than
273 * bus_speed for hardware safety
274 */
275 for (scgd = 0; scgd < 0x40; scgd++) {
276 scl = ick / (20 + (scgd * 8) + round);
277 if (scl <= bus_speed)
278 goto scgd_find;
279 }
280 dev_err(dev, "it is impossible to calculate best SCL\n");
281 return -EIO;
282
283scgd_find:
284 dev_dbg(dev, "clk %d/%d(%lu), round %u, CDF:0x%x, SCGD: 0x%x\n",
285 scl, bus_speed, clk_get_rate(clkp), round, cdf, scgd);
286
287 /*
288 * keep icccr value
289 */
290 priv->icccr = (scgd << 2 | cdf);
291
292 return 0;
293}
294
295static void rcar_i2c_clock_start(struct rcar_i2c_priv *priv)
296{
297 rcar_i2c_write(priv, ICCCR, priv->icccr);
298}
299
300/*
301 * status functions
302 */
303static u32 rcar_i2c_status_get(struct rcar_i2c_priv *priv)
304{
305 return rcar_i2c_read(priv, ICMSR);
306}
307
308#define rcar_i2c_status_clear(priv) rcar_i2c_status_bit_clear(priv, 0xffffffff)
309static void rcar_i2c_status_bit_clear(struct rcar_i2c_priv *priv, u32 bit)
310{
311 rcar_i2c_write(priv, ICMSR, ~bit);
312}
313
314/*
315 * recv/send functions
316 */
317static int rcar_i2c_recv(struct rcar_i2c_priv *priv)
318{
319 rcar_i2c_set_addr(priv, 1);
320 rcar_i2c_status_clear(priv);
321 rcar_i2c_bus_phase(priv, RCAR_BUS_PHASE_ADDR);
322 rcar_i2c_irq_mask(priv, RCAR_IRQ_OPEN_FOR_RECV);
323
324 return 0;
325}
326
327static int rcar_i2c_send(struct rcar_i2c_priv *priv)
328{
329 int ret;
330
331 /*
332 * It should check bus status when send case
333 */
334 ret = rcar_i2c_bus_barrier(priv);
335 if (ret < 0)
336 return ret;
337
338 rcar_i2c_set_addr(priv, 0);
339 rcar_i2c_status_clear(priv);
340 rcar_i2c_bus_phase(priv, RCAR_BUS_PHASE_ADDR);
341 rcar_i2c_irq_mask(priv, RCAR_IRQ_OPEN_FOR_SEND);
342
343 return 0;
344}
345
346#define rcar_i2c_send_restart(priv) rcar_i2c_status_bit_clear(priv, (MAT | MDE))
347#define rcar_i2c_recv_restart(priv) rcar_i2c_status_bit_clear(priv, (MAT | MDR))
348
349/*
350 * interrupt functions
351 */
352static int rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr)
353{
354 struct i2c_msg *msg = priv->msg;
355
356 /*
357 * FIXME
358 * sometimes, unknown interrupt happened.
359 * Do nothing
360 */
361 if (!(msr & MDE))
362 return 0;
363
364 /*
365 * If address transfer phase finished,
366 * goto data phase.
367 */
368 if (msr & MAT)
369 rcar_i2c_bus_phase(priv, RCAR_BUS_PHASE_DATA);
370
371 if (priv->pos < msg->len) {
372 /*
373 * Prepare next data to ICRXTX register.
374 * This data will go to _SHIFT_ register.
375 *
376 * *
377 * [ICRXTX] -> [SHIFT] -> [I2C bus]
378 */
379 rcar_i2c_write(priv, ICRXTX, msg->buf[priv->pos]);
380 priv->pos++;
381
382 } else {
383 /*
384 * The last data was pushed to ICRXTX on _PREV_ empty irq.
385 * It is on _SHIFT_ register, and will sent to I2C bus.
386 *
387 * *
388 * [ICRXTX] -> [SHIFT] -> [I2C bus]
389 */
390
391 if (priv->flags & ID_LAST_MSG)
392 /*
393 * If current msg is the _LAST_ msg,
394 * prepare stop condition here.
395 * ID_DONE will be set on STOP irq.
396 */
397 rcar_i2c_bus_phase(priv, RCAR_BUS_PHASE_STOP);
398 else
399 /*
400 * If current msg is _NOT_ last msg,
401 * it doesn't call stop phase.
402 * thus, there is no STOP irq.
403 * return ID_DONE here.
404 */
405 return ID_DONE;
406 }
407
408 rcar_i2c_send_restart(priv);
409
410 return 0;
411}
412
413static int rcar_i2c_irq_recv(struct rcar_i2c_priv *priv, u32 msr)
414{
415 struct i2c_msg *msg = priv->msg;
416
417 /*
418 * FIXME
419 * sometimes, unknown interrupt happened.
420 * Do nothing
421 */
422 if (!(msr & MDR))
423 return 0;
424
425 if (msr & MAT) {
426 /*
427 * Address transfer phase finished,
428 * but, there is no data at this point.
429 * Do nothing.
430 */
431 } else if (priv->pos < msg->len) {
432 /*
433 * get received data
434 */
435 msg->buf[priv->pos] = rcar_i2c_read(priv, ICRXTX);
436 priv->pos++;
437 }
438
439 /*
440 * If next received data is the _LAST_,
441 * go to STOP phase,
442 * otherwise, go to DATA phase.
443 */
444 if (priv->pos + 1 >= msg->len)
445 rcar_i2c_bus_phase(priv, RCAR_BUS_PHASE_STOP);
446 else
447 rcar_i2c_bus_phase(priv, RCAR_BUS_PHASE_DATA);
448
449 rcar_i2c_recv_restart(priv);
450
451 return 0;
452}
453
454static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
455{
456 struct rcar_i2c_priv *priv = ptr;
457 struct device *dev = rcar_i2c_priv_to_dev(priv);
458 u32 msr;
459
460 /*-------------- spin lock -----------------*/
461 spin_lock(&priv->lock);
462
463 msr = rcar_i2c_status_get(priv);
464
465 /*
466 * Arbitration lost
467 */
468 if (msr & MAL) {
469 /*
470 * CAUTION
471 *
472 * When arbitration lost, device become _slave_ mode.
473 */
474 dev_dbg(dev, "Arbitration Lost\n");
475 rcar_i2c_flags_set(priv, (ID_DONE | ID_ARBLOST));
476 goto out;
477 }
478
479 /*
480 * Stop
481 */
482 if (msr & MST) {
483 dev_dbg(dev, "Stop\n");
484 rcar_i2c_flags_set(priv, ID_DONE);
485 goto out;
486 }
487
488 /*
489 * Nack
490 */
491 if (msr & MNR) {
492 dev_dbg(dev, "Nack\n");
493
494 /* go to stop phase */
495 rcar_i2c_bus_phase(priv, RCAR_BUS_PHASE_STOP);
496 rcar_i2c_irq_mask(priv, RCAR_IRQ_OPEN_FOR_STOP);
497 rcar_i2c_flags_set(priv, ID_NACK);
498 goto out;
499 }
500
501 /*
502 * recv/send
503 */
504 if (rcar_i2c_is_recv(priv))
505 rcar_i2c_flags_set(priv, rcar_i2c_irq_recv(priv, msr));
506 else
507 rcar_i2c_flags_set(priv, rcar_i2c_irq_send(priv, msr));
508
509out:
510 if (rcar_i2c_flags_has(priv, ID_DONE)) {
511 rcar_i2c_irq_mask(priv, RCAR_IRQ_CLOSE);
512 rcar_i2c_status_clear(priv);
513 wake_up(&priv->wait);
514 }
515
516 spin_unlock(&priv->lock);
517 /*-------------- spin unlock -----------------*/
518
519 return IRQ_HANDLED;
520}
521
/*
 * rcar_i2c_master_xfer - i2c_algorithm .master_xfer callback
 *
 * Transfers @num messages one at a time.  Each message is set up under
 * priv->lock, then this thread sleeps on priv->wait until the IRQ
 * handler flags ID_DONE (or 5 seconds elapse).
 *
 * Returns the number of messages transferred on success, or a negative
 * errno: -ETIMEDOUT on timeout, -EREMOTEIO on NACK, -EAGAIN on
 * arbitration loss, -EIO on I/O error, -EINVAL if @num is 0.
 */
static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
				struct i2c_msg *msgs,
				int num)
{
	struct rcar_i2c_priv *priv = i2c_get_adapdata(adap);
	struct device *dev = rcar_i2c_priv_to_dev(priv);
	unsigned long flags;
	int i, ret, timeout;

	/* power up / clock the controller while we own the bus */
	pm_runtime_get_sync(dev);

	/*-------------- spin lock -----------------*/
	spin_lock_irqsave(&priv->lock, flags);

	rcar_i2c_init(priv);
	rcar_i2c_clock_start(priv);

	spin_unlock_irqrestore(&priv->lock, flags);
	/*-------------- spin unlock -----------------*/

	/* if num == 0, the loop never runs and -EINVAL is returned */
	ret = -EINVAL;
	for (i = 0; i < num; i++) {
		/*-------------- spin lock -----------------*/
		spin_lock_irqsave(&priv->lock, flags);

		/* init each data */
		priv->msg = &msgs[i];
		priv->pos = 0;
		priv->flags = 0;
		/* ID_LAST_MSG tells the IRQ path to issue the stop phase */
		if (priv->msg == &msgs[num - 1])
			rcar_i2c_flags_set(priv, ID_LAST_MSG);

		/* start send/recv */
		if (rcar_i2c_is_recv(priv))
			ret = rcar_i2c_recv(priv);
		else
			ret = rcar_i2c_send(priv);

		spin_unlock_irqrestore(&priv->lock, flags);
		/*-------------- spin unlock -----------------*/

		if (ret < 0)
			break;

		/*
		 * wait result
		 * ID_DONE is set (and we are woken) by rcar_i2c_irq()
		 */
		timeout = wait_event_timeout(priv->wait,
					     rcar_i2c_flags_has(priv, ID_DONE),
					     5 * HZ);
		if (!timeout) {
			ret = -ETIMEDOUT;
			break;
		}

		/*
		 * error handling
		 * (flags were set by the IRQ handler before the wake-up)
		 */
		if (rcar_i2c_flags_has(priv, ID_NACK)) {
			ret = -EREMOTEIO;
			break;
		}

		if (rcar_i2c_flags_has(priv, ID_ARBLOST)) {
			ret = -EAGAIN;
			break;
		}

		if (rcar_i2c_flags_has(priv, ID_IOERROR)) {
			ret = -EIO;
			break;
		}

		ret = i + 1; /* The number of transfer */
	}

	pm_runtime_put(dev);

	if (ret < 0)
		dev_err(dev, "error %d : %x\n", ret, priv->flags);

	return ret;
}
605
606static u32 rcar_i2c_func(struct i2c_adapter *adap)
607{
608 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
609}
610
/* i2c_algorithm ops: master-mode only, no slave support */
static const struct i2c_algorithm rcar_i2c_algo = {
	.master_xfer = rcar_i2c_master_xfer,
	.functionality = rcar_i2c_func,
};
615
616static int __devinit rcar_i2c_probe(struct platform_device *pdev)
617{
618 struct i2c_rcar_platform_data *pdata = pdev->dev.platform_data;
619 struct rcar_i2c_priv *priv;
620 struct i2c_adapter *adap;
621 struct resource *res;
622 struct device *dev = &pdev->dev;
623 u32 bus_speed;
624 int ret;
625
626 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
627 if (!res) {
628 dev_err(dev, "no mmio resources\n");
629 return -ENODEV;
630 }
631
632 priv = devm_kzalloc(dev, sizeof(struct rcar_i2c_priv), GFP_KERNEL);
633 if (!priv) {
634 dev_err(dev, "no mem for private data\n");
635 return -ENOMEM;
636 }
637
638 bus_speed = 100000; /* default 100 kHz */
639 if (pdata && pdata->bus_speed)
640 bus_speed = pdata->bus_speed;
641 ret = rcar_i2c_clock_calculate(priv, bus_speed, dev);
642 if (ret < 0)
643 return ret;
644
645 priv->io = devm_ioremap(dev, res->start, resource_size(res));
646 if (!priv->io) {
647 dev_err(dev, "cannot ioremap\n");
648 return -ENODEV;
649 }
650
651 priv->irq = platform_get_irq(pdev, 0);
652 init_waitqueue_head(&priv->wait);
653 spin_lock_init(&priv->lock);
654
655 adap = &priv->adap;
656 adap->nr = pdev->id;
657 adap->algo = &rcar_i2c_algo;
658 adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
659 adap->retries = 3;
660 adap->dev.parent = dev;
661 i2c_set_adapdata(adap, priv);
662 strlcpy(adap->name, pdev->name, sizeof(adap->name));
663
664 ret = devm_request_irq(dev, priv->irq, rcar_i2c_irq, 0,
665 dev_name(dev), priv);
666 if (ret < 0) {
667 dev_err(dev, "cannot get irq %d\n", priv->irq);
668 return ret;
669 }
670
671 ret = i2c_add_numbered_adapter(adap);
672 if (ret < 0) {
673 dev_err(dev, "reg adap failed: %d\n", ret);
674 return ret;
675 }
676
677 pm_runtime_enable(dev);
678 platform_set_drvdata(pdev, priv);
679
680 dev_info(dev, "probed\n");
681
682 return 0;
683}
684
685static int __devexit rcar_i2c_remove(struct platform_device *pdev)
686{
687 struct rcar_i2c_priv *priv = platform_get_drvdata(pdev);
688 struct device *dev = &pdev->dev;
689
690 i2c_del_adapter(&priv->adap);
691 pm_runtime_disable(dev);
692
693 return 0;
694}
695
/* Platform driver glue: board code instantiates devices named "i2c-rcar" */
static struct platform_driver rcar_i2c_drv = {
	.driver	= {
		.name	= "i2c-rcar",
		.owner	= THIS_MODULE,
	},
	.probe		= rcar_i2c_probe,
	.remove		= __devexit_p(rcar_i2c_remove),
};

/* expands to module_init/module_exit register/unregister boilerplate */
module_platform_driver(rcar_i2c_drv);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Renesas R-Car I2C bus driver");
MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 4d07dea9bca9..3e0335f1fc60 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -601,14 +601,14 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
601 int ret; 601 int ret;
602 602
603 pm_runtime_get_sync(&adap->dev); 603 pm_runtime_get_sync(&adap->dev);
604 clk_enable(i2c->clk); 604 clk_prepare_enable(i2c->clk);
605 605
606 for (retry = 0; retry < adap->retries; retry++) { 606 for (retry = 0; retry < adap->retries; retry++) {
607 607
608 ret = s3c24xx_i2c_doxfer(i2c, msgs, num); 608 ret = s3c24xx_i2c_doxfer(i2c, msgs, num);
609 609
610 if (ret != -EAGAIN) { 610 if (ret != -EAGAIN) {
611 clk_disable(i2c->clk); 611 clk_disable_unprepare(i2c->clk);
612 pm_runtime_put(&adap->dev); 612 pm_runtime_put(&adap->dev);
613 return ret; 613 return ret;
614 } 614 }
@@ -618,7 +618,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
618 udelay(100); 618 udelay(100);
619 } 619 }
620 620
621 clk_disable(i2c->clk); 621 clk_disable_unprepare(i2c->clk);
622 pm_runtime_put(&adap->dev); 622 pm_runtime_put(&adap->dev);
623 return -EREMOTEIO; 623 return -EREMOTEIO;
624} 624}
@@ -977,7 +977,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
977 977
978 dev_dbg(&pdev->dev, "clock source %p\n", i2c->clk); 978 dev_dbg(&pdev->dev, "clock source %p\n", i2c->clk);
979 979
980 clk_enable(i2c->clk); 980 clk_prepare_enable(i2c->clk);
981 981
982 /* map the registers */ 982 /* map the registers */
983 983
@@ -1065,7 +1065,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
1065 pm_runtime_enable(&i2c->adap.dev); 1065 pm_runtime_enable(&i2c->adap.dev);
1066 1066
1067 dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev)); 1067 dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev));
1068 clk_disable(i2c->clk); 1068 clk_disable_unprepare(i2c->clk);
1069 return 0; 1069 return 0;
1070 1070
1071 err_cpufreq: 1071 err_cpufreq:
@@ -1082,7 +1082,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
1082 kfree(i2c->ioarea); 1082 kfree(i2c->ioarea);
1083 1083
1084 err_clk: 1084 err_clk:
1085 clk_disable(i2c->clk); 1085 clk_disable_unprepare(i2c->clk);
1086 clk_put(i2c->clk); 1086 clk_put(i2c->clk);
1087 1087
1088 err_noclk: 1088 err_noclk:
@@ -1106,7 +1106,7 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
1106 i2c_del_adapter(&i2c->adap); 1106 i2c_del_adapter(&i2c->adap);
1107 free_irq(i2c->irq, i2c); 1107 free_irq(i2c->irq, i2c);
1108 1108
1109 clk_disable(i2c->clk); 1109 clk_disable_unprepare(i2c->clk);
1110 clk_put(i2c->clk); 1110 clk_put(i2c->clk);
1111 1111
1112 iounmap(i2c->regs); 1112 iounmap(i2c->regs);
@@ -1135,9 +1135,9 @@ static int s3c24xx_i2c_resume(struct device *dev)
1135 struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); 1135 struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
1136 1136
1137 i2c->suspended = 0; 1137 i2c->suspended = 0;
1138 clk_enable(i2c->clk); 1138 clk_prepare_enable(i2c->clk);
1139 s3c24xx_i2c_init(i2c); 1139 s3c24xx_i2c_init(i2c);
1140 clk_disable(i2c->clk); 1140 clk_disable_unprepare(i2c->clk);
1141 1141
1142 return 0; 1142 return 0;
1143} 1143}
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index f981ac4e6783..dcea77bf6f50 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -742,7 +742,7 @@ static int __devinit tegra_i2c_probe(struct platform_device *pdev)
742 } 742 }
743 743
744 ret = devm_request_irq(&pdev->dev, i2c_dev->irq, 744 ret = devm_request_irq(&pdev->dev, i2c_dev->irq,
745 tegra_i2c_isr, 0, pdev->name, i2c_dev); 745 tegra_i2c_isr, 0, dev_name(&pdev->dev), i2c_dev);
746 if (ret) { 746 if (ret) {
747 dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq); 747 dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq);
748 return ret; 748 return ret;
diff --git a/drivers/i2c/busses/i2c-stub.c b/drivers/i2c/i2c-stub.c
index b1b3447942c9..d0a9c590c3cd 100644
--- a/drivers/i2c/busses/i2c-stub.c
+++ b/drivers/i2c/i2c-stub.c
@@ -2,7 +2,7 @@
2 i2c-stub.c - I2C/SMBus chip emulator 2 i2c-stub.c - I2C/SMBus chip emulator
3 3
4 Copyright (c) 2004 Mark M. Hoffman <mhoffman@lightlink.com> 4 Copyright (c) 2004 Mark M. Hoffman <mhoffman@lightlink.com>
5 Copyright (C) 2007 Jean Delvare <khali@linux-fr.org> 5 Copyright (C) 2007, 2012 Jean Delvare <khali@linux-fr.org>
6 6
7 This program is free software; you can redistribute it and/or modify 7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by 8 it under the terms of the GNU General Public License as published by
@@ -51,8 +51,8 @@ struct stub_chip {
51static struct stub_chip *stub_chips; 51static struct stub_chip *stub_chips;
52 52
53/* Return negative errno on error. */ 53/* Return negative errno on error. */
54static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags, 54static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags,
55 char read_write, u8 command, int size, union i2c_smbus_data * data) 55 char read_write, u8 command, int size, union i2c_smbus_data *data)
56{ 56{
57 s32 ret; 57 s32 ret;
58 int i, len; 58 int i, len;
@@ -78,14 +78,14 @@ static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags,
78 case I2C_SMBUS_BYTE: 78 case I2C_SMBUS_BYTE:
79 if (read_write == I2C_SMBUS_WRITE) { 79 if (read_write == I2C_SMBUS_WRITE) {
80 chip->pointer = command; 80 chip->pointer = command;
81 dev_dbg(&adap->dev, "smbus byte - addr 0x%02x, " 81 dev_dbg(&adap->dev,
82 "wrote 0x%02x.\n", 82 "smbus byte - addr 0x%02x, wrote 0x%02x.\n",
83 addr, command); 83 addr, command);
84 } else { 84 } else {
85 data->byte = chip->words[chip->pointer++] & 0xff; 85 data->byte = chip->words[chip->pointer++] & 0xff;
86 dev_dbg(&adap->dev, "smbus byte - addr 0x%02x, " 86 dev_dbg(&adap->dev,
87 "read 0x%02x.\n", 87 "smbus byte - addr 0x%02x, read 0x%02x.\n",
88 addr, data->byte); 88 addr, data->byte);
89 } 89 }
90 90
91 ret = 0; 91 ret = 0;
@@ -95,14 +95,14 @@ static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags,
95 if (read_write == I2C_SMBUS_WRITE) { 95 if (read_write == I2C_SMBUS_WRITE) {
96 chip->words[command] &= 0xff00; 96 chip->words[command] &= 0xff00;
97 chip->words[command] |= data->byte; 97 chip->words[command] |= data->byte;
98 dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, " 98 dev_dbg(&adap->dev,
99 "wrote 0x%02x at 0x%02x.\n", 99 "smbus byte data - addr 0x%02x, wrote 0x%02x at 0x%02x.\n",
100 addr, data->byte, command); 100 addr, data->byte, command);
101 } else { 101 } else {
102 data->byte = chip->words[command] & 0xff; 102 data->byte = chip->words[command] & 0xff;
103 dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, " 103 dev_dbg(&adap->dev,
104 "read 0x%02x at 0x%02x.\n", 104 "smbus byte data - addr 0x%02x, read 0x%02x at 0x%02x.\n",
105 addr, data->byte, command); 105 addr, data->byte, command);
106 } 106 }
107 chip->pointer = command + 1; 107 chip->pointer = command + 1;
108 108
@@ -112,14 +112,14 @@ static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags,
112 case I2C_SMBUS_WORD_DATA: 112 case I2C_SMBUS_WORD_DATA:
113 if (read_write == I2C_SMBUS_WRITE) { 113 if (read_write == I2C_SMBUS_WRITE) {
114 chip->words[command] = data->word; 114 chip->words[command] = data->word;
115 dev_dbg(&adap->dev, "smbus word data - addr 0x%02x, " 115 dev_dbg(&adap->dev,
116 "wrote 0x%04x at 0x%02x.\n", 116 "smbus word data - addr 0x%02x, wrote 0x%04x at 0x%02x.\n",
117 addr, data->word, command); 117 addr, data->word, command);
118 } else { 118 } else {
119 data->word = chip->words[command]; 119 data->word = chip->words[command];
120 dev_dbg(&adap->dev, "smbus word data - addr 0x%02x, " 120 dev_dbg(&adap->dev,
121 "read 0x%04x at 0x%02x.\n", 121 "smbus word data - addr 0x%02x, read 0x%04x at 0x%02x.\n",
122 addr, data->word, command); 122 addr, data->word, command);
123 } 123 }
124 124
125 ret = 0; 125 ret = 0;
@@ -132,17 +132,17 @@ static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags,
132 chip->words[command + i] &= 0xff00; 132 chip->words[command + i] &= 0xff00;
133 chip->words[command + i] |= data->block[1 + i]; 133 chip->words[command + i] |= data->block[1 + i];
134 } 134 }
135 dev_dbg(&adap->dev, "i2c block data - addr 0x%02x, " 135 dev_dbg(&adap->dev,
136 "wrote %d bytes at 0x%02x.\n", 136 "i2c block data - addr 0x%02x, wrote %d bytes at 0x%02x.\n",
137 addr, len, command); 137 addr, len, command);
138 } else { 138 } else {
139 for (i = 0; i < len; i++) { 139 for (i = 0; i < len; i++) {
140 data->block[1 + i] = 140 data->block[1 + i] =
141 chip->words[command + i] & 0xff; 141 chip->words[command + i] & 0xff;
142 } 142 }
143 dev_dbg(&adap->dev, "i2c block data - addr 0x%02x, " 143 dev_dbg(&adap->dev,
144 "read %d bytes at 0x%02x.\n", 144 "i2c block data - addr 0x%02x, read %d bytes at 0x%02x.\n",
145 addr, len, command); 145 addr, len, command);
146 } 146 }
147 147
148 ret = 0; 148 ret = 0;
@@ -179,25 +179,24 @@ static int __init i2c_stub_init(void)
179 int i, ret; 179 int i, ret;
180 180
181 if (!chip_addr[0]) { 181 if (!chip_addr[0]) {
182 printk(KERN_ERR "i2c-stub: Please specify a chip address\n"); 182 pr_err("i2c-stub: Please specify a chip address\n");
183 return -ENODEV; 183 return -ENODEV;
184 } 184 }
185 185
186 for (i = 0; i < MAX_CHIPS && chip_addr[i]; i++) { 186 for (i = 0; i < MAX_CHIPS && chip_addr[i]; i++) {
187 if (chip_addr[i] < 0x03 || chip_addr[i] > 0x77) { 187 if (chip_addr[i] < 0x03 || chip_addr[i] > 0x77) {
188 printk(KERN_ERR "i2c-stub: Invalid chip address " 188 pr_err("i2c-stub: Invalid chip address 0x%02x\n",
189 "0x%02x\n", chip_addr[i]); 189 chip_addr[i]);
190 return -EINVAL; 190 return -EINVAL;
191 } 191 }
192 192
193 printk(KERN_INFO "i2c-stub: Virtual chip at 0x%02x\n", 193 pr_info("i2c-stub: Virtual chip at 0x%02x\n", chip_addr[i]);
194 chip_addr[i]);
195 } 194 }
196 195
197 /* Allocate memory for all chips at once */ 196 /* Allocate memory for all chips at once */
198 stub_chips = kzalloc(i * sizeof(struct stub_chip), GFP_KERNEL); 197 stub_chips = kzalloc(i * sizeof(struct stub_chip), GFP_KERNEL);
199 if (!stub_chips) { 198 if (!stub_chips) {
200 printk(KERN_ERR "i2c-stub: Out of memory\n"); 199 pr_err("i2c-stub: Out of memory\n");
201 return -ENOMEM; 200 return -ENOMEM;
202 } 201 }
203 202
@@ -219,4 +218,3 @@ MODULE_LICENSE("GPL");
219 218
220module_init(i2c_stub_init); 219module_init(i2c_stub_init);
221module_exit(i2c_stub_exit); 220module_exit(i2c_stub_exit);
222
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 6e3f143fc71d..fc937aca71fb 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -62,7 +62,6 @@ source "drivers/iio/frequency/Kconfig"
62source "drivers/iio/dac/Kconfig" 62source "drivers/iio/dac/Kconfig"
63source "drivers/iio/common/Kconfig" 63source "drivers/iio/common/Kconfig"
64source "drivers/iio/gyro/Kconfig" 64source "drivers/iio/gyro/Kconfig"
65source "drivers/iio/light/Kconfig"
66source "drivers/iio/magnetometer/Kconfig" 65source "drivers/iio/magnetometer/Kconfig"
67 66
68endif # IIO 67endif # IIO
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index f7fa3c0867b4..761f2b65ac52 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -18,5 +18,4 @@ obj-y += frequency/
18obj-y += dac/ 18obj-y += dac/
19obj-y += common/ 19obj-y += common/
20obj-y += gyro/ 20obj-y += gyro/
21obj-y += light/
22obj-y += magnetometer/ 21obj-y += magnetometer/
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 1983adc19243..a7568c34a1aa 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -3498,7 +3498,8 @@ out:
3498} 3498}
3499 3499
3500static const struct ibnl_client_cbs cma_cb_table[] = { 3500static const struct ibnl_client_cbs cma_cb_table[] = {
3501 [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats }, 3501 [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
3502 .module = THIS_MODULE },
3502}; 3503};
3503 3504
3504static int __init cma_init(void) 3505static int __init cma_init(void)
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index fe10a949aef9..da06abde9e0d 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -154,6 +154,7 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
154 { 154 {
155 struct netlink_dump_control c = { 155 struct netlink_dump_control c = {
156 .dump = client->cb_table[op].dump, 156 .dump = client->cb_table[op].dump,
157 .module = client->cb_table[op].module,
157 }; 158 };
158 return netlink_dump_start(nls, skb, nlh, &c); 159 return netlink_dump_start(nls, skb, nlh, &c);
159 } 160 }
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 57e07c61ace2..afd81790ab3c 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -468,7 +468,7 @@ struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
468 ret = alloc_pbl(mhp, npages); 468 ret = alloc_pbl(mhp, npages);
469 if (ret) { 469 if (ret) {
470 kfree(page_list); 470 kfree(page_list);
471 goto err_pbl; 471 goto err;
472 } 472 }
473 473
474 ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr, 474 ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index d2fb38d43571..2f215b93db6b 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -107,7 +107,7 @@ static __be64 get_cached_alias_guid(struct mlx4_ib_dev *dev, int port, int index
107{ 107{
108 if (index >= NUM_ALIAS_GUID_PER_PORT) { 108 if (index >= NUM_ALIAS_GUID_PER_PORT) {
109 pr_err("%s: ERROR: asked for index:%d\n", __func__, index); 109 pr_err("%s: ERROR: asked for index:%d\n", __func__, index);
110 return (__force __be64) ((u64) 0xFFFFFFFFFFFFFFFFUL); 110 return (__force __be64) -1;
111 } 111 }
112 return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index]; 112 return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
113} 113}
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 21a794152d15..0a903c129f0a 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -409,38 +409,45 @@ int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
409} 409}
410 410
411 411
412static int get_pkey_phys_indices(struct mlx4_ib_dev *ibdev, u8 port, u8 ph_pkey_ix, 412static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
413 u8 *full_pk_ix, u8 *partial_pk_ix, 413 u8 port, u16 pkey, u16 *ix)
414 int *is_full_member)
415{ 414{
416 u16 search_pkey; 415 int i, ret;
417 int fm; 416 u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
418 int err = 0; 417 u16 slot_pkey;
419 u16 pk;
420 418
421 err = ib_get_cached_pkey(&ibdev->ib_dev, port, ph_pkey_ix, &search_pkey); 419 if (slave == mlx4_master_func_num(dev->dev))
422 if (err) 420 return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);
423 return err;
424 421
425 fm = (search_pkey & 0x8000) ? 1 : 0; 422 unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1;
426 if (fm) {
427 *full_pk_ix = ph_pkey_ix;
428 search_pkey &= 0x7FFF;
429 } else {
430 *partial_pk_ix = ph_pkey_ix;
431 search_pkey |= 0x8000;
432 }
433 423
434 if (ib_find_exact_cached_pkey(&ibdev->ib_dev, port, search_pkey, &pk)) 424 for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
435 pk = 0xFFFF; 425 if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix)
426 continue;
436 427
437 if (fm) 428 pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i];
438 *partial_pk_ix = (pk & 0xFF);
439 else
440 *full_pk_ix = (pk & 0xFF);
441 429
442 *is_full_member = fm; 430 ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);
443 return err; 431 if (ret)
432 continue;
433 if ((slot_pkey & 0x7FFF) == (pkey & 0x7FFF)) {
434 if (slot_pkey & 0x8000) {
435 *ix = (u16) pkey_ix;
436 return 0;
437 } else {
438 /* take first partial pkey index found */
439 if (partial_ix == 0xFF)
440 partial_ix = pkey_ix;
441 }
442 }
443 }
444
445 if (partial_ix < 0xFF) {
446 *ix = (u16) partial_ix;
447 return 0;
448 }
449
450 return -EINVAL;
444} 451}
445 452
446int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, 453int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
@@ -458,10 +465,8 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
458 unsigned tun_tx_ix = 0; 465 unsigned tun_tx_ix = 0;
459 int dqpn; 466 int dqpn;
460 int ret = 0; 467 int ret = 0;
461 int i;
462 int is_full_member = 0;
463 u16 tun_pkey_ix; 468 u16 tun_pkey_ix;
464 u8 ph_pkey_ix, full_pk_ix = 0, partial_pk_ix = 0; 469 u16 cached_pkey;
465 470
466 if (dest_qpt > IB_QPT_GSI) 471 if (dest_qpt > IB_QPT_GSI)
467 return -EINVAL; 472 return -EINVAL;
@@ -481,27 +486,17 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
481 else 486 else
482 tun_qp = &tun_ctx->qp[1]; 487 tun_qp = &tun_ctx->qp[1];
483 488
484 /* compute pkey index for slave */ 489 /* compute P_Key index to put in tunnel header for slave */
485 /* get physical pkey -- virtualized Dom0 pkey to phys*/
486 if (dest_qpt) { 490 if (dest_qpt) {
487 ph_pkey_ix = 491 u16 pkey_ix;
488 dev->pkeys.virt2phys_pkey[mlx4_master_func_num(dev->dev)][port - 1][wc->pkey_index]; 492 ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
489
490 /* now, translate this to the slave pkey index */
491 ret = get_pkey_phys_indices(dev, port, ph_pkey_ix, &full_pk_ix,
492 &partial_pk_ix, &is_full_member);
493 if (ret) 493 if (ret)
494 return -EINVAL; 494 return -EINVAL;
495 495
496 for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) { 496 ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
497 if ((dev->pkeys.virt2phys_pkey[slave][port - 1][i] == full_pk_ix) || 497 if (ret)
498 (is_full_member &&
499 (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == partial_pk_ix)))
500 break;
501 }
502 if (i == dev->dev->caps.pkey_table_len[port])
503 return -EINVAL; 498 return -EINVAL;
504 tun_pkey_ix = i; 499 tun_pkey_ix = pkey_ix;
505 } else 500 } else
506 tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0]; 501 tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
507 502
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 3c3b54c3fdd9..25b2cdff00f8 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -233,7 +233,8 @@ static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
233 233
234 ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr); 234 ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
235 235
236 wc.pkey_index = 0; 236 if (ib_find_cached_pkey(&dev->ib_dev, ctx->port, IB_DEFAULT_PKEY_FULL, &wc.pkey_index))
237 return -EINVAL;
237 wc.sl = 0; 238 wc.sl = 0;
238 wc.dlid_path_bits = 0; 239 wc.dlid_path_bits = 0;
239 wc.port_num = ctx->port; 240 wc.port_num = ctx->port;
@@ -1074,10 +1075,6 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
1074 unsigned long end; 1075 unsigned long end;
1075 int count; 1076 int count;
1076 1077
1077 if (ctx->flushing)
1078 return;
1079
1080 ctx->flushing = 1;
1081 for (i = 0; i < MAX_VFS; ++i) 1078 for (i = 0; i < MAX_VFS; ++i)
1082 clean_vf_mcast(ctx, i); 1079 clean_vf_mcast(ctx, i);
1083 1080
@@ -1107,9 +1104,6 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
1107 force_clean_group(group); 1104 force_clean_group(group);
1108 } 1105 }
1109 mutex_unlock(&ctx->mcg_table_lock); 1106 mutex_unlock(&ctx->mcg_table_lock);
1110
1111 if (!destroy_wq)
1112 ctx->flushing = 0;
1113} 1107}
1114 1108
1115struct clean_work { 1109struct clean_work {
@@ -1123,6 +1117,7 @@ static void mcg_clean_task(struct work_struct *work)
1123 struct clean_work *cw = container_of(work, struct clean_work, work); 1117 struct clean_work *cw = container_of(work, struct clean_work, work);
1124 1118
1125 _mlx4_ib_mcg_port_cleanup(cw->ctx, cw->destroy_wq); 1119 _mlx4_ib_mcg_port_cleanup(cw->ctx, cw->destroy_wq);
1120 cw->ctx->flushing = 0;
1126 kfree(cw); 1121 kfree(cw);
1127} 1122}
1128 1123
@@ -1130,13 +1125,20 @@ void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
1130{ 1125{
1131 struct clean_work *work; 1126 struct clean_work *work;
1132 1127
1128 if (ctx->flushing)
1129 return;
1130
1131 ctx->flushing = 1;
1132
1133 if (destroy_wq) { 1133 if (destroy_wq) {
1134 _mlx4_ib_mcg_port_cleanup(ctx, destroy_wq); 1134 _mlx4_ib_mcg_port_cleanup(ctx, destroy_wq);
1135 ctx->flushing = 0;
1135 return; 1136 return;
1136 } 1137 }
1137 1138
1138 work = kmalloc(sizeof *work, GFP_KERNEL); 1139 work = kmalloc(sizeof *work, GFP_KERNEL);
1139 if (!work) { 1140 if (!work) {
1141 ctx->flushing = 0;
1140 mcg_warn("failed allocating work for cleanup\n"); 1142 mcg_warn("failed allocating work for cleanup\n");
1141 return; 1143 return;
1142 } 1144 }
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 9e1449f8c6a2..cf23c46185b2 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3564,16 +3564,6 @@ static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
3564 return srpt_get_cmd_state(ioctx); 3564 return srpt_get_cmd_state(ioctx);
3565} 3565}
3566 3566
3567static u16 srpt_set_fabric_sense_len(struct se_cmd *cmd, u32 sense_length)
3568{
3569 return 0;
3570}
3571
3572static u16 srpt_get_fabric_sense_len(void)
3573{
3574 return 0;
3575}
3576
3577/** 3567/**
3578 * srpt_parse_i_port_id() - Parse an initiator port ID. 3568 * srpt_parse_i_port_id() - Parse an initiator port ID.
3579 * @name: ASCII representation of a 128-bit initiator port ID. 3569 * @name: ASCII representation of a 128-bit initiator port ID.
@@ -3953,8 +3943,6 @@ static struct target_core_fabric_ops srpt_template = {
3953 .queue_data_in = srpt_queue_response, 3943 .queue_data_in = srpt_queue_response,
3954 .queue_status = srpt_queue_status, 3944 .queue_status = srpt_queue_status,
3955 .queue_tm_rsp = srpt_queue_response, 3945 .queue_tm_rsp = srpt_queue_response,
3956 .get_fabric_sense_len = srpt_get_fabric_sense_len,
3957 .set_fabric_sense_len = srpt_set_fabric_sense_len,
3958 /* 3946 /*
3959 * Setup function pointers for generic logic in 3947 * Setup function pointers for generic logic in
3960 * target_core_fabric_configfs.c 3948 * target_core_fabric_configfs.c
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 118d0300f1fb..f0f8928b3c8a 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -23,11 +23,11 @@
23#include <linux/input/mt.h> 23#include <linux/input/mt.h>
24#include <linux/major.h> 24#include <linux/major.h>
25#include <linux/device.h> 25#include <linux/device.h>
26#include <linux/cdev.h>
26#include "input-compat.h" 27#include "input-compat.h"
27 28
28struct evdev { 29struct evdev {
29 int open; 30 int open;
30 int minor;
31 struct input_handle handle; 31 struct input_handle handle;
32 wait_queue_head_t wait; 32 wait_queue_head_t wait;
33 struct evdev_client __rcu *grab; 33 struct evdev_client __rcu *grab;
@@ -35,6 +35,7 @@ struct evdev {
35 spinlock_t client_lock; /* protects client_list */ 35 spinlock_t client_lock; /* protects client_list */
36 struct mutex mutex; 36 struct mutex mutex;
37 struct device dev; 37 struct device dev;
38 struct cdev cdev;
38 bool exist; 39 bool exist;
39}; 40};
40 41
@@ -51,9 +52,6 @@ struct evdev_client {
51 struct input_event buffer[]; 52 struct input_event buffer[];
52}; 53};
53 54
54static struct evdev *evdev_table[EVDEV_MINORS];
55static DEFINE_MUTEX(evdev_table_mutex);
56
57static void __pass_event(struct evdev_client *client, 55static void __pass_event(struct evdev_client *client,
58 const struct input_event *event) 56 const struct input_event *event)
59{ 57{
@@ -294,7 +292,6 @@ static int evdev_release(struct inode *inode, struct file *file)
294 kfree(client); 292 kfree(client);
295 293
296 evdev_close_device(evdev); 294 evdev_close_device(evdev);
297 put_device(&evdev->dev);
298 295
299 return 0; 296 return 0;
300} 297}
@@ -310,35 +307,16 @@ static unsigned int evdev_compute_buffer_size(struct input_dev *dev)
310 307
311static int evdev_open(struct inode *inode, struct file *file) 308static int evdev_open(struct inode *inode, struct file *file)
312{ 309{
313 struct evdev *evdev; 310 struct evdev *evdev = container_of(inode->i_cdev, struct evdev, cdev);
311 unsigned int bufsize = evdev_compute_buffer_size(evdev->handle.dev);
314 struct evdev_client *client; 312 struct evdev_client *client;
315 int i = iminor(inode) - EVDEV_MINOR_BASE;
316 unsigned int bufsize;
317 int error; 313 int error;
318 314
319 if (i >= EVDEV_MINORS)
320 return -ENODEV;
321
322 error = mutex_lock_interruptible(&evdev_table_mutex);
323 if (error)
324 return error;
325 evdev = evdev_table[i];
326 if (evdev)
327 get_device(&evdev->dev);
328 mutex_unlock(&evdev_table_mutex);
329
330 if (!evdev)
331 return -ENODEV;
332
333 bufsize = evdev_compute_buffer_size(evdev->handle.dev);
334
335 client = kzalloc(sizeof(struct evdev_client) + 315 client = kzalloc(sizeof(struct evdev_client) +
336 bufsize * sizeof(struct input_event), 316 bufsize * sizeof(struct input_event),
337 GFP_KERNEL); 317 GFP_KERNEL);
338 if (!client) { 318 if (!client)
339 error = -ENOMEM; 319 return -ENOMEM;
340 goto err_put_evdev;
341 }
342 320
343 client->bufsize = bufsize; 321 client->bufsize = bufsize;
344 spin_lock_init(&client->buffer_lock); 322 spin_lock_init(&client->buffer_lock);
@@ -357,8 +335,6 @@ static int evdev_open(struct inode *inode, struct file *file)
357 err_free_client: 335 err_free_client:
358 evdev_detach_client(evdev, client); 336 evdev_detach_client(evdev, client);
359 kfree(client); 337 kfree(client);
360 err_put_evdev:
361 put_device(&evdev->dev);
362 return error; 338 return error;
363} 339}
364 340
@@ -942,26 +918,6 @@ static const struct file_operations evdev_fops = {
942 .llseek = no_llseek, 918 .llseek = no_llseek,
943}; 919};
944 920
945static int evdev_install_chrdev(struct evdev *evdev)
946{
947 /*
948 * No need to do any locking here as calls to connect and
949 * disconnect are serialized by the input core
950 */
951 evdev_table[evdev->minor] = evdev;
952 return 0;
953}
954
955static void evdev_remove_chrdev(struct evdev *evdev)
956{
957 /*
958 * Lock evdev table to prevent race with evdev_open()
959 */
960 mutex_lock(&evdev_table_mutex);
961 evdev_table[evdev->minor] = NULL;
962 mutex_unlock(&evdev_table_mutex);
963}
964
965/* 921/*
966 * Mark device non-existent. This disables writes, ioctls and 922 * Mark device non-existent. This disables writes, ioctls and
967 * prevents new users from opening the device. Already posted 923 * prevents new users from opening the device. Already posted
@@ -980,7 +936,8 @@ static void evdev_cleanup(struct evdev *evdev)
980 936
981 evdev_mark_dead(evdev); 937 evdev_mark_dead(evdev);
982 evdev_hangup(evdev); 938 evdev_hangup(evdev);
983 evdev_remove_chrdev(evdev); 939
940 cdev_del(&evdev->cdev);
984 941
985 /* evdev is marked dead so no one else accesses evdev->open */ 942 /* evdev is marked dead so no one else accesses evdev->open */
986 if (evdev->open) { 943 if (evdev->open) {
@@ -991,43 +948,47 @@ static void evdev_cleanup(struct evdev *evdev)
991 948
992/* 949/*
993 * Create new evdev device. Note that input core serializes calls 950 * Create new evdev device. Note that input core serializes calls
994 * to connect and disconnect so we don't need to lock evdev_table here. 951 * to connect and disconnect.
995 */ 952 */
996static int evdev_connect(struct input_handler *handler, struct input_dev *dev, 953static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
997 const struct input_device_id *id) 954 const struct input_device_id *id)
998{ 955{
999 struct evdev *evdev; 956 struct evdev *evdev;
1000 int minor; 957 int minor;
958 int dev_no;
1001 int error; 959 int error;
1002 960
1003 for (minor = 0; minor < EVDEV_MINORS; minor++) 961 minor = input_get_new_minor(EVDEV_MINOR_BASE, EVDEV_MINORS, true);
1004 if (!evdev_table[minor]) 962 if (minor < 0) {
1005 break; 963 error = minor;
1006 964 pr_err("failed to reserve new minor: %d\n", error);
1007 if (minor == EVDEV_MINORS) { 965 return error;
1008 pr_err("no more free evdev devices\n");
1009 return -ENFILE;
1010 } 966 }
1011 967
1012 evdev = kzalloc(sizeof(struct evdev), GFP_KERNEL); 968 evdev = kzalloc(sizeof(struct evdev), GFP_KERNEL);
1013 if (!evdev) 969 if (!evdev) {
1014 return -ENOMEM; 970 error = -ENOMEM;
971 goto err_free_minor;
972 }
1015 973
1016 INIT_LIST_HEAD(&evdev->client_list); 974 INIT_LIST_HEAD(&evdev->client_list);
1017 spin_lock_init(&evdev->client_lock); 975 spin_lock_init(&evdev->client_lock);
1018 mutex_init(&evdev->mutex); 976 mutex_init(&evdev->mutex);
1019 init_waitqueue_head(&evdev->wait); 977 init_waitqueue_head(&evdev->wait);
1020
1021 dev_set_name(&evdev->dev, "event%d", minor);
1022 evdev->exist = true; 978 evdev->exist = true;
1023 evdev->minor = minor; 979
980 dev_no = minor;
981 /* Normalize device number if it falls into legacy range */
982 if (dev_no < EVDEV_MINOR_BASE + EVDEV_MINORS)
983 dev_no -= EVDEV_MINOR_BASE;
984 dev_set_name(&evdev->dev, "event%d", dev_no);
1024 985
1025 evdev->handle.dev = input_get_device(dev); 986 evdev->handle.dev = input_get_device(dev);
1026 evdev->handle.name = dev_name(&evdev->dev); 987 evdev->handle.name = dev_name(&evdev->dev);
1027 evdev->handle.handler = handler; 988 evdev->handle.handler = handler;
1028 evdev->handle.private = evdev; 989 evdev->handle.private = evdev;
1029 990
1030 evdev->dev.devt = MKDEV(INPUT_MAJOR, EVDEV_MINOR_BASE + minor); 991 evdev->dev.devt = MKDEV(INPUT_MAJOR, minor);
1031 evdev->dev.class = &input_class; 992 evdev->dev.class = &input_class;
1032 evdev->dev.parent = &dev->dev; 993 evdev->dev.parent = &dev->dev;
1033 evdev->dev.release = evdev_free; 994 evdev->dev.release = evdev_free;
@@ -1037,7 +998,9 @@ static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
1037 if (error) 998 if (error)
1038 goto err_free_evdev; 999 goto err_free_evdev;
1039 1000
1040 error = evdev_install_chrdev(evdev); 1001 cdev_init(&evdev->cdev, &evdev_fops);
1002 evdev->cdev.kobj.parent = &evdev->dev.kobj;
1003 error = cdev_add(&evdev->cdev, evdev->dev.devt, 1);
1041 if (error) 1004 if (error)
1042 goto err_unregister_handle; 1005 goto err_unregister_handle;
1043 1006
@@ -1053,6 +1016,8 @@ static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
1053 input_unregister_handle(&evdev->handle); 1016 input_unregister_handle(&evdev->handle);
1054 err_free_evdev: 1017 err_free_evdev:
1055 put_device(&evdev->dev); 1018 put_device(&evdev->dev);
1019 err_free_minor:
1020 input_free_minor(minor);
1056 return error; 1021 return error;
1057} 1022}
1058 1023
@@ -1062,6 +1027,7 @@ static void evdev_disconnect(struct input_handle *handle)
1062 1027
1063 device_del(&evdev->dev); 1028 device_del(&evdev->dev);
1064 evdev_cleanup(evdev); 1029 evdev_cleanup(evdev);
1030 input_free_minor(MINOR(evdev->dev.devt));
1065 input_unregister_handle(handle); 1031 input_unregister_handle(handle);
1066 put_device(&evdev->dev); 1032 put_device(&evdev->dev);
1067} 1033}
@@ -1078,7 +1044,7 @@ static struct input_handler evdev_handler = {
1078 .events = evdev_events, 1044 .events = evdev_events,
1079 .connect = evdev_connect, 1045 .connect = evdev_connect,
1080 .disconnect = evdev_disconnect, 1046 .disconnect = evdev_disconnect,
1081 .fops = &evdev_fops, 1047 .legacy_minors = true,
1082 .minor = EVDEV_MINOR_BASE, 1048 .minor = EVDEV_MINOR_BASE,
1083 .name = "evdev", 1049 .name = "evdev",
1084 .id_table = evdev_ids, 1050 .id_table = evdev_ids,
diff --git a/drivers/input/input.c b/drivers/input/input.c
index ace3f7c4226d..53a0ddee7872 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -14,6 +14,7 @@
14 14
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/idr.h>
17#include <linux/input/mt.h> 18#include <linux/input/mt.h>
18#include <linux/module.h> 19#include <linux/module.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
@@ -32,7 +33,9 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
32MODULE_DESCRIPTION("Input core"); 33MODULE_DESCRIPTION("Input core");
33MODULE_LICENSE("GPL"); 34MODULE_LICENSE("GPL");
34 35
35#define INPUT_DEVICES 256 36#define INPUT_MAX_CHAR_DEVICES 1024
37#define INPUT_FIRST_DYNAMIC_DEV 256
38static DEFINE_IDA(input_ida);
36 39
37static LIST_HEAD(input_dev_list); 40static LIST_HEAD(input_dev_list);
38static LIST_HEAD(input_handler_list); 41static LIST_HEAD(input_handler_list);
@@ -45,8 +48,6 @@ static LIST_HEAD(input_handler_list);
45 */ 48 */
46static DEFINE_MUTEX(input_mutex); 49static DEFINE_MUTEX(input_mutex);
47 50
48static struct input_handler *input_table[8];
49
50static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 }; 51static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };
51 52
52static inline int is_event_supported(unsigned int code, 53static inline int is_event_supported(unsigned int code,
@@ -1218,7 +1219,7 @@ static int input_handlers_seq_show(struct seq_file *seq, void *v)
1218 seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name); 1219 seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name);
1219 if (handler->filter) 1220 if (handler->filter)
1220 seq_puts(seq, " (filter)"); 1221 seq_puts(seq, " (filter)");
1221 if (handler->fops) 1222 if (handler->legacy_minors)
1222 seq_printf(seq, " Minor=%d", handler->minor); 1223 seq_printf(seq, " Minor=%d", handler->minor);
1223 seq_putc(seq, '\n'); 1224 seq_putc(seq, '\n');
1224 1225
@@ -2016,22 +2017,14 @@ EXPORT_SYMBOL(input_unregister_device);
2016int input_register_handler(struct input_handler *handler) 2017int input_register_handler(struct input_handler *handler)
2017{ 2018{
2018 struct input_dev *dev; 2019 struct input_dev *dev;
2019 int retval; 2020 int error;
2020 2021
2021 retval = mutex_lock_interruptible(&input_mutex); 2022 error = mutex_lock_interruptible(&input_mutex);
2022 if (retval) 2023 if (error)
2023 return retval; 2024 return error;
2024 2025
2025 INIT_LIST_HEAD(&handler->h_list); 2026 INIT_LIST_HEAD(&handler->h_list);
2026 2027
2027 if (handler->fops != NULL) {
2028 if (input_table[handler->minor >> 5]) {
2029 retval = -EBUSY;
2030 goto out;
2031 }
2032 input_table[handler->minor >> 5] = handler;
2033 }
2034
2035 list_add_tail(&handler->node, &input_handler_list); 2028 list_add_tail(&handler->node, &input_handler_list);
2036 2029
2037 list_for_each_entry(dev, &input_dev_list, node) 2030 list_for_each_entry(dev, &input_dev_list, node)
@@ -2039,9 +2032,8 @@ int input_register_handler(struct input_handler *handler)
2039 2032
2040 input_wakeup_procfs_readers(); 2033 input_wakeup_procfs_readers();
2041 2034
2042 out:
2043 mutex_unlock(&input_mutex); 2035 mutex_unlock(&input_mutex);
2044 return retval; 2036 return 0;
2045} 2037}
2046EXPORT_SYMBOL(input_register_handler); 2038EXPORT_SYMBOL(input_register_handler);
2047 2039
@@ -2064,9 +2056,6 @@ void input_unregister_handler(struct input_handler *handler)
2064 2056
2065 list_del_init(&handler->node); 2057 list_del_init(&handler->node);
2066 2058
2067 if (handler->fops != NULL)
2068 input_table[handler->minor >> 5] = NULL;
2069
2070 input_wakeup_procfs_readers(); 2059 input_wakeup_procfs_readers();
2071 2060
2072 mutex_unlock(&input_mutex); 2061 mutex_unlock(&input_mutex);
@@ -2183,51 +2172,52 @@ void input_unregister_handle(struct input_handle *handle)
2183} 2172}
2184EXPORT_SYMBOL(input_unregister_handle); 2173EXPORT_SYMBOL(input_unregister_handle);
2185 2174
2186static int input_open_file(struct inode *inode, struct file *file) 2175/**
2176 * input_get_new_minor - allocates a new input minor number
2177 * @legacy_base: beginning or the legacy range to be searched
2178 * @legacy_num: size of legacy range
2179 * @allow_dynamic: whether we can also take ID from the dynamic range
2180 *
2181 * This function allocates a new device minor for from input major namespace.
2182 * Caller can request legacy minor by specifying @legacy_base and @legacy_num
2183 * parameters and whether ID can be allocated from dynamic range if there are
2184 * no free IDs in legacy range.
2185 */
2186int input_get_new_minor(int legacy_base, unsigned int legacy_num,
2187 bool allow_dynamic)
2187{ 2188{
2188 struct input_handler *handler;
2189 const struct file_operations *old_fops, *new_fops = NULL;
2190 int err;
2191
2192 err = mutex_lock_interruptible(&input_mutex);
2193 if (err)
2194 return err;
2195
2196 /* No load-on-demand here? */
2197 handler = input_table[iminor(inode) >> 5];
2198 if (handler)
2199 new_fops = fops_get(handler->fops);
2200
2201 mutex_unlock(&input_mutex);
2202
2203 /* 2189 /*
2204 * That's _really_ odd. Usually NULL ->open means "nothing special", 2190 * This function should be called from input handler's ->connect()
2205 * not "no device". Oh, well... 2191 * methods, which are serialized with input_mutex, so no additional
2192 * locking is needed here.
2206 */ 2193 */
2207 if (!new_fops || !new_fops->open) { 2194 if (legacy_base >= 0) {
2208 fops_put(new_fops); 2195 int minor = ida_simple_get(&input_ida,
2209 err = -ENODEV; 2196 legacy_base,
2210 goto out; 2197 legacy_base + legacy_num,
2198 GFP_KERNEL);
2199 if (minor >= 0 || !allow_dynamic)
2200 return minor;
2211 } 2201 }
2212 2202
2213 old_fops = file->f_op; 2203 return ida_simple_get(&input_ida,
2214 file->f_op = new_fops; 2204 INPUT_FIRST_DYNAMIC_DEV, INPUT_MAX_CHAR_DEVICES,
2215 2205 GFP_KERNEL);
2216 err = new_fops->open(inode, file);
2217 if (err) {
2218 fops_put(file->f_op);
2219 file->f_op = fops_get(old_fops);
2220 }
2221 fops_put(old_fops);
2222out:
2223 return err;
2224} 2206}
2207EXPORT_SYMBOL(input_get_new_minor);
2225 2208
2226static const struct file_operations input_fops = { 2209/**
2227 .owner = THIS_MODULE, 2210 * input_free_minor - release previously allocated minor
2228 .open = input_open_file, 2211 * @minor: minor to be released
2229 .llseek = noop_llseek, 2212 *
2230}; 2213 * This function releases previously allocated input minor so that it can be
2214 * reused later.
2215 */
2216void input_free_minor(unsigned int minor)
2217{
2218 ida_simple_remove(&input_ida, minor);
2219}
2220EXPORT_SYMBOL(input_free_minor);
2231 2221
2232static int __init input_init(void) 2222static int __init input_init(void)
2233{ 2223{
@@ -2243,7 +2233,8 @@ static int __init input_init(void)
2243 if (err) 2233 if (err)
2244 goto fail1; 2234 goto fail1;
2245 2235
2246 err = register_chrdev(INPUT_MAJOR, "input", &input_fops); 2236 err = register_chrdev_region(MKDEV(INPUT_MAJOR, 0),
2237 INPUT_MAX_CHAR_DEVICES, "input");
2247 if (err) { 2238 if (err) {
2248 pr_err("unable to register char major %d", INPUT_MAJOR); 2239 pr_err("unable to register char major %d", INPUT_MAJOR);
2249 goto fail2; 2240 goto fail2;
@@ -2259,7 +2250,8 @@ static int __init input_init(void)
2259static void __exit input_exit(void) 2250static void __exit input_exit(void)
2260{ 2251{
2261 input_proc_exit(); 2252 input_proc_exit();
2262 unregister_chrdev(INPUT_MAJOR, "input"); 2253 unregister_chrdev_region(MKDEV(INPUT_MAJOR, 0),
2254 INPUT_MAX_CHAR_DEVICES);
2263 class_unregister(&input_class); 2255 class_unregister(&input_class);
2264} 2256}
2265 2257
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 78f323ea1e4b..f362883c94e3 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -27,6 +27,7 @@
27#include <linux/poll.h> 27#include <linux/poll.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/device.h> 29#include <linux/device.h>
30#include <linux/cdev.h>
30 31
31MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); 32MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
32MODULE_DESCRIPTION("Joystick device interfaces"); 33MODULE_DESCRIPTION("Joystick device interfaces");
@@ -39,13 +40,13 @@ MODULE_LICENSE("GPL");
39 40
40struct joydev { 41struct joydev {
41 int open; 42 int open;
42 int minor;
43 struct input_handle handle; 43 struct input_handle handle;
44 wait_queue_head_t wait; 44 wait_queue_head_t wait;
45 struct list_head client_list; 45 struct list_head client_list;
46 spinlock_t client_lock; /* protects client_list */ 46 spinlock_t client_lock; /* protects client_list */
47 struct mutex mutex; 47 struct mutex mutex;
48 struct device dev; 48 struct device dev;
49 struct cdev cdev;
49 bool exist; 50 bool exist;
50 51
51 struct js_corr corr[ABS_CNT]; 52 struct js_corr corr[ABS_CNT];
@@ -70,9 +71,6 @@ struct joydev_client {
70 struct list_head node; 71 struct list_head node;
71}; 72};
72 73
73static struct joydev *joydev_table[JOYDEV_MINORS];
74static DEFINE_MUTEX(joydev_table_mutex);
75
76static int joydev_correct(int value, struct js_corr *corr) 74static int joydev_correct(int value, struct js_corr *corr)
77{ 75{
78 switch (corr->type) { 76 switch (corr->type) {
@@ -245,37 +243,20 @@ static int joydev_release(struct inode *inode, struct file *file)
245 kfree(client); 243 kfree(client);
246 244
247 joydev_close_device(joydev); 245 joydev_close_device(joydev);
248 put_device(&joydev->dev);
249 246
250 return 0; 247 return 0;
251} 248}
252 249
253static int joydev_open(struct inode *inode, struct file *file) 250static int joydev_open(struct inode *inode, struct file *file)
254{ 251{
252 struct joydev *joydev =
253 container_of(inode->i_cdev, struct joydev, cdev);
255 struct joydev_client *client; 254 struct joydev_client *client;
256 struct joydev *joydev;
257 int i = iminor(inode) - JOYDEV_MINOR_BASE;
258 int error; 255 int error;
259 256
260 if (i >= JOYDEV_MINORS)
261 return -ENODEV;
262
263 error = mutex_lock_interruptible(&joydev_table_mutex);
264 if (error)
265 return error;
266 joydev = joydev_table[i];
267 if (joydev)
268 get_device(&joydev->dev);
269 mutex_unlock(&joydev_table_mutex);
270
271 if (!joydev)
272 return -ENODEV;
273
274 client = kzalloc(sizeof(struct joydev_client), GFP_KERNEL); 257 client = kzalloc(sizeof(struct joydev_client), GFP_KERNEL);
275 if (!client) { 258 if (!client)
276 error = -ENOMEM; 259 return -ENOMEM;
277 goto err_put_joydev;
278 }
279 260
280 spin_lock_init(&client->buffer_lock); 261 spin_lock_init(&client->buffer_lock);
281 client->joydev = joydev; 262 client->joydev = joydev;
@@ -293,8 +274,6 @@ static int joydev_open(struct inode *inode, struct file *file)
293 err_free_client: 274 err_free_client:
294 joydev_detach_client(joydev, client); 275 joydev_detach_client(joydev, client);
295 kfree(client); 276 kfree(client);
296 err_put_joydev:
297 put_device(&joydev->dev);
298 return error; 277 return error;
299} 278}
300 279
@@ -742,19 +721,6 @@ static const struct file_operations joydev_fops = {
742 .llseek = no_llseek, 721 .llseek = no_llseek,
743}; 722};
744 723
745static int joydev_install_chrdev(struct joydev *joydev)
746{
747 joydev_table[joydev->minor] = joydev;
748 return 0;
749}
750
751static void joydev_remove_chrdev(struct joydev *joydev)
752{
753 mutex_lock(&joydev_table_mutex);
754 joydev_table[joydev->minor] = NULL;
755 mutex_unlock(&joydev_table_mutex);
756}
757
758/* 724/*
759 * Mark device non-existent. This disables writes, ioctls and 725 * Mark device non-existent. This disables writes, ioctls and
760 * prevents new users from opening the device. Already posted 726 * prevents new users from opening the device. Already posted
@@ -773,7 +739,8 @@ static void joydev_cleanup(struct joydev *joydev)
773 739
774 joydev_mark_dead(joydev); 740 joydev_mark_dead(joydev);
775 joydev_hangup(joydev); 741 joydev_hangup(joydev);
776 joydev_remove_chrdev(joydev); 742
743 cdev_del(&joydev->cdev);
777 744
778 /* joydev is marked dead so no one else accesses joydev->open */ 745 /* joydev is marked dead so no one else accesses joydev->open */
779 if (joydev->open) 746 if (joydev->open)
@@ -798,30 +765,33 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
798 const struct input_device_id *id) 765 const struct input_device_id *id)
799{ 766{
800 struct joydev *joydev; 767 struct joydev *joydev;
801 int i, j, t, minor; 768 int i, j, t, minor, dev_no;
802 int error; 769 int error;
803 770
804 for (minor = 0; minor < JOYDEV_MINORS; minor++) 771 minor = input_get_new_minor(JOYDEV_MINOR_BASE, JOYDEV_MINORS, true);
805 if (!joydev_table[minor]) 772 if (minor < 0) {
806 break; 773 error = minor;
807 774 pr_err("failed to reserve new minor: %d\n", error);
808 if (minor == JOYDEV_MINORS) { 775 return error;
809 pr_err("no more free joydev devices\n");
810 return -ENFILE;
811 } 776 }
812 777
813 joydev = kzalloc(sizeof(struct joydev), GFP_KERNEL); 778 joydev = kzalloc(sizeof(struct joydev), GFP_KERNEL);
814 if (!joydev) 779 if (!joydev) {
815 return -ENOMEM; 780 error = -ENOMEM;
781 goto err_free_minor;
782 }
816 783
817 INIT_LIST_HEAD(&joydev->client_list); 784 INIT_LIST_HEAD(&joydev->client_list);
818 spin_lock_init(&joydev->client_lock); 785 spin_lock_init(&joydev->client_lock);
819 mutex_init(&joydev->mutex); 786 mutex_init(&joydev->mutex);
820 init_waitqueue_head(&joydev->wait); 787 init_waitqueue_head(&joydev->wait);
821
822 dev_set_name(&joydev->dev, "js%d", minor);
823 joydev->exist = true; 788 joydev->exist = true;
824 joydev->minor = minor; 789
790 dev_no = minor;
791 /* Normalize device number if it falls into legacy range */
792 if (dev_no < JOYDEV_MINOR_BASE + JOYDEV_MINORS)
793 dev_no -= JOYDEV_MINOR_BASE;
794 dev_set_name(&joydev->dev, "js%d", dev_no);
825 795
826 joydev->handle.dev = input_get_device(dev); 796 joydev->handle.dev = input_get_device(dev);
827 joydev->handle.name = dev_name(&joydev->dev); 797 joydev->handle.name = dev_name(&joydev->dev);
@@ -875,7 +845,7 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
875 } 845 }
876 } 846 }
877 847
878 joydev->dev.devt = MKDEV(INPUT_MAJOR, JOYDEV_MINOR_BASE + minor); 848 joydev->dev.devt = MKDEV(INPUT_MAJOR, minor);
879 joydev->dev.class = &input_class; 849 joydev->dev.class = &input_class;
880 joydev->dev.parent = &dev->dev; 850 joydev->dev.parent = &dev->dev;
881 joydev->dev.release = joydev_free; 851 joydev->dev.release = joydev_free;
@@ -885,7 +855,9 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
885 if (error) 855 if (error)
886 goto err_free_joydev; 856 goto err_free_joydev;
887 857
888 error = joydev_install_chrdev(joydev); 858 cdev_init(&joydev->cdev, &joydev_fops);
859 joydev->cdev.kobj.parent = &joydev->dev.kobj;
860 error = cdev_add(&joydev->cdev, joydev->dev.devt, 1);
889 if (error) 861 if (error)
890 goto err_unregister_handle; 862 goto err_unregister_handle;
891 863
@@ -901,6 +873,8 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
901 input_unregister_handle(&joydev->handle); 873 input_unregister_handle(&joydev->handle);
902 err_free_joydev: 874 err_free_joydev:
903 put_device(&joydev->dev); 875 put_device(&joydev->dev);
876 err_free_minor:
877 input_free_minor(minor);
904 return error; 878 return error;
905} 879}
906 880
@@ -910,6 +884,7 @@ static void joydev_disconnect(struct input_handle *handle)
910 884
911 device_del(&joydev->dev); 885 device_del(&joydev->dev);
912 joydev_cleanup(joydev); 886 joydev_cleanup(joydev);
887 input_free_minor(MINOR(joydev->dev.devt));
913 input_unregister_handle(handle); 888 input_unregister_handle(handle);
914 put_device(&joydev->dev); 889 put_device(&joydev->dev);
915} 890}
@@ -961,7 +936,7 @@ static struct input_handler joydev_handler = {
961 .match = joydev_match, 936 .match = joydev_match,
962 .connect = joydev_connect, 937 .connect = joydev_connect,
963 .disconnect = joydev_disconnect, 938 .disconnect = joydev_disconnect,
964 .fops = &joydev_fops, 939 .legacy_minors = true,
965 .minor = JOYDEV_MINOR_BASE, 940 .minor = JOYDEV_MINOR_BASE,
966 .name = "joydev", 941 .name = "joydev",
967 .id_table = joydev_ids, 942 .id_table = joydev_ids,
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index b4b65af8612a..de0874054e9f 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -335,6 +335,7 @@ config KEYBOARD_LOCOMO
335config KEYBOARD_LPC32XX 335config KEYBOARD_LPC32XX
336 tristate "LPC32XX matrix key scanner support" 336 tristate "LPC32XX matrix key scanner support"
337 depends on ARCH_LPC32XX && OF 337 depends on ARCH_LPC32XX && OF
338 select INPUT_MATRIXKMAP
338 help 339 help
339 Say Y here if you want to use NXP LPC32XX SoC key scanner interface, 340 Say Y here if you want to use NXP LPC32XX SoC key scanner interface,
340 connected to a key matrix. 341 connected to a key matrix.
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 803ff6fe021e..cad9d5dd5973 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -368,6 +368,9 @@ static void pxa27x_keypad_config(struct pxa27x_keypad *keypad)
368 unsigned int mask = 0, direct_key_num = 0; 368 unsigned int mask = 0, direct_key_num = 0;
369 unsigned long kpc = 0; 369 unsigned long kpc = 0;
370 370
371 /* clear pending interrupt bit */
372 keypad_readl(KPC);
373
371 /* enable matrix keys with automatic scan */ 374 /* enable matrix keys with automatic scan */
372 if (pdata->matrix_key_rows && pdata->matrix_key_cols) { 375 if (pdata->matrix_key_rows && pdata->matrix_key_cols) {
373 kpc |= KPC_ASACT | KPC_MIE | KPC_ME | KPC_MS_ALL; 376 kpc |= KPC_ASACT | KPC_MIE | KPC_ME | KPC_MS_ALL;
diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c
index 277e26dc910e..9d7a111486f7 100644
--- a/drivers/input/keyboard/samsung-keypad.c
+++ b/drivers/input/keyboard/samsung-keypad.c
@@ -431,6 +431,12 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
431 goto err_unmap_base; 431 goto err_unmap_base;
432 } 432 }
433 433
434 error = clk_prepare(keypad->clk);
435 if (error) {
436 dev_err(&pdev->dev, "keypad clock prepare failed\n");
437 goto err_put_clk;
438 }
439
434 keypad->input_dev = input_dev; 440 keypad->input_dev = input_dev;
435 keypad->pdev = pdev; 441 keypad->pdev = pdev;
436 keypad->row_shift = row_shift; 442 keypad->row_shift = row_shift;
@@ -461,7 +467,7 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
461 keypad->keycodes, input_dev); 467 keypad->keycodes, input_dev);
462 if (error) { 468 if (error) {
463 dev_err(&pdev->dev, "failed to build keymap\n"); 469 dev_err(&pdev->dev, "failed to build keymap\n");
464 goto err_put_clk; 470 goto err_unprepare_clk;
465 } 471 }
466 472
467 input_set_capability(input_dev, EV_MSC, MSC_SCAN); 473 input_set_capability(input_dev, EV_MSC, MSC_SCAN);
@@ -503,6 +509,8 @@ err_free_irq:
503 pm_runtime_disable(&pdev->dev); 509 pm_runtime_disable(&pdev->dev);
504 device_init_wakeup(&pdev->dev, 0); 510 device_init_wakeup(&pdev->dev, 0);
505 platform_set_drvdata(pdev, NULL); 511 platform_set_drvdata(pdev, NULL);
512err_unprepare_clk:
513 clk_unprepare(keypad->clk);
506err_put_clk: 514err_put_clk:
507 clk_put(keypad->clk); 515 clk_put(keypad->clk);
508 samsung_keypad_dt_gpio_free(keypad); 516 samsung_keypad_dt_gpio_free(keypad);
@@ -531,6 +539,7 @@ static int __devexit samsung_keypad_remove(struct platform_device *pdev)
531 */ 539 */
532 free_irq(keypad->irq, keypad); 540 free_irq(keypad->irq, keypad);
533 541
542 clk_unprepare(keypad->clk);
534 clk_put(keypad->clk); 543 clk_put(keypad->clk);
535 samsung_keypad_dt_gpio_free(keypad); 544 samsung_keypad_dt_gpio_free(keypad);
536 545
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 02ca8680ea5b..6f7d99013031 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -311,7 +311,6 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
311 case XenbusStateReconfiguring: 311 case XenbusStateReconfiguring:
312 case XenbusStateReconfigured: 312 case XenbusStateReconfigured:
313 case XenbusStateUnknown: 313 case XenbusStateUnknown:
314 case XenbusStateClosed:
315 break; 314 break;
316 315
317 case XenbusStateInitWait: 316 case XenbusStateInitWait:
@@ -350,6 +349,10 @@ InitWait:
350 349
351 break; 350 break;
352 351
352 case XenbusStateClosed:
353 if (dev->state == XenbusStateClosed)
354 break;
355 /* Missed the backend's CLOSING state -- fallthrough */
353 case XenbusStateClosing: 356 case XenbusStateClosing:
354 xenbus_frontend_closed(dev); 357 xenbus_frontend_closed(dev);
355 break; 358 break;
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 3a78f235fa3e..2baff1b79a55 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -84,6 +84,10 @@
84#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262 84#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262
85#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263 85#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263
86#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264 86#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264
87/* MacbookPro10,2 (unibody, October 2012) */
88#define USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI 0x0259
89#define USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO 0x025a
90#define USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS 0x025b
87 91
88#define BCM5974_DEVICE(prod) { \ 92#define BCM5974_DEVICE(prod) { \
89 .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ 93 .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
@@ -137,6 +141,10 @@ static const struct usb_device_id bcm5974_table[] = {
137 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI), 141 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
138 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ISO), 142 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
139 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_JIS), 143 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
144 /* MacbookPro10,2 */
145 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI),
146 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO),
147 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS),
140 /* Terminating entry */ 148 /* Terminating entry */
141 {} 149 {}
142}; 150};
@@ -379,6 +387,19 @@ static const struct bcm5974_config bcm5974_config_table[] = {
379 { SN_COORD, -150, 6730 }, 387 { SN_COORD, -150, 6730 },
380 { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } 388 { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
381 }, 389 },
390 {
391 USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI,
392 USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO,
393 USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS,
394 HAS_INTEGRATED_BUTTON,
395 0x84, sizeof(struct bt_data),
396 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
397 { SN_PRESSURE, 0, 300 },
398 { SN_WIDTH, 0, 2048 },
399 { SN_COORD, -4750, 5280 },
400 { SN_COORD, -150, 6730 },
401 { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
402 },
382 {} 403 {}
383}; 404};
384 405
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 964e43d81e29..8f02e3d0e712 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -24,10 +24,8 @@
24#include <linux/random.h> 24#include <linux/random.h>
25#include <linux/major.h> 25#include <linux/major.h>
26#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/cdev.h>
27#include <linux/kernel.h> 28#include <linux/kernel.h>
28#ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
29#include <linux/miscdevice.h>
30#endif
31 29
32MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); 30MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
33MODULE_DESCRIPTION("Mouse (ExplorerPS/2) device interfaces"); 31MODULE_DESCRIPTION("Mouse (ExplorerPS/2) device interfaces");
@@ -61,17 +59,18 @@ struct mousedev_hw_data {
61 59
62struct mousedev { 60struct mousedev {
63 int open; 61 int open;
64 int minor;
65 struct input_handle handle; 62 struct input_handle handle;
66 wait_queue_head_t wait; 63 wait_queue_head_t wait;
67 struct list_head client_list; 64 struct list_head client_list;
68 spinlock_t client_lock; /* protects client_list */ 65 spinlock_t client_lock; /* protects client_list */
69 struct mutex mutex; 66 struct mutex mutex;
70 struct device dev; 67 struct device dev;
68 struct cdev cdev;
71 bool exist; 69 bool exist;
70 bool is_mixdev;
72 71
73 struct list_head mixdev_node; 72 struct list_head mixdev_node;
74 int mixdev_open; 73 bool opened_by_mixdev;
75 74
76 struct mousedev_hw_data packet; 75 struct mousedev_hw_data packet;
77 unsigned int pkt_count; 76 unsigned int pkt_count;
@@ -114,10 +113,6 @@ struct mousedev_client {
114static unsigned char mousedev_imps_seq[] = { 0xf3, 200, 0xf3, 100, 0xf3, 80 }; 113static unsigned char mousedev_imps_seq[] = { 0xf3, 200, 0xf3, 100, 0xf3, 80 };
115static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 }; 114static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 };
116 115
117static struct input_handler mousedev_handler;
118
119static struct mousedev *mousedev_table[MOUSEDEV_MINORS];
120static DEFINE_MUTEX(mousedev_table_mutex);
121static struct mousedev *mousedev_mix; 116static struct mousedev *mousedev_mix;
122static LIST_HEAD(mousedev_mix_list); 117static LIST_HEAD(mousedev_mix_list);
123 118
@@ -433,7 +428,7 @@ static int mousedev_open_device(struct mousedev *mousedev)
433 if (retval) 428 if (retval)
434 return retval; 429 return retval;
435 430
436 if (mousedev->minor == MOUSEDEV_MIX) 431 if (mousedev->is_mixdev)
437 mixdev_open_devices(); 432 mixdev_open_devices();
438 else if (!mousedev->exist) 433 else if (!mousedev->exist)
439 retval = -ENODEV; 434 retval = -ENODEV;
@@ -451,7 +446,7 @@ static void mousedev_close_device(struct mousedev *mousedev)
451{ 446{
452 mutex_lock(&mousedev->mutex); 447 mutex_lock(&mousedev->mutex);
453 448
454 if (mousedev->minor == MOUSEDEV_MIX) 449 if (mousedev->is_mixdev)
455 mixdev_close_devices(); 450 mixdev_close_devices();
456 else if (mousedev->exist && !--mousedev->open) 451 else if (mousedev->exist && !--mousedev->open)
457 input_close_device(&mousedev->handle); 452 input_close_device(&mousedev->handle);
@@ -472,11 +467,11 @@ static void mixdev_open_devices(void)
472 return; 467 return;
473 468
474 list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { 469 list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
475 if (!mousedev->mixdev_open) { 470 if (!mousedev->opened_by_mixdev) {
476 if (mousedev_open_device(mousedev)) 471 if (mousedev_open_device(mousedev))
477 continue; 472 continue;
478 473
479 mousedev->mixdev_open = 1; 474 mousedev->opened_by_mixdev = true;
480 } 475 }
481 } 476 }
482} 477}
@@ -494,8 +489,8 @@ static void mixdev_close_devices(void)
494 return; 489 return;
495 490
496 list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { 491 list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
497 if (mousedev->mixdev_open) { 492 if (mousedev->opened_by_mixdev) {
498 mousedev->mixdev_open = 0; 493 mousedev->opened_by_mixdev = false;
499 mousedev_close_device(mousedev); 494 mousedev_close_device(mousedev);
500 } 495 }
501 } 496 }
@@ -528,7 +523,6 @@ static int mousedev_release(struct inode *inode, struct file *file)
528 kfree(client); 523 kfree(client);
529 524
530 mousedev_close_device(mousedev); 525 mousedev_close_device(mousedev);
531 put_device(&mousedev->dev);
532 526
533 return 0; 527 return 0;
534} 528}
@@ -538,35 +532,17 @@ static int mousedev_open(struct inode *inode, struct file *file)
538 struct mousedev_client *client; 532 struct mousedev_client *client;
539 struct mousedev *mousedev; 533 struct mousedev *mousedev;
540 int error; 534 int error;
541 int i;
542 535
543#ifdef CONFIG_INPUT_MOUSEDEV_PSAUX 536#ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
544 if (imajor(inode) == MISC_MAJOR) 537 if (imajor(inode) == MISC_MAJOR)
545 i = MOUSEDEV_MIX; 538 mousedev = mousedev_mix;
546 else 539 else
547#endif 540#endif
548 i = iminor(inode) - MOUSEDEV_MINOR_BASE; 541 mousedev = container_of(inode->i_cdev, struct mousedev, cdev);
549
550 if (i >= MOUSEDEV_MINORS)
551 return -ENODEV;
552
553 error = mutex_lock_interruptible(&mousedev_table_mutex);
554 if (error)
555 return error;
556
557 mousedev = mousedev_table[i];
558 if (mousedev)
559 get_device(&mousedev->dev);
560 mutex_unlock(&mousedev_table_mutex);
561
562 if (!mousedev)
563 return -ENODEV;
564 542
565 client = kzalloc(sizeof(struct mousedev_client), GFP_KERNEL); 543 client = kzalloc(sizeof(struct mousedev_client), GFP_KERNEL);
566 if (!client) { 544 if (!client)
567 error = -ENOMEM; 545 return -ENOMEM;
568 goto err_put_mousedev;
569 }
570 546
571 spin_lock_init(&client->packet_lock); 547 spin_lock_init(&client->packet_lock);
572 client->pos_x = xres / 2; 548 client->pos_x = xres / 2;
@@ -579,13 +555,13 @@ static int mousedev_open(struct inode *inode, struct file *file)
579 goto err_free_client; 555 goto err_free_client;
580 556
581 file->private_data = client; 557 file->private_data = client;
558 nonseekable_open(inode, file);
559
582 return 0; 560 return 0;
583 561
584 err_free_client: 562 err_free_client:
585 mousedev_detach_client(mousedev, client); 563 mousedev_detach_client(mousedev, client);
586 kfree(client); 564 kfree(client);
587 err_put_mousedev:
588 put_device(&mousedev->dev);
589 return error; 565 return error;
590} 566}
591 567
@@ -785,29 +761,16 @@ static unsigned int mousedev_poll(struct file *file, poll_table *wait)
785} 761}
786 762
787static const struct file_operations mousedev_fops = { 763static const struct file_operations mousedev_fops = {
788 .owner = THIS_MODULE, 764 .owner = THIS_MODULE,
789 .read = mousedev_read, 765 .read = mousedev_read,
790 .write = mousedev_write, 766 .write = mousedev_write,
791 .poll = mousedev_poll, 767 .poll = mousedev_poll,
792 .open = mousedev_open, 768 .open = mousedev_open,
793 .release = mousedev_release, 769 .release = mousedev_release,
794 .fasync = mousedev_fasync, 770 .fasync = mousedev_fasync,
795 .llseek = noop_llseek, 771 .llseek = noop_llseek,
796}; 772};
797 773
798static int mousedev_install_chrdev(struct mousedev *mousedev)
799{
800 mousedev_table[mousedev->minor] = mousedev;
801 return 0;
802}
803
804static void mousedev_remove_chrdev(struct mousedev *mousedev)
805{
806 mutex_lock(&mousedev_table_mutex);
807 mousedev_table[mousedev->minor] = NULL;
808 mutex_unlock(&mousedev_table_mutex);
809}
810
811/* 774/*
812 * Mark device non-existent. This disables writes, ioctls and 775 * Mark device non-existent. This disables writes, ioctls and
813 * prevents new users from opening the device. Already posted 776 * prevents new users from opening the device. Already posted
@@ -842,24 +805,50 @@ static void mousedev_cleanup(struct mousedev *mousedev)
842 805
843 mousedev_mark_dead(mousedev); 806 mousedev_mark_dead(mousedev);
844 mousedev_hangup(mousedev); 807 mousedev_hangup(mousedev);
845 mousedev_remove_chrdev(mousedev); 808
809 cdev_del(&mousedev->cdev);
846 810
847 /* mousedev is marked dead so no one else accesses mousedev->open */ 811 /* mousedev is marked dead so no one else accesses mousedev->open */
848 if (mousedev->open) 812 if (mousedev->open)
849 input_close_device(handle); 813 input_close_device(handle);
850} 814}
851 815
816static int mousedev_reserve_minor(bool mixdev)
817{
818 int minor;
819
820 if (mixdev) {
821 minor = input_get_new_minor(MOUSEDEV_MIX, 1, false);
822 if (minor < 0)
823 pr_err("failed to reserve mixdev minor: %d\n", minor);
824 } else {
825 minor = input_get_new_minor(MOUSEDEV_MINOR_BASE,
826 MOUSEDEV_MINORS, true);
827 if (minor < 0)
828 pr_err("failed to reserve new minor: %d\n", minor);
829 }
830
831 return minor;
832}
833
852static struct mousedev *mousedev_create(struct input_dev *dev, 834static struct mousedev *mousedev_create(struct input_dev *dev,
853 struct input_handler *handler, 835 struct input_handler *handler,
854 int minor) 836 bool mixdev)
855{ 837{
856 struct mousedev *mousedev; 838 struct mousedev *mousedev;
839 int minor;
857 int error; 840 int error;
858 841
842 minor = mousedev_reserve_minor(mixdev);
843 if (minor < 0) {
844 error = minor;
845 goto err_out;
846 }
847
859 mousedev = kzalloc(sizeof(struct mousedev), GFP_KERNEL); 848 mousedev = kzalloc(sizeof(struct mousedev), GFP_KERNEL);
860 if (!mousedev) { 849 if (!mousedev) {
861 error = -ENOMEM; 850 error = -ENOMEM;
862 goto err_out; 851 goto err_free_minor;
863 } 852 }
864 853
865 INIT_LIST_HEAD(&mousedev->client_list); 854 INIT_LIST_HEAD(&mousedev->client_list);
@@ -867,16 +856,21 @@ static struct mousedev *mousedev_create(struct input_dev *dev,
867 spin_lock_init(&mousedev->client_lock); 856 spin_lock_init(&mousedev->client_lock);
868 mutex_init(&mousedev->mutex); 857 mutex_init(&mousedev->mutex);
869 lockdep_set_subclass(&mousedev->mutex, 858 lockdep_set_subclass(&mousedev->mutex,
870 minor == MOUSEDEV_MIX ? SINGLE_DEPTH_NESTING : 0); 859 mixdev ? SINGLE_DEPTH_NESTING : 0);
871 init_waitqueue_head(&mousedev->wait); 860 init_waitqueue_head(&mousedev->wait);
872 861
873 if (minor == MOUSEDEV_MIX) 862 if (mixdev) {
874 dev_set_name(&mousedev->dev, "mice"); 863 dev_set_name(&mousedev->dev, "mice");
875 else 864 } else {
876 dev_set_name(&mousedev->dev, "mouse%d", minor); 865 int dev_no = minor;
866 /* Normalize device number if it falls into legacy range */
867 if (dev_no < MOUSEDEV_MINOR_BASE + MOUSEDEV_MINORS)
868 dev_no -= MOUSEDEV_MINOR_BASE;
869 dev_set_name(&mousedev->dev, "mouse%d", dev_no);
870 }
877 871
878 mousedev->minor = minor;
879 mousedev->exist = true; 872 mousedev->exist = true;
873 mousedev->is_mixdev = mixdev;
880 mousedev->handle.dev = input_get_device(dev); 874 mousedev->handle.dev = input_get_device(dev);
881 mousedev->handle.name = dev_name(&mousedev->dev); 875 mousedev->handle.name = dev_name(&mousedev->dev);
882 mousedev->handle.handler = handler; 876 mousedev->handle.handler = handler;
@@ -885,17 +879,19 @@ static struct mousedev *mousedev_create(struct input_dev *dev,
885 mousedev->dev.class = &input_class; 879 mousedev->dev.class = &input_class;
886 if (dev) 880 if (dev)
887 mousedev->dev.parent = &dev->dev; 881 mousedev->dev.parent = &dev->dev;
888 mousedev->dev.devt = MKDEV(INPUT_MAJOR, MOUSEDEV_MINOR_BASE + minor); 882 mousedev->dev.devt = MKDEV(INPUT_MAJOR, minor);
889 mousedev->dev.release = mousedev_free; 883 mousedev->dev.release = mousedev_free;
890 device_initialize(&mousedev->dev); 884 device_initialize(&mousedev->dev);
891 885
892 if (minor != MOUSEDEV_MIX) { 886 if (!mixdev) {
893 error = input_register_handle(&mousedev->handle); 887 error = input_register_handle(&mousedev->handle);
894 if (error) 888 if (error)
895 goto err_free_mousedev; 889 goto err_free_mousedev;
896 } 890 }
897 891
898 error = mousedev_install_chrdev(mousedev); 892 cdev_init(&mousedev->cdev, &mousedev_fops);
893 mousedev->cdev.kobj.parent = &mousedev->dev.kobj;
894 error = cdev_add(&mousedev->cdev, mousedev->dev.devt, 1);
899 if (error) 895 if (error)
900 goto err_unregister_handle; 896 goto err_unregister_handle;
901 897
@@ -908,10 +904,12 @@ static struct mousedev *mousedev_create(struct input_dev *dev,
908 err_cleanup_mousedev: 904 err_cleanup_mousedev:
909 mousedev_cleanup(mousedev); 905 mousedev_cleanup(mousedev);
910 err_unregister_handle: 906 err_unregister_handle:
911 if (minor != MOUSEDEV_MIX) 907 if (!mixdev)
912 input_unregister_handle(&mousedev->handle); 908 input_unregister_handle(&mousedev->handle);
913 err_free_mousedev: 909 err_free_mousedev:
914 put_device(&mousedev->dev); 910 put_device(&mousedev->dev);
911 err_free_minor:
912 input_free_minor(minor);
915 err_out: 913 err_out:
916 return ERR_PTR(error); 914 return ERR_PTR(error);
917} 915}
@@ -920,7 +918,8 @@ static void mousedev_destroy(struct mousedev *mousedev)
920{ 918{
921 device_del(&mousedev->dev); 919 device_del(&mousedev->dev);
922 mousedev_cleanup(mousedev); 920 mousedev_cleanup(mousedev);
923 if (mousedev->minor != MOUSEDEV_MIX) 921 input_free_minor(MINOR(mousedev->dev.devt));
922 if (!mousedev->is_mixdev)
924 input_unregister_handle(&mousedev->handle); 923 input_unregister_handle(&mousedev->handle);
925 put_device(&mousedev->dev); 924 put_device(&mousedev->dev);
926} 925}
@@ -938,7 +937,7 @@ static int mixdev_add_device(struct mousedev *mousedev)
938 if (retval) 937 if (retval)
939 goto out; 938 goto out;
940 939
941 mousedev->mixdev_open = 1; 940 mousedev->opened_by_mixdev = true;
942 } 941 }
943 942
944 get_device(&mousedev->dev); 943 get_device(&mousedev->dev);
@@ -953,8 +952,8 @@ static void mixdev_remove_device(struct mousedev *mousedev)
953{ 952{
954 mutex_lock(&mousedev_mix->mutex); 953 mutex_lock(&mousedev_mix->mutex);
955 954
956 if (mousedev->mixdev_open) { 955 if (mousedev->opened_by_mixdev) {
957 mousedev->mixdev_open = 0; 956 mousedev->opened_by_mixdev = false;
958 mousedev_close_device(mousedev); 957 mousedev_close_device(mousedev);
959 } 958 }
960 959
@@ -969,19 +968,9 @@ static int mousedev_connect(struct input_handler *handler,
969 const struct input_device_id *id) 968 const struct input_device_id *id)
970{ 969{
971 struct mousedev *mousedev; 970 struct mousedev *mousedev;
972 int minor;
973 int error; 971 int error;
974 972
975 for (minor = 0; minor < MOUSEDEV_MINORS; minor++) 973 mousedev = mousedev_create(dev, handler, false);
976 if (!mousedev_table[minor])
977 break;
978
979 if (minor == MOUSEDEV_MINORS) {
980 pr_err("no more free mousedev devices\n");
981 return -ENFILE;
982 }
983
984 mousedev = mousedev_create(dev, handler, minor);
985 if (IS_ERR(mousedev)) 974 if (IS_ERR(mousedev))
986 return PTR_ERR(mousedev); 975 return PTR_ERR(mousedev);
987 976
@@ -1054,27 +1043,53 @@ static const struct input_device_id mousedev_ids[] = {
1054MODULE_DEVICE_TABLE(input, mousedev_ids); 1043MODULE_DEVICE_TABLE(input, mousedev_ids);
1055 1044
1056static struct input_handler mousedev_handler = { 1045static struct input_handler mousedev_handler = {
1057 .event = mousedev_event, 1046 .event = mousedev_event,
1058 .connect = mousedev_connect, 1047 .connect = mousedev_connect,
1059 .disconnect = mousedev_disconnect, 1048 .disconnect = mousedev_disconnect,
1060 .fops = &mousedev_fops, 1049 .legacy_minors = true,
1061 .minor = MOUSEDEV_MINOR_BASE, 1050 .minor = MOUSEDEV_MINOR_BASE,
1062 .name = "mousedev", 1051 .name = "mousedev",
1063 .id_table = mousedev_ids, 1052 .id_table = mousedev_ids,
1064}; 1053};
1065 1054
1066#ifdef CONFIG_INPUT_MOUSEDEV_PSAUX 1055#ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
1056#include <linux/miscdevice.h>
1057
1067static struct miscdevice psaux_mouse = { 1058static struct miscdevice psaux_mouse = {
1068 PSMOUSE_MINOR, "psaux", &mousedev_fops 1059 .minor = PSMOUSE_MINOR,
1060 .name = "psaux",
1061 .fops = &mousedev_fops,
1069}; 1062};
1070static int psaux_registered; 1063
1064static bool psaux_registered;
1065
1066static void __init mousedev_psaux_register(void)
1067{
1068 int error;
1069
1070 error = misc_register(&psaux_mouse);
1071 if (error)
1072 pr_warn("could not register psaux device, error: %d\n",
1073 error);
1074 else
1075 psaux_registered = true;
1076}
1077
1078static void __exit mousedev_psaux_unregister(void)
1079{
1080 if (psaux_registered)
1081 misc_deregister(&psaux_mouse);
1082}
1083#else
1084static inline void mousedev_psaux_register(void) { }
1085static inline void mousedev_psaux_unregister(void) { }
1071#endif 1086#endif
1072 1087
1073static int __init mousedev_init(void) 1088static int __init mousedev_init(void)
1074{ 1089{
1075 int error; 1090 int error;
1076 1091
1077 mousedev_mix = mousedev_create(NULL, &mousedev_handler, MOUSEDEV_MIX); 1092 mousedev_mix = mousedev_create(NULL, &mousedev_handler, true);
1078 if (IS_ERR(mousedev_mix)) 1093 if (IS_ERR(mousedev_mix))
1079 return PTR_ERR(mousedev_mix); 1094 return PTR_ERR(mousedev_mix);
1080 1095
@@ -1084,14 +1099,7 @@ static int __init mousedev_init(void)
1084 return error; 1099 return error;
1085 } 1100 }
1086 1101
1087#ifdef CONFIG_INPUT_MOUSEDEV_PSAUX 1102 mousedev_psaux_register();
1088 error = misc_register(&psaux_mouse);
1089 if (error)
1090 pr_warn("could not register psaux device, error: %d\n",
1091 error);
1092 else
1093 psaux_registered = 1;
1094#endif
1095 1103
1096 pr_info("PS/2 mouse device common for all mice\n"); 1104 pr_info("PS/2 mouse device common for all mice\n");
1097 1105
@@ -1100,10 +1108,7 @@ static int __init mousedev_init(void)
1100 1108
1101static void __exit mousedev_exit(void) 1109static void __exit mousedev_exit(void)
1102{ 1110{
1103#ifdef CONFIG_INPUT_MOUSEDEV_PSAUX 1111 mousedev_psaux_unregister();
1104 if (psaux_registered)
1105 misc_deregister(&psaux_mouse);
1106#endif
1107 input_unregister_handler(&mousedev_handler); 1112 input_unregister_handler(&mousedev_handler);
1108 mousedev_destroy(mousedev_mix); 1113 mousedev_destroy(mousedev_mix);
1109} 1114}
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 0d3219f29744..858ad446de91 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -172,6 +172,76 @@ static void wacom_close(struct input_dev *dev)
172} 172}
173 173
174/* 174/*
175 * Calculate the resolution of the X or Y axis, given appropriate HID data.
176 * This function is little more than hidinput_calc_abs_res stripped down.
177 */
178static int wacom_calc_hid_res(int logical_extents, int physical_extents,
179 unsigned char unit, unsigned char exponent)
180{
181 int prev, unit_exponent;
182
183 /* Check if the extents are sane */
184 if (logical_extents <= 0 || physical_extents <= 0)
185 return 0;
186
187 /* Get signed value of nybble-sized twos-compliment exponent */
188 unit_exponent = exponent;
189 if (unit_exponent > 7)
190 unit_exponent -= 16;
191
192 /* Convert physical_extents to millimeters */
193 if (unit == 0x11) { /* If centimeters */
194 unit_exponent += 1;
195 } else if (unit == 0x13) { /* If inches */
196 prev = physical_extents;
197 physical_extents *= 254;
198 if (physical_extents < prev)
199 return 0;
200 unit_exponent -= 1;
201 } else {
202 return 0;
203 }
204
205 /* Apply negative unit exponent */
206 for (; unit_exponent < 0; unit_exponent++) {
207 prev = logical_extents;
208 logical_extents *= 10;
209 if (logical_extents < prev)
210 return 0;
211 }
212 /* Apply positive unit exponent */
213 for (; unit_exponent > 0; unit_exponent--) {
214 prev = physical_extents;
215 physical_extents *= 10;
216 if (physical_extents < prev)
217 return 0;
218 }
219
220 /* Calculate resolution */
221 return logical_extents / physical_extents;
222}
223
224/*
225 * The physical dimension specified by the HID descriptor is likely not in
226 * the "100th of a mm" units expected by wacom_calculate_touch_res. This
227 * function adjusts the value of [xy]_phy based on the unit and exponent
228 * provided by the HID descriptor. If an error occurs durring conversion
229 * (e.g. from the unit being left unspecified) [xy]_phy is not modified.
230 */
231static void wacom_fix_phy_from_hid(struct wacom_features *features)
232{
233 int xres = wacom_calc_hid_res(features->x_max, features->x_phy,
234 features->unit, features->unitExpo);
235 int yres = wacom_calc_hid_res(features->y_max, features->y_phy,
236 features->unit, features->unitExpo);
237
238 if (xres > 0 && yres > 0) {
239 features->x_phy = (100 * features->x_max) / xres;
240 features->y_phy = (100 * features->y_max) / yres;
241 }
242}
243
244/*
175 * Static values for max X/Y and resolution of Pen interface is stored in 245 * Static values for max X/Y and resolution of Pen interface is stored in
176 * features. This mean physical size of active area can be computed. 246 * features. This mean physical size of active area can be computed.
177 * This is useful to do when Pen and Touch have same active area of tablet. 247 * This is useful to do when Pen and Touch have same active area of tablet.
@@ -321,7 +391,7 @@ static int wacom_parse_hid(struct usb_interface *intf,
321 features->pktlen = WACOM_PKGLEN_TPC2FG; 391 features->pktlen = WACOM_PKGLEN_TPC2FG;
322 } 392 }
323 393
324 if (features->type == MTSCREEN) 394 if (features->type == MTSCREEN || features->type == WACOM_24HDT)
325 features->pktlen = WACOM_PKGLEN_MTOUCH; 395 features->pktlen = WACOM_PKGLEN_MTOUCH;
326 396
327 if (features->type == BAMBOO_PT) { 397 if (features->type == BAMBOO_PT) {
@@ -332,6 +402,14 @@ static int wacom_parse_hid(struct usb_interface *intf,
332 features->x_max = 402 features->x_max =
333 get_unaligned_le16(&report[i + 8]); 403 get_unaligned_le16(&report[i + 8]);
334 i += 15; 404 i += 15;
405 } else if (features->type == WACOM_24HDT) {
406 features->x_max =
407 get_unaligned_le16(&report[i + 3]);
408 features->x_phy =
409 get_unaligned_le16(&report[i + 8]);
410 features->unit = report[i - 1];
411 features->unitExpo = report[i - 3];
412 i += 12;
335 } else { 413 } else {
336 features->x_max = 414 features->x_max =
337 get_unaligned_le16(&report[i + 3]); 415 get_unaligned_le16(&report[i + 3]);
@@ -364,6 +442,12 @@ static int wacom_parse_hid(struct usb_interface *intf,
364 features->y_phy = 442 features->y_phy =
365 get_unaligned_le16(&report[i + 6]); 443 get_unaligned_le16(&report[i + 6]);
366 i += 7; 444 i += 7;
445 } else if (type == WACOM_24HDT) {
446 features->y_max =
447 get_unaligned_le16(&report[i + 3]);
448 features->y_phy =
449 get_unaligned_le16(&report[i - 2]);
450 i += 7;
367 } else if (type == BAMBOO_PT) { 451 } else if (type == BAMBOO_PT) {
368 features->y_phy = 452 features->y_phy =
369 get_unaligned_le16(&report[i + 3]); 453 get_unaligned_le16(&report[i + 3]);
@@ -432,56 +516,55 @@ static int wacom_parse_hid(struct usb_interface *intf,
432 return result; 516 return result;
433} 517}
434 518
435static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_features *features) 519static int wacom_set_device_mode(struct usb_interface *intf, int report_id, int length, int mode)
436{ 520{
437 unsigned char *rep_data; 521 unsigned char *rep_data;
438 int limit = 0, report_id = 2; 522 int error = -ENOMEM, limit = 0;
439 int error = -ENOMEM;
440 523
441 rep_data = kmalloc(4, GFP_KERNEL); 524 rep_data = kzalloc(length, GFP_KERNEL);
442 if (!rep_data) 525 if (!rep_data)
443 return error; 526 return error;
444 527
445 /* ask to report Wacom data */ 528 rep_data[0] = report_id;
529 rep_data[1] = mode;
530
531 do {
532 error = wacom_set_report(intf, WAC_HID_FEATURE_REPORT,
533 report_id, rep_data, length, 1);
534 if (error >= 0)
535 error = wacom_get_report(intf, WAC_HID_FEATURE_REPORT,
536 report_id, rep_data, length, 1);
537 } while ((error < 0 || rep_data[1] != mode) && limit++ < WAC_MSG_RETRIES);
538
539 kfree(rep_data);
540
541 return error < 0 ? error : 0;
542}
543
544/*
545 * Switch the tablet into its most-capable mode. Wacom tablets are
546 * typically configured to power-up in a mode which sends mouse-like
547 * reports to the OS. To get absolute position, pressure data, etc.
548 * from the tablet, it is necessary to switch the tablet out of this
549 * mode and into one which sends the full range of tablet data.
550 */
551static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_features *features)
552{
446 if (features->device_type == BTN_TOOL_FINGER) { 553 if (features->device_type == BTN_TOOL_FINGER) {
447 /* if it is an MT Tablet PC touch */
448 if (features->type > TABLETPC) { 554 if (features->type > TABLETPC) {
449 do { 555 /* MT Tablet PC touch */
450 rep_data[0] = 3; 556 return wacom_set_device_mode(intf, 3, 4, 4);
451 rep_data[1] = 4; 557 }
452 rep_data[2] = 0; 558 else if (features->type == WACOM_24HDT) {
453 rep_data[3] = 0; 559 return wacom_set_device_mode(intf, 18, 3, 2);
454 report_id = 3; 560 }
455 error = wacom_set_report(intf, 561 } else if (features->device_type == BTN_TOOL_PEN) {
456 WAC_HID_FEATURE_REPORT, 562 if (features->type <= BAMBOO_PT && features->type != WIRELESS) {
457 report_id, 563 return wacom_set_device_mode(intf, 2, 2, 2);
458 rep_data, 4, 1);
459 if (error >= 0)
460 error = wacom_get_report(intf,
461 WAC_HID_FEATURE_REPORT,
462 report_id,
463 rep_data, 4, 1);
464 } while ((error < 0 || rep_data[1] != 4) &&
465 limit++ < WAC_MSG_RETRIES);
466 } 564 }
467 } else if (features->type <= BAMBOO_PT &&
468 features->type != WIRELESS &&
469 features->device_type == BTN_TOOL_PEN) {
470 do {
471 rep_data[0] = 2;
472 rep_data[1] = 2;
473 error = wacom_set_report(intf, WAC_HID_FEATURE_REPORT,
474 report_id, rep_data, 2, 1);
475 if (error >= 0)
476 error = wacom_get_report(intf,
477 WAC_HID_FEATURE_REPORT,
478 report_id, rep_data, 2, 1);
479 } while ((error < 0 || rep_data[1] != 2) && limit++ < WAC_MSG_RETRIES);
480 } 565 }
481 566
482 kfree(rep_data); 567 return 0;
483
484 return error < 0 ? error : 0;
485} 568}
486 569
487static int wacom_retrieve_hid_descriptor(struct usb_interface *intf, 570static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
@@ -531,6 +614,7 @@ static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
531 error = wacom_parse_hid(intf, hid_desc, features); 614 error = wacom_parse_hid(intf, hid_desc, features);
532 if (error) 615 if (error)
533 goto out; 616 goto out;
617 wacom_fix_phy_from_hid(features);
534 618
535 out: 619 out:
536 return error; 620 return error;
@@ -546,6 +630,30 @@ struct wacom_usbdev_data {
546static LIST_HEAD(wacom_udev_list); 630static LIST_HEAD(wacom_udev_list);
547static DEFINE_MUTEX(wacom_udev_list_lock); 631static DEFINE_MUTEX(wacom_udev_list_lock);
548 632
633static struct usb_device *wacom_get_sibling(struct usb_device *dev, int vendor, int product)
634{
635 int port1;
636 struct usb_device *sibling;
637
638 if (vendor == 0 && product == 0)
639 return dev;
640
641 if (dev->parent == NULL)
642 return NULL;
643
644 usb_hub_for_each_child(dev->parent, port1, sibling) {
645 struct usb_device_descriptor *d;
646 if (sibling == NULL)
647 continue;
648
649 d = &sibling->descriptor;
650 if (d->idVendor == vendor && d->idProduct == product)
651 return sibling;
652 }
653
654 return NULL;
655}
656
549static struct wacom_usbdev_data *wacom_get_usbdev_data(struct usb_device *dev) 657static struct wacom_usbdev_data *wacom_get_usbdev_data(struct usb_device *dev)
550{ 658{
551 struct wacom_usbdev_data *data; 659 struct wacom_usbdev_data *data;
@@ -1190,13 +1298,19 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
1190 strlcpy(wacom_wac->name, features->name, sizeof(wacom_wac->name)); 1298 strlcpy(wacom_wac->name, features->name, sizeof(wacom_wac->name));
1191 1299
1192 if (features->quirks & WACOM_QUIRK_MULTI_INPUT) { 1300 if (features->quirks & WACOM_QUIRK_MULTI_INPUT) {
1301 struct usb_device *other_dev;
1302
1193 /* Append the device type to the name */ 1303 /* Append the device type to the name */
1194 strlcat(wacom_wac->name, 1304 strlcat(wacom_wac->name,
1195 features->device_type == BTN_TOOL_PEN ? 1305 features->device_type == BTN_TOOL_PEN ?
1196 " Pen" : " Finger", 1306 " Pen" : " Finger",
1197 sizeof(wacom_wac->name)); 1307 sizeof(wacom_wac->name));
1198 1308
1199 error = wacom_add_shared_data(wacom_wac, dev); 1309
1310 other_dev = wacom_get_sibling(dev, features->oVid, features->oPid);
1311 if (other_dev == NULL || wacom_get_usbdev_data(other_dev) == NULL)
1312 other_dev = dev;
1313 error = wacom_add_shared_data(wacom_wac, other_dev);
1200 if (error) 1314 if (error)
1201 goto fail3; 1315 goto fail3;
1202 } 1316 }
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 08b462b6c0d8..0a67031ffc13 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -25,6 +25,11 @@
25#define WACOM_INTUOS_RES 100 25#define WACOM_INTUOS_RES 100
26#define WACOM_INTUOS3_RES 200 26#define WACOM_INTUOS3_RES 200
27 27
28/* Scale factor relating reported contact size to logical contact area.
29 * 2^14/pi is a good approximation on Intuos5 and 3rd-gen Bamboo
30 */
31#define WACOM_CONTACT_AREA_SCALE 2607
32
28static int wacom_penpartner_irq(struct wacom_wac *wacom) 33static int wacom_penpartner_irq(struct wacom_wac *wacom)
29{ 34{
30 unsigned char *data = wacom->data; 35 unsigned char *data = wacom->data;
@@ -326,7 +331,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
326 331
327 /* Enter report */ 332 /* Enter report */
328 if ((data[1] & 0xfc) == 0xc0) { 333 if ((data[1] & 0xfc) == 0xc0) {
329 if (features->type >= INTUOS5S && features->type <= INTUOS5L) 334 if (features->quirks == WACOM_QUIRK_MULTI_INPUT)
330 wacom->shared->stylus_in_proximity = true; 335 wacom->shared->stylus_in_proximity = true;
331 336
332 /* serial number of the tool */ 337 /* serial number of the tool */
@@ -414,7 +419,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
414 419
415 /* Exit report */ 420 /* Exit report */
416 if ((data[1] & 0xfe) == 0x80) { 421 if ((data[1] & 0xfe) == 0x80) {
417 if (features->type >= INTUOS5S && features->type <= INTUOS5L) 422 if (features->quirks == WACOM_QUIRK_MULTI_INPUT)
418 wacom->shared->stylus_in_proximity = false; 423 wacom->shared->stylus_in_proximity = false;
419 424
420 /* 425 /*
@@ -801,6 +806,70 @@ static int find_slot_from_contactid(struct wacom_wac *wacom, int contactid)
801 return -1; 806 return -1;
802} 807}
803 808
809static int int_dist(int x1, int y1, int x2, int y2)
810{
811 int x = x2 - x1;
812 int y = y2 - y1;
813
814 return int_sqrt(x*x + y*y);
815}
816
817static int wacom_24hdt_irq(struct wacom_wac *wacom)
818{
819 struct input_dev *input = wacom->input;
820 char *data = wacom->data;
821 int i;
822 int current_num_contacts = data[61];
823 int contacts_to_send = 0;
824
825 /*
826 * First packet resets the counter since only the first
827 * packet in series will have non-zero current_num_contacts.
828 */
829 if (current_num_contacts)
830 wacom->num_contacts_left = current_num_contacts;
831
832 /* There are at most 4 contacts per packet */
833 contacts_to_send = min(4, wacom->num_contacts_left);
834
835 for (i = 0; i < contacts_to_send; i++) {
836 int offset = (WACOM_BYTES_PER_24HDT_PACKET * i) + 1;
837 bool touch = data[offset] & 0x1 && !wacom->shared->stylus_in_proximity;
838 int id = data[offset + 1];
839 int slot = find_slot_from_contactid(wacom, id);
840
841 if (slot < 0)
842 continue;
843 input_mt_slot(input, slot);
844 input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
845
846 if (touch) {
847 int t_x = le16_to_cpup((__le16 *)&data[offset + 2]);
848 int c_x = le16_to_cpup((__le16 *)&data[offset + 4]);
849 int t_y = le16_to_cpup((__le16 *)&data[offset + 6]);
850 int c_y = le16_to_cpup((__le16 *)&data[offset + 8]);
851 int w = le16_to_cpup((__le16 *)&data[offset + 10]);
852 int h = le16_to_cpup((__le16 *)&data[offset + 12]);
853
854 input_report_abs(input, ABS_MT_POSITION_X, t_x);
855 input_report_abs(input, ABS_MT_POSITION_Y, t_y);
856 input_report_abs(input, ABS_MT_TOUCH_MAJOR, min(w,h));
857 input_report_abs(input, ABS_MT_WIDTH_MAJOR, min(w, h) + int_dist(t_x, t_y, c_x, c_y));
858 input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h));
859 input_report_abs(input, ABS_MT_ORIENTATION, w > h);
860 }
861 wacom->slots[slot] = touch ? id : -1;
862 }
863
864 input_mt_report_pointer_emulation(input, true);
865
866 wacom->num_contacts_left -= contacts_to_send;
867 if (wacom->num_contacts_left <= 0)
868 wacom->num_contacts_left = 0;
869
870 return 1;
871}
872
804static int wacom_mt_touch(struct wacom_wac *wacom) 873static int wacom_mt_touch(struct wacom_wac *wacom)
805{ 874{
806 struct input_dev *input = wacom->input; 875 struct input_dev *input = wacom->input;
@@ -1043,11 +1112,19 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
1043 if (touch) { 1112 if (touch) {
1044 int x = (data[2] << 4) | (data[4] >> 4); 1113 int x = (data[2] << 4) | (data[4] >> 4);
1045 int y = (data[3] << 4) | (data[4] & 0x0f); 1114 int y = (data[3] << 4) | (data[4] & 0x0f);
1046 int w = data[6]; 1115 int a = data[5];
1116
1117 // "a" is a scaled-down area which we assume is roughly
1118 // circular and which can be described as: a=(pi*r^2)/C.
1119 int x_res = input_abs_get_res(input, ABS_X);
1120 int y_res = input_abs_get_res(input, ABS_Y);
1121 int width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
1122 int height = width * y_res / x_res;
1047 1123
1048 input_report_abs(input, ABS_MT_POSITION_X, x); 1124 input_report_abs(input, ABS_MT_POSITION_X, x);
1049 input_report_abs(input, ABS_MT_POSITION_Y, y); 1125 input_report_abs(input, ABS_MT_POSITION_Y, y);
1050 input_report_abs(input, ABS_MT_TOUCH_MAJOR, w); 1126 input_report_abs(input, ABS_MT_TOUCH_MAJOR, width);
1127 input_report_abs(input, ABS_MT_TOUCH_MINOR, height);
1051 } 1128 }
1052} 1129}
1053 1130
@@ -1242,6 +1319,10 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
1242 sync = wacom_intuos_irq(wacom_wac); 1319 sync = wacom_intuos_irq(wacom_wac);
1243 break; 1320 break;
1244 1321
1322 case WACOM_24HDT:
1323 sync = wacom_24hdt_irq(wacom_wac);
1324 break;
1325
1245 case INTUOS5S: 1326 case INTUOS5S:
1246 case INTUOS5: 1327 case INTUOS5:
1247 case INTUOS5L: 1328 case INTUOS5L:
@@ -1327,7 +1408,8 @@ void wacom_setup_device_quirks(struct wacom_features *features)
1327 1408
1328 /* these device have multiple inputs */ 1409 /* these device have multiple inputs */
1329 if (features->type >= WIRELESS || 1410 if (features->type >= WIRELESS ||
1330 (features->type >= INTUOS5S && features->type <= INTUOS5L)) 1411 (features->type >= INTUOS5S && features->type <= INTUOS5L) ||
1412 (features->oVid && features->oPid))
1331 features->quirks |= WACOM_QUIRK_MULTI_INPUT; 1413 features->quirks |= WACOM_QUIRK_MULTI_INPUT;
1332 1414
1333 /* quirk for bamboo touch with 2 low res touches */ 1415 /* quirk for bamboo touch with 2 low res touches */
@@ -1436,6 +1518,9 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1436 1518
1437 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); 1519 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
1438 input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0); 1520 input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
1521
1522 __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
1523
1439 wacom_setup_cintiq(wacom_wac); 1524 wacom_setup_cintiq(wacom_wac);
1440 break; 1525 break;
1441 1526
@@ -1533,7 +1618,9 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1533 input_mt_init_slots(input_dev, features->touch_max, 0); 1618 input_mt_init_slots(input_dev, features->touch_max, 0);
1534 1619
1535 input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 1620 input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
1536 0, 255, 0, 0); 1621 0, features->x_max, 0, 0);
1622 input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR,
1623 0, features->y_max, 0, 0);
1537 1624
1538 input_set_abs_params(input_dev, ABS_MT_POSITION_X, 1625 input_set_abs_params(input_dev, ABS_MT_POSITION_X,
1539 0, features->x_max, 1626 0, features->x_max,
@@ -1560,6 +1647,15 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1560 __set_bit(INPUT_PROP_POINTER, input_dev->propbit); 1647 __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1561 break; 1648 break;
1562 1649
1650 case WACOM_24HDT:
1651 if (features->device_type == BTN_TOOL_FINGER) {
1652 input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, features->x_max, 0, 0);
1653 input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR, 0, features->x_max, 0, 0);
1654 input_set_abs_params(input_dev, ABS_MT_WIDTH_MINOR, 0, features->y_max, 0, 0);
1655 input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
1656 }
1657 /* fall through */
1658
1563 case MTSCREEN: 1659 case MTSCREEN:
1564 if (features->device_type == BTN_TOOL_FINGER) { 1660 if (features->device_type == BTN_TOOL_FINGER) {
1565 wacom_wac->slots = kmalloc(features->touch_max * 1661 wacom_wac->slots = kmalloc(features->touch_max *
@@ -1641,7 +1737,10 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1641 1737
1642 input_set_abs_params(input_dev, 1738 input_set_abs_params(input_dev,
1643 ABS_MT_TOUCH_MAJOR, 1739 ABS_MT_TOUCH_MAJOR,
1644 0, 255, 0, 0); 1740 0, features->x_max, 0, 0);
1741 input_set_abs_params(input_dev,
1742 ABS_MT_TOUCH_MINOR,
1743 0, features->y_max, 0, 0);
1645 } 1744 }
1646 1745
1647 input_set_abs_params(input_dev, ABS_MT_POSITION_X, 1746 input_set_abs_params(input_dev, ABS_MT_POSITION_X,
@@ -1851,8 +1950,11 @@ static const struct wacom_features wacom_features_0xF4 =
1851 { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, 1950 { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
1852 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; 1951 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
1853static const struct wacom_features wacom_features_0xF8 = 1952static const struct wacom_features wacom_features_0xF8 =
1854 { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, 1953 { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, /* Pen */
1855 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; 1954 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
1955static const struct wacom_features wacom_features_0xF6 =
1956 { "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */
1957 .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10 };
1856static const struct wacom_features wacom_features_0x3F = 1958static const struct wacom_features wacom_features_0x3F =
1857 { "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023, 1959 { "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023,
1858 63, CINTIQ, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; 1960 63, CINTIQ, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
@@ -2095,6 +2197,7 @@ const struct usb_device_id wacom_ids[] = {
2095 { USB_DEVICE_WACOM(0x47) }, 2197 { USB_DEVICE_WACOM(0x47) },
2096 { USB_DEVICE_WACOM(0xF4) }, 2198 { USB_DEVICE_WACOM(0xF4) },
2097 { USB_DEVICE_WACOM(0xF8) }, 2199 { USB_DEVICE_WACOM(0xF8) },
2200 { USB_DEVICE_WACOM(0xF6) },
2098 { USB_DEVICE_WACOM(0xFA) }, 2201 { USB_DEVICE_WACOM(0xFA) },
2099 { USB_DEVICE_LENOVO(0x6004) }, 2202 { USB_DEVICE_LENOVO(0x6004) },
2100 { } 2203 { }
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index 96c185cc301e..345f1e76975e 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -29,6 +29,7 @@
29 29
30/* wacom data size per MT contact */ 30/* wacom data size per MT contact */
31#define WACOM_BYTES_PER_MT_PACKET 11 31#define WACOM_BYTES_PER_MT_PACKET 11
32#define WACOM_BYTES_PER_24HDT_PACKET 14
32 33
33/* device IDs */ 34/* device IDs */
34#define STYLUS_DEVICE_ID 0x02 35#define STYLUS_DEVICE_ID 0x02
@@ -49,6 +50,7 @@
49#define WACOM_REPORT_TPCHID 15 50#define WACOM_REPORT_TPCHID 15
50#define WACOM_REPORT_TPCST 16 51#define WACOM_REPORT_TPCST 16
51#define WACOM_REPORT_TPC1FGE 18 52#define WACOM_REPORT_TPC1FGE 18
53#define WACOM_REPORT_24HDT 1
52 54
53/* device quirks */ 55/* device quirks */
54#define WACOM_QUIRK_MULTI_INPUT 0x0001 56#define WACOM_QUIRK_MULTI_INPUT 0x0001
@@ -81,6 +83,7 @@ enum {
81 WACOM_MO, 83 WACOM_MO,
82 WIRELESS, 84 WIRELESS,
83 BAMBOO_PT, 85 BAMBOO_PT,
86 WACOM_24HDT,
84 TABLETPC, /* add new TPC below */ 87 TABLETPC, /* add new TPC below */
85 TABLETPCE, 88 TABLETPCE,
86 TABLETPC2FG, 89 TABLETPC2FG,
@@ -109,6 +112,8 @@ struct wacom_features {
109 int distance_fuzz; 112 int distance_fuzz;
110 unsigned quirks; 113 unsigned quirks;
111 unsigned touch_max; 114 unsigned touch_max;
115 int oVid;
116 int oPid;
112}; 117};
113 118
114struct wacom_shared { 119struct wacom_shared {
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 1ba232cbc09d..f7668b24c378 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -239,7 +239,7 @@ config TOUCHSCREEN_EETI
239 239
240config TOUCHSCREEN_EGALAX 240config TOUCHSCREEN_EGALAX
241 tristate "EETI eGalax multi-touch panel support" 241 tristate "EETI eGalax multi-touch panel support"
242 depends on I2C 242 depends on I2C && OF
243 help 243 help
244 Say Y here to enable support for I2C connected EETI 244 Say Y here to enable support for I2C connected EETI
245 eGalax multi-touch panels. 245 eGalax multi-touch panels.
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index e92615d0b1b0..1df2396af008 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -320,10 +320,8 @@ static bool mxt_object_writable(unsigned int type)
320static void mxt_dump_message(struct device *dev, 320static void mxt_dump_message(struct device *dev,
321 struct mxt_message *message) 321 struct mxt_message *message)
322{ 322{
323 dev_dbg(dev, "reportid: %u\tmessage: %02x %02x %02x %02x %02x %02x %02x\n", 323 dev_dbg(dev, "reportid: %u\tmessage: %*ph\n",
324 message->reportid, message->message[0], message->message[1], 324 message->reportid, 7, message->message);
325 message->message[2], message->message[3], message->message[4],
326 message->message[5], message->message[6]);
327} 325}
328 326
329static int mxt_check_bootloader(struct i2c_client *client, 327static int mxt_check_bootloader(struct i2c_client *client,
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index c1e3460f1195..13fa62fdfb0b 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -28,6 +28,7 @@
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/bitops.h> 29#include <linux/bitops.h>
30#include <linux/input/mt.h> 30#include <linux/input/mt.h>
31#include <linux/of_gpio.h>
31 32
32/* 33/*
33 * Mouse Mode: some panel may configure the controller to mouse mode, 34 * Mouse Mode: some panel may configure the controller to mouse mode,
@@ -122,9 +123,17 @@ static irqreturn_t egalax_ts_interrupt(int irq, void *dev_id)
122/* wake up controller by an falling edge of interrupt gpio. */ 123/* wake up controller by an falling edge of interrupt gpio. */
123static int egalax_wake_up_device(struct i2c_client *client) 124static int egalax_wake_up_device(struct i2c_client *client)
124{ 125{
125 int gpio = irq_to_gpio(client->irq); 126 struct device_node *np = client->dev.of_node;
127 int gpio;
126 int ret; 128 int ret;
127 129
130 if (!np)
131 return -ENODEV;
132
133 gpio = of_get_named_gpio(np, "wakeup-gpios", 0);
134 if (!gpio_is_valid(gpio))
135 return -ENODEV;
136
128 ret = gpio_request(gpio, "egalax_irq"); 137 ret = gpio_request(gpio, "egalax_irq");
129 if (ret < 0) { 138 if (ret < 0) {
130 dev_err(&client->dev, 139 dev_err(&client->dev,
@@ -181,7 +190,11 @@ static int __devinit egalax_ts_probe(struct i2c_client *client,
181 ts->input_dev = input_dev; 190 ts->input_dev = input_dev;
182 191
183 /* controller may be in sleep, wake it up. */ 192 /* controller may be in sleep, wake it up. */
184 egalax_wake_up_device(client); 193 error = egalax_wake_up_device(client);
194 if (error) {
195 dev_err(&client->dev, "Failed to wake up the controller\n");
196 goto err_free_dev;
197 }
185 198
186 ret = egalax_firmware_version(client); 199 ret = egalax_firmware_version(client);
187 if (ret < 0) { 200 if (ret < 0) {
@@ -274,11 +287,17 @@ static int egalax_ts_resume(struct device *dev)
274 287
275static SIMPLE_DEV_PM_OPS(egalax_ts_pm_ops, egalax_ts_suspend, egalax_ts_resume); 288static SIMPLE_DEV_PM_OPS(egalax_ts_pm_ops, egalax_ts_suspend, egalax_ts_resume);
276 289
290static struct of_device_id egalax_ts_dt_ids[] = {
291 { .compatible = "eeti,egalax_ts" },
292 { /* sentinel */ }
293};
294
277static struct i2c_driver egalax_ts_driver = { 295static struct i2c_driver egalax_ts_driver = {
278 .driver = { 296 .driver = {
279 .name = "egalax_ts", 297 .name = "egalax_ts",
280 .owner = THIS_MODULE, 298 .owner = THIS_MODULE,
281 .pm = &egalax_ts_pm_ops, 299 .pm = &egalax_ts_pm_ops,
300 .of_match_table = of_match_ptr(egalax_ts_dt_ids),
282 }, 301 },
283 .id_table = egalax_ts_id, 302 .id_table = egalax_ts_id,
284 .probe = egalax_ts_probe, 303 .probe = egalax_ts_probe,
diff --git a/drivers/input/touchscreen/tsc40.c b/drivers/input/touchscreen/tsc40.c
index 63209aaa55f0..eb96f168fb9d 100644
--- a/drivers/input/touchscreen/tsc40.c
+++ b/drivers/input/touchscreen/tsc40.c
@@ -107,7 +107,6 @@ static int tsc_connect(struct serio *serio, struct serio_driver *drv)
107 __set_bit(BTN_TOUCH, input_dev->keybit); 107 __set_bit(BTN_TOUCH, input_dev->keybit);
108 input_set_abs_params(ptsc->dev, ABS_X, 0, 0x3ff, 0, 0); 108 input_set_abs_params(ptsc->dev, ABS_X, 0, 0x3ff, 0, 0);
109 input_set_abs_params(ptsc->dev, ABS_Y, 0, 0x3ff, 0, 0); 109 input_set_abs_params(ptsc->dev, ABS_Y, 0, 0x3ff, 0, 0);
110 input_set_abs_params(ptsc->dev, ABS_PRESSURE, 0, 0, 0, 0);
111 110
112 serio_set_drvdata(serio, ptsc); 111 serio_set_drvdata(serio, ptsc);
113 112
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 18b0d99bd4d6..81837b0710a9 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1599,21 +1599,46 @@ static void __init free_on_init_error(void)
1599#endif 1599#endif
1600} 1600}
1601 1601
1602/* SB IOAPIC is always on this device in AMD systems */
1603#define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
1604
1602static bool __init check_ioapic_information(void) 1605static bool __init check_ioapic_information(void)
1603{ 1606{
1607 bool ret, has_sb_ioapic;
1604 int idx; 1608 int idx;
1605 1609
1606 for (idx = 0; idx < nr_ioapics; idx++) { 1610 has_sb_ioapic = false;
1607 int id = mpc_ioapic_id(idx); 1611 ret = false;
1608 1612
1609 if (get_ioapic_devid(id) < 0) { 1613 for (idx = 0; idx < nr_ioapics; idx++) {
1610 pr_err(FW_BUG "AMD-Vi: IO-APIC[%d] not in IVRS table\n", id); 1614 int devid, id = mpc_ioapic_id(idx);
1611 pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug\n"); 1615
1612 return false; 1616 devid = get_ioapic_devid(id);
1617 if (devid < 0) {
1618 pr_err(FW_BUG "AMD-Vi: IOAPIC[%d] not in IVRS table\n", id);
1619 ret = false;
1620 } else if (devid == IOAPIC_SB_DEVID) {
1621 has_sb_ioapic = true;
1622 ret = true;
1613 } 1623 }
1614 } 1624 }
1615 1625
1616 return true; 1626 if (!has_sb_ioapic) {
1627 /*
1628 * We expect the SB IOAPIC to be listed in the IVRS
1629 * table. The system timer is connected to the SB IOAPIC
1630 * and if we don't have it in the list the system will
1631 * panic at boot time. This situation usually happens
1632 * when the BIOS is buggy and provides us the wrong
1633 * device id for the IOAPIC in the system.
1634 */
1635 pr_err(FW_BUG "AMD-Vi: No southbridge IOAPIC found in IVRS table\n");
1636 }
1637
1638 if (!ret)
1639 pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug(s)\n");
1640
1641 return ret;
1617} 1642}
1618 1643
1619static void __init free_dma_resources(void) 1644static void __init free_dma_resources(void)
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 0b4d62e0c645..a649f146d17b 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -200,7 +200,7 @@ enum {
200 200
201#define SMMU_ADDR_TO_PFN(addr) ((addr) >> 12) 201#define SMMU_ADDR_TO_PFN(addr) ((addr) >> 12)
202#define SMMU_ADDR_TO_PDN(addr) ((addr) >> 22) 202#define SMMU_ADDR_TO_PDN(addr) ((addr) >> 22)
203#define SMMU_PDN_TO_ADDR(addr) ((pdn) << 22) 203#define SMMU_PDN_TO_ADDR(pdn) ((pdn) << 22)
204 204
205#define _READABLE (1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT) 205#define _READABLE (1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT)
206#define _WRITABLE (1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT) 206#define _WRITABLE (1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT)
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 527588708948..c44950d3eb7b 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -617,7 +617,13 @@ static void int_in_work(struct work_struct *work)
617 if (rc == 0) 617 if (rc == 0)
618 /* success, resubmit interrupt read URB */ 618 /* success, resubmit interrupt read URB */
619 rc = usb_submit_urb(urb, GFP_ATOMIC); 619 rc = usb_submit_urb(urb, GFP_ATOMIC);
620 if (rc != 0 && rc != -ENODEV) { 620
621 switch (rc) {
622 case 0: /* success */
623 case -ENODEV: /* device gone */
624 case -EINVAL: /* URB already resubmitted, or terminal badness */
625 break;
626 default: /* failure: try to recover by resetting the device */
621 dev_err(cs->dev, "clear halt failed: %s\n", get_usb_rcmsg(rc)); 627 dev_err(cs->dev, "clear halt failed: %s\n", get_usb_rcmsg(rc));
622 rc = usb_lock_device_for_reset(ucs->udev, ucs->interface); 628 rc = usb_lock_device_for_reset(ucs->udev, ucs->interface);
623 if (rc == 0) { 629 if (rc == 0) {
@@ -2442,7 +2448,9 @@ static void gigaset_disconnect(struct usb_interface *interface)
2442} 2448}
2443 2449
2444/* gigaset_suspend 2450/* gigaset_suspend
2445 * This function is called before the USB connection is suspended. 2451 * This function is called before the USB connection is suspended
2452 * or before the USB device is reset.
2453 * In the latter case, message == PMSG_ON.
2446 */ 2454 */
2447static int gigaset_suspend(struct usb_interface *intf, pm_message_t message) 2455static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
2448{ 2456{
@@ -2498,7 +2506,12 @@ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
2498 del_timer_sync(&ucs->timer_atrdy); 2506 del_timer_sync(&ucs->timer_atrdy);
2499 del_timer_sync(&ucs->timer_cmd_in); 2507 del_timer_sync(&ucs->timer_cmd_in);
2500 del_timer_sync(&ucs->timer_int_in); 2508 del_timer_sync(&ucs->timer_int_in);
2501 cancel_work_sync(&ucs->int_in_wq); 2509
2510 /* don't try to cancel int_in_wq from within reset as it
2511 * might be the one requesting the reset
2512 */
2513 if (message.event != PM_EVENT_ON)
2514 cancel_work_sync(&ucs->int_in_wq);
2502 2515
2503 gig_dbg(DEBUG_SUSPEND, "suspend complete"); 2516 gig_dbg(DEBUG_SUSPEND, "suspend complete");
2504 return 0; 2517 return 0;
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index a1e760150821..61d78fa03b1a 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -595,7 +595,7 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
595 j = ipc->num / (sizeof(long) * 8); 595 j = ipc->num / (sizeof(long) * 8);
596 i = ipc->num % (sizeof(long) * 8); 596 i = ipc->num % (sizeof(long) * 8);
597 if (j < 8) 597 if (j < 8)
598 protos[j] |= (0x1 << i); 598 protos[j] |= (1UL << i);
599 ipc = ipc->next; 599 ipc = ipc->next;
600 } 600 }
601 if ((r = set_arg(argp, protos, 8 * sizeof(long)))) 601 if ((r = set_arg(argp, protos, 8 * sizeof(long))))
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 16578d3b52bb..f508defc0d96 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -63,6 +63,17 @@ config LEDS_LM3533
63 hardware-accelerated blinking with maximum on and off periods of 9.8 63 hardware-accelerated blinking with maximum on and off periods of 9.8
64 and 77 seconds respectively. 64 and 77 seconds respectively.
65 65
66config LEDS_LM3642
67 tristate "LED support for LM3642 Chip"
68 depends on LEDS_CLASS && I2C
69 select REGMAP_I2C
70 help
71 This option enables support for LEDs connected to LM3642.
72 The LM3642 is a 4MHz fixed-frequency synchronous boost
73 converter plus 1.5A constant current driver for a high-current
74 white LED.
75
76
66config LEDS_LOCOMO 77config LEDS_LOCOMO
67 tristate "LED Support for Locomo device" 78 tristate "LED Support for Locomo device"
68 depends on LEDS_CLASS 79 depends on LEDS_CLASS
@@ -192,11 +203,12 @@ config LEDS_LP5521
192 programming the engines. 203 programming the engines.
193 204
194config LEDS_LP5523 205config LEDS_LP5523
195 tristate "LED Support for N.S. LP5523 LED driver chip" 206 tristate "LED Support for TI/National LP5523/55231 LED driver chip"
196 depends on LEDS_CLASS && I2C 207 depends on LEDS_CLASS && I2C
197 help 208 help
198 If you say yes here you get support for the National Semiconductor 209 If you say yes here you get support for TI/National Semiconductor
199 LP5523 LED driver. It is 9 channel chip with programmable engines. 210 LP5523/55231 LED driver.
211 It is 9 channel chip with programmable engines.
200 Driver provides direct control via LED class and interface for 212 Driver provides direct control via LED class and interface for
201 programming the engines. 213 programming the engines.
202 214
@@ -422,13 +434,13 @@ config LEDS_MAX8997
422 This option enables support for on-chip LED drivers on 434 This option enables support for on-chip LED drivers on
423 MAXIM MAX8997 PMIC. 435 MAXIM MAX8997 PMIC.
424 436
425config LEDS_LM3556 437config LEDS_LM355x
426 tristate "LED support for LM3556 Chip" 438 tristate "LED support for LM355x Chips, LM3554 and LM3556"
427 depends on LEDS_CLASS && I2C 439 depends on LEDS_CLASS && I2C
428 select REGMAP_I2C 440 select REGMAP_I2C
429 help 441 help
430 This option enables support for LEDs connected to LM3556. 442 This option enables support for LEDs connected to LM355x.
431 LM3556 includes Torch, Flash and Indicator functions. 443 LM355x includes Torch, Flash and Indicator functions.
432 444
433config LEDS_OT200 445config LEDS_OT200
434 tristate "LED support for the Bachmann OT200" 446 tristate "LED support for the Bachmann OT200"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index a9b627c4f8ba..3fb9641b6194 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
11obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o 11obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
12obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o 12obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o
13obj-$(CONFIG_LEDS_LM3533) += leds-lm3533.o 13obj-$(CONFIG_LEDS_LM3533) += leds-lm3533.o
14obj-$(CONFIG_LEDS_LM3642) += leds-lm3642.o
14obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o 15obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o
15obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o 16obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
16obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o 17obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
@@ -48,7 +49,7 @@ obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o
48obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o 49obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o
49obj-$(CONFIG_LEDS_RENESAS_TPU) += leds-renesas-tpu.o 50obj-$(CONFIG_LEDS_RENESAS_TPU) += leds-renesas-tpu.o
50obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o 51obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o
51obj-$(CONFIG_LEDS_LM3556) += leds-lm3556.o 52obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o
52obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o 53obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o
53 54
54# LED SPI Drivers 55# LED SPI Drivers
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index c599095bc005..48cce18e9d6d 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -124,6 +124,16 @@ static void led_timer_function(unsigned long data)
124 mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay)); 124 mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay));
125} 125}
126 126
127static void set_brightness_delayed(struct work_struct *ws)
128{
129 struct led_classdev *led_cdev =
130 container_of(ws, struct led_classdev, set_brightness_work);
131
132 led_stop_software_blink(led_cdev);
133
134 __led_set_brightness(led_cdev, led_cdev->delayed_set_value);
135}
136
127/** 137/**
128 * led_classdev_suspend - suspend an led_classdev. 138 * led_classdev_suspend - suspend an led_classdev.
129 * @led_cdev: the led_classdev to suspend. 139 * @led_cdev: the led_classdev to suspend.
@@ -191,6 +201,8 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
191 201
192 led_update_brightness(led_cdev); 202 led_update_brightness(led_cdev);
193 203
204 INIT_WORK(&led_cdev->set_brightness_work, set_brightness_delayed);
205
194 init_timer(&led_cdev->blink_timer); 206 init_timer(&led_cdev->blink_timer);
195 led_cdev->blink_timer.function = led_timer_function; 207 led_cdev->blink_timer.function = led_timer_function;
196 led_cdev->blink_timer.data = (unsigned long)led_cdev; 208 led_cdev->blink_timer.data = (unsigned long)led_cdev;
@@ -221,7 +233,10 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
221 up_write(&led_cdev->trigger_lock); 233 up_write(&led_cdev->trigger_lock);
222#endif 234#endif
223 235
236 cancel_work_sync(&led_cdev->set_brightness_work);
237
224 /* Stop blinking */ 238 /* Stop blinking */
239 led_stop_software_blink(led_cdev);
225 led_set_brightness(led_cdev, LED_OFF); 240 led_set_brightness(led_cdev, LED_OFF);
226 241
227 device_unregister(led_cdev->dev); 242 device_unregister(led_cdev->dev);
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index 2ab05af3de31..ce8921a753a3 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -103,13 +103,23 @@ void led_blink_set_oneshot(struct led_classdev *led_cdev,
103} 103}
104EXPORT_SYMBOL(led_blink_set_oneshot); 104EXPORT_SYMBOL(led_blink_set_oneshot);
105 105
106void led_set_brightness(struct led_classdev *led_cdev, 106void led_stop_software_blink(struct led_classdev *led_cdev)
107 enum led_brightness brightness)
108{ 107{
109 /* stop and clear soft-blink timer */
110 del_timer_sync(&led_cdev->blink_timer); 108 del_timer_sync(&led_cdev->blink_timer);
111 led_cdev->blink_delay_on = 0; 109 led_cdev->blink_delay_on = 0;
112 led_cdev->blink_delay_off = 0; 110 led_cdev->blink_delay_off = 0;
111}
112EXPORT_SYMBOL_GPL(led_stop_software_blink);
113
114void led_set_brightness(struct led_classdev *led_cdev,
115 enum led_brightness brightness)
116{
117 /* delay brightness setting if need to stop soft-blink timer */
118 if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) {
119 led_cdev->delayed_set_value = brightness;
120 schedule_work(&led_cdev->set_brightness_work);
121 return;
122 }
113 123
114 __led_set_brightness(led_cdev, brightness); 124 __led_set_brightness(led_cdev, brightness);
115} 125}
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 363975b3c925..262eb4193710 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -102,6 +102,12 @@ EXPORT_SYMBOL_GPL(led_trigger_show);
102void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig) 102void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
103{ 103{
104 unsigned long flags; 104 unsigned long flags;
105 char *event = NULL;
106 char *envp[2];
107 const char *name;
108
109 name = trig ? trig->name : "none";
110 event = kasprintf(GFP_KERNEL, "TRIGGER=%s", name);
105 111
106 /* Remove any existing trigger */ 112 /* Remove any existing trigger */
107 if (led_cdev->trigger) { 113 if (led_cdev->trigger) {
@@ -109,6 +115,8 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
109 list_del(&led_cdev->trig_list); 115 list_del(&led_cdev->trig_list);
110 write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock, 116 write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock,
111 flags); 117 flags);
118 cancel_work_sync(&led_cdev->set_brightness_work);
119 led_stop_software_blink(led_cdev);
112 if (led_cdev->trigger->deactivate) 120 if (led_cdev->trigger->deactivate)
113 led_cdev->trigger->deactivate(led_cdev); 121 led_cdev->trigger->deactivate(led_cdev);
114 led_cdev->trigger = NULL; 122 led_cdev->trigger = NULL;
@@ -122,6 +130,13 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
122 if (trig->activate) 130 if (trig->activate)
123 trig->activate(led_cdev); 131 trig->activate(led_cdev);
124 } 132 }
133
134 if (event) {
135 envp[0] = event;
136 envp[1] = NULL;
137 kobject_uevent_env(&led_cdev->dev->kobj, KOBJ_CHANGE, envp);
138 kfree(event);
139 }
125} 140}
126EXPORT_SYMBOL_GPL(led_trigger_set); 141EXPORT_SYMBOL_GPL(led_trigger_set);
127 142
@@ -224,7 +239,7 @@ void led_trigger_event(struct led_trigger *trig,
224 struct led_classdev *led_cdev; 239 struct led_classdev *led_cdev;
225 240
226 led_cdev = list_entry(entry, struct led_classdev, trig_list); 241 led_cdev = list_entry(entry, struct led_classdev, trig_list);
227 __led_set_brightness(led_cdev, brightness); 242 led_set_brightness(led_cdev, brightness);
228 } 243 }
229 read_unlock(&trig->leddev_list_lock); 244 read_unlock(&trig->leddev_list_lock);
230} 245}
diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
index 1ed1677c916f..e024b0b1c3b1 100644
--- a/drivers/leds/leds-clevo-mail.c
+++ b/drivers/leds/leds-clevo-mail.c
@@ -31,7 +31,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
31} 31}
32 32
33/* 33/*
34 * struct mail_led_whitelist - List of known good models 34 * struct clevo_mail_led_dmi_table - List of known good models
35 * 35 *
36 * Contains the known good models this driver is compatible with. 36 * Contains the known good models this driver is compatible with.
37 * When adding a new model try to be as strict as possible. This 37 * When adding a new model try to be as strict as possible. This
@@ -39,7 +39,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
39 * detected as working, but in reality it is not) as low as 39 * detected as working, but in reality it is not) as low as
40 * possible. 40 * possible.
41 */ 41 */
42static struct dmi_system_id __initdata mail_led_whitelist[] = { 42static struct dmi_system_id __initdata clevo_mail_led_dmi_table[] = {
43 { 43 {
44 .callback = clevo_mail_led_dmi_callback, 44 .callback = clevo_mail_led_dmi_callback,
45 .ident = "Clevo D410J", 45 .ident = "Clevo D410J",
@@ -59,11 +59,10 @@ static struct dmi_system_id __initdata mail_led_whitelist[] = {
59 }, 59 },
60 { 60 {
61 .callback = clevo_mail_led_dmi_callback, 61 .callback = clevo_mail_led_dmi_callback,
62 .ident = "Positivo Mobile", 62 .ident = "Clevo M5x0V",
63 .matches = { 63 .matches = {
64 DMI_MATCH(DMI_BOARD_VENDOR, "CLEVO Co. "), 64 DMI_MATCH(DMI_BOARD_VENDOR, "CLEVO Co. "),
65 DMI_MATCH(DMI_BOARD_NAME, "M5X0V "), 65 DMI_MATCH(DMI_BOARD_NAME, "M5X0V "),
66 DMI_MATCH(DMI_PRODUCT_NAME, "Positivo Mobile"),
67 DMI_MATCH(DMI_PRODUCT_VERSION, "VT6198") 66 DMI_MATCH(DMI_PRODUCT_VERSION, "VT6198")
68 } 67 }
69 }, 68 },
@@ -89,6 +88,7 @@ static struct dmi_system_id __initdata mail_led_whitelist[] = {
89 }, 88 },
90 { } 89 { }
91}; 90};
91MODULE_DEVICE_TABLE(dmi, clevo_mail_led_dmi_table);
92 92
93static void clevo_mail_led_set(struct led_classdev *led_cdev, 93static void clevo_mail_led_set(struct led_classdev *led_cdev,
94 enum led_brightness value) 94 enum led_brightness value)
@@ -180,7 +180,7 @@ static int __init clevo_mail_led_init(void)
180 180
181 /* Check with the help of DMI if we are running on supported hardware */ 181 /* Check with the help of DMI if we are running on supported hardware */
182 if (!nodetect) { 182 if (!nodetect) {
183 count = dmi_check_system(mail_led_whitelist); 183 count = dmi_check_system(clevo_mail_led_dmi_table);
184 } else { 184 } else {
185 count = 1; 185 count = 1;
186 printk(KERN_ERR KBUILD_MODNAME ": Skipping DMI detection. " 186 printk(KERN_ERR KBUILD_MODNAME ": Skipping DMI detection. "
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index c032b2180340..087d1e66f4f7 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -20,6 +20,7 @@
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/workqueue.h> 21#include <linux/workqueue.h>
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/pinctrl/consumer.h>
23 24
24struct gpio_led_data { 25struct gpio_led_data {
25 struct led_classdev cdev; 26 struct led_classdev cdev;
@@ -170,11 +171,10 @@ static struct gpio_leds_priv * __devinit gpio_leds_create_of(struct platform_dev
170{ 171{
171 struct device_node *np = pdev->dev.of_node, *child; 172 struct device_node *np = pdev->dev.of_node, *child;
172 struct gpio_leds_priv *priv; 173 struct gpio_leds_priv *priv;
173 int count = 0, ret; 174 int count, ret;
174 175
175 /* count LEDs in this device, so we know how much to allocate */ 176 /* count LEDs in this device, so we know how much to allocate */
176 for_each_child_of_node(np, child) 177 count = of_get_child_count(np);
177 count++;
178 if (!count) 178 if (!count)
179 return NULL; 179 return NULL;
180 180
@@ -228,7 +228,6 @@ static struct gpio_leds_priv * __devinit gpio_leds_create_of(struct platform_dev
228{ 228{
229 return NULL; 229 return NULL;
230} 230}
231#define of_gpio_leds_match NULL
232#endif /* CONFIG_OF_GPIO */ 231#endif /* CONFIG_OF_GPIO */
233 232
234 233
@@ -236,8 +235,14 @@ static int __devinit gpio_led_probe(struct platform_device *pdev)
236{ 235{
237 struct gpio_led_platform_data *pdata = pdev->dev.platform_data; 236 struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
238 struct gpio_leds_priv *priv; 237 struct gpio_leds_priv *priv;
238 struct pinctrl *pinctrl;
239 int i, ret = 0; 239 int i, ret = 0;
240 240
241 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
242 if (IS_ERR(pinctrl))
243 dev_warn(&pdev->dev,
244 "pins are not configured from the driver\n");
245
241 if (pdata && pdata->num_leds) { 246 if (pdata && pdata->num_leds) {
242 priv = devm_kzalloc(&pdev->dev, 247 priv = devm_kzalloc(&pdev->dev,
243 sizeof_gpio_leds_priv(pdata->num_leds), 248 sizeof_gpio_leds_priv(pdata->num_leds),
@@ -270,13 +275,13 @@ static int __devinit gpio_led_probe(struct platform_device *pdev)
270 275
271static int __devexit gpio_led_remove(struct platform_device *pdev) 276static int __devexit gpio_led_remove(struct platform_device *pdev)
272{ 277{
273 struct gpio_leds_priv *priv = dev_get_drvdata(&pdev->dev); 278 struct gpio_leds_priv *priv = platform_get_drvdata(pdev);
274 int i; 279 int i;
275 280
276 for (i = 0; i < priv->num_leds; i++) 281 for (i = 0; i < priv->num_leds; i++)
277 delete_gpio_led(&priv->leds[i]); 282 delete_gpio_led(&priv->leds[i]);
278 283
279 dev_set_drvdata(&pdev->dev, NULL); 284 platform_set_drvdata(pdev, NULL);
280 285
281 return 0; 286 return 0;
282} 287}
@@ -287,7 +292,7 @@ static struct platform_driver gpio_led_driver = {
287 .driver = { 292 .driver = {
288 .name = "leds-gpio", 293 .name = "leds-gpio",
289 .owner = THIS_MODULE, 294 .owner = THIS_MODULE,
290 .of_match_table = of_gpio_leds_match, 295 .of_match_table = of_match_ptr(of_gpio_leds_match),
291 }, 296 },
292}; 297};
293 298
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 23637bdb275d..b26306f6724d 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -150,7 +150,7 @@ static int lm3530_get_mode_from_str(const char *str)
150 if (sysfs_streq(str, mode_map[i].mode)) 150 if (sysfs_streq(str, mode_map[i].mode))
151 return mode_map[i].mode_val; 151 return mode_map[i].mode_val;
152 152
153 return -1; 153 return -EINVAL;
154} 154}
155 155
156static void lm3530_als_configure(struct lm3530_platform_data *pdata, 156static void lm3530_als_configure(struct lm3530_platform_data *pdata,
@@ -358,7 +358,7 @@ static ssize_t lm3530_mode_set(struct device *dev, struct device_attribute
358 mode = lm3530_get_mode_from_str(buf); 358 mode = lm3530_get_mode_from_str(buf);
359 if (mode < 0) { 359 if (mode < 0) {
360 dev_err(dev, "Invalid mode\n"); 360 dev_err(dev, "Invalid mode\n");
361 return -EINVAL; 361 return mode;
362 } 362 }
363 363
364 drvdata->mode = mode; 364 drvdata->mode = mode;
@@ -416,7 +416,7 @@ static int __devinit lm3530_probe(struct i2c_client *client,
416 416
417 i2c_set_clientdata(client, drvdata); 417 i2c_set_clientdata(client, drvdata);
418 418
419 drvdata->regulator = regulator_get(&client->dev, "vin"); 419 drvdata->regulator = devm_regulator_get(&client->dev, "vin");
420 if (IS_ERR(drvdata->regulator)) { 420 if (IS_ERR(drvdata->regulator)) {
421 dev_err(&client->dev, "regulator get failed\n"); 421 dev_err(&client->dev, "regulator get failed\n");
422 err = PTR_ERR(drvdata->regulator); 422 err = PTR_ERR(drvdata->regulator);
@@ -429,15 +429,13 @@ static int __devinit lm3530_probe(struct i2c_client *client,
429 if (err < 0) { 429 if (err < 0) {
430 dev_err(&client->dev, 430 dev_err(&client->dev,
431 "Register Init failed: %d\n", err); 431 "Register Init failed: %d\n", err);
432 err = -ENODEV; 432 return err;
433 goto err_reg_init;
434 } 433 }
435 } 434 }
436 err = led_classdev_register(&client->dev, &drvdata->led_dev); 435 err = led_classdev_register(&client->dev, &drvdata->led_dev);
437 if (err < 0) { 436 if (err < 0) {
438 dev_err(&client->dev, "Register led class failed: %d\n", err); 437 dev_err(&client->dev, "Register led class failed: %d\n", err);
439 err = -ENODEV; 438 return err;
440 goto err_class_register;
441 } 439 }
442 440
443 err = device_create_file(drvdata->led_dev.dev, &dev_attr_mode); 441 err = device_create_file(drvdata->led_dev.dev, &dev_attr_mode);
@@ -451,9 +449,6 @@ static int __devinit lm3530_probe(struct i2c_client *client,
451 449
452err_create_file: 450err_create_file:
453 led_classdev_unregister(&drvdata->led_dev); 451 led_classdev_unregister(&drvdata->led_dev);
454err_class_register:
455err_reg_init:
456 regulator_put(drvdata->regulator);
457 return err; 452 return err;
458} 453}
459 454
@@ -465,7 +460,6 @@ static int __devexit lm3530_remove(struct i2c_client *client)
465 460
466 if (drvdata->enable) 461 if (drvdata->enable)
467 regulator_disable(drvdata->regulator); 462 regulator_disable(drvdata->regulator);
468 regulator_put(drvdata->regulator);
469 led_classdev_unregister(&drvdata->led_dev); 463 led_classdev_unregister(&drvdata->led_dev);
470 return 0; 464 return 0;
471} 465}
diff --git a/drivers/leds/leds-lm3556.c b/drivers/leds/leds-lm3556.c
deleted file mode 100644
index 3062abd9a532..000000000000
--- a/drivers/leds/leds-lm3556.c
+++ /dev/null
@@ -1,512 +0,0 @@
1/*
2 * Simple driver for Texas Instruments LM3556 LED Flash driver chip (Rev0x03)
3 * Copyright (C) 2012 Texas Instruments
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Please refer Documentation/leds/leds-lm3556.txt file.
10 */
11#include <linux/module.h>
12#include <linux/delay.h>
13#include <linux/i2c.h>
14#include <linux/leds.h>
15#include <linux/slab.h>
16#include <linux/platform_device.h>
17#include <linux/fs.h>
18#include <linux/regmap.h>
19#include <linux/platform_data/leds-lm3556.h>
20
21#define REG_FILT_TIME (0x0)
22#define REG_IVFM_MODE (0x1)
23#define REG_NTC (0x2)
24#define REG_INDIC_TIME (0x3)
25#define REG_INDIC_BLINK (0x4)
26#define REG_INDIC_PERIOD (0x5)
27#define REG_TORCH_TIME (0x6)
28#define REG_CONF (0x7)
29#define REG_FLASH (0x8)
30#define REG_I_CTRL (0x9)
31#define REG_ENABLE (0xA)
32#define REG_FLAG (0xB)
33#define REG_MAX (0xB)
34
35#define IVFM_FILTER_TIME_SHIFT (3)
36#define UVLO_EN_SHIFT (7)
37#define HYSTERSIS_SHIFT (5)
38#define IVM_D_TH_SHIFT (2)
39#define IVFM_ADJ_MODE_SHIFT (0)
40#define NTC_EVENT_LVL_SHIFT (5)
41#define NTC_TRIP_TH_SHIFT (2)
42#define NTC_BIAS_I_LVL_SHIFT (0)
43#define INDIC_RAMP_UP_TIME_SHIFT (3)
44#define INDIC_RAMP_DN_TIME_SHIFT (0)
45#define INDIC_N_BLANK_SHIFT (4)
46#define INDIC_PULSE_TIME_SHIFT (0)
47#define INDIC_N_PERIOD_SHIFT (0)
48#define TORCH_RAMP_UP_TIME_SHIFT (3)
49#define TORCH_RAMP_DN_TIME_SHIFT (0)
50#define STROBE_USUAGE_SHIFT (7)
51#define STROBE_PIN_POLARITY_SHIFT (6)
52#define TORCH_PIN_POLARITY_SHIFT (5)
53#define TX_PIN_POLARITY_SHIFT (4)
54#define TX_EVENT_LVL_SHIFT (3)
55#define IVFM_EN_SHIFT (2)
56#define NTC_MODE_SHIFT (1)
57#define INDIC_MODE_SHIFT (0)
58#define INDUCTOR_I_LIMIT_SHIFT (6)
59#define FLASH_RAMP_TIME_SHIFT (3)
60#define FLASH_TOUT_TIME_SHIFT (0)
61#define TORCH_I_SHIFT (4)
62#define FLASH_I_SHIFT (0)
63#define NTC_EN_SHIFT (7)
64#define TX_PIN_EN_SHIFT (6)
65#define STROBE_PIN_EN_SHIFT (5)
66#define TORCH_PIN_EN_SHIFT (4)
67#define PRECHG_MODE_EN_SHIFT (3)
68#define PASS_MODE_ONLY_EN_SHIFT (2)
69#define MODE_BITS_SHIFT (0)
70
71#define IVFM_FILTER_TIME_MASK (0x3)
72#define UVLO_EN_MASK (0x1)
73#define HYSTERSIS_MASK (0x3)
74#define IVM_D_TH_MASK (0x7)
75#define IVFM_ADJ_MODE_MASK (0x3)
76#define NTC_EVENT_LVL_MASK (0x1)
77#define NTC_TRIP_TH_MASK (0x7)
78#define NTC_BIAS_I_LVL_MASK (0x3)
79#define INDIC_RAMP_UP_TIME_MASK (0x7)
80#define INDIC_RAMP_DN_TIME_MASK (0x7)
81#define INDIC_N_BLANK_MASK (0x7)
82#define INDIC_PULSE_TIME_MASK (0x7)
83#define INDIC_N_PERIOD_MASK (0x7)
84#define TORCH_RAMP_UP_TIME_MASK (0x7)
85#define TORCH_RAMP_DN_TIME_MASK (0x7)
86#define STROBE_USUAGE_MASK (0x1)
87#define STROBE_PIN_POLARITY_MASK (0x1)
88#define TORCH_PIN_POLARITY_MASK (0x1)
89#define TX_PIN_POLARITY_MASK (0x1)
90#define TX_EVENT_LVL_MASK (0x1)
91#define IVFM_EN_MASK (0x1)
92#define NTC_MODE_MASK (0x1)
93#define INDIC_MODE_MASK (0x1)
94#define INDUCTOR_I_LIMIT_MASK (0x3)
95#define FLASH_RAMP_TIME_MASK (0x7)
96#define FLASH_TOUT_TIME_MASK (0x7)
97#define TORCH_I_MASK (0x7)
98#define FLASH_I_MASK (0xF)
99#define NTC_EN_MASK (0x1)
100#define TX_PIN_EN_MASK (0x1)
101#define STROBE_PIN_EN_MASK (0x1)
102#define TORCH_PIN_EN_MASK (0x1)
103#define PRECHG_MODE_EN_MASK (0x1)
104#define PASS_MODE_ONLY_EN_MASK (0x1)
105#define MODE_BITS_MASK (0x13)
106#define EX_PIN_CONTROL_MASK (0xF1)
107#define EX_PIN_ENABLE_MASK (0x70)
108
109enum lm3556_indic_pulse_time {
110 PULSE_TIME_0_MS = 0,
111 PULSE_TIME_32_MS,
112 PULSE_TIME_64_MS,
113 PULSE_TIME_92_MS,
114 PULSE_TIME_128_MS,
115 PULSE_TIME_160_MS,
116 PULSE_TIME_196_MS,
117 PULSE_TIME_224_MS,
118 PULSE_TIME_256_MS,
119 PULSE_TIME_288_MS,
120 PULSE_TIME_320_MS,
121 PULSE_TIME_352_MS,
122 PULSE_TIME_384_MS,
123 PULSE_TIME_416_MS,
124 PULSE_TIME_448_MS,
125 PULSE_TIME_480_MS,
126};
127
128enum lm3556_indic_n_blank {
129 INDIC_N_BLANK_0 = 0,
130 INDIC_N_BLANK_1,
131 INDIC_N_BLANK_2,
132 INDIC_N_BLANK_3,
133 INDIC_N_BLANK_4,
134 INDIC_N_BLANK_5,
135 INDIC_N_BLANK_6,
136 INDIC_N_BLANK_7,
137 INDIC_N_BLANK_8,
138 INDIC_N_BLANK_9,
139 INDIC_N_BLANK_10,
140 INDIC_N_BLANK_11,
141 INDIC_N_BLANK_12,
142 INDIC_N_BLANK_13,
143 INDIC_N_BLANK_14,
144 INDIC_N_BLANK_15,
145};
146
147enum lm3556_indic_period {
148 INDIC_PERIOD_0 = 0,
149 INDIC_PERIOD_1,
150 INDIC_PERIOD_2,
151 INDIC_PERIOD_3,
152 INDIC_PERIOD_4,
153 INDIC_PERIOD_5,
154 INDIC_PERIOD_6,
155 INDIC_PERIOD_7,
156};
157
158enum lm3556_mode {
159 MODES_STASNDBY = 0,
160 MODES_INDIC,
161 MODES_TORCH,
162 MODES_FLASH
163};
164
165#define INDIC_PATTERN_SIZE 4
166
167struct indicator {
168 u8 blinking;
169 u8 period_cnt;
170};
171
172struct lm3556_chip_data {
173 struct device *dev;
174
175 struct led_classdev cdev_flash;
176 struct led_classdev cdev_torch;
177 struct led_classdev cdev_indicator;
178
179 struct lm3556_platform_data *pdata;
180 struct regmap *regmap;
181 struct mutex lock;
182
183 unsigned int last_flag;
184};
185
186/* indicator pattern */
187static struct indicator indicator_pattern[INDIC_PATTERN_SIZE] = {
188 [0] = {(INDIC_N_BLANK_1 << INDIC_N_BLANK_SHIFT)
189 | PULSE_TIME_32_MS, INDIC_PERIOD_1},
190 [1] = {(INDIC_N_BLANK_15 << INDIC_N_BLANK_SHIFT)
191 | PULSE_TIME_32_MS, INDIC_PERIOD_2},
192 [2] = {(INDIC_N_BLANK_10 << INDIC_N_BLANK_SHIFT)
193 | PULSE_TIME_32_MS, INDIC_PERIOD_4},
194 [3] = {(INDIC_N_BLANK_5 << INDIC_N_BLANK_SHIFT)
195 | PULSE_TIME_32_MS, INDIC_PERIOD_7},
196};
197
198/* chip initialize */
199static int __devinit lm3556_chip_init(struct lm3556_chip_data *chip)
200{
201 unsigned int reg_val;
202 int ret;
203 struct lm3556_platform_data *pdata = chip->pdata;
204
205 /* set config register */
206 ret = regmap_read(chip->regmap, REG_CONF, &reg_val);
207 if (ret < 0) {
208 dev_err(chip->dev, "Failed to read REG_CONF Register\n");
209 goto out;
210 }
211
212 reg_val &= (~EX_PIN_CONTROL_MASK);
213 reg_val |= ((pdata->torch_pin_polarity & 0x01)
214 << TORCH_PIN_POLARITY_SHIFT);
215 reg_val |= ((pdata->strobe_usuage & 0x01) << STROBE_USUAGE_SHIFT);
216 reg_val |= ((pdata->strobe_pin_polarity & 0x01)
217 << STROBE_PIN_POLARITY_SHIFT);
218 reg_val |= ((pdata->tx_pin_polarity & 0x01) << TX_PIN_POLARITY_SHIFT);
219 reg_val |= ((pdata->indicator_mode & 0x01) << INDIC_MODE_SHIFT);
220
221 ret = regmap_write(chip->regmap, REG_CONF, reg_val);
222 if (ret < 0) {
223 dev_err(chip->dev, "Failed to write REG_CONF Regisgter\n");
224 goto out;
225 }
226
227 /* set enable register */
228 ret = regmap_read(chip->regmap, REG_ENABLE, &reg_val);
229 if (ret < 0) {
230 dev_err(chip->dev, "Failed to read REG_ENABLE Register\n");
231 goto out;
232 }
233
234 reg_val &= (~EX_PIN_ENABLE_MASK);
235 reg_val |= ((pdata->torch_pin_en & 0x01) << TORCH_PIN_EN_SHIFT);
236 reg_val |= ((pdata->strobe_pin_en & 0x01) << STROBE_PIN_EN_SHIFT);
237 reg_val |= ((pdata->tx_pin_en & 0x01) << TX_PIN_EN_SHIFT);
238
239 ret = regmap_write(chip->regmap, REG_ENABLE, reg_val);
240 if (ret < 0) {
241 dev_err(chip->dev, "Failed to write REG_ENABLE Regisgter\n");
242 goto out;
243 }
244
245out:
246 return ret;
247}
248
249/* chip control */
250static int lm3556_control(struct lm3556_chip_data *chip,
251 u8 brightness, enum lm3556_mode opmode)
252{
253 int ret;
254 struct lm3556_platform_data *pdata = chip->pdata;
255
256 ret = regmap_read(chip->regmap, REG_FLAG, &chip->last_flag);
257 if (ret < 0) {
258 dev_err(chip->dev, "Failed to read REG_FLAG Register\n");
259 goto out;
260 }
261
262 if (chip->last_flag)
263 dev_info(chip->dev, "Last FLAG is 0x%x\n", chip->last_flag);
264
265 /* brightness 0 means off-state */
266 if (!brightness)
267 opmode = MODES_STASNDBY;
268
269 switch (opmode) {
270 case MODES_TORCH:
271 ret = regmap_update_bits(chip->regmap, REG_I_CTRL,
272 TORCH_I_MASK << TORCH_I_SHIFT,
273 (brightness - 1) << TORCH_I_SHIFT);
274
275 if (pdata->torch_pin_en)
276 opmode |= (TORCH_PIN_EN_MASK << TORCH_PIN_EN_SHIFT);
277 break;
278
279 case MODES_FLASH:
280 ret = regmap_update_bits(chip->regmap, REG_I_CTRL,
281 FLASH_I_MASK << FLASH_I_SHIFT,
282 (brightness - 1) << FLASH_I_SHIFT);
283 break;
284
285 case MODES_INDIC:
286 ret = regmap_update_bits(chip->regmap, REG_I_CTRL,
287 TORCH_I_MASK << TORCH_I_SHIFT,
288 (brightness - 1) << TORCH_I_SHIFT);
289 break;
290
291 case MODES_STASNDBY:
292 if (pdata->torch_pin_en)
293 opmode |= (TORCH_PIN_EN_MASK << TORCH_PIN_EN_SHIFT);
294 break;
295
296 default:
297 return ret;
298 }
299 if (ret < 0) {
300 dev_err(chip->dev, "Failed to write REG_I_CTRL Register\n");
301 goto out;
302 }
303 ret = regmap_update_bits(chip->regmap, REG_ENABLE,
304 MODE_BITS_MASK << MODE_BITS_SHIFT,
305 opmode << MODE_BITS_SHIFT);
306
307out:
308 return ret;
309}
310
311/* torch */
312static void lm3556_torch_brightness_set(struct led_classdev *cdev,
313 enum led_brightness brightness)
314{
315 struct lm3556_chip_data *chip =
316 container_of(cdev, struct lm3556_chip_data, cdev_torch);
317
318 mutex_lock(&chip->lock);
319 lm3556_control(chip, brightness, MODES_TORCH);
320 mutex_unlock(&chip->lock);
321}
322
323/* flash */
324static void lm3556_strobe_brightness_set(struct led_classdev *cdev,
325 enum led_brightness brightness)
326{
327 struct lm3556_chip_data *chip =
328 container_of(cdev, struct lm3556_chip_data, cdev_flash);
329
330 mutex_lock(&chip->lock);
331 lm3556_control(chip, brightness, MODES_FLASH);
332 mutex_unlock(&chip->lock);
333}
334
335/* indicator */
336static void lm3556_indicator_brightness_set(struct led_classdev *cdev,
337 enum led_brightness brightness)
338{
339 struct lm3556_chip_data *chip =
340 container_of(cdev, struct lm3556_chip_data, cdev_indicator);
341
342 mutex_lock(&chip->lock);
343 lm3556_control(chip, brightness, MODES_INDIC);
344 mutex_unlock(&chip->lock);
345}
346
347/* indicator pattern */
348static ssize_t lm3556_indicator_pattern_store(struct device *dev,
349 struct device_attribute *devAttr,
350 const char *buf, size_t size)
351{
352 ssize_t ret;
353 struct led_classdev *led_cdev = dev_get_drvdata(dev);
354 struct lm3556_chip_data *chip =
355 container_of(led_cdev, struct lm3556_chip_data, cdev_indicator);
356 unsigned int state;
357
358 ret = kstrtouint(buf, 10, &state);
359 if (ret)
360 goto out;
361 if (state > INDIC_PATTERN_SIZE - 1)
362 state = INDIC_PATTERN_SIZE - 1;
363
364 ret = regmap_write(chip->regmap, REG_INDIC_BLINK,
365 indicator_pattern[state].blinking);
366 if (ret < 0) {
367 dev_err(chip->dev, "Failed to write REG_ENABLE Regisgter\n");
368 goto out;
369 }
370
371 ret = regmap_write(chip->regmap, REG_INDIC_PERIOD,
372 indicator_pattern[state].period_cnt);
373 if (ret < 0) {
374 dev_err(chip->dev, "Failed to write REG_ENABLE Regisgter\n");
375 goto out;
376 }
377
378 return size;
379out:
380 dev_err(chip->dev, "Indicator pattern doesn't saved\n");
381 return size;
382}
383
384static DEVICE_ATTR(pattern, 0666, NULL, lm3556_indicator_pattern_store);
385
386static const struct regmap_config lm3556_regmap = {
387 .reg_bits = 8,
388 .val_bits = 8,
389 .max_register = REG_MAX,
390};
391
392/* module initialize */
393static int __devinit lm3556_probe(struct i2c_client *client,
394 const struct i2c_device_id *id)
395{
396 struct lm3556_platform_data *pdata = client->dev.platform_data;
397 struct lm3556_chip_data *chip;
398
399 int err;
400
401 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
402 dev_err(&client->dev, "i2c functionality check fail.\n");
403 return -EOPNOTSUPP;
404 }
405
406 if (pdata == NULL) {
407 dev_err(&client->dev, "Needs Platform Data.\n");
408 return -ENODATA;
409 }
410
411 chip =
412 devm_kzalloc(&client->dev, sizeof(struct lm3556_chip_data),
413 GFP_KERNEL);
414 if (!chip)
415 return -ENOMEM;
416
417 chip->dev = &client->dev;
418 chip->pdata = pdata;
419
420 chip->regmap = devm_regmap_init_i2c(client, &lm3556_regmap);
421 if (IS_ERR(chip->regmap)) {
422 err = PTR_ERR(chip->regmap);
423 dev_err(&client->dev, "Failed to allocate register map: %d\n",
424 err);
425 return err;
426 }
427
428 mutex_init(&chip->lock);
429 i2c_set_clientdata(client, chip);
430
431 err = lm3556_chip_init(chip);
432 if (err < 0)
433 goto err_out;
434
435 /* flash */
436 chip->cdev_flash.name = "flash";
437 chip->cdev_flash.max_brightness = 16;
438 chip->cdev_flash.brightness_set = lm3556_strobe_brightness_set;
439 err = led_classdev_register((struct device *)
440 &client->dev, &chip->cdev_flash);
441 if (err < 0)
442 goto err_out;
443 /* torch */
444 chip->cdev_torch.name = "torch";
445 chip->cdev_torch.max_brightness = 8;
446 chip->cdev_torch.brightness_set = lm3556_torch_brightness_set;
447 err = led_classdev_register((struct device *)
448 &client->dev, &chip->cdev_torch);
449 if (err < 0)
450 goto err_create_torch_file;
451 /* indicator */
452 chip->cdev_indicator.name = "indicator";
453 chip->cdev_indicator.max_brightness = 8;
454 chip->cdev_indicator.brightness_set = lm3556_indicator_brightness_set;
455 err = led_classdev_register((struct device *)
456 &client->dev, &chip->cdev_indicator);
457 if (err < 0)
458 goto err_create_indicator_file;
459
460 err = device_create_file(chip->cdev_indicator.dev, &dev_attr_pattern);
461 if (err < 0)
462 goto err_create_pattern_file;
463
464 dev_info(&client->dev, "LM3556 is initialized\n");
465 return 0;
466
467err_create_pattern_file:
468 led_classdev_unregister(&chip->cdev_indicator);
469err_create_indicator_file:
470 led_classdev_unregister(&chip->cdev_torch);
471err_create_torch_file:
472 led_classdev_unregister(&chip->cdev_flash);
473err_out:
474 return err;
475}
476
477static int __devexit lm3556_remove(struct i2c_client *client)
478{
479 struct lm3556_chip_data *chip = i2c_get_clientdata(client);
480
481 device_remove_file(chip->cdev_indicator.dev, &dev_attr_pattern);
482 led_classdev_unregister(&chip->cdev_indicator);
483 led_classdev_unregister(&chip->cdev_torch);
484 led_classdev_unregister(&chip->cdev_flash);
485 regmap_write(chip->regmap, REG_ENABLE, 0);
486 return 0;
487}
488
489static const struct i2c_device_id lm3556_id[] = {
490 {LM3556_NAME, 0},
491 {}
492};
493
494MODULE_DEVICE_TABLE(i2c, lm3556_id);
495
496static struct i2c_driver lm3556_i2c_driver = {
497 .driver = {
498 .name = LM3556_NAME,
499 .owner = THIS_MODULE,
500 .pm = NULL,
501 },
502 .probe = lm3556_probe,
503 .remove = __devexit_p(lm3556_remove),
504 .id_table = lm3556_id,
505};
506
507module_i2c_driver(lm3556_i2c_driver);
508
509MODULE_DESCRIPTION("Texas Instruments Flash Lighting driver for LM3556");
510MODULE_AUTHOR("Daniel Jeong <daniel.jeong@ti.com>");
511MODULE_AUTHOR("G.Shark Jeong <gshark.jeong@gmail.com>");
512MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
new file mode 100644
index 000000000000..065ec015d67a
--- /dev/null
+++ b/drivers/leds/leds-lm355x.c
@@ -0,0 +1,572 @@
1/*
2* Simple driver for Texas Instruments LM355x LED Flash driver chip
3* Copyright (C) 2012 Texas Instruments
4*
5* This program is free software; you can redistribute it and/or modify
6* it under the terms of the GNU General Public License version 2 as
7* published by the Free Software Foundation.
8*/
9
10#include <linux/module.h>
11#include <linux/delay.h>
12#include <linux/i2c.h>
13#include <linux/gpio.h>
14#include <linux/leds.h>
15#include <linux/slab.h>
16#include <linux/platform_device.h>
17#include <linux/fs.h>
18#include <linux/regmap.h>
19#include <linux/workqueue.h>
20#include <linux/platform_data/leds-lm355x.h>
21
22enum lm355x_type {
23 CHIP_LM3554 = 0,
24 CHIP_LM3556,
25};
26
27enum lm355x_regs {
28 REG_FLAG = 0,
29 REG_TORCH_CFG,
30 REG_TORCH_CTRL,
31 REG_STROBE_CFG,
32 REG_FLASH_CTRL,
33 REG_INDI_CFG,
34 REG_INDI_CTRL,
35 REG_OPMODE,
36 REG_MAX,
37};
38
39/* operation mode */
40enum lm355x_mode {
41 MODE_SHDN = 0,
42 MODE_INDIC,
43 MODE_TORCH,
44 MODE_FLASH
45};
46
47/* register map info. */
48struct lm355x_reg_data {
49 u8 regno;
50 u8 mask;
51 u8 shift;
52};
53
54struct lm355x_chip_data {
55 struct device *dev;
56 enum lm355x_type type;
57
58 struct led_classdev cdev_flash;
59 struct led_classdev cdev_torch;
60 struct led_classdev cdev_indicator;
61
62 struct work_struct work_flash;
63 struct work_struct work_torch;
64 struct work_struct work_indicator;
65
66 u8 br_flash;
67 u8 br_torch;
68 u8 br_indicator;
69
70 struct lm355x_platform_data *pdata;
71 struct regmap *regmap;
72 struct mutex lock;
73
74 unsigned int last_flag;
75 struct lm355x_reg_data *regs;
76};
77
78/* specific indicator function for lm3556 */
79enum lm3556_indic_pulse_time {
80 PULSE_TIME_0_MS = 0,
81 PULSE_TIME_32_MS,
82 PULSE_TIME_64_MS,
83 PULSE_TIME_92_MS,
84 PULSE_TIME_128_MS,
85 PULSE_TIME_160_MS,
86 PULSE_TIME_196_MS,
87 PULSE_TIME_224_MS,
88 PULSE_TIME_256_MS,
89 PULSE_TIME_288_MS,
90 PULSE_TIME_320_MS,
91 PULSE_TIME_352_MS,
92 PULSE_TIME_384_MS,
93 PULSE_TIME_416_MS,
94 PULSE_TIME_448_MS,
95 PULSE_TIME_480_MS,
96};
97
98enum lm3556_indic_n_blank {
99 INDIC_N_BLANK_0 = 0,
100 INDIC_N_BLANK_1,
101 INDIC_N_BLANK_2,
102 INDIC_N_BLANK_3,
103 INDIC_N_BLANK_4,
104 INDIC_N_BLANK_5,
105 INDIC_N_BLANK_6,
106 INDIC_N_BLANK_7,
107 INDIC_N_BLANK_8,
108 INDIC_N_BLANK_9,
109 INDIC_N_BLANK_10,
110 INDIC_N_BLANK_11,
111 INDIC_N_BLANK_12,
112 INDIC_N_BLANK_13,
113 INDIC_N_BLANK_14,
114 INDIC_N_BLANK_15,
115};
116
117enum lm3556_indic_period {
118 INDIC_PERIOD_0 = 0,
119 INDIC_PERIOD_1,
120 INDIC_PERIOD_2,
121 INDIC_PERIOD_3,
122 INDIC_PERIOD_4,
123 INDIC_PERIOD_5,
124 INDIC_PERIOD_6,
125 INDIC_PERIOD_7,
126};
127
128#define INDIC_PATTERN_SIZE 4
129
130struct indicator {
131 u8 blinking;
132 u8 period_cnt;
133};
134
135/* indicator pattern data only for lm3556 */
136static struct indicator indicator_pattern[INDIC_PATTERN_SIZE] = {
137 [0] = {(INDIC_N_BLANK_1 << 4) | PULSE_TIME_32_MS, INDIC_PERIOD_1},
138 [1] = {(INDIC_N_BLANK_15 << 4) | PULSE_TIME_32_MS, INDIC_PERIOD_2},
139 [2] = {(INDIC_N_BLANK_10 << 4) | PULSE_TIME_32_MS, INDIC_PERIOD_4},
140 [3] = {(INDIC_N_BLANK_5 << 4) | PULSE_TIME_32_MS, INDIC_PERIOD_7},
141};
142
143static struct lm355x_reg_data lm3554_regs[REG_MAX] = {
144 [REG_FLAG] = {0xD0, 0xBF, 0},
145 [REG_TORCH_CFG] = {0xE0, 0x80, 7},
146 [REG_TORCH_CTRL] = {0xA0, 0x38, 3},
147 [REG_STROBE_CFG] = {0xE0, 0x04, 2},
148 [REG_FLASH_CTRL] = {0xB0, 0x78, 3},
149 [REG_INDI_CFG] = {0xE0, 0x08, 3},
150 [REG_INDI_CTRL] = {0xA0, 0xC0, 6},
151 [REG_OPMODE] = {0xA0, 0x03, 0},
152};
153
154static struct lm355x_reg_data lm3556_regs[REG_MAX] = {
155 [REG_FLAG] = {0x0B, 0xFF, 0},
156 [REG_TORCH_CFG] = {0x0A, 0x10, 4},
157 [REG_TORCH_CTRL] = {0x09, 0x70, 4},
158 [REG_STROBE_CFG] = {0x0A, 0x20, 5},
159 [REG_FLASH_CTRL] = {0x09, 0x0F, 0},
160 [REG_INDI_CFG] = {0xFF, 0xFF, 0},
161 [REG_INDI_CTRL] = {0x09, 0x70, 4},
162 [REG_OPMODE] = {0x0A, 0x03, 0},
163};
164
165static char lm355x_name[][I2C_NAME_SIZE] = {
166 [CHIP_LM3554] = LM3554_NAME,
167 [CHIP_LM3556] = LM3556_NAME,
168};
169
170/* chip initialize */
171static int __devinit lm355x_chip_init(struct lm355x_chip_data *chip)
172{
173 int ret;
174 unsigned int reg_val;
175 struct lm355x_platform_data *pdata = chip->pdata;
176
177 /* input and output pins configuration */
178 switch (chip->type) {
179 case CHIP_LM3554:
180 reg_val = pdata->pin_tx2 | pdata->ntc_pin;
181 ret = regmap_update_bits(chip->regmap, 0xE0, 0x28, reg_val);
182 if (ret < 0)
183 goto out;
184 reg_val = pdata->pass_mode;
185 ret = regmap_update_bits(chip->regmap, 0xA0, 0x04, reg_val);
186 if (ret < 0)
187 goto out;
188 break;
189
190 case CHIP_LM3556:
191 reg_val = pdata->pin_tx2 | pdata->ntc_pin | pdata->pass_mode;
192 ret = regmap_update_bits(chip->regmap, 0x0A, 0xC4, reg_val);
193 if (ret < 0)
194 goto out;
195 break;
196 default:
197 return -ENODATA;
198 }
199
200 return ret;
201out:
202 dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
203 return ret;
204}
205
206/* chip control */
207static void lm355x_control(struct lm355x_chip_data *chip,
208 u8 brightness, enum lm355x_mode opmode)
209{
210 int ret;
211 unsigned int reg_val;
212 struct lm355x_platform_data *pdata = chip->pdata;
213 struct lm355x_reg_data *preg = chip->regs;
214
215 ret = regmap_read(chip->regmap, preg[REG_FLAG].regno, &chip->last_flag);
216 if (ret < 0)
217 goto out;
218 if (chip->last_flag & preg[REG_FLAG].mask)
219 dev_info(chip->dev, "%s Last FLAG is 0x%x\n",
220 lm355x_name[chip->type],
221 chip->last_flag & preg[REG_FLAG].mask);
222 /* brightness 0 means shutdown */
223 if (!brightness)
224 opmode = MODE_SHDN;
225
226 switch (opmode) {
227 case MODE_TORCH:
228 ret =
229 regmap_update_bits(chip->regmap, preg[REG_TORCH_CTRL].regno,
230 preg[REG_TORCH_CTRL].mask,
231 (brightness - 1)
232 << preg[REG_TORCH_CTRL].shift);
233 if (ret < 0)
234 goto out;
235
236 if (pdata->pin_tx1 != LM355x_PIN_TORCH_DISABLE) {
237 ret =
238 regmap_update_bits(chip->regmap,
239 preg[REG_TORCH_CFG].regno,
240 preg[REG_TORCH_CFG].mask,
241 0x01 <<
242 preg[REG_TORCH_CFG].shift);
243 if (ret < 0)
244 goto out;
245 opmode = MODE_SHDN;
246 dev_info(chip->dev,
247 "torch brt is set - ext. torch pin mode\n");
248 }
249 break;
250
251 case MODE_FLASH:
252
253 ret =
254 regmap_update_bits(chip->regmap, preg[REG_FLASH_CTRL].regno,
255 preg[REG_FLASH_CTRL].mask,
256 (brightness - 1)
257 << preg[REG_FLASH_CTRL].shift);
258 if (ret < 0)
259 goto out;
260
261 if (pdata->pin_strobe != LM355x_PIN_STROBE_DISABLE) {
262 if (chip->type == CHIP_LM3554)
263 reg_val = 0x00;
264 else
265 reg_val = 0x01;
266 ret =
267 regmap_update_bits(chip->regmap,
268 preg[REG_STROBE_CFG].regno,
269 preg[REG_STROBE_CFG].mask,
270 reg_val <<
271 preg[REG_STROBE_CFG].shift);
272 if (ret < 0)
273 goto out;
274 opmode = MODE_SHDN;
275 dev_info(chip->dev,
276 "flash brt is set - ext. strobe pin mode\n");
277 }
278 break;
279
280 case MODE_INDIC:
281 ret =
282 regmap_update_bits(chip->regmap, preg[REG_INDI_CTRL].regno,
283 preg[REG_INDI_CTRL].mask,
284 (brightness - 1)
285 << preg[REG_INDI_CTRL].shift);
286 if (ret < 0)
287 goto out;
288
289 if (pdata->pin_tx2 != LM355x_PIN_TX_DISABLE) {
290 ret =
291 regmap_update_bits(chip->regmap,
292 preg[REG_INDI_CFG].regno,
293 preg[REG_INDI_CFG].mask,
294 0x01 <<
295 preg[REG_INDI_CFG].shift);
296 if (ret < 0)
297 goto out;
298 opmode = MODE_SHDN;
299 }
300 break;
301 case MODE_SHDN:
302 break;
303 default:
304 return;
305 }
306 /* operation mode control */
307 ret = regmap_update_bits(chip->regmap, preg[REG_OPMODE].regno,
308 preg[REG_OPMODE].mask,
309 opmode << preg[REG_OPMODE].shift);
310 if (ret < 0)
311 goto out;
312 return;
313out:
314 dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
315 return;
316}
317
318/* torch */
319static void lm355x_deferred_torch_brightness_set(struct work_struct *work)
320{
321 struct lm355x_chip_data *chip =
322 container_of(work, struct lm355x_chip_data, work_torch);
323
324 mutex_lock(&chip->lock);
325 lm355x_control(chip, chip->br_torch, MODE_TORCH);
326 mutex_unlock(&chip->lock);
327}
328
329static void lm355x_torch_brightness_set(struct led_classdev *cdev,
330 enum led_brightness brightness)
331{
332 struct lm355x_chip_data *chip =
333 container_of(cdev, struct lm355x_chip_data, cdev_torch);
334
335 chip->br_torch = brightness;
336 schedule_work(&chip->work_torch);
337}
338
339/* flash */
340static void lm355x_deferred_strobe_brightness_set(struct work_struct *work)
341{
342 struct lm355x_chip_data *chip =
343 container_of(work, struct lm355x_chip_data, work_flash);
344
345 mutex_lock(&chip->lock);
346 lm355x_control(chip, chip->br_flash, MODE_FLASH);
347 mutex_unlock(&chip->lock);
348}
349
350static void lm355x_strobe_brightness_set(struct led_classdev *cdev,
351 enum led_brightness brightness)
352{
353 struct lm355x_chip_data *chip =
354 container_of(cdev, struct lm355x_chip_data, cdev_flash);
355
356 chip->br_flash = brightness;
357 schedule_work(&chip->work_flash);
358}
359
360/* indicator */
361static void lm355x_deferred_indicator_brightness_set(struct work_struct *work)
362{
363 struct lm355x_chip_data *chip =
364 container_of(work, struct lm355x_chip_data, work_indicator);
365
366 mutex_lock(&chip->lock);
367 lm355x_control(chip, chip->br_indicator, MODE_INDIC);
368 mutex_unlock(&chip->lock);
369}
370
371static void lm355x_indicator_brightness_set(struct led_classdev *cdev,
372 enum led_brightness brightness)
373{
374 struct lm355x_chip_data *chip =
375 container_of(cdev, struct lm355x_chip_data, cdev_indicator);
376
377 chip->br_indicator = brightness;
378 schedule_work(&chip->work_indicator);
379}
380
381/* indicator pattern only for lm3556*/
382static ssize_t lm3556_indicator_pattern_store(struct device *dev,
383 struct device_attribute *devAttr,
384 const char *buf, size_t size)
385{
386 ssize_t ret;
387 struct led_classdev *led_cdev = dev_get_drvdata(dev);
388 struct lm355x_chip_data *chip =
389 container_of(led_cdev, struct lm355x_chip_data, cdev_indicator);
390 unsigned int state;
391
392 ret = kstrtouint(buf, 10, &state);
393 if (ret)
394 goto out;
395 if (state > INDIC_PATTERN_SIZE - 1)
396 state = INDIC_PATTERN_SIZE - 1;
397
398 ret = regmap_write(chip->regmap, 0x04,
399 indicator_pattern[state].blinking);
400 if (ret < 0)
401 goto out;
402
403 ret = regmap_write(chip->regmap, 0x05,
404 indicator_pattern[state].period_cnt);
405 if (ret < 0)
406 goto out;
407
408 return size;
409out:
410 dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
411 return size;
412}
413
414static DEVICE_ATTR(pattern, 0666, NULL, lm3556_indicator_pattern_store);
415
416static const struct regmap_config lm355x_regmap = {
417 .reg_bits = 8,
418 .val_bits = 8,
419 .max_register = 0xFF,
420};
421
422/* module initialize */
423static int __devinit lm355x_probe(struct i2c_client *client,
424 const struct i2c_device_id *id)
425{
426 struct lm355x_platform_data *pdata = client->dev.platform_data;
427 struct lm355x_chip_data *chip;
428
429 int err;
430
431 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
432 dev_err(&client->dev, "i2c functionality check fail.\n");
433 return -EOPNOTSUPP;
434 }
435
436 if (pdata == NULL) {
437 dev_err(&client->dev, "needs Platform Data.\n");
438 return -ENODATA;
439 }
440
441 chip = devm_kzalloc(&client->dev,
442 sizeof(struct lm355x_chip_data), GFP_KERNEL);
443 if (!chip)
444 return -ENOMEM;
445
446 chip->dev = &client->dev;
447 chip->type = id->driver_data;
448 switch (id->driver_data) {
449 case CHIP_LM3554:
450 chip->regs = lm3554_regs;
451 break;
452 case CHIP_LM3556:
453 chip->regs = lm3556_regs;
454 break;
455 default:
456 return -ENOSYS;
457 }
458 chip->pdata = pdata;
459
460 chip->regmap = devm_regmap_init_i2c(client, &lm355x_regmap);
461 if (IS_ERR(chip->regmap)) {
462 err = PTR_ERR(chip->regmap);
463 dev_err(&client->dev,
464 "Failed to allocate register map: %d\n", err);
465 return err;
466 }
467
468 mutex_init(&chip->lock);
469 i2c_set_clientdata(client, chip);
470
471 err = lm355x_chip_init(chip);
472 if (err < 0)
473 goto err_out;
474
475 /* flash */
476 INIT_WORK(&chip->work_flash, lm355x_deferred_strobe_brightness_set);
477 chip->cdev_flash.name = "flash";
478 chip->cdev_flash.max_brightness = 16;
479 chip->cdev_flash.brightness_set = lm355x_strobe_brightness_set;
480 err = led_classdev_register((struct device *)
481 &client->dev, &chip->cdev_flash);
482 if (err < 0)
483 goto err_out;
484 /* torch */
485 INIT_WORK(&chip->work_torch, lm355x_deferred_torch_brightness_set);
486 chip->cdev_torch.name = "torch";
487 chip->cdev_torch.max_brightness = 8;
488 chip->cdev_torch.brightness_set = lm355x_torch_brightness_set;
489 err = led_classdev_register((struct device *)
490 &client->dev, &chip->cdev_torch);
491 if (err < 0)
492 goto err_create_torch_file;
493 /* indicator */
494 INIT_WORK(&chip->work_indicator,
495 lm355x_deferred_indicator_brightness_set);
496 chip->cdev_indicator.name = "indicator";
497 if (id->driver_data == CHIP_LM3554)
498 chip->cdev_indicator.max_brightness = 4;
499 else
500 chip->cdev_indicator.max_brightness = 8;
501 chip->cdev_indicator.brightness_set = lm355x_indicator_brightness_set;
502 err = led_classdev_register((struct device *)
503 &client->dev, &chip->cdev_indicator);
504 if (err < 0)
505 goto err_create_indicator_file;
506 /* indicator pattern control only for LM3554 */
507 if (id->driver_data == CHIP_LM3556) {
508 err =
509 device_create_file(chip->cdev_indicator.dev,
510 &dev_attr_pattern);
511 if (err < 0)
512 goto err_create_pattern_file;
513 }
514
515 dev_info(&client->dev, "%s is initialized\n",
516 lm355x_name[id->driver_data]);
517 return 0;
518
519err_create_pattern_file:
520 led_classdev_unregister(&chip->cdev_indicator);
521err_create_indicator_file:
522 led_classdev_unregister(&chip->cdev_torch);
523err_create_torch_file:
524 led_classdev_unregister(&chip->cdev_flash);
525err_out:
526 return err;
527}
528
529static int __devexit lm355x_remove(struct i2c_client *client)
530{
531 struct lm355x_chip_data *chip = i2c_get_clientdata(client);
532 struct lm355x_reg_data *preg = chip->regs;
533
534 regmap_write(chip->regmap, preg[REG_OPMODE].regno, 0);
535 if (chip->type == CHIP_LM3556)
536 device_remove_file(chip->cdev_indicator.dev, &dev_attr_pattern);
537 led_classdev_unregister(&chip->cdev_indicator);
538 flush_work(&chip->work_indicator);
539 led_classdev_unregister(&chip->cdev_torch);
540 flush_work(&chip->work_torch);
541 led_classdev_unregister(&chip->cdev_flash);
542 flush_work(&chip->work_flash);
543 dev_info(&client->dev, "%s is removed\n", lm355x_name[chip->type]);
544
545 return 0;
546}
547
548static const struct i2c_device_id lm355x_id[] = {
549 {LM3554_NAME, CHIP_LM3554},
550 {LM3556_NAME, CHIP_LM3556},
551 {}
552};
553
554MODULE_DEVICE_TABLE(i2c, lm355x_id);
555
556static struct i2c_driver lm355x_i2c_driver = {
557 .driver = {
558 .name = LM355x_NAME,
559 .owner = THIS_MODULE,
560 .pm = NULL,
561 },
562 .probe = lm355x_probe,
563 .remove = __devexit_p(lm355x_remove),
564 .id_table = lm355x_id,
565};
566
567module_i2c_driver(lm355x_i2c_driver);
568
569MODULE_DESCRIPTION("Texas Instruments Flash Lighting driver for LM355x");
570MODULE_AUTHOR("Daniel Jeong <daniel.jeong@ti.com>");
571MODULE_AUTHOR("G.Shark Jeong <gshark.jeong@gmail.com>");
572MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c
new file mode 100644
index 000000000000..3285006e9888
--- /dev/null
+++ b/drivers/leds/leds-lm3642.c
@@ -0,0 +1,462 @@
1/*
2* Simple driver for Texas Instruments LM3642 LED Flash driver chip
3* Copyright (C) 2012 Texas Instruments
4*
5* This program is free software; you can redistribute it and/or modify
6* it under the terms of the GNU General Public License version 2 as
7* published by the Free Software Foundation.
8*
9*/
10#include <linux/module.h>
11#include <linux/delay.h>
12#include <linux/i2c.h>
13#include <linux/leds.h>
14#include <linux/slab.h>
15#include <linux/platform_device.h>
16#include <linux/fs.h>
17#include <linux/regmap.h>
18#include <linux/workqueue.h>
19#include <linux/platform_data/leds-lm3642.h>
20
/* LM3642 register addresses (8-bit register map, see lm3642_regmap). */
#define REG_FILT_TIME                   (0x0)
#define REG_IVFM_MODE                   (0x1)
#define REG_TORCH_TIME                  (0x6)
#define REG_FLASH                       (0x8)
#define REG_I_CTRL                      (0x9)   /* torch/flash current control */
#define REG_ENABLE                      (0xA)   /* mode bits + external pin enables */
#define REG_FLAG                        (0xB)   /* fault flags */
#define REG_MAX                         (0xB)

/* Bit-field shifts within the registers above. */
#define UVLO_EN_SHIFT                   (7)
#define IVM_D_TH_SHIFT                  (2)
#define TORCH_RAMP_UP_TIME_SHIFT        (3)
#define TORCH_RAMP_DN_TIME_SHIFT        (0)
#define INDUCTOR_I_LIMIT_SHIFT          (6)
#define FLASH_RAMP_TIME_SHIFT           (3)
#define FLASH_TOUT_TIME_SHIFT           (0)
#define TORCH_I_SHIFT                   (4)
#define FLASH_I_SHIFT                   (0)
#define IVFM_SHIFT                      (7)
#define TX_PIN_EN_SHIFT                 (6)
#define STROBE_PIN_EN_SHIFT             (5)
#define TORCH_PIN_EN_SHIFT              (4)
#define MODE_BITS_SHIFT                 (0)

/* Bit-field masks (unshifted; callers shift by the *_SHIFT above). */
#define UVLO_EN_MASK                    (0x1)
#define IVM_D_TH_MASK                   (0x7)
#define TORCH_RAMP_UP_TIME_MASK         (0x7)
#define TORCH_RAMP_DN_TIME_MASK         (0x7)
#define INDUCTOR_I_LIMIT_MASK           (0x1)
#define FLASH_RAMP_TIME_MASK            (0x7)
#define FLASH_TOUT_TIME_MASK            (0x7)
#define TORCH_I_MASK                    (0x7)
#define FLASH_I_MASK                    (0xF)
#define IVFM_MASK                       (0x1)
#define TX_PIN_EN_MASK                  (0x1)
#define STROBE_PIN_EN_MASK              (0x1)
#define TORCH_PIN_EN_MASK               (0x1)
/* Pre-shifted composites covering mode bits and the TX/STROBE/TORCH pins. */
#define MODE_BITS_MASK                  (0x73)
#define EX_PIN_CONTROL_MASK             (0x71)
#define EX_PIN_ENABLE_MASK              (0x70)
61
/*
 * Operating modes written to the MODE bits of REG_ENABLE.
 * NOTE(review): "STASNDBY" is a misspelling of STANDBY; kept as-is because
 * the rest of this file references it by this name.
 */
enum lm3642_mode {
        MODES_STASNDBY = 0,     /* output off */
        MODES_INDIC,            /* indicator LED */
        MODES_TORCH,            /* continuous torch */
        MODES_FLASH             /* timed flash strobe */
};
68
/* Per-device driver state, allocated in probe with devm_kzalloc(). */
struct lm3642_chip_data {
        struct device *dev;

        /* Three LED class devices exported by this chip. */
        struct led_classdev cdev_flash;
        struct led_classdev cdev_torch;
        struct led_classdev cdev_indicator;

        /* Deferred brightness updates (brightness_set cannot sleep). */
        struct work_struct work_flash;
        struct work_struct work_torch;
        struct work_struct work_indicator;

        /* Last requested brightness per LED, consumed by the work items. */
        u8 br_flash;
        u8 br_torch;
        u8 br_indicator;

        /* External hardware pin-control configuration (from platform data). */
        enum lm3642_torch_pin_enable torch_pin;
        enum lm3642_strobe_pin_enable strobe_pin;
        enum lm3642_tx_pin_enable tx_pin;

        struct lm3642_platform_data *pdata;
        struct regmap *regmap;
        struct mutex lock;      /* serializes lm3642_control() register I/O */

        unsigned int last_flag; /* fault flags latched from REG_FLAG */
};
94
95/* chip initialize */
96static int __devinit lm3642_chip_init(struct lm3642_chip_data *chip)
97{
98 int ret;
99 struct lm3642_platform_data *pdata = chip->pdata;
100
101 /* set enable register */
102 ret = regmap_update_bits(chip->regmap, REG_ENABLE, EX_PIN_ENABLE_MASK,
103 pdata->tx_pin);
104 if (ret < 0)
105 dev_err(chip->dev, "Failed to update REG_ENABLE Register\n");
106 return ret;
107}
108
109/* chip control */
/*
 * lm3642_control - program LED current and switch the operating mode.
 * @chip:       driver state (caller holds chip->lock)
 * @brightness: 0 turns the output off (forces standby); otherwise a
 *              1-based index into the chip's current steps
 * @opmode:     requested mode (torch/flash/indicator/standby)
 *
 * Reads REG_FLAG first and logs any pending fault bits, then writes the
 * current-control register for the selected mode, and finally commits
 * the mode bits to REG_ENABLE with the configured external pin-enable
 * bits OR-ed in.  Returns 0 on success or a negative errno.
 */
static int lm3642_control(struct lm3642_chip_data *chip,
                          u8 brightness, enum lm3642_mode opmode)
{
        int ret;

        /* Latch and report any fault flags before reconfiguring. */
        ret = regmap_read(chip->regmap, REG_FLAG, &chip->last_flag);
        if (ret < 0) {
                dev_err(chip->dev, "Failed to read REG_FLAG Register\n");
                goto out;
        }

        if (chip->last_flag)
                dev_info(chip->dev, "Last FLAG is 0x%x\n", chip->last_flag);

        /* brightness 0 means off-state */
        if (!brightness)
                opmode = MODES_STASNDBY;

        switch (opmode) {
        case MODES_TORCH:
                /* brightness is 1-based; the register field is 0-based. */
                ret = regmap_update_bits(chip->regmap, REG_I_CTRL,
                                         TORCH_I_MASK << TORCH_I_SHIFT,
                                         (brightness - 1) << TORCH_I_SHIFT);

                if (chip->torch_pin)
                        opmode |= (TORCH_PIN_EN_MASK << TORCH_PIN_EN_SHIFT);
                break;

        case MODES_FLASH:
                ret = regmap_update_bits(chip->regmap, REG_I_CTRL,
                                         FLASH_I_MASK << FLASH_I_SHIFT,
                                         (brightness - 1) << FLASH_I_SHIFT);

                if (chip->strobe_pin)
                        opmode |= (STROBE_PIN_EN_MASK << STROBE_PIN_EN_SHIFT);
                break;

        case MODES_INDIC:
                /* Indicator shares the torch current field. */
                ret = regmap_update_bits(chip->regmap, REG_I_CTRL,
                                         TORCH_I_MASK << TORCH_I_SHIFT,
                                         (brightness - 1) << TORCH_I_SHIFT);
                break;

        case MODES_STASNDBY:
                /* No current to program; just write the mode below. */
                break;

        default:
                /* Unknown mode: ret still holds the (successful) read. */
                return ret;
        }
        if (ret < 0) {
                dev_err(chip->dev, "Failed to write REG_I_CTRL Register\n");
                goto out;
        }

        if (chip->tx_pin)
                opmode |= (TX_PIN_EN_MASK << TX_PIN_EN_SHIFT);

        /* Commit mode bits plus any external pin-enable bits. */
        ret = regmap_update_bits(chip->regmap, REG_ENABLE,
                                 MODE_BITS_MASK << MODE_BITS_SHIFT,
                                 opmode << MODE_BITS_SHIFT);
out:
        return ret;
}
174
175/* torch */
176
177/* torch pin config for lm3642*/
178static ssize_t lm3642_torch_pin_store(struct device *dev,
179 struct device_attribute *devAttr,
180 const char *buf, size_t size)
181{
182 ssize_t ret;
183 struct led_classdev *led_cdev = dev_get_drvdata(dev);
184 struct lm3642_chip_data *chip =
185 container_of(led_cdev, struct lm3642_chip_data, cdev_indicator);
186 unsigned int state;
187
188 ret = kstrtouint(buf, 10, &state);
189 if (ret)
190 goto out_strtoint;
191 if (state != 0)
192 state = 0x01 << TORCH_PIN_EN_SHIFT;
193
194 chip->torch_pin = state;
195 ret = regmap_update_bits(chip->regmap, REG_ENABLE,
196 TORCH_PIN_EN_MASK << TORCH_PIN_EN_SHIFT,
197 state);
198 if (ret < 0)
199 goto out;
200
201 return size;
202out:
203 dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
204 return size;
205out_strtoint:
206 dev_err(chip->dev, "%s: fail to change str to int\n", __func__);
207 return size;
208}
209
/* Write-only attribute: 0666 was world-writable, letting any user flip a
 * hardware control pin.  Restrict to owner-write (S_IWUSR). */
static DEVICE_ATTR(torch_pin, S_IWUSR, NULL, lm3642_torch_pin_store);
211
212static void lm3642_deferred_torch_brightness_set(struct work_struct *work)
213{
214 struct lm3642_chip_data *chip =
215 container_of(work, struct lm3642_chip_data, work_torch);
216
217 mutex_lock(&chip->lock);
218 lm3642_control(chip, chip->br_torch, MODES_TORCH);
219 mutex_unlock(&chip->lock);
220}
221
222static void lm3642_torch_brightness_set(struct led_classdev *cdev,
223 enum led_brightness brightness)
224{
225 struct lm3642_chip_data *chip =
226 container_of(cdev, struct lm3642_chip_data, cdev_torch);
227
228 chip->br_torch = brightness;
229 schedule_work(&chip->work_torch);
230}
231
232/* flash */
233
234/* strobe pin config for lm3642*/
235static ssize_t lm3642_strobe_pin_store(struct device *dev,
236 struct device_attribute *devAttr,
237 const char *buf, size_t size)
238{
239 ssize_t ret;
240 struct led_classdev *led_cdev = dev_get_drvdata(dev);
241 struct lm3642_chip_data *chip =
242 container_of(led_cdev, struct lm3642_chip_data, cdev_indicator);
243 unsigned int state;
244
245 ret = kstrtouint(buf, 10, &state);
246 if (ret)
247 goto out_strtoint;
248 if (state != 0)
249 state = 0x01 << STROBE_PIN_EN_SHIFT;
250
251 chip->strobe_pin = state;
252 ret = regmap_update_bits(chip->regmap, REG_ENABLE,
253 STROBE_PIN_EN_MASK << STROBE_PIN_EN_SHIFT,
254 state);
255 if (ret < 0)
256 goto out;
257
258 return size;
259out:
260 dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
261 return size;
262out_strtoint:
263 dev_err(chip->dev, "%s: fail to change str to int\n", __func__);
264 return size;
265}
266
/* Write-only attribute: 0666 was world-writable, letting any user flip a
 * hardware control pin.  Restrict to owner-write (S_IWUSR). */
static DEVICE_ATTR(strobe_pin, S_IWUSR, NULL, lm3642_strobe_pin_store);
268
269static void lm3642_deferred_strobe_brightness_set(struct work_struct *work)
270{
271 struct lm3642_chip_data *chip =
272 container_of(work, struct lm3642_chip_data, work_flash);
273
274 mutex_lock(&chip->lock);
275 lm3642_control(chip, chip->br_flash, MODES_FLASH);
276 mutex_unlock(&chip->lock);
277}
278
279static void lm3642_strobe_brightness_set(struct led_classdev *cdev,
280 enum led_brightness brightness)
281{
282 struct lm3642_chip_data *chip =
283 container_of(cdev, struct lm3642_chip_data, cdev_flash);
284
285 chip->br_flash = brightness;
286 schedule_work(&chip->work_flash);
287}
288
289/* indicator */
290static void lm3642_deferred_indicator_brightness_set(struct work_struct *work)
291{
292 struct lm3642_chip_data *chip =
293 container_of(work, struct lm3642_chip_data, work_indicator);
294
295 mutex_lock(&chip->lock);
296 lm3642_control(chip, chip->br_indicator, MODES_INDIC);
297 mutex_unlock(&chip->lock);
298}
299
300static void lm3642_indicator_brightness_set(struct led_classdev *cdev,
301 enum led_brightness brightness)
302{
303 struct lm3642_chip_data *chip =
304 container_of(cdev, struct lm3642_chip_data, cdev_indicator);
305
306 chip->br_indicator = brightness;
307 schedule_work(&chip->work_indicator);
308}
309
/* 8-bit address / 8-bit value register map, registers 0x0..REG_MAX. */
static const struct regmap_config lm3642_regmap = {
        .reg_bits = 8,
        .val_bits = 8,
        .max_register = REG_MAX,
};
315
/*
 * lm3642_probe - bind the LM3642 and register its LED class devices.
 *
 * Requires platform data for the pin-control configuration (-ENODATA
 * otherwise).  Registration order is flash -> torch -> indicator; the
 * error labels at the bottom unwind, in reverse order, exactly what had
 * succeeded before the failure.  chip and regmap are devm-managed, so
 * no explicit frees appear in the unwind path.
 * Returns 0 on success or a negative errno.
 */
static int __devinit lm3642_probe(struct i2c_client *client,
                                  const struct i2c_device_id *id)
{
        struct lm3642_platform_data *pdata = client->dev.platform_data;
        struct lm3642_chip_data *chip;

        int err;

        if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
                dev_err(&client->dev, "i2c functionality check fail.\n");
                return -EOPNOTSUPP;
        }

        if (pdata == NULL) {
                dev_err(&client->dev, "needs Platform Data.\n");
                return -ENODATA;
        }

        chip = devm_kzalloc(&client->dev,
                            sizeof(struct lm3642_chip_data), GFP_KERNEL);
        if (!chip)
                return -ENOMEM;

        chip->dev = &client->dev;
        chip->pdata = pdata;

        /* Cache the pin configuration; lm3642_control() ORs these in. */
        chip->tx_pin = pdata->tx_pin;
        chip->torch_pin = pdata->torch_pin;
        chip->strobe_pin = pdata->strobe_pin;

        chip->regmap = devm_regmap_init_i2c(client, &lm3642_regmap);
        if (IS_ERR(chip->regmap)) {
                err = PTR_ERR(chip->regmap);
                dev_err(&client->dev, "Failed to allocate register map: %d\n",
                        err);
                return err;
        }

        mutex_init(&chip->lock);
        i2c_set_clientdata(client, chip);

        err = lm3642_chip_init(chip);
        if (err < 0)
                goto err_out;

        /* flash */
        INIT_WORK(&chip->work_flash, lm3642_deferred_strobe_brightness_set);
        chip->cdev_flash.name = "flash";
        chip->cdev_flash.max_brightness = 16;
        chip->cdev_flash.brightness_set = lm3642_strobe_brightness_set;
        err = led_classdev_register((struct device *)
                                    &client->dev, &chip->cdev_flash);
        if (err < 0) {
                dev_err(chip->dev, "failed to register flash\n");
                goto err_out;
        }
        /* strobe_pin attribute lives on the flash LED's device. */
        err = device_create_file(chip->cdev_flash.dev, &dev_attr_strobe_pin);
        if (err < 0) {
                dev_err(chip->dev, "failed to create strobe-pin file\n");
                goto err_create_flash_pin_file;
        }

        /* torch */
        INIT_WORK(&chip->work_torch, lm3642_deferred_torch_brightness_set);
        chip->cdev_torch.name = "torch";
        chip->cdev_torch.max_brightness = 8;
        chip->cdev_torch.brightness_set = lm3642_torch_brightness_set;
        err = led_classdev_register((struct device *)
                                    &client->dev, &chip->cdev_torch);
        if (err < 0) {
                dev_err(chip->dev, "failed to register torch\n");
                goto err_create_torch_file;
        }
        /* torch_pin attribute lives on the torch LED's device. */
        err = device_create_file(chip->cdev_torch.dev, &dev_attr_torch_pin);
        if (err < 0) {
                dev_err(chip->dev, "failed to create torch-pin file\n");
                goto err_create_torch_pin_file;
        }

        /* indicator */
        INIT_WORK(&chip->work_indicator,
                  lm3642_deferred_indicator_brightness_set);
        chip->cdev_indicator.name = "indicator";
        chip->cdev_indicator.max_brightness = 8;
        chip->cdev_indicator.brightness_set = lm3642_indicator_brightness_set;
        err = led_classdev_register((struct device *)
                                    &client->dev, &chip->cdev_indicator);
        if (err < 0) {
                dev_err(chip->dev, "failed to register indicator\n");
                goto err_create_indicator_file;
        }

        dev_info(&client->dev, "LM3642 is initialized\n");
        return 0;

/* Unwind in reverse order of the successful steps above. */
err_create_indicator_file:
        device_remove_file(chip->cdev_torch.dev, &dev_attr_torch_pin);
err_create_torch_pin_file:
        led_classdev_unregister(&chip->cdev_torch);
err_create_torch_file:
        device_remove_file(chip->cdev_flash.dev, &dev_attr_strobe_pin);
err_create_flash_pin_file:
        led_classdev_unregister(&chip->cdev_flash);
err_out:
        return err;
}
422
/*
 * lm3642_remove - unbind the driver, mirroring probe in reverse order.
 *
 * Each classdev is unregistered and its deferred work flushed before the
 * next teardown step; the chip's ENABLE register is cleared last to stop
 * any light output.  chip memory and the regmap are devm-managed.
 */
static int __devexit lm3642_remove(struct i2c_client *client)
{
        struct lm3642_chip_data *chip = i2c_get_clientdata(client);

        led_classdev_unregister(&chip->cdev_indicator);
        flush_work(&chip->work_indicator);
        device_remove_file(chip->cdev_torch.dev, &dev_attr_torch_pin);
        led_classdev_unregister(&chip->cdev_torch);
        flush_work(&chip->work_torch);
        device_remove_file(chip->cdev_flash.dev, &dev_attr_strobe_pin);
        led_classdev_unregister(&chip->cdev_flash);
        flush_work(&chip->work_flash);
        /* Put the chip in standby (clears mode and pin-enable bits). */
        regmap_write(chip->regmap, REG_ENABLE, 0);
        return 0;
}
438
/* Single supported I2C device ID; no variant data needed. */
static const struct i2c_device_id lm3642_id[] = {
        {LM3642_NAME, 0},
        {}
};

MODULE_DEVICE_TABLE(i2c, lm3642_id);

static struct i2c_driver lm3642_i2c_driver = {
        .driver = {
                   .name = LM3642_NAME,
                   .owner = THIS_MODULE,
                   .pm = NULL,  /* no suspend/resume support */
                   },
        .probe = lm3642_probe,
        .remove = __devexit_p(lm3642_remove),
        .id_table = lm3642_id,
};

module_i2c_driver(lm3642_i2c_driver);

MODULE_DESCRIPTION("Texas Instruments Flash Lighting driver for LM3642");
MODULE_AUTHOR("Daniel Jeong <daniel.jeong@ti.com>");
MODULE_AUTHOR("G.Shark Jeong <gshark.jeong@gmail.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index fbc12acada95..97994ffdc014 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -104,6 +104,11 @@
104#define LED_ACTIVE(mux, led) (!!(mux & (0x0001 << led))) 104#define LED_ACTIVE(mux, led) (!!(mux & (0x0001 << led)))
105#define SHIFT_MASK(id) (((id) - 1) * 2) 105#define SHIFT_MASK(id) (((id) - 1) * 2)
106 106
107enum lp5523_chip_id {
108 LP5523,
109 LP55231,
110};
111
107struct lp5523_engine { 112struct lp5523_engine {
108 int id; 113 int id;
109 u8 mode; 114 u8 mode;
@@ -150,7 +155,7 @@ static inline struct lp5523_chip *led_to_lp5523(struct lp5523_led *led)
150 leds[led->id]); 155 leds[led->id]);
151} 156}
152 157
153static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode); 158static void lp5523_set_mode(struct lp5523_engine *engine, u8 mode);
154static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode); 159static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode);
155static int lp5523_load_program(struct lp5523_engine *engine, const u8 *pattern); 160static int lp5523_load_program(struct lp5523_engine *engine, const u8 *pattern);
156 161
@@ -177,7 +182,7 @@ static int lp5523_detect(struct i2c_client *client)
177 int ret; 182 int ret;
178 u8 buf; 183 u8 buf;
179 184
180 ret = lp5523_write(client, LP5523_REG_ENABLE, 0x40); 185 ret = lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE);
181 if (ret) 186 if (ret)
182 return ret; 187 return ret;
183 ret = lp5523_read(client, LP5523_REG_ENABLE, &buf); 188 ret = lp5523_read(client, LP5523_REG_ENABLE, &buf);
@@ -338,7 +343,8 @@ static int lp5523_mux_parse(const char *buf, u16 *mux, size_t len)
338{ 343{
339 int i; 344 int i;
340 u16 tmp_mux = 0; 345 u16 tmp_mux = 0;
341 len = len < LP5523_LEDS ? len : LP5523_LEDS; 346
347 len = min_t(int, len, LP5523_LEDS);
342 for (i = 0; i < len; i++) { 348 for (i = 0; i < len; i++) {
343 switch (buf[i]) { 349 switch (buf[i]) {
344 case '1': 350 case '1':
@@ -546,6 +552,9 @@ static int lp5523_do_store_load(struct lp5523_engine *engine,
546 unsigned cmd; 552 unsigned cmd;
547 u8 pattern[LP5523_PROGRAM_LENGTH] = {0}; 553 u8 pattern[LP5523_PROGRAM_LENGTH] = {0};
548 554
555 if (engine->mode != LP5523_CMD_LOAD)
556 return -EINVAL;
557
549 while ((offset < len - 1) && (i < LP5523_PROGRAM_LENGTH)) { 558 while ((offset < len - 1) && (i < LP5523_PROGRAM_LENGTH)) {
550 /* separate sscanfs because length is working only for %s */ 559 /* separate sscanfs because length is working only for %s */
551 ret = sscanf(buf + offset, "%2s%n ", c, &nrchars); 560 ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
@@ -563,12 +572,7 @@ static int lp5523_do_store_load(struct lp5523_engine *engine,
563 goto fail; 572 goto fail;
564 573
565 mutex_lock(&chip->lock); 574 mutex_lock(&chip->lock);
566 575 ret = lp5523_load_program(engine, pattern);
567 if (engine->mode == LP5523_CMD_LOAD)
568 ret = lp5523_load_program(engine, pattern);
569 else
570 ret = -EINVAL;
571
572 mutex_unlock(&chip->lock); 576 mutex_unlock(&chip->lock);
573 577
574 if (ret) { 578 if (ret) {
@@ -755,6 +759,7 @@ static struct attribute *lp5523_attributes[] = {
755 &dev_attr_engine2_leds.attr, 759 &dev_attr_engine2_leds.attr,
756 &dev_attr_engine3_load.attr, 760 &dev_attr_engine3_load.attr,
757 &dev_attr_engine3_leds.attr, 761 &dev_attr_engine3_leds.attr,
762 NULL,
758}; 763};
759 764
760static const struct attribute_group lp5523_group = { 765static const struct attribute_group lp5523_group = {
@@ -789,26 +794,28 @@ static void lp5523_unregister_sysfs(struct i2c_client *client)
789/*--------------------------------------------------------------*/ 794/*--------------------------------------------------------------*/
790/* Set chip operating mode */ 795/* Set chip operating mode */
791/*--------------------------------------------------------------*/ 796/*--------------------------------------------------------------*/
792static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode) 797static void lp5523_set_mode(struct lp5523_engine *engine, u8 mode)
793{ 798{
794 int ret = 0;
795
796 /* if in that mode already do nothing, except for run */ 799 /* if in that mode already do nothing, except for run */
797 if (mode == engine->mode && mode != LP5523_CMD_RUN) 800 if (mode == engine->mode && mode != LP5523_CMD_RUN)
798 return 0; 801 return;
799 802
800 if (mode == LP5523_CMD_RUN) { 803 switch (mode) {
801 ret = lp5523_run_program(engine); 804 case LP5523_CMD_RUN:
802 } else if (mode == LP5523_CMD_LOAD) { 805 lp5523_run_program(engine);
806 break;
807 case LP5523_CMD_LOAD:
803 lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED); 808 lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
804 lp5523_set_engine_mode(engine, LP5523_CMD_LOAD); 809 lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
805 } else if (mode == LP5523_CMD_DISABLED) { 810 break;
811 case LP5523_CMD_DISABLED:
806 lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED); 812 lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
813 break;
814 default:
815 return;
807 } 816 }
808 817
809 engine->mode = mode; 818 engine->mode = mode;
810
811 return ret;
812} 819}
813 820
814/*--------------------------------------------------------------*/ 821/*--------------------------------------------------------------*/
@@ -827,7 +834,8 @@ static int __init lp5523_init_engine(struct lp5523_engine *engine, int id)
827} 834}
828 835
829static int __devinit lp5523_init_led(struct lp5523_led *led, struct device *dev, 836static int __devinit lp5523_init_led(struct lp5523_led *led, struct device *dev,
830 int chan, struct lp5523_platform_data *pdata) 837 int chan, struct lp5523_platform_data *pdata,
838 const char *chip_name)
831{ 839{
832 char name[32]; 840 char name[32];
833 int res; 841 int res;
@@ -846,10 +854,14 @@ static int __devinit lp5523_init_led(struct lp5523_led *led, struct device *dev,
846 return -EINVAL; 854 return -EINVAL;
847 } 855 }
848 856
849 snprintf(name, sizeof(name), "%s:channel%d", 857 if (pdata->led_config[chan].name) {
850 pdata->label ?: "lp5523", chan); 858 led->cdev.name = pdata->led_config[chan].name;
859 } else {
860 snprintf(name, sizeof(name), "%s:channel%d",
861 pdata->label ? : chip_name, chan);
862 led->cdev.name = name;
863 }
851 864
852 led->cdev.name = name;
853 led->cdev.brightness_set = lp5523_set_brightness; 865 led->cdev.brightness_set = lp5523_set_brightness;
854 res = led_classdev_register(dev, &led->cdev); 866 res = led_classdev_register(dev, &led->cdev);
855 if (res < 0) { 867 if (res < 0) {
@@ -917,7 +929,7 @@ static int __devinit lp5523_probe(struct i2c_client *client,
917 if (ret) 929 if (ret)
918 goto fail1; 930 goto fail1;
919 931
920 dev_info(&client->dev, "LP5523 Programmable led chip found\n"); 932 dev_info(&client->dev, "%s Programmable led chip found\n", id->name);
921 933
922 /* Initialize engines */ 934 /* Initialize engines */
923 for (i = 0; i < ARRAY_SIZE(chip->engines); i++) { 935 for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
@@ -945,7 +957,8 @@ static int __devinit lp5523_probe(struct i2c_client *client,
945 INIT_WORK(&chip->leds[led].brightness_work, 957 INIT_WORK(&chip->leds[led].brightness_work,
946 lp5523_led_brightness_work); 958 lp5523_led_brightness_work);
947 959
948 ret = lp5523_init_led(&chip->leds[led], &client->dev, i, pdata); 960 ret = lp5523_init_led(&chip->leds[led], &client->dev, i, pdata,
961 id->name);
949 if (ret) { 962 if (ret) {
950 dev_err(&client->dev, "error initializing leds\n"); 963 dev_err(&client->dev, "error initializing leds\n");
951 goto fail2; 964 goto fail2;
@@ -970,7 +983,7 @@ static int __devinit lp5523_probe(struct i2c_client *client,
970fail2: 983fail2:
971 for (i = 0; i < chip->num_leds; i++) { 984 for (i = 0; i < chip->num_leds; i++) {
972 led_classdev_unregister(&chip->leds[i].cdev); 985 led_classdev_unregister(&chip->leds[i].cdev);
973 cancel_work_sync(&chip->leds[i].brightness_work); 986 flush_work(&chip->leds[i].brightness_work);
974 } 987 }
975fail1: 988fail1:
976 if (pdata->enable) 989 if (pdata->enable)
@@ -985,11 +998,14 @@ static int lp5523_remove(struct i2c_client *client)
985 struct lp5523_chip *chip = i2c_get_clientdata(client); 998 struct lp5523_chip *chip = i2c_get_clientdata(client);
986 int i; 999 int i;
987 1000
1001 /* Disable engine mode */
1002 lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_DISABLED);
1003
988 lp5523_unregister_sysfs(client); 1004 lp5523_unregister_sysfs(client);
989 1005
990 for (i = 0; i < chip->num_leds; i++) { 1006 for (i = 0; i < chip->num_leds; i++) {
991 led_classdev_unregister(&chip->leds[i].cdev); 1007 led_classdev_unregister(&chip->leds[i].cdev);
992 cancel_work_sync(&chip->leds[i].brightness_work); 1008 flush_work(&chip->leds[i].brightness_work);
993 } 1009 }
994 1010
995 if (chip->pdata->enable) 1011 if (chip->pdata->enable)
@@ -1000,7 +1016,8 @@ static int lp5523_remove(struct i2c_client *client)
1000} 1016}
1001 1017
1002static const struct i2c_device_id lp5523_id[] = { 1018static const struct i2c_device_id lp5523_id[] = {
1003 { "lp5523", 0 }, 1019 { "lp5523", LP5523 },
1020 { "lp55231", LP55231 },
1004 { } 1021 { }
1005}; 1022};
1006 1023
@@ -1008,7 +1025,7 @@ MODULE_DEVICE_TABLE(i2c, lp5523_id);
1008 1025
1009static struct i2c_driver lp5523_driver = { 1026static struct i2c_driver lp5523_driver = {
1010 .driver = { 1027 .driver = {
1011 .name = "lp5523", 1028 .name = "lp5523x",
1012 }, 1029 },
1013 .probe = lp5523_probe, 1030 .probe = lp5523_probe,
1014 .remove = lp5523_remove, 1031 .remove = lp5523_remove,
diff --git a/drivers/leds/leds-pca9633.c b/drivers/leds/leds-pca9633.c
index edcd706c5631..2f2f9c43535d 100644
--- a/drivers/leds/leds-pca9633.c
+++ b/drivers/leds/leds-pca9633.c
@@ -22,6 +22,7 @@
22#include <linux/i2c.h> 22#include <linux/i2c.h>
23#include <linux/workqueue.h> 23#include <linux/workqueue.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/platform_data/leds-pca9633.h>
25 26
26/* LED select registers determine the source that drives LED outputs */ 27/* LED select registers determine the source that drives LED outputs */
27#define PCA9633_LED_OFF 0x0 /* LED driver off */ 28#define PCA9633_LED_OFF 0x0 /* LED driver off */
@@ -96,13 +97,13 @@ static int __devinit pca9633_probe(struct i2c_client *client,
96 const struct i2c_device_id *id) 97 const struct i2c_device_id *id)
97{ 98{
98 struct pca9633_led *pca9633; 99 struct pca9633_led *pca9633;
99 struct led_platform_data *pdata; 100 struct pca9633_platform_data *pdata;
100 int i, err; 101 int i, err;
101 102
102 pdata = client->dev.platform_data; 103 pdata = client->dev.platform_data;
103 104
104 if (pdata) { 105 if (pdata) {
105 if (pdata->num_leds <= 0 || pdata->num_leds > 4) { 106 if (pdata->leds.num_leds <= 0 || pdata->leds.num_leds > 4) {
106 dev_err(&client->dev, "board info must claim at most 4 LEDs"); 107 dev_err(&client->dev, "board info must claim at most 4 LEDs");
107 return -EINVAL; 108 return -EINVAL;
108 } 109 }
@@ -119,14 +120,14 @@ static int __devinit pca9633_probe(struct i2c_client *client,
119 pca9633[i].led_num = i; 120 pca9633[i].led_num = i;
120 121
121 /* Platform data can specify LED names and default triggers */ 122 /* Platform data can specify LED names and default triggers */
122 if (pdata && i < pdata->num_leds) { 123 if (pdata && i < pdata->leds.num_leds) {
123 if (pdata->leds[i].name) 124 if (pdata->leds.leds[i].name)
124 snprintf(pca9633[i].name, 125 snprintf(pca9633[i].name,
125 sizeof(pca9633[i].name), "pca9633:%s", 126 sizeof(pca9633[i].name), "pca9633:%s",
126 pdata->leds[i].name); 127 pdata->leds.leds[i].name);
127 if (pdata->leds[i].default_trigger) 128 if (pdata->leds.leds[i].default_trigger)
128 pca9633[i].led_cdev.default_trigger = 129 pca9633[i].led_cdev.default_trigger =
129 pdata->leds[i].default_trigger; 130 pdata->leds.leds[i].default_trigger;
130 } else { 131 } else {
131 snprintf(pca9633[i].name, sizeof(pca9633[i].name), 132 snprintf(pca9633[i].name, sizeof(pca9633[i].name),
132 "pca9633:%d", i); 133 "pca9633:%d", i);
@@ -145,6 +146,10 @@ static int __devinit pca9633_probe(struct i2c_client *client,
145 /* Disable LED all-call address and set normal mode */ 146 /* Disable LED all-call address and set normal mode */
146 i2c_smbus_write_byte_data(client, PCA9633_MODE1, 0x00); 147 i2c_smbus_write_byte_data(client, PCA9633_MODE1, 0x00);
147 148
149 /* Configure output: open-drain or totem pole (push-pull) */
150 if (pdata && pdata->outdrv == PCA9633_OPEN_DRAIN)
151 i2c_smbus_write_byte_data(client, PCA9633_MODE2, 0x01);
152
148 /* Turn off LEDs */ 153 /* Turn off LEDs */
149 i2c_smbus_write_byte_data(client, PCA9633_LEDOUT, 0x00); 154 i2c_smbus_write_byte_data(client, PCA9633_LEDOUT, 0x00);
150 155
diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c
index 4c62113f7a77..88f23f845595 100644
--- a/drivers/leds/leds-wm8350.c
+++ b/drivers/leds/leds-wm8350.c
@@ -201,7 +201,7 @@ static int wm8350_led_probe(struct platform_device *pdev)
201 struct regulator *isink, *dcdc; 201 struct regulator *isink, *dcdc;
202 struct wm8350_led *led; 202 struct wm8350_led *led;
203 struct wm8350_led_platform_data *pdata = pdev->dev.platform_data; 203 struct wm8350_led_platform_data *pdata = pdev->dev.platform_data;
204 int ret, i; 204 int i;
205 205
206 if (pdata == NULL) { 206 if (pdata == NULL) {
207 dev_err(&pdev->dev, "no platform data\n"); 207 dev_err(&pdev->dev, "no platform data\n");
@@ -214,24 +214,21 @@ static int wm8350_led_probe(struct platform_device *pdev)
214 return -EINVAL; 214 return -EINVAL;
215 } 215 }
216 216
217 isink = regulator_get(&pdev->dev, "led_isink"); 217 isink = devm_regulator_get(&pdev->dev, "led_isink");
218 if (IS_ERR(isink)) { 218 if (IS_ERR(isink)) {
219 printk(KERN_ERR "%s: can't get ISINK\n", __func__); 219 printk(KERN_ERR "%s: can't get ISINK\n", __func__);
220 return PTR_ERR(isink); 220 return PTR_ERR(isink);
221 } 221 }
222 222
223 dcdc = regulator_get(&pdev->dev, "led_vcc"); 223 dcdc = devm_regulator_get(&pdev->dev, "led_vcc");
224 if (IS_ERR(dcdc)) { 224 if (IS_ERR(dcdc)) {
225 printk(KERN_ERR "%s: can't get DCDC\n", __func__); 225 printk(KERN_ERR "%s: can't get DCDC\n", __func__);
226 ret = PTR_ERR(dcdc); 226 return PTR_ERR(dcdc);
227 goto err_isink;
228 } 227 }
229 228
230 led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL); 229 led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
231 if (led == NULL) { 230 if (led == NULL)
232 ret = -ENOMEM; 231 return -ENOMEM;
233 goto err_dcdc;
234 }
235 232
236 led->cdev.brightness_set = wm8350_led_set; 233 led->cdev.brightness_set = wm8350_led_set;
237 led->cdev.default_trigger = pdata->default_trigger; 234 led->cdev.default_trigger = pdata->default_trigger;
@@ -257,17 +254,7 @@ static int wm8350_led_probe(struct platform_device *pdev)
257 led->value = LED_OFF; 254 led->value = LED_OFF;
258 platform_set_drvdata(pdev, led); 255 platform_set_drvdata(pdev, led);
259 256
260 ret = led_classdev_register(&pdev->dev, &led->cdev); 257 return led_classdev_register(&pdev->dev, &led->cdev);
261 if (ret < 0)
262 goto err_dcdc;
263
264 return 0;
265
266 err_dcdc:
267 regulator_put(dcdc);
268 err_isink:
269 regulator_put(isink);
270 return ret;
271} 258}
272 259
273static int wm8350_led_remove(struct platform_device *pdev) 260static int wm8350_led_remove(struct platform_device *pdev)
@@ -277,8 +264,6 @@ static int wm8350_led_remove(struct platform_device *pdev)
277 led_classdev_unregister(&led->cdev); 264 led_classdev_unregister(&led->cdev);
278 flush_work(&led->work); 265 flush_work(&led->work);
279 wm8350_led_disable(led); 266 wm8350_led_disable(led);
280 regulator_put(led->dcdc);
281 regulator_put(led->isink);
282 return 0; 267 return 0;
283} 268}
284 269
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index d02acd496126..4c50365344a9 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -32,6 +32,8 @@ static inline int led_get_brightness(struct led_classdev *led_cdev)
32 return led_cdev->brightness; 32 return led_cdev->brightness;
33} 33}
34 34
35void led_stop_software_blink(struct led_classdev *led_cdev);
36
35extern struct rw_semaphore leds_list_lock; 37extern struct rw_semaphore leds_list_lock;
36extern struct list_head leds_list; 38extern struct list_head leds_list;
37 39
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index d949b781f6f8..91a02eeeb319 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -216,6 +216,13 @@ config DM_BUFIO
216 as a cache, holding recently-read blocks in memory and performing 216 as a cache, holding recently-read blocks in memory and performing
217 delayed writes. 217 delayed writes.
218 218
219config DM_BIO_PRISON
220 tristate
221 depends on BLK_DEV_DM && EXPERIMENTAL
222 ---help---
223 Some bio locking schemes used by other device-mapper targets
224 including thin provisioning.
225
219source "drivers/md/persistent-data/Kconfig" 226source "drivers/md/persistent-data/Kconfig"
220 227
221config DM_CRYPT 228config DM_CRYPT
@@ -247,6 +254,7 @@ config DM_THIN_PROVISIONING
247 tristate "Thin provisioning target (EXPERIMENTAL)" 254 tristate "Thin provisioning target (EXPERIMENTAL)"
248 depends on BLK_DEV_DM && EXPERIMENTAL 255 depends on BLK_DEV_DM && EXPERIMENTAL
249 select DM_PERSISTENT_DATA 256 select DM_PERSISTENT_DATA
257 select DM_BIO_PRISON
250 ---help--- 258 ---help---
251 Provides thin provisioning and snapshots that share a data store. 259 Provides thin provisioning and snapshots that share a data store.
252 260
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 8b2e0dffe82e..94dce8b49324 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_MD_FAULTY) += faulty.o
29obj-$(CONFIG_BLK_DEV_MD) += md-mod.o 29obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
30obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o 30obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
31obj-$(CONFIG_DM_BUFIO) += dm-bufio.o 31obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
32obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o
32obj-$(CONFIG_DM_CRYPT) += dm-crypt.o 33obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
33obj-$(CONFIG_DM_DELAY) += dm-delay.o 34obj-$(CONFIG_DM_DELAY) += dm-delay.o
34obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o 35obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 94e7f6ba2e11..7155945f8eb8 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -163,20 +163,17 @@ static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mdde
163 * As devices are only added or removed when raid_disk is < 0 and 163 * As devices are only added or removed when raid_disk is < 0 and
164 * nr_pending is 0 and In_sync is clear, the entries we return will 164 * nr_pending is 0 and In_sync is clear, the entries we return will
165 * still be in the same position on the list when we re-enter 165 * still be in the same position on the list when we re-enter
166 * list_for_each_continue_rcu. 166 * list_for_each_entry_continue_rcu.
167 */ 167 */
168 struct list_head *pos;
169 rcu_read_lock(); 168 rcu_read_lock();
170 if (rdev == NULL) 169 if (rdev == NULL)
171 /* start at the beginning */ 170 /* start at the beginning */
172 pos = &mddev->disks; 171 rdev = list_entry_rcu(&mddev->disks, struct md_rdev, same_set);
173 else { 172 else {
174 /* release the previous rdev and start from there. */ 173 /* release the previous rdev and start from there. */
175 rdev_dec_pending(rdev, mddev); 174 rdev_dec_pending(rdev, mddev);
176 pos = &rdev->same_set;
177 } 175 }
178 list_for_each_continue_rcu(pos, &mddev->disks) { 176 list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
179 rdev = list_entry(pos, struct md_rdev, same_set);
180 if (rdev->raid_disk >= 0 && 177 if (rdev->raid_disk >= 0 &&
181 !test_bit(Faulty, &rdev->flags)) { 178 !test_bit(Faulty, &rdev->flags)) {
182 /* this is a usable devices */ 179 /* this is a usable devices */
@@ -473,14 +470,10 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
473{ 470{
474 bitmap_super_t *sb; 471 bitmap_super_t *sb;
475 unsigned long chunksize, daemon_sleep, write_behind; 472 unsigned long chunksize, daemon_sleep, write_behind;
476 int err = -EINVAL;
477 473
478 bitmap->storage.sb_page = alloc_page(GFP_KERNEL); 474 bitmap->storage.sb_page = alloc_page(GFP_KERNEL);
479 if (IS_ERR(bitmap->storage.sb_page)) { 475 if (bitmap->storage.sb_page == NULL)
480 err = PTR_ERR(bitmap->storage.sb_page); 476 return -ENOMEM;
481 bitmap->storage.sb_page = NULL;
482 return err;
483 }
484 bitmap->storage.sb_page->index = 0; 477 bitmap->storage.sb_page->index = 0;
485 478
486 sb = kmap_atomic(bitmap->storage.sb_page); 479 sb = kmap_atomic(bitmap->storage.sb_page);
diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c
new file mode 100644
index 000000000000..e4e841567459
--- /dev/null
+++ b/drivers/md/dm-bio-prison.c
@@ -0,0 +1,415 @@
1/*
2 * Copyright (C) 2012 Red Hat, Inc.
3 *
4 * This file is released under the GPL.
5 */
6
7#include "dm.h"
8#include "dm-bio-prison.h"
9
10#include <linux/spinlock.h>
11#include <linux/mempool.h>
12#include <linux/module.h>
13#include <linux/slab.h>
14
15/*----------------------------------------------------------------*/
16
17struct dm_bio_prison_cell {
18 struct hlist_node list;
19 struct dm_bio_prison *prison;
20 struct dm_cell_key key;
21 struct bio *holder;
22 struct bio_list bios;
23};
24
25struct dm_bio_prison {
26 spinlock_t lock;
27 mempool_t *cell_pool;
28
29 unsigned nr_buckets;
30 unsigned hash_mask;
31 struct hlist_head *cells;
32};
33
34/*----------------------------------------------------------------*/
35
36static uint32_t calc_nr_buckets(unsigned nr_cells)
37{
38 uint32_t n = 128;
39
40 nr_cells /= 4;
41 nr_cells = min(nr_cells, 8192u);
42
43 while (n < nr_cells)
44 n <<= 1;
45
46 return n;
47}
48
49static struct kmem_cache *_cell_cache;
50
51/*
52 * @nr_cells should be the number of cells you want in use _concurrently_.
53 * Don't confuse it with the number of distinct keys.
54 */
55struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells)
56{
57 unsigned i;
58 uint32_t nr_buckets = calc_nr_buckets(nr_cells);
59 size_t len = sizeof(struct dm_bio_prison) +
60 (sizeof(struct hlist_head) * nr_buckets);
61 struct dm_bio_prison *prison = kmalloc(len, GFP_KERNEL);
62
63 if (!prison)
64 return NULL;
65
66 spin_lock_init(&prison->lock);
67 prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
68 if (!prison->cell_pool) {
69 kfree(prison);
70 return NULL;
71 }
72
73 prison->nr_buckets = nr_buckets;
74 prison->hash_mask = nr_buckets - 1;
75 prison->cells = (struct hlist_head *) (prison + 1);
76 for (i = 0; i < nr_buckets; i++)
77 INIT_HLIST_HEAD(prison->cells + i);
78
79 return prison;
80}
81EXPORT_SYMBOL_GPL(dm_bio_prison_create);
82
83void dm_bio_prison_destroy(struct dm_bio_prison *prison)
84{
85 mempool_destroy(prison->cell_pool);
86 kfree(prison);
87}
88EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);
89
90static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key)
91{
92 const unsigned long BIG_PRIME = 4294967291UL;
93 uint64_t hash = key->block * BIG_PRIME;
94
95 return (uint32_t) (hash & prison->hash_mask);
96}
97
98static int keys_equal(struct dm_cell_key *lhs, struct dm_cell_key *rhs)
99{
100 return (lhs->virtual == rhs->virtual) &&
101 (lhs->dev == rhs->dev) &&
102 (lhs->block == rhs->block);
103}
104
105static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
106 struct dm_cell_key *key)
107{
108 struct dm_bio_prison_cell *cell;
109 struct hlist_node *tmp;
110
111 hlist_for_each_entry(cell, tmp, bucket, list)
112 if (keys_equal(&cell->key, key))
113 return cell;
114
115 return NULL;
116}
117
118/*
119 * This may block if a new cell needs allocating. You must ensure that
120 * cells will be unlocked even if the calling thread is blocked.
121 *
122 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
123 */
124int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
125 struct bio *inmate, struct dm_bio_prison_cell **ref)
126{
127 int r = 1;
128 unsigned long flags;
129 uint32_t hash = hash_key(prison, key);
130 struct dm_bio_prison_cell *cell, *cell2;
131
132 BUG_ON(hash > prison->nr_buckets);
133
134 spin_lock_irqsave(&prison->lock, flags);
135
136 cell = __search_bucket(prison->cells + hash, key);
137 if (cell) {
138 bio_list_add(&cell->bios, inmate);
139 goto out;
140 }
141
142 /*
143 * Allocate a new cell
144 */
145 spin_unlock_irqrestore(&prison->lock, flags);
146 cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
147 spin_lock_irqsave(&prison->lock, flags);
148
149 /*
150 * We've been unlocked, so we have to double check that
151 * nobody else has inserted this cell in the meantime.
152 */
153 cell = __search_bucket(prison->cells + hash, key);
154 if (cell) {
155 mempool_free(cell2, prison->cell_pool);
156 bio_list_add(&cell->bios, inmate);
157 goto out;
158 }
159
160 /*
161 * Use new cell.
162 */
163 cell = cell2;
164
165 cell->prison = prison;
166 memcpy(&cell->key, key, sizeof(cell->key));
167 cell->holder = inmate;
168 bio_list_init(&cell->bios);
169 hlist_add_head(&cell->list, prison->cells + hash);
170
171 r = 0;
172
173out:
174 spin_unlock_irqrestore(&prison->lock, flags);
175
176 *ref = cell;
177
178 return r;
179}
180EXPORT_SYMBOL_GPL(dm_bio_detain);
181
182/*
183 * @inmates must have been initialised prior to this call
184 */
185static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
186{
187 struct dm_bio_prison *prison = cell->prison;
188
189 hlist_del(&cell->list);
190
191 if (inmates) {
192 bio_list_add(inmates, cell->holder);
193 bio_list_merge(inmates, &cell->bios);
194 }
195
196 mempool_free(cell, prison->cell_pool);
197}
198
199void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
200{
201 unsigned long flags;
202 struct dm_bio_prison *prison = cell->prison;
203
204 spin_lock_irqsave(&prison->lock, flags);
205 __cell_release(cell, bios);
206 spin_unlock_irqrestore(&prison->lock, flags);
207}
208EXPORT_SYMBOL_GPL(dm_cell_release);
209
210/*
211 * There are a couple of places where we put a bio into a cell briefly
212 * before taking it out again. In these situations we know that no other
213 * bio may be in the cell. This function releases the cell, and also does
214 * a sanity check.
215 */
216static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
217{
218 BUG_ON(cell->holder != bio);
219 BUG_ON(!bio_list_empty(&cell->bios));
220
221 __cell_release(cell, NULL);
222}
223
224void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
225{
226 unsigned long flags;
227 struct dm_bio_prison *prison = cell->prison;
228
229 spin_lock_irqsave(&prison->lock, flags);
230 __cell_release_singleton(cell, bio);
231 spin_unlock_irqrestore(&prison->lock, flags);
232}
233EXPORT_SYMBOL_GPL(dm_cell_release_singleton);
234
235/*
236 * Sometimes we don't want the holder, just the additional bios.
237 */
238static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
239{
240 struct dm_bio_prison *prison = cell->prison;
241
242 hlist_del(&cell->list);
243 bio_list_merge(inmates, &cell->bios);
244
245 mempool_free(cell, prison->cell_pool);
246}
247
248void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
249{
250 unsigned long flags;
251 struct dm_bio_prison *prison = cell->prison;
252
253 spin_lock_irqsave(&prison->lock, flags);
254 __cell_release_no_holder(cell, inmates);
255 spin_unlock_irqrestore(&prison->lock, flags);
256}
257EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);
258
259void dm_cell_error(struct dm_bio_prison_cell *cell)
260{
261 struct dm_bio_prison *prison = cell->prison;
262 struct bio_list bios;
263 struct bio *bio;
264 unsigned long flags;
265
266 bio_list_init(&bios);
267
268 spin_lock_irqsave(&prison->lock, flags);
269 __cell_release(cell, &bios);
270 spin_unlock_irqrestore(&prison->lock, flags);
271
272 while ((bio = bio_list_pop(&bios)))
273 bio_io_error(bio);
274}
275EXPORT_SYMBOL_GPL(dm_cell_error);
276
277/*----------------------------------------------------------------*/
278
279#define DEFERRED_SET_SIZE 64
280
281struct dm_deferred_entry {
282 struct dm_deferred_set *ds;
283 unsigned count;
284 struct list_head work_items;
285};
286
287struct dm_deferred_set {
288 spinlock_t lock;
289 unsigned current_entry;
290 unsigned sweeper;
291 struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
292};
293
294struct dm_deferred_set *dm_deferred_set_create(void)
295{
296 int i;
297 struct dm_deferred_set *ds;
298
299 ds = kmalloc(sizeof(*ds), GFP_KERNEL);
300 if (!ds)
301 return NULL;
302
303 spin_lock_init(&ds->lock);
304 ds->current_entry = 0;
305 ds->sweeper = 0;
306 for (i = 0; i < DEFERRED_SET_SIZE; i++) {
307 ds->entries[i].ds = ds;
308 ds->entries[i].count = 0;
309 INIT_LIST_HEAD(&ds->entries[i].work_items);
310 }
311
312 return ds;
313}
314EXPORT_SYMBOL_GPL(dm_deferred_set_create);
315
316void dm_deferred_set_destroy(struct dm_deferred_set *ds)
317{
318 kfree(ds);
319}
320EXPORT_SYMBOL_GPL(dm_deferred_set_destroy);
321
322struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
323{
324 unsigned long flags;
325 struct dm_deferred_entry *entry;
326
327 spin_lock_irqsave(&ds->lock, flags);
328 entry = ds->entries + ds->current_entry;
329 entry->count++;
330 spin_unlock_irqrestore(&ds->lock, flags);
331
332 return entry;
333}
334EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);
335
336static unsigned ds_next(unsigned index)
337{
338 return (index + 1) % DEFERRED_SET_SIZE;
339}
340
341static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
342{
343 while ((ds->sweeper != ds->current_entry) &&
344 !ds->entries[ds->sweeper].count) {
345 list_splice_init(&ds->entries[ds->sweeper].work_items, head);
346 ds->sweeper = ds_next(ds->sweeper);
347 }
348
349 if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
350 list_splice_init(&ds->entries[ds->sweeper].work_items, head);
351}
352
353void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
354{
355 unsigned long flags;
356
357 spin_lock_irqsave(&entry->ds->lock, flags);
358 BUG_ON(!entry->count);
359 --entry->count;
360 __sweep(entry->ds, head);
361 spin_unlock_irqrestore(&entry->ds->lock, flags);
362}
363EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);
364
365/*
366 * Returns 1 if deferred or 0 if no pending items to delay job.
367 */
368int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
369{
370 int r = 1;
371 unsigned long flags;
372 unsigned next_entry;
373
374 spin_lock_irqsave(&ds->lock, flags);
375 if ((ds->sweeper == ds->current_entry) &&
376 !ds->entries[ds->current_entry].count)
377 r = 0;
378 else {
379 list_add(work, &ds->entries[ds->current_entry].work_items);
380 next_entry = ds_next(ds->current_entry);
381 if (!ds->entries[next_entry].count)
382 ds->current_entry = next_entry;
383 }
384 spin_unlock_irqrestore(&ds->lock, flags);
385
386 return r;
387}
388EXPORT_SYMBOL_GPL(dm_deferred_set_add_work);
389
390/*----------------------------------------------------------------*/
391
392static int __init dm_bio_prison_init(void)
393{
394 _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
395 if (!_cell_cache)
396 return -ENOMEM;
397
398 return 0;
399}
400
401static void __exit dm_bio_prison_exit(void)
402{
403 kmem_cache_destroy(_cell_cache);
404 _cell_cache = NULL;
405}
406
407/*
408 * module hooks
409 */
410module_init(dm_bio_prison_init);
411module_exit(dm_bio_prison_exit);
412
413MODULE_DESCRIPTION(DM_NAME " bio prison");
414MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
415MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-bio-prison.h b/drivers/md/dm-bio-prison.h
new file mode 100644
index 000000000000..4e0ac376700a
--- /dev/null
+++ b/drivers/md/dm-bio-prison.h
@@ -0,0 +1,72 @@
1/*
2 * Copyright (C) 2011-2012 Red Hat, Inc.
3 *
4 * This file is released under the GPL.
5 */
6
7#ifndef DM_BIO_PRISON_H
8#define DM_BIO_PRISON_H
9
10#include "persistent-data/dm-block-manager.h" /* FIXME: for dm_block_t */
11#include "dm-thin-metadata.h" /* FIXME: for dm_thin_id */
12
13#include <linux/list.h>
14#include <linux/bio.h>
15
16/*----------------------------------------------------------------*/
17
18/*
19 * Sometimes we can't deal with a bio straight away. We put them in prison
20 * where they can't cause any mischief. Bios are put in a cell identified
21 * by a key, multiple bios can be in the same cell. When the cell is
22 * subsequently unlocked the bios become available.
23 */
24struct dm_bio_prison;
25struct dm_bio_prison_cell;
26
27/* FIXME: this needs to be more abstract */
28struct dm_cell_key {
29 int virtual;
30 dm_thin_id dev;
31 dm_block_t block;
32};
33
34struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells);
35void dm_bio_prison_destroy(struct dm_bio_prison *prison);
36
37/*
38 * This may block if a new cell needs allocating. You must ensure that
39 * cells will be unlocked even if the calling thread is blocked.
40 *
41 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
42 */
43int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
44 struct bio *inmate, struct dm_bio_prison_cell **ref);
45
46void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios);
47void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio); // FIXME: bio arg not needed
48void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates);
49void dm_cell_error(struct dm_bio_prison_cell *cell);
50
51/*----------------------------------------------------------------*/
52
53/*
54 * We use the deferred set to keep track of pending reads to shared blocks.
55 * We do this to ensure the new mapping caused by a write isn't performed
56 * until these prior reads have completed. Otherwise the insertion of the
57 * new mapping could free the old block that the read bios are mapped to.
58 */
59
60struct dm_deferred_set;
61struct dm_deferred_entry;
62
63struct dm_deferred_set *dm_deferred_set_create(void);
64void dm_deferred_set_destroy(struct dm_deferred_set *ds);
65
66struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds);
67void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head);
68int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work);
69
70/*----------------------------------------------------------------*/
71
72#endif
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index cc06a1e52423..651ca79881dd 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -280,9 +280,7 @@ static void __cache_size_refresh(void)
280 BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock)); 280 BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
281 BUG_ON(dm_bufio_client_count < 0); 281 BUG_ON(dm_bufio_client_count < 0);
282 282
283 dm_bufio_cache_size_latch = dm_bufio_cache_size; 283 dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
284
285 barrier();
286 284
287 /* 285 /*
288 * Use default if set to 0 and report the actual cache size used. 286 * Use default if set to 0 and report the actual cache size used.
@@ -441,8 +439,7 @@ static void __relink_lru(struct dm_buffer *b, int dirty)
441 c->n_buffers[b->list_mode]--; 439 c->n_buffers[b->list_mode]--;
442 c->n_buffers[dirty]++; 440 c->n_buffers[dirty]++;
443 b->list_mode = dirty; 441 b->list_mode = dirty;
444 list_del(&b->lru_list); 442 list_move(&b->lru_list, &c->lru[dirty]);
445 list_add(&b->lru_list, &c->lru[dirty]);
446} 443}
447 444
448/*---------------------------------------------------------------- 445/*----------------------------------------------------------------
@@ -813,7 +810,7 @@ static void __get_memory_limit(struct dm_bufio_client *c,
813{ 810{
814 unsigned long buffers; 811 unsigned long buffers;
815 812
816 if (dm_bufio_cache_size != dm_bufio_cache_size_latch) { 813 if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
817 mutex_lock(&dm_bufio_clients_lock); 814 mutex_lock(&dm_bufio_clients_lock);
818 __cache_size_refresh(); 815 __cache_size_refresh();
819 mutex_unlock(&dm_bufio_clients_lock); 816 mutex_unlock(&dm_bufio_clients_lock);
@@ -1591,11 +1588,9 @@ EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1591 1588
1592static void cleanup_old_buffers(void) 1589static void cleanup_old_buffers(void)
1593{ 1590{
1594 unsigned long max_age = dm_bufio_max_age; 1591 unsigned long max_age = ACCESS_ONCE(dm_bufio_max_age);
1595 struct dm_bufio_client *c; 1592 struct dm_bufio_client *c;
1596 1593
1597 barrier();
1598
1599 if (max_age > ULONG_MAX / HZ) 1594 if (max_age > ULONG_MAX / HZ)
1600 max_age = ULONG_MAX / HZ; 1595 max_age = ULONG_MAX / HZ;
1601 1596
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 664743d6a6cd..bbf459bca61d 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -798,14 +798,6 @@ static int crypt_convert(struct crypt_config *cc,
798 return 0; 798 return 0;
799} 799}
800 800
801static void dm_crypt_bio_destructor(struct bio *bio)
802{
803 struct dm_crypt_io *io = bio->bi_private;
804 struct crypt_config *cc = io->cc;
805
806 bio_free(bio, cc->bs);
807}
808
809/* 801/*
810 * Generate a new unfragmented bio with the given size 802 * Generate a new unfragmented bio with the given size
811 * This should never violate the device limitations 803 * This should never violate the device limitations
@@ -974,7 +966,6 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
974 clone->bi_end_io = crypt_endio; 966 clone->bi_end_io = crypt_endio;
975 clone->bi_bdev = cc->dev->bdev; 967 clone->bi_bdev = cc->dev->bdev;
976 clone->bi_rw = io->base_bio->bi_rw; 968 clone->bi_rw = io->base_bio->bi_rw;
977 clone->bi_destructor = dm_crypt_bio_destructor;
978} 969}
979 970
980static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) 971static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@@ -988,19 +979,14 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
988 * copy the required bvecs because we need the original 979 * copy the required bvecs because we need the original
989 * one in order to decrypt the whole bio data *afterwards*. 980 * one in order to decrypt the whole bio data *afterwards*.
990 */ 981 */
991 clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs); 982 clone = bio_clone_bioset(base_bio, gfp, cc->bs);
992 if (!clone) 983 if (!clone)
993 return 1; 984 return 1;
994 985
995 crypt_inc_pending(io); 986 crypt_inc_pending(io);
996 987
997 clone_init(io, clone); 988 clone_init(io, clone);
998 clone->bi_idx = 0;
999 clone->bi_vcnt = bio_segments(base_bio);
1000 clone->bi_size = base_bio->bi_size;
1001 clone->bi_sector = cc->start + io->sector; 989 clone->bi_sector = cc->start + io->sector;
1002 memcpy(clone->bi_io_vec, bio_iovec(base_bio),
1003 sizeof(struct bio_vec) * clone->bi_vcnt);
1004 990
1005 generic_make_request(clone); 991 generic_make_request(clone);
1006 return 0; 992 return 0;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index ea5dd289fe2a..1c46f97d6664 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -249,16 +249,6 @@ static void vm_dp_init(struct dpages *dp, void *data)
249 dp->context_ptr = data; 249 dp->context_ptr = data;
250} 250}
251 251
252static void dm_bio_destructor(struct bio *bio)
253{
254 unsigned region;
255 struct io *io;
256
257 retrieve_io_and_region_from_bio(bio, &io, &region);
258
259 bio_free(bio, io->client->bios);
260}
261
262/* 252/*
263 * Functions for getting the pages from kernel memory. 253 * Functions for getting the pages from kernel memory.
264 */ 254 */
@@ -317,7 +307,6 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
317 bio->bi_sector = where->sector + (where->count - remaining); 307 bio->bi_sector = where->sector + (where->count - remaining);
318 bio->bi_bdev = where->bdev; 308 bio->bi_bdev = where->bdev;
319 bio->bi_end_io = endio; 309 bio->bi_end_io = endio;
320 bio->bi_destructor = dm_bio_destructor;
321 store_io_and_region_in_bio(bio, io, region); 310 store_io_and_region_in_bio(bio, io, region);
322 311
323 if (rw & REQ_DISCARD) { 312 if (rw & REQ_DISCARD) {
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d778563a4ffd..573bd04591bf 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1309,13 +1309,14 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
1309{ 1309{
1310 struct multipath *m = ti->private; 1310 struct multipath *m = ti->private;
1311 struct dm_mpath_io *mpio = map_context->ptr; 1311 struct dm_mpath_io *mpio = map_context->ptr;
1312 struct pgpath *pgpath = mpio->pgpath; 1312 struct pgpath *pgpath;
1313 struct path_selector *ps; 1313 struct path_selector *ps;
1314 int r; 1314 int r;
1315 1315
1316 BUG_ON(!mpio); 1316 BUG_ON(!mpio);
1317 1317
1318 r = do_end_io(m, clone, error, mpio); 1318 r = do_end_io(m, clone, error, mpio);
1319 pgpath = mpio->pgpath;
1319 if (pgpath) { 1320 if (pgpath) {
1320 ps = &pgpath->pg->ps; 1321 ps = &pgpath->pg->ps;
1321 if (ps->type->end_io) 1322 if (ps->type->end_io)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 982e3e390c45..45d94a7e7f6d 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -338,6 +338,84 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
338} 338}
339 339
340/* 340/*
341 * validate_rebuild_devices
342 * @rs
343 *
344 * Determine if the devices specified for rebuild can result in a valid
345 * usable array that is capable of rebuilding the given devices.
346 *
347 * Returns: 0 on success, -EINVAL on failure.
348 */
349static int validate_rebuild_devices(struct raid_set *rs)
350{
351 unsigned i, rebuild_cnt = 0;
352 unsigned rebuilds_per_group, copies, d;
353
354 if (!(rs->print_flags & DMPF_REBUILD))
355 return 0;
356
357 for (i = 0; i < rs->md.raid_disks; i++)
358 if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
359 rebuild_cnt++;
360
361 switch (rs->raid_type->level) {
362 case 1:
363 if (rebuild_cnt >= rs->md.raid_disks)
364 goto too_many;
365 break;
366 case 4:
367 case 5:
368 case 6:
369 if (rebuild_cnt > rs->raid_type->parity_devs)
370 goto too_many;
371 break;
372 case 10:
373 copies = raid10_md_layout_to_copies(rs->md.layout);
374 if (rebuild_cnt < copies)
375 break;
376
377 /*
378 * It is possible to have a higher rebuild count for RAID10,
379 * as long as the failed devices occur in different mirror
380 * groups (i.e. different stripes).
381 *
382 * Right now, we only allow for "near" copies. When other
383 * formats are added, we will have to check those too.
384 *
385 * When checking "near" format, make sure no adjacent devices
386 * have failed beyond what can be handled. In addition to the
387 * simple case where the number of devices is a multiple of the
388 * number of copies, we must also handle cases where the number
389 * of devices is not a multiple of the number of copies.
390 * E.g. dev1 dev2 dev3 dev4 dev5
391 * A A B B C
392 * C D D E E
393 */
394 rebuilds_per_group = 0;
395 for (i = 0; i < rs->md.raid_disks * copies; i++) {
396 d = i % rs->md.raid_disks;
397 if (!test_bit(In_sync, &rs->dev[d].rdev.flags) &&
398 (++rebuilds_per_group >= copies))
399 goto too_many;
400 if (!((i + 1) % copies))
401 rebuilds_per_group = 0;
402 }
403 break;
404 default:
405 DMERR("The rebuild parameter is not supported for %s",
406 rs->raid_type->name);
407 rs->ti->error = "Rebuild not supported for this RAID type";
408 return -EINVAL;
409 }
410
411 return 0;
412
413too_many:
414 rs->ti->error = "Too many rebuild devices specified";
415 return -EINVAL;
416}
417
418/*
341 * Possible arguments are... 419 * Possible arguments are...
342 * <chunk_size> [optional_args] 420 * <chunk_size> [optional_args]
343 * 421 *
@@ -365,7 +443,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
365{ 443{
366 char *raid10_format = "near"; 444 char *raid10_format = "near";
367 unsigned raid10_copies = 2; 445 unsigned raid10_copies = 2;
368 unsigned i, rebuild_cnt = 0; 446 unsigned i;
369 unsigned long value, region_size = 0; 447 unsigned long value, region_size = 0;
370 sector_t sectors_per_dev = rs->ti->len; 448 sector_t sectors_per_dev = rs->ti->len;
371 sector_t max_io_len; 449 sector_t max_io_len;
@@ -461,31 +539,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
461 539
462 /* Parameters that take a numeric value are checked here */ 540 /* Parameters that take a numeric value are checked here */
463 if (!strcasecmp(key, "rebuild")) { 541 if (!strcasecmp(key, "rebuild")) {
464 rebuild_cnt++; 542 if (value >= rs->md.raid_disks) {
465
466 switch (rs->raid_type->level) {
467 case 1:
468 if (rebuild_cnt >= rs->md.raid_disks) {
469 rs->ti->error = "Too many rebuild devices specified";
470 return -EINVAL;
471 }
472 break;
473 case 4:
474 case 5:
475 case 6:
476 if (rebuild_cnt > rs->raid_type->parity_devs) {
477 rs->ti->error = "Too many rebuild devices specified for given RAID type";
478 return -EINVAL;
479 }
480 break;
481 case 10:
482 default:
483 DMERR("The rebuild parameter is not supported for %s", rs->raid_type->name);
484 rs->ti->error = "Rebuild not supported for this RAID type";
485 return -EINVAL;
486 }
487
488 if (value > rs->md.raid_disks) {
489 rs->ti->error = "Invalid rebuild index given"; 543 rs->ti->error = "Invalid rebuild index given";
490 return -EINVAL; 544 return -EINVAL;
491 } 545 }
@@ -608,6 +662,9 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
608 } 662 }
609 rs->md.dev_sectors = sectors_per_dev; 663 rs->md.dev_sectors = sectors_per_dev;
610 664
665 if (validate_rebuild_devices(rs))
666 return -EINVAL;
667
611 /* Assume there are no metadata devices until the drives are parsed */ 668 /* Assume there are no metadata devices until the drives are parsed */
612 rs->md.persistent = 0; 669 rs->md.persistent = 0;
613 rs->md.external = 1; 670 rs->md.external = 1;
@@ -960,6 +1017,19 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
960 1017
961 freshest = NULL; 1018 freshest = NULL;
962 rdev_for_each_safe(rdev, tmp, mddev) { 1019 rdev_for_each_safe(rdev, tmp, mddev) {
1020 /*
1021 * Skipping super_load due to DMPF_SYNC will cause
1022 * the array to undergo initialization again as
1023 * though it were new. This is the intended effect
1024 * of the "sync" directive.
1025 *
1026 * When reshaping capability is added, we must ensure
1027 * that the "sync" directive is disallowed during the
1028 * reshape.
1029 */
1030 if (rs->print_flags & DMPF_SYNC)
1031 continue;
1032
963 if (!rdev->meta_bdev) 1033 if (!rdev->meta_bdev)
964 continue; 1034 continue;
965 1035
@@ -1360,7 +1430,7 @@ static void raid_resume(struct dm_target *ti)
1360 1430
1361static struct target_type raid_target = { 1431static struct target_type raid_target = {
1362 .name = "raid", 1432 .name = "raid",
1363 .version = {1, 3, 0}, 1433 .version = {1, 3, 1},
1364 .module = THIS_MODULE, 1434 .module = THIS_MODULE,
1365 .ctr = raid_ctr, 1435 .ctr = raid_ctr,
1366 .dtr = raid_dtr, 1436 .dtr = raid_dtr,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index c29410af1e22..058acf3a5ba7 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -5,6 +5,7 @@
5 */ 5 */
6 6
7#include "dm-thin-metadata.h" 7#include "dm-thin-metadata.h"
8#include "dm-bio-prison.h"
8#include "dm.h" 9#include "dm.h"
9 10
10#include <linux/device-mapper.h> 11#include <linux/device-mapper.h>
@@ -21,7 +22,6 @@
21 * Tunable constants 22 * Tunable constants
22 */ 23 */
23#define ENDIO_HOOK_POOL_SIZE 1024 24#define ENDIO_HOOK_POOL_SIZE 1024
24#define DEFERRED_SET_SIZE 64
25#define MAPPING_POOL_SIZE 1024 25#define MAPPING_POOL_SIZE 1024
26#define PRISON_CELLS 1024 26#define PRISON_CELLS 1024
27#define COMMIT_PERIOD HZ 27#define COMMIT_PERIOD HZ
@@ -58,7 +58,7 @@
58 * i) plug io further to this physical block. (see bio_prison code). 58 * i) plug io further to this physical block. (see bio_prison code).
59 * 59 *
60 * ii) quiesce any read io to that shared data block. Obviously 60 * ii) quiesce any read io to that shared data block. Obviously
61 * including all devices that share this block. (see deferred_set code) 61 * including all devices that share this block. (see dm_deferred_set code)
62 * 62 *
63 * iii) copy the data block to a newly allocate block. This step can be 63 * iii) copy the data block to a newly allocate block. This step can be
64 * missed out if the io covers the block. (schedule_copy). 64 * missed out if the io covers the block. (schedule_copy).
@@ -99,381 +99,10 @@
99/*----------------------------------------------------------------*/ 99/*----------------------------------------------------------------*/
100 100
101/* 101/*
102 * Sometimes we can't deal with a bio straight away. We put them in prison
103 * where they can't cause any mischief. Bios are put in a cell identified
104 * by a key, multiple bios can be in the same cell. When the cell is
105 * subsequently unlocked the bios become available.
106 */
107struct bio_prison;
108
109struct cell_key {
110 int virtual;
111 dm_thin_id dev;
112 dm_block_t block;
113};
114
115struct dm_bio_prison_cell {
116 struct hlist_node list;
117 struct bio_prison *prison;
118 struct cell_key key;
119 struct bio *holder;
120 struct bio_list bios;
121};
122
123struct bio_prison {
124 spinlock_t lock;
125 mempool_t *cell_pool;
126
127 unsigned nr_buckets;
128 unsigned hash_mask;
129 struct hlist_head *cells;
130};
131
132static uint32_t calc_nr_buckets(unsigned nr_cells)
133{
134 uint32_t n = 128;
135
136 nr_cells /= 4;
137 nr_cells = min(nr_cells, 8192u);
138
139 while (n < nr_cells)
140 n <<= 1;
141
142 return n;
143}
144
145static struct kmem_cache *_cell_cache;
146
147/*
148 * @nr_cells should be the number of cells you want in use _concurrently_.
149 * Don't confuse it with the number of distinct keys.
150 */
151static struct bio_prison *prison_create(unsigned nr_cells)
152{
153 unsigned i;
154 uint32_t nr_buckets = calc_nr_buckets(nr_cells);
155 size_t len = sizeof(struct bio_prison) +
156 (sizeof(struct hlist_head) * nr_buckets);
157 struct bio_prison *prison = kmalloc(len, GFP_KERNEL);
158
159 if (!prison)
160 return NULL;
161
162 spin_lock_init(&prison->lock);
163 prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
164 if (!prison->cell_pool) {
165 kfree(prison);
166 return NULL;
167 }
168
169 prison->nr_buckets = nr_buckets;
170 prison->hash_mask = nr_buckets - 1;
171 prison->cells = (struct hlist_head *) (prison + 1);
172 for (i = 0; i < nr_buckets; i++)
173 INIT_HLIST_HEAD(prison->cells + i);
174
175 return prison;
176}
177
178static void prison_destroy(struct bio_prison *prison)
179{
180 mempool_destroy(prison->cell_pool);
181 kfree(prison);
182}
183
184static uint32_t hash_key(struct bio_prison *prison, struct cell_key *key)
185{
186 const unsigned long BIG_PRIME = 4294967291UL;
187 uint64_t hash = key->block * BIG_PRIME;
188
189 return (uint32_t) (hash & prison->hash_mask);
190}
191
192static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
193{
194 return (lhs->virtual == rhs->virtual) &&
195 (lhs->dev == rhs->dev) &&
196 (lhs->block == rhs->block);
197}
198
199static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
200 struct cell_key *key)
201{
202 struct dm_bio_prison_cell *cell;
203 struct hlist_node *tmp;
204
205 hlist_for_each_entry(cell, tmp, bucket, list)
206 if (keys_equal(&cell->key, key))
207 return cell;
208
209 return NULL;
210}
211
212/*
213 * This may block if a new cell needs allocating. You must ensure that
214 * cells will be unlocked even if the calling thread is blocked.
215 *
216 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
217 */
218static int bio_detain(struct bio_prison *prison, struct cell_key *key,
219 struct bio *inmate, struct dm_bio_prison_cell **ref)
220{
221 int r = 1;
222 unsigned long flags;
223 uint32_t hash = hash_key(prison, key);
224 struct dm_bio_prison_cell *cell, *cell2;
225
226 BUG_ON(hash > prison->nr_buckets);
227
228 spin_lock_irqsave(&prison->lock, flags);
229
230 cell = __search_bucket(prison->cells + hash, key);
231 if (cell) {
232 bio_list_add(&cell->bios, inmate);
233 goto out;
234 }
235
236 /*
237 * Allocate a new cell
238 */
239 spin_unlock_irqrestore(&prison->lock, flags);
240 cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
241 spin_lock_irqsave(&prison->lock, flags);
242
243 /*
244 * We've been unlocked, so we have to double check that
245 * nobody else has inserted this cell in the meantime.
246 */
247 cell = __search_bucket(prison->cells + hash, key);
248 if (cell) {
249 mempool_free(cell2, prison->cell_pool);
250 bio_list_add(&cell->bios, inmate);
251 goto out;
252 }
253
254 /*
255 * Use new cell.
256 */
257 cell = cell2;
258
259 cell->prison = prison;
260 memcpy(&cell->key, key, sizeof(cell->key));
261 cell->holder = inmate;
262 bio_list_init(&cell->bios);
263 hlist_add_head(&cell->list, prison->cells + hash);
264
265 r = 0;
266
267out:
268 spin_unlock_irqrestore(&prison->lock, flags);
269
270 *ref = cell;
271
272 return r;
273}
274
275/*
276 * @inmates must have been initialised prior to this call
277 */
278static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
279{
280 struct bio_prison *prison = cell->prison;
281
282 hlist_del(&cell->list);
283
284 if (inmates) {
285 bio_list_add(inmates, cell->holder);
286 bio_list_merge(inmates, &cell->bios);
287 }
288
289 mempool_free(cell, prison->cell_pool);
290}
291
292static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
293{
294 unsigned long flags;
295 struct bio_prison *prison = cell->prison;
296
297 spin_lock_irqsave(&prison->lock, flags);
298 __cell_release(cell, bios);
299 spin_unlock_irqrestore(&prison->lock, flags);
300}
301
302/*
303 * There are a couple of places where we put a bio into a cell briefly
304 * before taking it out again. In these situations we know that no other
305 * bio may be in the cell. This function releases the cell, and also does
306 * a sanity check.
307 */
308static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
309{
310 BUG_ON(cell->holder != bio);
311 BUG_ON(!bio_list_empty(&cell->bios));
312
313 __cell_release(cell, NULL);
314}
315
316static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
317{
318 unsigned long flags;
319 struct bio_prison *prison = cell->prison;
320
321 spin_lock_irqsave(&prison->lock, flags);
322 __cell_release_singleton(cell, bio);
323 spin_unlock_irqrestore(&prison->lock, flags);
324}
325
326/*
327 * Sometimes we don't want the holder, just the additional bios.
328 */
329static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
330 struct bio_list *inmates)
331{
332 struct bio_prison *prison = cell->prison;
333
334 hlist_del(&cell->list);
335 bio_list_merge(inmates, &cell->bios);
336
337 mempool_free(cell, prison->cell_pool);
338}
339
340static void cell_release_no_holder(struct dm_bio_prison_cell *cell,
341 struct bio_list *inmates)
342{
343 unsigned long flags;
344 struct bio_prison *prison = cell->prison;
345
346 spin_lock_irqsave(&prison->lock, flags);
347 __cell_release_no_holder(cell, inmates);
348 spin_unlock_irqrestore(&prison->lock, flags);
349}
350
351static void cell_error(struct dm_bio_prison_cell *cell)
352{
353 struct bio_prison *prison = cell->prison;
354 struct bio_list bios;
355 struct bio *bio;
356 unsigned long flags;
357
358 bio_list_init(&bios);
359
360 spin_lock_irqsave(&prison->lock, flags);
361 __cell_release(cell, &bios);
362 spin_unlock_irqrestore(&prison->lock, flags);
363
364 while ((bio = bio_list_pop(&bios)))
365 bio_io_error(bio);
366}
367
368/*----------------------------------------------------------------*/
369
370/*
371 * We use the deferred set to keep track of pending reads to shared blocks.
372 * We do this to ensure the new mapping caused by a write isn't performed
373 * until these prior reads have completed. Otherwise the insertion of the
374 * new mapping could free the old block that the read bios are mapped to.
375 */
376
377struct deferred_set;
378struct deferred_entry {
379 struct deferred_set *ds;
380 unsigned count;
381 struct list_head work_items;
382};
383
384struct deferred_set {
385 spinlock_t lock;
386 unsigned current_entry;
387 unsigned sweeper;
388 struct deferred_entry entries[DEFERRED_SET_SIZE];
389};
390
391static void ds_init(struct deferred_set *ds)
392{
393 int i;
394
395 spin_lock_init(&ds->lock);
396 ds->current_entry = 0;
397 ds->sweeper = 0;
398 for (i = 0; i < DEFERRED_SET_SIZE; i++) {
399 ds->entries[i].ds = ds;
400 ds->entries[i].count = 0;
401 INIT_LIST_HEAD(&ds->entries[i].work_items);
402 }
403}
404
405static struct deferred_entry *ds_inc(struct deferred_set *ds)
406{
407 unsigned long flags;
408 struct deferred_entry *entry;
409
410 spin_lock_irqsave(&ds->lock, flags);
411 entry = ds->entries + ds->current_entry;
412 entry->count++;
413 spin_unlock_irqrestore(&ds->lock, flags);
414
415 return entry;
416}
417
418static unsigned ds_next(unsigned index)
419{
420 return (index + 1) % DEFERRED_SET_SIZE;
421}
422
423static void __sweep(struct deferred_set *ds, struct list_head *head)
424{
425 while ((ds->sweeper != ds->current_entry) &&
426 !ds->entries[ds->sweeper].count) {
427 list_splice_init(&ds->entries[ds->sweeper].work_items, head);
428 ds->sweeper = ds_next(ds->sweeper);
429 }
430
431 if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
432 list_splice_init(&ds->entries[ds->sweeper].work_items, head);
433}
434
435static void ds_dec(struct deferred_entry *entry, struct list_head *head)
436{
437 unsigned long flags;
438
439 spin_lock_irqsave(&entry->ds->lock, flags);
440 BUG_ON(!entry->count);
441 --entry->count;
442 __sweep(entry->ds, head);
443 spin_unlock_irqrestore(&entry->ds->lock, flags);
444}
445
446/*
447 * Returns 1 if deferred or 0 if no pending items to delay job.
448 */
449static int ds_add_work(struct deferred_set *ds, struct list_head *work)
450{
451 int r = 1;
452 unsigned long flags;
453 unsigned next_entry;
454
455 spin_lock_irqsave(&ds->lock, flags);
456 if ((ds->sweeper == ds->current_entry) &&
457 !ds->entries[ds->current_entry].count)
458 r = 0;
459 else {
460 list_add(work, &ds->entries[ds->current_entry].work_items);
461 next_entry = ds_next(ds->current_entry);
462 if (!ds->entries[next_entry].count)
463 ds->current_entry = next_entry;
464 }
465 spin_unlock_irqrestore(&ds->lock, flags);
466
467 return r;
468}
469
470/*----------------------------------------------------------------*/
471
472/*
473 * Key building. 102 * Key building.
474 */ 103 */
475static void build_data_key(struct dm_thin_device *td, 104static void build_data_key(struct dm_thin_device *td,
476 dm_block_t b, struct cell_key *key) 105 dm_block_t b, struct dm_cell_key *key)
477{ 106{
478 key->virtual = 0; 107 key->virtual = 0;
479 key->dev = dm_thin_dev_id(td); 108 key->dev = dm_thin_dev_id(td);
@@ -481,7 +110,7 @@ static void build_data_key(struct dm_thin_device *td,
481} 110}
482 111
483static void build_virtual_key(struct dm_thin_device *td, dm_block_t b, 112static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
484 struct cell_key *key) 113 struct dm_cell_key *key)
485{ 114{
486 key->virtual = 1; 115 key->virtual = 1;
487 key->dev = dm_thin_dev_id(td); 116 key->dev = dm_thin_dev_id(td);
@@ -534,7 +163,7 @@ struct pool {
534 unsigned low_water_triggered:1; /* A dm event has been sent */ 163 unsigned low_water_triggered:1; /* A dm event has been sent */
535 unsigned no_free_space:1; /* A -ENOSPC warning has been issued */ 164 unsigned no_free_space:1; /* A -ENOSPC warning has been issued */
536 165
537 struct bio_prison *prison; 166 struct dm_bio_prison *prison;
538 struct dm_kcopyd_client *copier; 167 struct dm_kcopyd_client *copier;
539 168
540 struct workqueue_struct *wq; 169 struct workqueue_struct *wq;
@@ -552,8 +181,8 @@ struct pool {
552 181
553 struct bio_list retry_on_resume_list; 182 struct bio_list retry_on_resume_list;
554 183
555 struct deferred_set shared_read_ds; 184 struct dm_deferred_set *shared_read_ds;
556 struct deferred_set all_io_ds; 185 struct dm_deferred_set *all_io_ds;
557 186
558 struct dm_thin_new_mapping *next_mapping; 187 struct dm_thin_new_mapping *next_mapping;
559 mempool_t *mapping_pool; 188 mempool_t *mapping_pool;
@@ -660,8 +289,8 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev
660 289
661struct dm_thin_endio_hook { 290struct dm_thin_endio_hook {
662 struct thin_c *tc; 291 struct thin_c *tc;
663 struct deferred_entry *shared_read_entry; 292 struct dm_deferred_entry *shared_read_entry;
664 struct deferred_entry *all_io_entry; 293 struct dm_deferred_entry *all_io_entry;
665 struct dm_thin_new_mapping *overwrite_mapping; 294 struct dm_thin_new_mapping *overwrite_mapping;
666}; 295};
667 296
@@ -877,7 +506,7 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
877 unsigned long flags; 506 unsigned long flags;
878 507
879 spin_lock_irqsave(&pool->lock, flags); 508 spin_lock_irqsave(&pool->lock, flags);
880 cell_release(cell, &pool->deferred_bios); 509 dm_cell_release(cell, &pool->deferred_bios);
881 spin_unlock_irqrestore(&tc->pool->lock, flags); 510 spin_unlock_irqrestore(&tc->pool->lock, flags);
882 511
883 wake_worker(pool); 512 wake_worker(pool);
@@ -896,7 +525,7 @@ static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell
896 bio_list_init(&bios); 525 bio_list_init(&bios);
897 526
898 spin_lock_irqsave(&pool->lock, flags); 527 spin_lock_irqsave(&pool->lock, flags);
899 cell_release_no_holder(cell, &pool->deferred_bios); 528 dm_cell_release_no_holder(cell, &pool->deferred_bios);
900 spin_unlock_irqrestore(&pool->lock, flags); 529 spin_unlock_irqrestore(&pool->lock, flags);
901 530
902 wake_worker(pool); 531 wake_worker(pool);
@@ -906,7 +535,7 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
906{ 535{
907 if (m->bio) 536 if (m->bio)
908 m->bio->bi_end_io = m->saved_bi_end_io; 537 m->bio->bi_end_io = m->saved_bi_end_io;
909 cell_error(m->cell); 538 dm_cell_error(m->cell);
910 list_del(&m->list); 539 list_del(&m->list);
911 mempool_free(m, m->tc->pool->mapping_pool); 540 mempool_free(m, m->tc->pool->mapping_pool);
912} 541}
@@ -921,7 +550,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
921 bio->bi_end_io = m->saved_bi_end_io; 550 bio->bi_end_io = m->saved_bi_end_io;
922 551
923 if (m->err) { 552 if (m->err) {
924 cell_error(m->cell); 553 dm_cell_error(m->cell);
925 goto out; 554 goto out;
926 } 555 }
927 556
@@ -933,7 +562,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
933 r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block); 562 r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
934 if (r) { 563 if (r) {
935 DMERR("dm_thin_insert_block() failed"); 564 DMERR("dm_thin_insert_block() failed");
936 cell_error(m->cell); 565 dm_cell_error(m->cell);
937 goto out; 566 goto out;
938 } 567 }
939 568
@@ -1067,7 +696,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
1067 m->err = 0; 696 m->err = 0;
1068 m->bio = NULL; 697 m->bio = NULL;
1069 698
1070 if (!ds_add_work(&pool->shared_read_ds, &m->list)) 699 if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
1071 m->quiesced = 1; 700 m->quiesced = 1;
1072 701
1073 /* 702 /*
@@ -1099,7 +728,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
1099 if (r < 0) { 728 if (r < 0) {
1100 mempool_free(m, pool->mapping_pool); 729 mempool_free(m, pool->mapping_pool);
1101 DMERR("dm_kcopyd_copy() failed"); 730 DMERR("dm_kcopyd_copy() failed");
1102 cell_error(cell); 731 dm_cell_error(cell);
1103 } 732 }
1104 } 733 }
1105} 734}
@@ -1164,7 +793,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
1164 if (r < 0) { 793 if (r < 0) {
1165 mempool_free(m, pool->mapping_pool); 794 mempool_free(m, pool->mapping_pool);
1166 DMERR("dm_kcopyd_zero() failed"); 795 DMERR("dm_kcopyd_zero() failed");
1167 cell_error(cell); 796 dm_cell_error(cell);
1168 } 797 }
1169 } 798 }
1170} 799}
@@ -1276,7 +905,7 @@ static void no_space(struct dm_bio_prison_cell *cell)
1276 struct bio_list bios; 905 struct bio_list bios;
1277 906
1278 bio_list_init(&bios); 907 bio_list_init(&bios);
1279 cell_release(cell, &bios); 908 dm_cell_release(cell, &bios);
1280 909
1281 while ((bio = bio_list_pop(&bios))) 910 while ((bio = bio_list_pop(&bios)))
1282 retry_on_resume(bio); 911 retry_on_resume(bio);
@@ -1288,13 +917,13 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
1288 unsigned long flags; 917 unsigned long flags;
1289 struct pool *pool = tc->pool; 918 struct pool *pool = tc->pool;
1290 struct dm_bio_prison_cell *cell, *cell2; 919 struct dm_bio_prison_cell *cell, *cell2;
1291 struct cell_key key, key2; 920 struct dm_cell_key key, key2;
1292 dm_block_t block = get_bio_block(tc, bio); 921 dm_block_t block = get_bio_block(tc, bio);
1293 struct dm_thin_lookup_result lookup_result; 922 struct dm_thin_lookup_result lookup_result;
1294 struct dm_thin_new_mapping *m; 923 struct dm_thin_new_mapping *m;
1295 924
1296 build_virtual_key(tc->td, block, &key); 925 build_virtual_key(tc->td, block, &key);
1297 if (bio_detain(tc->pool->prison, &key, bio, &cell)) 926 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
1298 return; 927 return;
1299 928
1300 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); 929 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
@@ -1306,8 +935,8 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
1306 * on this block. 935 * on this block.
1307 */ 936 */
1308 build_data_key(tc->td, lookup_result.block, &key2); 937 build_data_key(tc->td, lookup_result.block, &key2);
1309 if (bio_detain(tc->pool->prison, &key2, bio, &cell2)) { 938 if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
1310 cell_release_singleton(cell, bio); 939 dm_cell_release_singleton(cell, bio);
1311 break; 940 break;
1312 } 941 }
1313 942
@@ -1326,7 +955,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
1326 m->err = 0; 955 m->err = 0;
1327 m->bio = bio; 956 m->bio = bio;
1328 957
1329 if (!ds_add_work(&pool->all_io_ds, &m->list)) { 958 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
1330 spin_lock_irqsave(&pool->lock, flags); 959 spin_lock_irqsave(&pool->lock, flags);
1331 list_add(&m->list, &pool->prepared_discards); 960 list_add(&m->list, &pool->prepared_discards);
1332 spin_unlock_irqrestore(&pool->lock, flags); 961 spin_unlock_irqrestore(&pool->lock, flags);
@@ -1338,8 +967,8 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
1338 * a block boundary. So we submit the discard of a 967 * a block boundary. So we submit the discard of a
1339 * partial block appropriately. 968 * partial block appropriately.
1340 */ 969 */
1341 cell_release_singleton(cell, bio); 970 dm_cell_release_singleton(cell, bio);
1342 cell_release_singleton(cell2, bio); 971 dm_cell_release_singleton(cell2, bio);
1343 if ((!lookup_result.shared) && pool->pf.discard_passdown) 972 if ((!lookup_result.shared) && pool->pf.discard_passdown)
1344 remap_and_issue(tc, bio, lookup_result.block); 973 remap_and_issue(tc, bio, lookup_result.block);
1345 else 974 else
@@ -1351,20 +980,20 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
1351 /* 980 /*
1352 * It isn't provisioned, just forget it. 981 * It isn't provisioned, just forget it.
1353 */ 982 */
1354 cell_release_singleton(cell, bio); 983 dm_cell_release_singleton(cell, bio);
1355 bio_endio(bio, 0); 984 bio_endio(bio, 0);
1356 break; 985 break;
1357 986
1358 default: 987 default:
1359 DMERR("discard: find block unexpectedly returned %d", r); 988 DMERR("discard: find block unexpectedly returned %d", r);
1360 cell_release_singleton(cell, bio); 989 dm_cell_release_singleton(cell, bio);
1361 bio_io_error(bio); 990 bio_io_error(bio);
1362 break; 991 break;
1363 } 992 }
1364} 993}
1365 994
1366static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, 995static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1367 struct cell_key *key, 996 struct dm_cell_key *key,
1368 struct dm_thin_lookup_result *lookup_result, 997 struct dm_thin_lookup_result *lookup_result,
1369 struct dm_bio_prison_cell *cell) 998 struct dm_bio_prison_cell *cell)
1370{ 999{
@@ -1384,7 +1013,7 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1384 1013
1385 default: 1014 default:
1386 DMERR("%s: alloc_data_block() failed, error = %d", __func__, r); 1015 DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
1387 cell_error(cell); 1016 dm_cell_error(cell);
1388 break; 1017 break;
1389 } 1018 }
1390} 1019}
@@ -1395,14 +1024,14 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1395{ 1024{
1396 struct dm_bio_prison_cell *cell; 1025 struct dm_bio_prison_cell *cell;
1397 struct pool *pool = tc->pool; 1026 struct pool *pool = tc->pool;
1398 struct cell_key key; 1027 struct dm_cell_key key;
1399 1028
1400 /* 1029 /*
1401 * If cell is already occupied, then sharing is already in the process 1030 * If cell is already occupied, then sharing is already in the process
1402 * of being broken so we have nothing further to do here. 1031 * of being broken so we have nothing further to do here.
1403 */ 1032 */
1404 build_data_key(tc->td, lookup_result->block, &key); 1033 build_data_key(tc->td, lookup_result->block, &key);
1405 if (bio_detain(pool->prison, &key, bio, &cell)) 1034 if (dm_bio_detain(pool->prison, &key, bio, &cell))
1406 return; 1035 return;
1407 1036
1408 if (bio_data_dir(bio) == WRITE && bio->bi_size) 1037 if (bio_data_dir(bio) == WRITE && bio->bi_size)
@@ -1410,9 +1039,9 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1410 else { 1039 else {
1411 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; 1040 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1412 1041
1413 h->shared_read_entry = ds_inc(&pool->shared_read_ds); 1042 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
1414 1043
1415 cell_release_singleton(cell, bio); 1044 dm_cell_release_singleton(cell, bio);
1416 remap_and_issue(tc, bio, lookup_result->block); 1045 remap_and_issue(tc, bio, lookup_result->block);
1417 } 1046 }
1418} 1047}
@@ -1427,7 +1056,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
1427 * Remap empty bios (flushes) immediately, without provisioning. 1056 * Remap empty bios (flushes) immediately, without provisioning.
1428 */ 1057 */
1429 if (!bio->bi_size) { 1058 if (!bio->bi_size) {
1430 cell_release_singleton(cell, bio); 1059 dm_cell_release_singleton(cell, bio);
1431 remap_and_issue(tc, bio, 0); 1060 remap_and_issue(tc, bio, 0);
1432 return; 1061 return;
1433 } 1062 }
@@ -1437,7 +1066,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
1437 */ 1066 */
1438 if (bio_data_dir(bio) == READ) { 1067 if (bio_data_dir(bio) == READ) {
1439 zero_fill_bio(bio); 1068 zero_fill_bio(bio);
1440 cell_release_singleton(cell, bio); 1069 dm_cell_release_singleton(cell, bio);
1441 bio_endio(bio, 0); 1070 bio_endio(bio, 0);
1442 return; 1071 return;
1443 } 1072 }
@@ -1458,7 +1087,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
1458 default: 1087 default:
1459 DMERR("%s: alloc_data_block() failed, error = %d", __func__, r); 1088 DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
1460 set_pool_mode(tc->pool, PM_READ_ONLY); 1089 set_pool_mode(tc->pool, PM_READ_ONLY);
1461 cell_error(cell); 1090 dm_cell_error(cell);
1462 break; 1091 break;
1463 } 1092 }
1464} 1093}
@@ -1468,7 +1097,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
1468 int r; 1097 int r;
1469 dm_block_t block = get_bio_block(tc, bio); 1098 dm_block_t block = get_bio_block(tc, bio);
1470 struct dm_bio_prison_cell *cell; 1099 struct dm_bio_prison_cell *cell;
1471 struct cell_key key; 1100 struct dm_cell_key key;
1472 struct dm_thin_lookup_result lookup_result; 1101 struct dm_thin_lookup_result lookup_result;
1473 1102
1474 /* 1103 /*
@@ -1476,7 +1105,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
1476 * being provisioned so we have nothing further to do here. 1105 * being provisioned so we have nothing further to do here.
1477 */ 1106 */
1478 build_virtual_key(tc->td, block, &key); 1107 build_virtual_key(tc->td, block, &key);
1479 if (bio_detain(tc->pool->prison, &key, bio, &cell)) 1108 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
1480 return; 1109 return;
1481 1110
1482 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); 1111 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
@@ -1491,7 +1120,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
1491 * TODO: this will probably have to change when discard goes 1120 * TODO: this will probably have to change when discard goes
1492 * back in. 1121 * back in.
1493 */ 1122 */
1494 cell_release_singleton(cell, bio); 1123 dm_cell_release_singleton(cell, bio);
1495 1124
1496 if (lookup_result.shared) 1125 if (lookup_result.shared)
1497 process_shared_bio(tc, bio, block, &lookup_result); 1126 process_shared_bio(tc, bio, block, &lookup_result);
@@ -1501,7 +1130,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
1501 1130
1502 case -ENODATA: 1131 case -ENODATA:
1503 if (bio_data_dir(bio) == READ && tc->origin_dev) { 1132 if (bio_data_dir(bio) == READ && tc->origin_dev) {
1504 cell_release_singleton(cell, bio); 1133 dm_cell_release_singleton(cell, bio);
1505 remap_to_origin_and_issue(tc, bio); 1134 remap_to_origin_and_issue(tc, bio);
1506 } else 1135 } else
1507 provision_block(tc, bio, block, cell); 1136 provision_block(tc, bio, block, cell);
@@ -1509,7 +1138,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
1509 1138
1510 default: 1139 default:
1511 DMERR("dm_thin_find_block() failed, error = %d", r); 1140 DMERR("dm_thin_find_block() failed, error = %d", r);
1512 cell_release_singleton(cell, bio); 1141 dm_cell_release_singleton(cell, bio);
1513 bio_io_error(bio); 1142 bio_io_error(bio);
1514 break; 1143 break;
1515 } 1144 }
@@ -1718,7 +1347,7 @@ static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *b
1718 1347
1719 h->tc = tc; 1348 h->tc = tc;
1720 h->shared_read_entry = NULL; 1349 h->shared_read_entry = NULL;
1721 h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : ds_inc(&pool->all_io_ds); 1350 h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : dm_deferred_entry_inc(pool->all_io_ds);
1722 h->overwrite_mapping = NULL; 1351 h->overwrite_mapping = NULL;
1723 1352
1724 return h; 1353 return h;
@@ -1928,7 +1557,7 @@ static void __pool_destroy(struct pool *pool)
1928 if (dm_pool_metadata_close(pool->pmd) < 0) 1557 if (dm_pool_metadata_close(pool->pmd) < 0)
1929 DMWARN("%s: dm_pool_metadata_close() failed.", __func__); 1558 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1930 1559
1931 prison_destroy(pool->prison); 1560 dm_bio_prison_destroy(pool->prison);
1932 dm_kcopyd_client_destroy(pool->copier); 1561 dm_kcopyd_client_destroy(pool->copier);
1933 1562
1934 if (pool->wq) 1563 if (pool->wq)
@@ -1938,6 +1567,8 @@ static void __pool_destroy(struct pool *pool)
1938 mempool_free(pool->next_mapping, pool->mapping_pool); 1567 mempool_free(pool->next_mapping, pool->mapping_pool);
1939 mempool_destroy(pool->mapping_pool); 1568 mempool_destroy(pool->mapping_pool);
1940 mempool_destroy(pool->endio_hook_pool); 1569 mempool_destroy(pool->endio_hook_pool);
1570 dm_deferred_set_destroy(pool->shared_read_ds);
1571 dm_deferred_set_destroy(pool->all_io_ds);
1941 kfree(pool); 1572 kfree(pool);
1942} 1573}
1943 1574
@@ -1976,7 +1607,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
1976 pool->sectors_per_block_shift = __ffs(block_size); 1607 pool->sectors_per_block_shift = __ffs(block_size);
1977 pool->low_water_blocks = 0; 1608 pool->low_water_blocks = 0;
1978 pool_features_init(&pool->pf); 1609 pool_features_init(&pool->pf);
1979 pool->prison = prison_create(PRISON_CELLS); 1610 pool->prison = dm_bio_prison_create(PRISON_CELLS);
1980 if (!pool->prison) { 1611 if (!pool->prison) {
1981 *error = "Error creating pool's bio prison"; 1612 *error = "Error creating pool's bio prison";
1982 err_p = ERR_PTR(-ENOMEM); 1613 err_p = ERR_PTR(-ENOMEM);
@@ -2012,8 +1643,20 @@ static struct pool *pool_create(struct mapped_device *pool_md,
2012 pool->low_water_triggered = 0; 1643 pool->low_water_triggered = 0;
2013 pool->no_free_space = 0; 1644 pool->no_free_space = 0;
2014 bio_list_init(&pool->retry_on_resume_list); 1645 bio_list_init(&pool->retry_on_resume_list);
2015 ds_init(&pool->shared_read_ds); 1646
2016 ds_init(&pool->all_io_ds); 1647 pool->shared_read_ds = dm_deferred_set_create();
1648 if (!pool->shared_read_ds) {
1649 *error = "Error creating pool's shared read deferred set";
1650 err_p = ERR_PTR(-ENOMEM);
1651 goto bad_shared_read_ds;
1652 }
1653
1654 pool->all_io_ds = dm_deferred_set_create();
1655 if (!pool->all_io_ds) {
1656 *error = "Error creating pool's all io deferred set";
1657 err_p = ERR_PTR(-ENOMEM);
1658 goto bad_all_io_ds;
1659 }
2017 1660
2018 pool->next_mapping = NULL; 1661 pool->next_mapping = NULL;
2019 pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE, 1662 pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
@@ -2042,11 +1685,15 @@ static struct pool *pool_create(struct mapped_device *pool_md,
2042bad_endio_hook_pool: 1685bad_endio_hook_pool:
2043 mempool_destroy(pool->mapping_pool); 1686 mempool_destroy(pool->mapping_pool);
2044bad_mapping_pool: 1687bad_mapping_pool:
1688 dm_deferred_set_destroy(pool->all_io_ds);
1689bad_all_io_ds:
1690 dm_deferred_set_destroy(pool->shared_read_ds);
1691bad_shared_read_ds:
2045 destroy_workqueue(pool->wq); 1692 destroy_workqueue(pool->wq);
2046bad_wq: 1693bad_wq:
2047 dm_kcopyd_client_destroy(pool->copier); 1694 dm_kcopyd_client_destroy(pool->copier);
2048bad_kcopyd_client: 1695bad_kcopyd_client:
2049 prison_destroy(pool->prison); 1696 dm_bio_prison_destroy(pool->prison);
2050bad_prison: 1697bad_prison:
2051 kfree(pool); 1698 kfree(pool);
2052bad_pool: 1699bad_pool:
@@ -2272,15 +1919,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2272 goto out_flags_changed; 1919 goto out_flags_changed;
2273 } 1920 }
2274 1921
2275 /*
2276 * The block layer requires discard_granularity to be a power of 2.
2277 */
2278 if (pf.discard_enabled && !is_power_of_2(block_size)) {
2279 ti->error = "Discard support must be disabled when the block size is not a power of 2";
2280 r = -EINVAL;
2281 goto out_flags_changed;
2282 }
2283
2284 pt->pool = pool; 1922 pt->pool = pool;
2285 pt->ti = ti; 1923 pt->ti = ti;
2286 pt->metadata_dev = metadata_dev; 1924 pt->metadata_dev = metadata_dev;
@@ -2762,6 +2400,11 @@ static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2762 return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); 2400 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2763} 2401}
2764 2402
2403static bool block_size_is_power_of_two(struct pool *pool)
2404{
2405 return pool->sectors_per_block_shift >= 0;
2406}
2407
2765static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits) 2408static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
2766{ 2409{
2767 struct pool *pool = pt->pool; 2410 struct pool *pool = pt->pool;
@@ -2775,8 +2418,15 @@ static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
2775 if (pt->adjusted_pf.discard_passdown) { 2418 if (pt->adjusted_pf.discard_passdown) {
2776 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits; 2419 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
2777 limits->discard_granularity = data_limits->discard_granularity; 2420 limits->discard_granularity = data_limits->discard_granularity;
2778 } else 2421 } else if (block_size_is_power_of_two(pool))
2779 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; 2422 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
2423 else
2424 /*
2425 * Use largest power of 2 that is a factor of sectors_per_block
2426 * but at least DATA_DEV_BLOCK_SIZE_MIN_SECTORS.
2427 */
2428 limits->discard_granularity = max(1 << (ffs(pool->sectors_per_block) - 1),
2429 DATA_DEV_BLOCK_SIZE_MIN_SECTORS) << SECTOR_SHIFT;
2780} 2430}
2781 2431
2782static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits) 2432static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
@@ -2804,7 +2454,7 @@ static struct target_type pool_target = {
2804 .name = "thin-pool", 2454 .name = "thin-pool",
2805 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | 2455 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
2806 DM_TARGET_IMMUTABLE, 2456 DM_TARGET_IMMUTABLE,
2807 .version = {1, 4, 0}, 2457 .version = {1, 5, 0},
2808 .module = THIS_MODULE, 2458 .module = THIS_MODULE,
2809 .ctr = pool_ctr, 2459 .ctr = pool_ctr,
2810 .dtr = pool_dtr, 2460 .dtr = pool_dtr,
@@ -2979,7 +2629,7 @@ static int thin_endio(struct dm_target *ti,
2979 2629
2980 if (h->shared_read_entry) { 2630 if (h->shared_read_entry) {
2981 INIT_LIST_HEAD(&work); 2631 INIT_LIST_HEAD(&work);
2982 ds_dec(h->shared_read_entry, &work); 2632 dm_deferred_entry_dec(h->shared_read_entry, &work);
2983 2633
2984 spin_lock_irqsave(&pool->lock, flags); 2634 spin_lock_irqsave(&pool->lock, flags);
2985 list_for_each_entry_safe(m, tmp, &work, list) { 2635 list_for_each_entry_safe(m, tmp, &work, list) {
@@ -2992,7 +2642,7 @@ static int thin_endio(struct dm_target *ti,
2992 2642
2993 if (h->all_io_entry) { 2643 if (h->all_io_entry) {
2994 INIT_LIST_HEAD(&work); 2644 INIT_LIST_HEAD(&work);
2995 ds_dec(h->all_io_entry, &work); 2645 dm_deferred_entry_dec(h->all_io_entry, &work);
2996 spin_lock_irqsave(&pool->lock, flags); 2646 spin_lock_irqsave(&pool->lock, flags);
2997 list_for_each_entry_safe(m, tmp, &work, list) 2647 list_for_each_entry_safe(m, tmp, &work, list)
2998 list_add(&m->list, &pool->prepared_discards); 2648 list_add(&m->list, &pool->prepared_discards);
@@ -3095,7 +2745,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
3095 2745
3096static struct target_type thin_target = { 2746static struct target_type thin_target = {
3097 .name = "thin", 2747 .name = "thin",
3098 .version = {1, 4, 0}, 2748 .version = {1, 5, 0},
3099 .module = THIS_MODULE, 2749 .module = THIS_MODULE,
3100 .ctr = thin_ctr, 2750 .ctr = thin_ctr,
3101 .dtr = thin_dtr, 2751 .dtr = thin_dtr,
@@ -3125,10 +2775,6 @@ static int __init dm_thin_init(void)
3125 2775
3126 r = -ENOMEM; 2776 r = -ENOMEM;
3127 2777
3128 _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
3129 if (!_cell_cache)
3130 goto bad_cell_cache;
3131
3132 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0); 2778 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
3133 if (!_new_mapping_cache) 2779 if (!_new_mapping_cache)
3134 goto bad_new_mapping_cache; 2780 goto bad_new_mapping_cache;
@@ -3142,8 +2788,6 @@ static int __init dm_thin_init(void)
3142bad_endio_hook_cache: 2788bad_endio_hook_cache:
3143 kmem_cache_destroy(_new_mapping_cache); 2789 kmem_cache_destroy(_new_mapping_cache);
3144bad_new_mapping_cache: 2790bad_new_mapping_cache:
3145 kmem_cache_destroy(_cell_cache);
3146bad_cell_cache:
3147 dm_unregister_target(&pool_target); 2791 dm_unregister_target(&pool_target);
3148bad_pool_target: 2792bad_pool_target:
3149 dm_unregister_target(&thin_target); 2793 dm_unregister_target(&thin_target);
@@ -3156,7 +2800,6 @@ static void dm_thin_exit(void)
3156 dm_unregister_target(&thin_target); 2800 dm_unregister_target(&thin_target);
3157 dm_unregister_target(&pool_target); 2801 dm_unregister_target(&pool_target);
3158 2802
3159 kmem_cache_destroy(_cell_cache);
3160 kmem_cache_destroy(_new_mapping_cache); 2803 kmem_cache_destroy(_new_mapping_cache);
3161 kmem_cache_destroy(_endio_hook_cache); 2804 kmem_cache_destroy(_endio_hook_cache);
3162} 2805}
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 892ae2766aa6..9e7328bb4030 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -438,7 +438,7 @@ static void verity_prefetch_io(struct dm_verity *v, struct dm_verity_io *io)
438 verity_hash_at_level(v, io->block, i, &hash_block_start, NULL); 438 verity_hash_at_level(v, io->block, i, &hash_block_start, NULL);
439 verity_hash_at_level(v, io->block + io->n_blocks - 1, i, &hash_block_end, NULL); 439 verity_hash_at_level(v, io->block + io->n_blocks - 1, i, &hash_block_end, NULL);
440 if (!i) { 440 if (!i) {
441 unsigned cluster = *(volatile unsigned *)&dm_verity_prefetch_cluster; 441 unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);
442 442
443 cluster >>= v->data_dev_block_bits; 443 cluster >>= v->data_dev_block_bits;
444 if (unlikely(!cluster)) 444 if (unlikely(!cluster))
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 67ffa391edcf..02db9183ca01 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -71,6 +71,7 @@ struct dm_target_io {
71 struct dm_io *io; 71 struct dm_io *io;
72 struct dm_target *ti; 72 struct dm_target *ti;
73 union map_info info; 73 union map_info info;
74 struct bio clone;
74}; 75};
75 76
76/* 77/*
@@ -86,12 +87,17 @@ struct dm_rq_target_io {
86}; 87};
87 88
88/* 89/*
89 * For request-based dm. 90 * For request-based dm - the bio clones we allocate are embedded in these
90 * One of these is allocated per bio. 91 * structs.
92 *
93 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
94 * the bioset is created - this means the bio has to come at the end of the
95 * struct.
91 */ 96 */
92struct dm_rq_clone_bio_info { 97struct dm_rq_clone_bio_info {
93 struct bio *orig; 98 struct bio *orig;
94 struct dm_rq_target_io *tio; 99 struct dm_rq_target_io *tio;
100 struct bio clone;
95}; 101};
96 102
97union map_info *dm_get_mapinfo(struct bio *bio) 103union map_info *dm_get_mapinfo(struct bio *bio)
@@ -209,8 +215,12 @@ struct dm_md_mempools {
209 215
210#define MIN_IOS 256 216#define MIN_IOS 256
211static struct kmem_cache *_io_cache; 217static struct kmem_cache *_io_cache;
212static struct kmem_cache *_tio_cache;
213static struct kmem_cache *_rq_tio_cache; 218static struct kmem_cache *_rq_tio_cache;
219
220/*
221 * Unused now, and needs to be deleted. But since io_pool is overloaded and it's
222 * still used for _io_cache, I'm leaving this for a later cleanup
223 */
214static struct kmem_cache *_rq_bio_info_cache; 224static struct kmem_cache *_rq_bio_info_cache;
215 225
216static int __init local_init(void) 226static int __init local_init(void)
@@ -222,14 +232,9 @@ static int __init local_init(void)
222 if (!_io_cache) 232 if (!_io_cache)
223 return r; 233 return r;
224 234
225 /* allocate a slab for the target ios */
226 _tio_cache = KMEM_CACHE(dm_target_io, 0);
227 if (!_tio_cache)
228 goto out_free_io_cache;
229
230 _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0); 235 _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
231 if (!_rq_tio_cache) 236 if (!_rq_tio_cache)
232 goto out_free_tio_cache; 237 goto out_free_io_cache;
233 238
234 _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0); 239 _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
235 if (!_rq_bio_info_cache) 240 if (!_rq_bio_info_cache)
@@ -255,8 +260,6 @@ out_free_rq_bio_info_cache:
255 kmem_cache_destroy(_rq_bio_info_cache); 260 kmem_cache_destroy(_rq_bio_info_cache);
256out_free_rq_tio_cache: 261out_free_rq_tio_cache:
257 kmem_cache_destroy(_rq_tio_cache); 262 kmem_cache_destroy(_rq_tio_cache);
258out_free_tio_cache:
259 kmem_cache_destroy(_tio_cache);
260out_free_io_cache: 263out_free_io_cache:
261 kmem_cache_destroy(_io_cache); 264 kmem_cache_destroy(_io_cache);
262 265
@@ -267,7 +270,6 @@ static void local_exit(void)
267{ 270{
268 kmem_cache_destroy(_rq_bio_info_cache); 271 kmem_cache_destroy(_rq_bio_info_cache);
269 kmem_cache_destroy(_rq_tio_cache); 272 kmem_cache_destroy(_rq_tio_cache);
270 kmem_cache_destroy(_tio_cache);
271 kmem_cache_destroy(_io_cache); 273 kmem_cache_destroy(_io_cache);
272 unregister_blkdev(_major, _name); 274 unregister_blkdev(_major, _name);
273 dm_uevent_exit(); 275 dm_uevent_exit();
@@ -453,7 +455,7 @@ static void free_io(struct mapped_device *md, struct dm_io *io)
453 455
454static void free_tio(struct mapped_device *md, struct dm_target_io *tio) 456static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
455{ 457{
456 mempool_free(tio, md->tio_pool); 458 bio_put(&tio->clone);
457} 459}
458 460
459static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md, 461static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
@@ -467,16 +469,6 @@ static void free_rq_tio(struct dm_rq_target_io *tio)
467 mempool_free(tio, tio->md->tio_pool); 469 mempool_free(tio, tio->md->tio_pool);
468} 470}
469 471
470static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
471{
472 return mempool_alloc(md->io_pool, GFP_ATOMIC);
473}
474
475static void free_bio_info(struct dm_rq_clone_bio_info *info)
476{
477 mempool_free(info, info->tio->md->io_pool);
478}
479
480static int md_in_flight(struct mapped_device *md) 472static int md_in_flight(struct mapped_device *md)
481{ 473{
482 return atomic_read(&md->pending[READ]) + 474 return atomic_read(&md->pending[READ]) +
@@ -681,13 +673,7 @@ static void clone_endio(struct bio *bio, int error)
681 } 673 }
682 } 674 }
683 675
684 /*
685 * Store md for cleanup instead of tio which is about to get freed.
686 */
687 bio->bi_private = md->bs;
688
689 free_tio(md, tio); 676 free_tio(md, tio);
690 bio_put(bio);
691 dec_pending(io, error); 677 dec_pending(io, error);
692} 678}
693 679
@@ -1007,12 +993,12 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1007} 993}
1008EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); 994EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
1009 995
1010static void __map_bio(struct dm_target *ti, struct bio *clone, 996static void __map_bio(struct dm_target *ti, struct dm_target_io *tio)
1011 struct dm_target_io *tio)
1012{ 997{
1013 int r; 998 int r;
1014 sector_t sector; 999 sector_t sector;
1015 struct mapped_device *md; 1000 struct mapped_device *md;
1001 struct bio *clone = &tio->clone;
1016 1002
1017 clone->bi_end_io = clone_endio; 1003 clone->bi_end_io = clone_endio;
1018 clone->bi_private = tio; 1004 clone->bi_private = tio;
@@ -1036,12 +1022,6 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
1036 /* error the io and bail out, or requeue it if needed */ 1022 /* error the io and bail out, or requeue it if needed */
1037 md = tio->io->md; 1023 md = tio->io->md;
1038 dec_pending(tio->io, r); 1024 dec_pending(tio->io, r);
1039 /*
1040 * Store bio_set for cleanup.
1041 */
1042 clone->bi_end_io = NULL;
1043 clone->bi_private = md->bs;
1044 bio_put(clone);
1045 free_tio(md, tio); 1025 free_tio(md, tio);
1046 } else if (r) { 1026 } else if (r) {
1047 DMWARN("unimplemented target map return value: %d", r); 1027 DMWARN("unimplemented target map return value: %d", r);
@@ -1059,25 +1039,16 @@ struct clone_info {
1059 unsigned short idx; 1039 unsigned short idx;
1060}; 1040};
1061 1041
1062static void dm_bio_destructor(struct bio *bio)
1063{
1064 struct bio_set *bs = bio->bi_private;
1065
1066 bio_free(bio, bs);
1067}
1068
1069/* 1042/*
1070 * Creates a little bio that just does part of a bvec. 1043 * Creates a little bio that just does part of a bvec.
1071 */ 1044 */
1072static struct bio *split_bvec(struct bio *bio, sector_t sector, 1045static void split_bvec(struct dm_target_io *tio, struct bio *bio,
1073 unsigned short idx, unsigned int offset, 1046 sector_t sector, unsigned short idx, unsigned int offset,
1074 unsigned int len, struct bio_set *bs) 1047 unsigned int len, struct bio_set *bs)
1075{ 1048{
1076 struct bio *clone; 1049 struct bio *clone = &tio->clone;
1077 struct bio_vec *bv = bio->bi_io_vec + idx; 1050 struct bio_vec *bv = bio->bi_io_vec + idx;
1078 1051
1079 clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
1080 clone->bi_destructor = dm_bio_destructor;
1081 *clone->bi_io_vec = *bv; 1052 *clone->bi_io_vec = *bv;
1082 1053
1083 clone->bi_sector = sector; 1054 clone->bi_sector = sector;
@@ -1090,26 +1061,23 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
1090 clone->bi_flags |= 1 << BIO_CLONED; 1061 clone->bi_flags |= 1 << BIO_CLONED;
1091 1062
1092 if (bio_integrity(bio)) { 1063 if (bio_integrity(bio)) {
1093 bio_integrity_clone(clone, bio, GFP_NOIO, bs); 1064 bio_integrity_clone(clone, bio, GFP_NOIO);
1094 bio_integrity_trim(clone, 1065 bio_integrity_trim(clone,
1095 bio_sector_offset(bio, idx, offset), len); 1066 bio_sector_offset(bio, idx, offset), len);
1096 } 1067 }
1097
1098 return clone;
1099} 1068}
1100 1069
1101/* 1070/*
1102 * Creates a bio that consists of range of complete bvecs. 1071 * Creates a bio that consists of range of complete bvecs.
1103 */ 1072 */
1104static struct bio *clone_bio(struct bio *bio, sector_t sector, 1073static void clone_bio(struct dm_target_io *tio, struct bio *bio,
1105 unsigned short idx, unsigned short bv_count, 1074 sector_t sector, unsigned short idx,
1106 unsigned int len, struct bio_set *bs) 1075 unsigned short bv_count, unsigned int len,
1076 struct bio_set *bs)
1107{ 1077{
1108 struct bio *clone; 1078 struct bio *clone = &tio->clone;
1109 1079
1110 clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
1111 __bio_clone(clone, bio); 1080 __bio_clone(clone, bio);
1112 clone->bi_destructor = dm_bio_destructor;
1113 clone->bi_sector = sector; 1081 clone->bi_sector = sector;
1114 clone->bi_idx = idx; 1082 clone->bi_idx = idx;
1115 clone->bi_vcnt = idx + bv_count; 1083 clone->bi_vcnt = idx + bv_count;
@@ -1117,20 +1085,22 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
1117 clone->bi_flags &= ~(1 << BIO_SEG_VALID); 1085 clone->bi_flags &= ~(1 << BIO_SEG_VALID);
1118 1086
1119 if (bio_integrity(bio)) { 1087 if (bio_integrity(bio)) {
1120 bio_integrity_clone(clone, bio, GFP_NOIO, bs); 1088 bio_integrity_clone(clone, bio, GFP_NOIO);
1121 1089
1122 if (idx != bio->bi_idx || clone->bi_size < bio->bi_size) 1090 if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
1123 bio_integrity_trim(clone, 1091 bio_integrity_trim(clone,
1124 bio_sector_offset(bio, idx, 0), len); 1092 bio_sector_offset(bio, idx, 0), len);
1125 } 1093 }
1126
1127 return clone;
1128} 1094}
1129 1095
1130static struct dm_target_io *alloc_tio(struct clone_info *ci, 1096static struct dm_target_io *alloc_tio(struct clone_info *ci,
1131 struct dm_target *ti) 1097 struct dm_target *ti, int nr_iovecs)
1132{ 1098{
1133 struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO); 1099 struct dm_target_io *tio;
1100 struct bio *clone;
1101
1102 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, ci->md->bs);
1103 tio = container_of(clone, struct dm_target_io, clone);
1134 1104
1135 tio->io = ci->io; 1105 tio->io = ci->io;
1136 tio->ti = ti; 1106 tio->ti = ti;
@@ -1142,8 +1112,8 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
1142static void __issue_target_request(struct clone_info *ci, struct dm_target *ti, 1112static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
1143 unsigned request_nr, sector_t len) 1113 unsigned request_nr, sector_t len)
1144{ 1114{
1145 struct dm_target_io *tio = alloc_tio(ci, ti); 1115 struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs);
1146 struct bio *clone; 1116 struct bio *clone = &tio->clone;
1147 1117
1148 tio->info.target_request_nr = request_nr; 1118 tio->info.target_request_nr = request_nr;
1149 1119
@@ -1152,15 +1122,14 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
1152 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush 1122 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
1153 * and discard, so no need for concern about wasted bvec allocations. 1123 * and discard, so no need for concern about wasted bvec allocations.
1154 */ 1124 */
1155 clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs); 1125
1156 __bio_clone(clone, ci->bio); 1126 __bio_clone(clone, ci->bio);
1157 clone->bi_destructor = dm_bio_destructor;
1158 if (len) { 1127 if (len) {
1159 clone->bi_sector = ci->sector; 1128 clone->bi_sector = ci->sector;
1160 clone->bi_size = to_bytes(len); 1129 clone->bi_size = to_bytes(len);
1161 } 1130 }
1162 1131
1163 __map_bio(ti, clone, tio); 1132 __map_bio(ti, tio);
1164} 1133}
1165 1134
1166static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti, 1135static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
@@ -1189,14 +1158,13 @@ static int __clone_and_map_empty_flush(struct clone_info *ci)
1189 */ 1158 */
1190static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti) 1159static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
1191{ 1160{
1192 struct bio *clone, *bio = ci->bio; 1161 struct bio *bio = ci->bio;
1193 struct dm_target_io *tio; 1162 struct dm_target_io *tio;
1194 1163
1195 tio = alloc_tio(ci, ti); 1164 tio = alloc_tio(ci, ti, bio->bi_max_vecs);
1196 clone = clone_bio(bio, ci->sector, ci->idx, 1165 clone_bio(tio, bio, ci->sector, ci->idx, bio->bi_vcnt - ci->idx,
1197 bio->bi_vcnt - ci->idx, ci->sector_count, 1166 ci->sector_count, ci->md->bs);
1198 ci->md->bs); 1167 __map_bio(ti, tio);
1199 __map_bio(ti, clone, tio);
1200 ci->sector_count = 0; 1168 ci->sector_count = 0;
1201} 1169}
1202 1170
@@ -1234,7 +1202,7 @@ static int __clone_and_map_discard(struct clone_info *ci)
1234 1202
1235static int __clone_and_map(struct clone_info *ci) 1203static int __clone_and_map(struct clone_info *ci)
1236{ 1204{
1237 struct bio *clone, *bio = ci->bio; 1205 struct bio *bio = ci->bio;
1238 struct dm_target *ti; 1206 struct dm_target *ti;
1239 sector_t len = 0, max; 1207 sector_t len = 0, max;
1240 struct dm_target_io *tio; 1208 struct dm_target_io *tio;
@@ -1274,10 +1242,10 @@ static int __clone_and_map(struct clone_info *ci)
1274 len += bv_len; 1242 len += bv_len;
1275 } 1243 }
1276 1244
1277 tio = alloc_tio(ci, ti); 1245 tio = alloc_tio(ci, ti, bio->bi_max_vecs);
1278 clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len, 1246 clone_bio(tio, bio, ci->sector, ci->idx, i - ci->idx, len,
1279 ci->md->bs); 1247 ci->md->bs);
1280 __map_bio(ti, clone, tio); 1248 __map_bio(ti, tio);
1281 1249
1282 ci->sector += len; 1250 ci->sector += len;
1283 ci->sector_count -= len; 1251 ci->sector_count -= len;
@@ -1302,12 +1270,11 @@ static int __clone_and_map(struct clone_info *ci)
1302 1270
1303 len = min(remaining, max); 1271 len = min(remaining, max);
1304 1272
1305 tio = alloc_tio(ci, ti); 1273 tio = alloc_tio(ci, ti, 1);
1306 clone = split_bvec(bio, ci->sector, ci->idx, 1274 split_bvec(tio, bio, ci->sector, ci->idx,
1307 bv->bv_offset + offset, len, 1275 bv->bv_offset + offset, len, ci->md->bs);
1308 ci->md->bs);
1309 1276
1310 __map_bio(ti, clone, tio); 1277 __map_bio(ti, tio);
1311 1278
1312 ci->sector += len; 1279 ci->sector += len;
1313 ci->sector_count -= len; 1280 ci->sector_count -= len;
@@ -1484,30 +1451,17 @@ void dm_dispatch_request(struct request *rq)
1484} 1451}
1485EXPORT_SYMBOL_GPL(dm_dispatch_request); 1452EXPORT_SYMBOL_GPL(dm_dispatch_request);
1486 1453
1487static void dm_rq_bio_destructor(struct bio *bio)
1488{
1489 struct dm_rq_clone_bio_info *info = bio->bi_private;
1490 struct mapped_device *md = info->tio->md;
1491
1492 free_bio_info(info);
1493 bio_free(bio, md->bs);
1494}
1495
1496static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, 1454static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
1497 void *data) 1455 void *data)
1498{ 1456{
1499 struct dm_rq_target_io *tio = data; 1457 struct dm_rq_target_io *tio = data;
1500 struct mapped_device *md = tio->md; 1458 struct dm_rq_clone_bio_info *info =
1501 struct dm_rq_clone_bio_info *info = alloc_bio_info(md); 1459 container_of(bio, struct dm_rq_clone_bio_info, clone);
1502
1503 if (!info)
1504 return -ENOMEM;
1505 1460
1506 info->orig = bio_orig; 1461 info->orig = bio_orig;
1507 info->tio = tio; 1462 info->tio = tio;
1508 bio->bi_end_io = end_clone_bio; 1463 bio->bi_end_io = end_clone_bio;
1509 bio->bi_private = info; 1464 bio->bi_private = info;
1510 bio->bi_destructor = dm_rq_bio_destructor;
1511 1465
1512 return 0; 1466 return 0;
1513} 1467}
@@ -1988,7 +1942,7 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
1988{ 1942{
1989 struct dm_md_mempools *p; 1943 struct dm_md_mempools *p;
1990 1944
1991 if (md->io_pool && md->tio_pool && md->bs) 1945 if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs)
1992 /* the md already has necessary mempools */ 1946 /* the md already has necessary mempools */
1993 goto out; 1947 goto out;
1994 1948
@@ -2765,13 +2719,18 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
2765 if (!pools->io_pool) 2719 if (!pools->io_pool)
2766 goto free_pools_and_out; 2720 goto free_pools_and_out;
2767 2721
2768 pools->tio_pool = (type == DM_TYPE_BIO_BASED) ? 2722 pools->tio_pool = NULL;
2769 mempool_create_slab_pool(MIN_IOS, _tio_cache) : 2723 if (type == DM_TYPE_REQUEST_BASED) {
2770 mempool_create_slab_pool(MIN_IOS, _rq_tio_cache); 2724 pools->tio_pool = mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
2771 if (!pools->tio_pool) 2725 if (!pools->tio_pool)
2772 goto free_io_pool_and_out; 2726 goto free_io_pool_and_out;
2727 }
2773 2728
2774 pools->bs = bioset_create(pool_size, 0); 2729 pools->bs = (type == DM_TYPE_BIO_BASED) ?
2730 bioset_create(pool_size,
2731 offsetof(struct dm_target_io, clone)) :
2732 bioset_create(pool_size,
2733 offsetof(struct dm_rq_clone_bio_info, clone));
2775 if (!pools->bs) 2734 if (!pools->bs)
2776 goto free_tio_pool_and_out; 2735 goto free_tio_pool_and_out;
2777 2736
@@ -2784,7 +2743,8 @@ free_bioset_and_out:
2784 bioset_free(pools->bs); 2743 bioset_free(pools->bs);
2785 2744
2786free_tio_pool_and_out: 2745free_tio_pool_and_out:
2787 mempool_destroy(pools->tio_pool); 2746 if (pools->tio_pool)
2747 mempool_destroy(pools->tio_pool);
2788 2748
2789free_io_pool_and_out: 2749free_io_pool_and_out:
2790 mempool_destroy(pools->io_pool); 2750 mempool_destroy(pools->io_pool);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 45135f69509c..5e7dc772f5de 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -315,8 +315,11 @@ static int run(struct mddev *mddev)
315 } 315 }
316 conf->nfaults = 0; 316 conf->nfaults = 0;
317 317
318 rdev_for_each(rdev, mddev) 318 rdev_for_each(rdev, mddev) {
319 conf->rdev = rdev; 319 conf->rdev = rdev;
320 disk_stack_limits(mddev->gendisk, rdev->bdev,
321 rdev->data_offset << 9);
322 }
320 323
321 md_set_array_sectors(mddev, faulty_size(mddev, 0, 0)); 324 md_set_array_sectors(mddev, faulty_size(mddev, 0, 0));
322 mddev->private = conf; 325 mddev->private = conf;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index fa211d80fc0a..21014836bdbf 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -138,6 +138,7 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
138 struct linear_conf *conf; 138 struct linear_conf *conf;
139 struct md_rdev *rdev; 139 struct md_rdev *rdev;
140 int i, cnt; 140 int i, cnt;
141 bool discard_supported = false;
141 142
142 conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(struct dev_info), 143 conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(struct dev_info),
143 GFP_KERNEL); 144 GFP_KERNEL);
@@ -171,6 +172,8 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
171 conf->array_sectors += rdev->sectors; 172 conf->array_sectors += rdev->sectors;
172 cnt++; 173 cnt++;
173 174
175 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
176 discard_supported = true;
174 } 177 }
175 if (cnt != raid_disks) { 178 if (cnt != raid_disks) {
176 printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n", 179 printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n",
@@ -178,6 +181,11 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
178 goto out; 181 goto out;
179 } 182 }
180 183
184 if (!discard_supported)
185 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
186 else
187 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
188
181 /* 189 /*
182 * Here we calculate the device offsets. 190 * Here we calculate the device offsets.
183 */ 191 */
@@ -244,7 +252,9 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
244 if (!newconf) 252 if (!newconf)
245 return -ENOMEM; 253 return -ENOMEM;
246 254
247 oldconf = rcu_dereference(mddev->private); 255 oldconf = rcu_dereference_protected(mddev->private,
256 lockdep_is_held(
257 &mddev->reconfig_mutex));
248 mddev->raid_disks++; 258 mddev->raid_disks++;
249 rcu_assign_pointer(mddev->private, newconf); 259 rcu_assign_pointer(mddev->private, newconf);
250 md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); 260 md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
@@ -256,7 +266,10 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
256 266
257static int linear_stop (struct mddev *mddev) 267static int linear_stop (struct mddev *mddev)
258{ 268{
259 struct linear_conf *conf = mddev->private; 269 struct linear_conf *conf =
270 rcu_dereference_protected(mddev->private,
271 lockdep_is_held(
272 &mddev->reconfig_mutex));
260 273
261 /* 274 /*
262 * We do not require rcu protection here since 275 * We do not require rcu protection here since
@@ -326,6 +339,14 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
326 bio->bi_sector = bio->bi_sector - start_sector 339 bio->bi_sector = bio->bi_sector - start_sector
327 + tmp_dev->rdev->data_offset; 340 + tmp_dev->rdev->data_offset;
328 rcu_read_unlock(); 341 rcu_read_unlock();
342
343 if (unlikely((bio->bi_rw & REQ_DISCARD) &&
344 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
345 /* Just ignore it */
346 bio_endio(bio, 0);
347 return;
348 }
349
329 generic_make_request(bio); 350 generic_make_request(bio);
330} 351}
331 352
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 308e87b417e0..9ab768acfb62 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -155,32 +155,17 @@ static int start_readonly;
155 * like bio_clone, but with a local bio set 155 * like bio_clone, but with a local bio set
156 */ 156 */
157 157
158static void mddev_bio_destructor(struct bio *bio)
159{
160 struct mddev *mddev, **mddevp;
161
162 mddevp = (void*)bio;
163 mddev = mddevp[-1];
164
165 bio_free(bio, mddev->bio_set);
166}
167
168struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, 158struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
169 struct mddev *mddev) 159 struct mddev *mddev)
170{ 160{
171 struct bio *b; 161 struct bio *b;
172 struct mddev **mddevp;
173 162
174 if (!mddev || !mddev->bio_set) 163 if (!mddev || !mddev->bio_set)
175 return bio_alloc(gfp_mask, nr_iovecs); 164 return bio_alloc(gfp_mask, nr_iovecs);
176 165
177 b = bio_alloc_bioset(gfp_mask, nr_iovecs, 166 b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
178 mddev->bio_set);
179 if (!b) 167 if (!b)
180 return NULL; 168 return NULL;
181 mddevp = (void*)b;
182 mddevp[-1] = mddev;
183 b->bi_destructor = mddev_bio_destructor;
184 return b; 169 return b;
185} 170}
186EXPORT_SYMBOL_GPL(bio_alloc_mddev); 171EXPORT_SYMBOL_GPL(bio_alloc_mddev);
@@ -188,32 +173,10 @@ EXPORT_SYMBOL_GPL(bio_alloc_mddev);
188struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, 173struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
189 struct mddev *mddev) 174 struct mddev *mddev)
190{ 175{
191 struct bio *b;
192 struct mddev **mddevp;
193
194 if (!mddev || !mddev->bio_set) 176 if (!mddev || !mddev->bio_set)
195 return bio_clone(bio, gfp_mask); 177 return bio_clone(bio, gfp_mask);
196 178
197 b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, 179 return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
198 mddev->bio_set);
199 if (!b)
200 return NULL;
201 mddevp = (void*)b;
202 mddevp[-1] = mddev;
203 b->bi_destructor = mddev_bio_destructor;
204 __bio_clone(b, bio);
205 if (bio_integrity(bio)) {
206 int ret;
207
208 ret = bio_integrity_clone(b, bio, gfp_mask, mddev->bio_set);
209
210 if (ret < 0) {
211 bio_put(b);
212 return NULL;
213 }
214 }
215
216 return b;
217} 180}
218EXPORT_SYMBOL_GPL(bio_clone_mddev); 181EXPORT_SYMBOL_GPL(bio_clone_mddev);
219 182
@@ -711,7 +674,18 @@ static struct md_rdev * find_rdev_nr(struct mddev *mddev, int nr)
711 return NULL; 674 return NULL;
712} 675}
713 676
714static struct md_rdev * find_rdev(struct mddev * mddev, dev_t dev) 677static struct md_rdev *find_rdev_nr_rcu(struct mddev *mddev, int nr)
678{
679 struct md_rdev *rdev;
680
681 rdev_for_each_rcu(rdev, mddev)
682 if (rdev->desc_nr == nr)
683 return rdev;
684
685 return NULL;
686}
687
688static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
715{ 689{
716 struct md_rdev *rdev; 690 struct md_rdev *rdev;
717 691
@@ -722,6 +696,17 @@ static struct md_rdev * find_rdev(struct mddev * mddev, dev_t dev)
722 return NULL; 696 return NULL;
723} 697}
724 698
699static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
700{
701 struct md_rdev *rdev;
702
703 rdev_for_each_rcu(rdev, mddev)
704 if (rdev->bdev->bd_dev == dev)
705 return rdev;
706
707 return NULL;
708}
709
725static struct md_personality *find_pers(int level, char *clevel) 710static struct md_personality *find_pers(int level, char *clevel)
726{ 711{
727 struct md_personality *pers; 712 struct md_personality *pers;
@@ -2059,8 +2044,14 @@ EXPORT_SYMBOL(md_integrity_register);
2059/* Disable data integrity if non-capable/non-matching disk is being added */ 2044/* Disable data integrity if non-capable/non-matching disk is being added */
2060void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) 2045void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
2061{ 2046{
2062 struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev); 2047 struct blk_integrity *bi_rdev;
2063 struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk); 2048 struct blk_integrity *bi_mddev;
2049
2050 if (!mddev->gendisk)
2051 return;
2052
2053 bi_rdev = bdev_get_integrity(rdev->bdev);
2054 bi_mddev = blk_get_integrity(mddev->gendisk);
2064 2055
2065 if (!bi_mddev) /* nothing to do */ 2056 if (!bi_mddev) /* nothing to do */
2066 return; 2057 return;
@@ -3791,6 +3782,8 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len)
3791 return -EINVAL; 3782 return -EINVAL;
3792 3783
3793 mddev->recovery_cp = n; 3784 mddev->recovery_cp = n;
3785 if (mddev->pers)
3786 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3794 return len; 3787 return len;
3795} 3788}
3796static struct md_sysfs_entry md_resync_start = 3789static struct md_sysfs_entry md_resync_start =
@@ -4268,6 +4261,13 @@ action_store(struct mddev *mddev, const char *page, size_t len)
4268 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 4261 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4269 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4262 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4270 } 4263 }
4264 if (mddev->ro == 2) {
4265 /* A write to sync_action is enough to justify
4266 * canceling read-auto mode
4267 */
4268 mddev->ro = 0;
4269 md_wakeup_thread(mddev->sync_thread);
4270 }
4271 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4271 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4272 md_wakeup_thread(mddev->thread); 4272 md_wakeup_thread(mddev->thread);
4273 sysfs_notify_dirent_safe(mddev->sysfs_action); 4273 sysfs_notify_dirent_safe(mddev->sysfs_action);
@@ -4278,7 +4278,8 @@ static ssize_t
4278mismatch_cnt_show(struct mddev *mddev, char *page) 4278mismatch_cnt_show(struct mddev *mddev, char *page)
4279{ 4279{
4280 return sprintf(page, "%llu\n", 4280 return sprintf(page, "%llu\n",
4281 (unsigned long long) mddev->resync_mismatches); 4281 (unsigned long long)
4282 atomic64_read(&mddev->resync_mismatches));
4282} 4283}
4283 4284
4284static struct md_sysfs_entry md_scan_mode = 4285static struct md_sysfs_entry md_scan_mode =
@@ -4399,6 +4400,10 @@ sync_completed_show(struct mddev *mddev, char *page)
4399 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4400 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4400 return sprintf(page, "none\n"); 4401 return sprintf(page, "none\n");
4401 4402
4403 if (mddev->curr_resync == 1 ||
4404 mddev->curr_resync == 2)
4405 return sprintf(page, "delayed\n");
4406
4402 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 4407 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
4403 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 4408 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4404 max_sectors = mddev->resync_max_sectors; 4409 max_sectors = mddev->resync_max_sectors;
@@ -5006,8 +5011,7 @@ int md_run(struct mddev *mddev)
5006 } 5011 }
5007 5012
5008 if (mddev->bio_set == NULL) 5013 if (mddev->bio_set == NULL)
5009 mddev->bio_set = bioset_create(BIO_POOL_SIZE, 5014 mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
5010 sizeof(struct mddev *));
5011 5015
5012 spin_lock(&pers_lock); 5016 spin_lock(&pers_lock);
5013 pers = find_pers(mddev->level, mddev->clevel); 5017 pers = find_pers(mddev->level, mddev->clevel);
@@ -5245,7 +5249,7 @@ static void md_clean(struct mddev *mddev)
5245 mddev->new_layout = 0; 5249 mddev->new_layout = 0;
5246 mddev->new_chunk_sectors = 0; 5250 mddev->new_chunk_sectors = 0;
5247 mddev->curr_resync = 0; 5251 mddev->curr_resync = 0;
5248 mddev->resync_mismatches = 0; 5252 atomic64_set(&mddev->resync_mismatches, 0);
5249 mddev->suspend_lo = mddev->suspend_hi = 0; 5253 mddev->suspend_lo = mddev->suspend_hi = 0;
5250 mddev->sync_speed_min = mddev->sync_speed_max = 0; 5254 mddev->sync_speed_min = mddev->sync_speed_max = 0;
5251 mddev->recovery = 0; 5255 mddev->recovery = 0;
@@ -5547,8 +5551,9 @@ static int get_array_info(struct mddev * mddev, void __user * arg)
5547 int nr,working,insync,failed,spare; 5551 int nr,working,insync,failed,spare;
5548 struct md_rdev *rdev; 5552 struct md_rdev *rdev;
5549 5553
5550 nr=working=insync=failed=spare=0; 5554 nr = working = insync = failed = spare = 0;
5551 rdev_for_each(rdev, mddev) { 5555 rcu_read_lock();
5556 rdev_for_each_rcu(rdev, mddev) {
5552 nr++; 5557 nr++;
5553 if (test_bit(Faulty, &rdev->flags)) 5558 if (test_bit(Faulty, &rdev->flags))
5554 failed++; 5559 failed++;
@@ -5560,6 +5565,7 @@ static int get_array_info(struct mddev * mddev, void __user * arg)
5560 spare++; 5565 spare++;
5561 } 5566 }
5562 } 5567 }
5568 rcu_read_unlock();
5563 5569
5564 info.major_version = mddev->major_version; 5570 info.major_version = mddev->major_version;
5565 info.minor_version = mddev->minor_version; 5571 info.minor_version = mddev->minor_version;
@@ -5643,7 +5649,8 @@ static int get_disk_info(struct mddev * mddev, void __user * arg)
5643 if (copy_from_user(&info, arg, sizeof(info))) 5649 if (copy_from_user(&info, arg, sizeof(info)))
5644 return -EFAULT; 5650 return -EFAULT;
5645 5651
5646 rdev = find_rdev_nr(mddev, info.number); 5652 rcu_read_lock();
5653 rdev = find_rdev_nr_rcu(mddev, info.number);
5647 if (rdev) { 5654 if (rdev) {
5648 info.major = MAJOR(rdev->bdev->bd_dev); 5655 info.major = MAJOR(rdev->bdev->bd_dev);
5649 info.minor = MINOR(rdev->bdev->bd_dev); 5656 info.minor = MINOR(rdev->bdev->bd_dev);
@@ -5662,6 +5669,7 @@ static int get_disk_info(struct mddev * mddev, void __user * arg)
5662 info.raid_disk = -1; 5669 info.raid_disk = -1;
5663 info.state = (1<<MD_DISK_REMOVED); 5670 info.state = (1<<MD_DISK_REMOVED);
5664 } 5671 }
5672 rcu_read_unlock();
5665 5673
5666 if (copy_to_user(arg, &info, sizeof(info))) 5674 if (copy_to_user(arg, &info, sizeof(info)))
5667 return -EFAULT; 5675 return -EFAULT;
@@ -6270,18 +6278,22 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
6270static int set_disk_faulty(struct mddev *mddev, dev_t dev) 6278static int set_disk_faulty(struct mddev *mddev, dev_t dev)
6271{ 6279{
6272 struct md_rdev *rdev; 6280 struct md_rdev *rdev;
6281 int err = 0;
6273 6282
6274 if (mddev->pers == NULL) 6283 if (mddev->pers == NULL)
6275 return -ENODEV; 6284 return -ENODEV;
6276 6285
6277 rdev = find_rdev(mddev, dev); 6286 rcu_read_lock();
6287 rdev = find_rdev_rcu(mddev, dev);
6278 if (!rdev) 6288 if (!rdev)
6279 return -ENODEV; 6289 err = -ENODEV;
6280 6290 else {
6281 md_error(mddev, rdev); 6291 md_error(mddev, rdev);
6282 if (!test_bit(Faulty, &rdev->flags)) 6292 if (!test_bit(Faulty, &rdev->flags))
6283 return -EBUSY; 6293 err = -EBUSY;
6284 return 0; 6294 }
6295 rcu_read_unlock();
6296 return err;
6285} 6297}
6286 6298
6287/* 6299/*
@@ -6353,6 +6365,27 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
6353 goto abort; 6365 goto abort;
6354 } 6366 }
6355 6367
6368 /* Some actions do not requires the mutex */
6369 switch (cmd) {
6370 case GET_ARRAY_INFO:
6371 if (!mddev->raid_disks && !mddev->external)
6372 err = -ENODEV;
6373 else
6374 err = get_array_info(mddev, argp);
6375 goto abort;
6376
6377 case GET_DISK_INFO:
6378 if (!mddev->raid_disks && !mddev->external)
6379 err = -ENODEV;
6380 else
6381 err = get_disk_info(mddev, argp);
6382 goto abort;
6383
6384 case SET_DISK_FAULTY:
6385 err = set_disk_faulty(mddev, new_decode_dev(arg));
6386 goto abort;
6387 }
6388
6356 err = mddev_lock(mddev); 6389 err = mddev_lock(mddev);
6357 if (err) { 6390 if (err) {
6358 printk(KERN_INFO 6391 printk(KERN_INFO
@@ -6425,18 +6458,10 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
6425 */ 6458 */
6426 switch (cmd) 6459 switch (cmd)
6427 { 6460 {
6428 case GET_ARRAY_INFO:
6429 err = get_array_info(mddev, argp);
6430 goto done_unlock;
6431
6432 case GET_BITMAP_FILE: 6461 case GET_BITMAP_FILE:
6433 err = get_bitmap_file(mddev, argp); 6462 err = get_bitmap_file(mddev, argp);
6434 goto done_unlock; 6463 goto done_unlock;
6435 6464
6436 case GET_DISK_INFO:
6437 err = get_disk_info(mddev, argp);
6438 goto done_unlock;
6439
6440 case RESTART_ARRAY_RW: 6465 case RESTART_ARRAY_RW:
6441 err = restart_array(mddev); 6466 err = restart_array(mddev);
6442 goto done_unlock; 6467 goto done_unlock;
@@ -6518,10 +6543,6 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
6518 err = hot_add_disk(mddev, new_decode_dev(arg)); 6543 err = hot_add_disk(mddev, new_decode_dev(arg));
6519 goto done_unlock; 6544 goto done_unlock;
6520 6545
6521 case SET_DISK_FAULTY:
6522 err = set_disk_faulty(mddev, new_decode_dev(arg));
6523 goto done_unlock;
6524
6525 case RUN_ARRAY: 6546 case RUN_ARRAY:
6526 err = do_md_run(mddev); 6547 err = do_md_run(mddev);
6527 goto done_unlock; 6548 goto done_unlock;
@@ -6679,7 +6700,7 @@ static int md_thread(void * arg)
6679 6700
6680 clear_bit(THREAD_WAKEUP, &thread->flags); 6701 clear_bit(THREAD_WAKEUP, &thread->flags);
6681 if (!kthread_should_stop()) 6702 if (!kthread_should_stop())
6682 thread->run(thread->mddev); 6703 thread->run(thread);
6683 } 6704 }
6684 6705
6685 return 0; 6706 return 0;
@@ -6694,8 +6715,8 @@ void md_wakeup_thread(struct md_thread *thread)
6694 } 6715 }
6695} 6716}
6696 6717
6697struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev *mddev, 6718struct md_thread *md_register_thread(void (*run) (struct md_thread *),
6698 const char *name) 6719 struct mddev *mddev, const char *name)
6699{ 6720{
6700 struct md_thread *thread; 6721 struct md_thread *thread;
6701 6722
@@ -6790,7 +6811,11 @@ static void status_resync(struct seq_file *seq, struct mddev * mddev)
6790 int scale; 6811 int scale;
6791 unsigned int per_milli; 6812 unsigned int per_milli;
6792 6813
6793 resync = mddev->curr_resync - atomic_read(&mddev->recovery_active); 6814 if (mddev->curr_resync <= 3)
6815 resync = 0;
6816 else
6817 resync = mddev->curr_resync
6818 - atomic_read(&mddev->recovery_active);
6794 6819
6795 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 6820 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
6796 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 6821 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
@@ -7016,7 +7041,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
7016 if (mddev->curr_resync > 2) { 7041 if (mddev->curr_resync > 2) {
7017 status_resync(seq, mddev); 7042 status_resync(seq, mddev);
7018 seq_printf(seq, "\n "); 7043 seq_printf(seq, "\n ");
7019 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2) 7044 } else if (mddev->curr_resync >= 1)
7020 seq_printf(seq, "\tresync=DELAYED\n "); 7045 seq_printf(seq, "\tresync=DELAYED\n ");
7021 else if (mddev->recovery_cp < MaxSector) 7046 else if (mddev->recovery_cp < MaxSector)
7022 seq_printf(seq, "\tresync=PENDING\n "); 7047 seq_printf(seq, "\tresync=PENDING\n ");
@@ -7244,8 +7269,9 @@ EXPORT_SYMBOL_GPL(md_allow_write);
7244 7269
7245#define SYNC_MARKS 10 7270#define SYNC_MARKS 10
7246#define SYNC_MARK_STEP (3*HZ) 7271#define SYNC_MARK_STEP (3*HZ)
7247void md_do_sync(struct mddev *mddev) 7272void md_do_sync(struct md_thread *thread)
7248{ 7273{
7274 struct mddev *mddev = thread->mddev;
7249 struct mddev *mddev2; 7275 struct mddev *mddev2;
7250 unsigned int currspeed = 0, 7276 unsigned int currspeed = 0,
7251 window; 7277 window;
@@ -7349,7 +7375,7 @@ void md_do_sync(struct mddev *mddev)
7349 * which defaults to physical size, but can be virtual size 7375 * which defaults to physical size, but can be virtual size
7350 */ 7376 */
7351 max_sectors = mddev->resync_max_sectors; 7377 max_sectors = mddev->resync_max_sectors;
7352 mddev->resync_mismatches = 0; 7378 atomic64_set(&mddev->resync_mismatches, 0);
7353 /* we don't use the checkpoint if there's a bitmap */ 7379 /* we don't use the checkpoint if there's a bitmap */
7354 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7380 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7355 j = mddev->resync_min; 7381 j = mddev->resync_min;
@@ -7405,8 +7431,11 @@ void md_do_sync(struct mddev *mddev)
7405 "md: resuming %s of %s from checkpoint.\n", 7431 "md: resuming %s of %s from checkpoint.\n",
7406 desc, mdname(mddev)); 7432 desc, mdname(mddev));
7407 mddev->curr_resync = j; 7433 mddev->curr_resync = j;
7408 } 7434 } else
7435 mddev->curr_resync = 3; /* no longer delayed */
7409 mddev->curr_resync_completed = j; 7436 mddev->curr_resync_completed = j;
7437 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
7438 md_new_event(mddev);
7410 7439
7411 blk_start_plug(&plug); 7440 blk_start_plug(&plug);
7412 while (j < max_sectors) { 7441 while (j < max_sectors) {
@@ -7459,7 +7488,8 @@ void md_do_sync(struct mddev *mddev)
7459 break; 7488 break;
7460 7489
7461 j += sectors; 7490 j += sectors;
7462 if (j>1) mddev->curr_resync = j; 7491 if (j > 2)
7492 mddev->curr_resync = j;
7463 mddev->curr_mark_cnt = io_sectors; 7493 mddev->curr_mark_cnt = io_sectors;
7464 if (last_check == 0) 7494 if (last_check == 0)
7465 /* this is the earliest that rebuild will be 7495 /* this is the earliest that rebuild will be
@@ -7581,8 +7611,6 @@ static int remove_and_add_spares(struct mddev *mddev)
7581 int spares = 0; 7611 int spares = 0;
7582 int removed = 0; 7612 int removed = 0;
7583 7613
7584 mddev->curr_resync_completed = 0;
7585
7586 rdev_for_each(rdev, mddev) 7614 rdev_for_each(rdev, mddev)
7587 if (rdev->raid_disk >= 0 && 7615 if (rdev->raid_disk >= 0 &&
7588 !test_bit(Blocked, &rdev->flags) && 7616 !test_bit(Blocked, &rdev->flags) &&
@@ -7777,6 +7805,7 @@ void md_check_recovery(struct mddev *mddev)
7777 /* Set RUNNING before clearing NEEDED to avoid 7805 /* Set RUNNING before clearing NEEDED to avoid
7778 * any transients in the value of "sync_action". 7806 * any transients in the value of "sync_action".
7779 */ 7807 */
7808 mddev->curr_resync_completed = 0;
7780 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7809 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7781 /* Clear some bits that don't mean anything, but 7810 /* Clear some bits that don't mean anything, but
7782 * might be left set 7811 * might be left set
@@ -7790,7 +7819,7 @@ void md_check_recovery(struct mddev *mddev)
7790 /* no recovery is running. 7819 /* no recovery is running.
7791 * remove any failed drives, then 7820 * remove any failed drives, then
7792 * add spares if possible. 7821 * add spares if possible.
7793 * Spare are also removed and re-added, to allow 7822 * Spares are also removed and re-added, to allow
7794 * the personality to fail the re-add. 7823 * the personality to fail the re-add.
7795 */ 7824 */
7796 7825
diff --git a/drivers/md/md.h b/drivers/md/md.h
index f385b038589d..af443ab868db 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -282,7 +282,7 @@ struct mddev {
282 282
283 sector_t resync_max_sectors; /* may be set by personality */ 283 sector_t resync_max_sectors; /* may be set by personality */
284 284
285 sector_t resync_mismatches; /* count of sectors where 285 atomic64_t resync_mismatches; /* count of sectors where
286 * parity/replica mismatch found 286 * parity/replica mismatch found
287 */ 287 */
288 288
@@ -540,12 +540,13 @@ static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
540 list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set) 540 list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
541 541
542struct md_thread { 542struct md_thread {
543 void (*run) (struct mddev *mddev); 543 void (*run) (struct md_thread *thread);
544 struct mddev *mddev; 544 struct mddev *mddev;
545 wait_queue_head_t wqueue; 545 wait_queue_head_t wqueue;
546 unsigned long flags; 546 unsigned long flags;
547 struct task_struct *tsk; 547 struct task_struct *tsk;
548 unsigned long timeout; 548 unsigned long timeout;
549 void *private;
549}; 550};
550 551
551#define THREAD_WAKEUP 0 552#define THREAD_WAKEUP 0
@@ -584,7 +585,7 @@ static inline void safe_put_page(struct page *p)
584extern int register_md_personality(struct md_personality *p); 585extern int register_md_personality(struct md_personality *p);
585extern int unregister_md_personality(struct md_personality *p); 586extern int unregister_md_personality(struct md_personality *p);
586extern struct md_thread *md_register_thread( 587extern struct md_thread *md_register_thread(
587 void (*run)(struct mddev *mddev), 588 void (*run)(struct md_thread *thread),
588 struct mddev *mddev, 589 struct mddev *mddev,
589 const char *name); 590 const char *name);
590extern void md_unregister_thread(struct md_thread **threadp); 591extern void md_unregister_thread(struct md_thread **threadp);
@@ -603,7 +604,7 @@ extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
603extern void md_super_wait(struct mddev *mddev); 604extern void md_super_wait(struct mddev *mddev);
604extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, 605extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
605 struct page *page, int rw, bool metadata_op); 606 struct page *page, int rw, bool metadata_op);
606extern void md_do_sync(struct mddev *mddev); 607extern void md_do_sync(struct md_thread *thread);
607extern void md_new_event(struct mddev *mddev); 608extern void md_new_event(struct mddev *mddev);
608extern int md_allow_write(struct mddev *mddev); 609extern int md_allow_write(struct mddev *mddev);
609extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev); 610extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 61a1833ebaf3..1642eae75a33 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -335,8 +335,9 @@ abort:
335 * 3. Performs writes following reads for array syncronising. 335 * 3. Performs writes following reads for array syncronising.
336 */ 336 */
337 337
338static void multipathd (struct mddev *mddev) 338static void multipathd(struct md_thread *thread)
339{ 339{
340 struct mddev *mddev = thread->mddev;
340 struct multipath_bh *mp_bh; 341 struct multipath_bh *mp_bh;
341 struct bio *bio; 342 struct bio *bio;
342 unsigned long flags; 343 unsigned long flags;
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index d77602d63c83..f3a9af8cdec3 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -434,14 +434,14 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
434 if (ref_count && !old) { 434 if (ref_count && !old) {
435 *ev = SM_ALLOC; 435 *ev = SM_ALLOC;
436 ll->nr_allocated++; 436 ll->nr_allocated++;
437 ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) - 1); 437 le32_add_cpu(&ie_disk.nr_free, -1);
438 if (le32_to_cpu(ie_disk.none_free_before) == bit) 438 if (le32_to_cpu(ie_disk.none_free_before) == bit)
439 ie_disk.none_free_before = cpu_to_le32(bit + 1); 439 ie_disk.none_free_before = cpu_to_le32(bit + 1);
440 440
441 } else if (old && !ref_count) { 441 } else if (old && !ref_count) {
442 *ev = SM_FREE; 442 *ev = SM_FREE;
443 ll->nr_allocated--; 443 ll->nr_allocated--;
444 ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) + 1); 444 le32_add_cpu(&ie_disk.nr_free, 1);
445 ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit)); 445 ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit));
446 } 446 }
447 447
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index de63a1fc3737..24b359717a7e 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -88,6 +88,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
88 char b[BDEVNAME_SIZE]; 88 char b[BDEVNAME_SIZE];
89 char b2[BDEVNAME_SIZE]; 89 char b2[BDEVNAME_SIZE];
90 struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL); 90 struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
91 bool discard_supported = false;
91 92
92 if (!conf) 93 if (!conf)
93 return -ENOMEM; 94 return -ENOMEM;
@@ -195,6 +196,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
195 if (!smallest || (rdev1->sectors < smallest->sectors)) 196 if (!smallest || (rdev1->sectors < smallest->sectors))
196 smallest = rdev1; 197 smallest = rdev1;
197 cnt++; 198 cnt++;
199
200 if (blk_queue_discard(bdev_get_queue(rdev1->bdev)))
201 discard_supported = true;
198 } 202 }
199 if (cnt != mddev->raid_disks) { 203 if (cnt != mddev->raid_disks) {
200 printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - " 204 printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
@@ -272,6 +276,11 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
272 blk_queue_io_opt(mddev->queue, 276 blk_queue_io_opt(mddev->queue,
273 (mddev->chunk_sectors << 9) * mddev->raid_disks); 277 (mddev->chunk_sectors << 9) * mddev->raid_disks);
274 278
279 if (!discard_supported)
280 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
281 else
282 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
283
275 pr_debug("md/raid0:%s: done.\n", mdname(mddev)); 284 pr_debug("md/raid0:%s: done.\n", mdname(mddev));
276 *private_conf = conf; 285 *private_conf = conf;
277 286
@@ -422,6 +431,8 @@ static int raid0_run(struct mddev *mddev)
422 if (md_check_no_bitmap(mddev)) 431 if (md_check_no_bitmap(mddev))
423 return -EINVAL; 432 return -EINVAL;
424 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); 433 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
434 blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
435 blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
425 436
426 /* if private is not null, we are here after takeover */ 437 /* if private is not null, we are here after takeover */
427 if (mddev->private == NULL) { 438 if (mddev->private == NULL) {
@@ -509,7 +520,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
509 sector_t sector = bio->bi_sector; 520 sector_t sector = bio->bi_sector;
510 struct bio_pair *bp; 521 struct bio_pair *bp;
511 /* Sanity check -- queue functions should prevent this happening */ 522 /* Sanity check -- queue functions should prevent this happening */
512 if (bio->bi_vcnt != 1 || 523 if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
513 bio->bi_idx != 0) 524 bio->bi_idx != 0)
514 goto bad_map; 525 goto bad_map;
515 /* This is a one page bio that upper layers 526 /* This is a one page bio that upper layers
@@ -535,6 +546,13 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
535 bio->bi_sector = sector_offset + zone->dev_start + 546 bio->bi_sector = sector_offset + zone->dev_start +
536 tmp_dev->data_offset; 547 tmp_dev->data_offset;
537 548
549 if (unlikely((bio->bi_rw & REQ_DISCARD) &&
550 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
551 /* Just ignore it */
552 bio_endio(bio, 0);
553 return;
554 }
555
538 generic_make_request(bio); 556 generic_make_request(bio);
539 return; 557 return;
540 558
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 611b5f797618..636bae0405e8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -333,9 +333,10 @@ static void raid1_end_read_request(struct bio *bio, int error)
333 spin_unlock_irqrestore(&conf->device_lock, flags); 333 spin_unlock_irqrestore(&conf->device_lock, flags);
334 } 334 }
335 335
336 if (uptodate) 336 if (uptodate) {
337 raid_end_bio_io(r1_bio); 337 raid_end_bio_io(r1_bio);
338 else { 338 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
339 } else {
339 /* 340 /*
340 * oops, read error: 341 * oops, read error:
341 */ 342 */
@@ -349,9 +350,8 @@ static void raid1_end_read_request(struct bio *bio, int error)
349 (unsigned long long)r1_bio->sector); 350 (unsigned long long)r1_bio->sector);
350 set_bit(R1BIO_ReadError, &r1_bio->state); 351 set_bit(R1BIO_ReadError, &r1_bio->state);
351 reschedule_retry(r1_bio); 352 reschedule_retry(r1_bio);
353 /* don't drop the reference on read_disk yet */
352 } 354 }
353
354 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
355} 355}
356 356
357static void close_write(struct r1bio *r1_bio) 357static void close_write(struct r1bio *r1_bio)
@@ -781,7 +781,12 @@ static void flush_pending_writes(struct r1conf *conf)
781 while (bio) { /* submit pending writes */ 781 while (bio) { /* submit pending writes */
782 struct bio *next = bio->bi_next; 782 struct bio *next = bio->bi_next;
783 bio->bi_next = NULL; 783 bio->bi_next = NULL;
784 generic_make_request(bio); 784 if (unlikely((bio->bi_rw & REQ_DISCARD) &&
785 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
786 /* Just ignore it */
787 bio_endio(bio, 0);
788 else
789 generic_make_request(bio);
785 bio = next; 790 bio = next;
786 } 791 }
787 } else 792 } else
@@ -994,6 +999,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
994 const int rw = bio_data_dir(bio); 999 const int rw = bio_data_dir(bio);
995 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); 1000 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
996 const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); 1001 const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
1002 const unsigned long do_discard = (bio->bi_rw
1003 & (REQ_DISCARD | REQ_SECURE));
997 struct md_rdev *blocked_rdev; 1004 struct md_rdev *blocked_rdev;
998 struct blk_plug_cb *cb; 1005 struct blk_plug_cb *cb;
999 struct raid1_plug_cb *plug = NULL; 1006 struct raid1_plug_cb *plug = NULL;
@@ -1295,7 +1302,7 @@ read_again:
1295 conf->mirrors[i].rdev->data_offset); 1302 conf->mirrors[i].rdev->data_offset);
1296 mbio->bi_bdev = conf->mirrors[i].rdev->bdev; 1303 mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1297 mbio->bi_end_io = raid1_end_write_request; 1304 mbio->bi_end_io = raid1_end_write_request;
1298 mbio->bi_rw = WRITE | do_flush_fua | do_sync; 1305 mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard;
1299 mbio->bi_private = r1_bio; 1306 mbio->bi_private = r1_bio;
1300 1307
1301 atomic_inc(&r1_bio->remaining); 1308 atomic_inc(&r1_bio->remaining);
@@ -1549,6 +1556,8 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1549 clear_bit(Unmerged, &rdev->flags); 1556 clear_bit(Unmerged, &rdev->flags);
1550 } 1557 }
1551 md_integrity_add_rdev(rdev, mddev); 1558 md_integrity_add_rdev(rdev, mddev);
1559 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
1560 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
1552 print_conf(conf); 1561 print_conf(conf);
1553 return err; 1562 return err;
1554} 1563}
@@ -1867,7 +1876,7 @@ static int process_checks(struct r1bio *r1_bio)
1867 } else 1876 } else
1868 j = 0; 1877 j = 0;
1869 if (j >= 0) 1878 if (j >= 0)
1870 mddev->resync_mismatches += r1_bio->sectors; 1879 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
1871 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) 1880 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
1872 && test_bit(BIO_UPTODATE, &sbio->bi_flags))) { 1881 && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
1873 /* No need to write to this device. */ 1882 /* No need to write to this device. */
@@ -2220,6 +2229,7 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2220 unfreeze_array(conf); 2229 unfreeze_array(conf);
2221 } else 2230 } else
2222 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); 2231 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
2232 rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
2223 2233
2224 bio = r1_bio->bios[r1_bio->read_disk]; 2234 bio = r1_bio->bios[r1_bio->read_disk];
2225 bdevname(bio->bi_bdev, b); 2235 bdevname(bio->bi_bdev, b);
@@ -2285,8 +2295,9 @@ read_more:
2285 } 2295 }
2286} 2296}
2287 2297
2288static void raid1d(struct mddev *mddev) 2298static void raid1d(struct md_thread *thread)
2289{ 2299{
2300 struct mddev *mddev = thread->mddev;
2290 struct r1bio *r1_bio; 2301 struct r1bio *r1_bio;
2291 unsigned long flags; 2302 unsigned long flags;
2292 struct r1conf *conf = mddev->private; 2303 struct r1conf *conf = mddev->private;
@@ -2699,7 +2710,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
2699 || disk_idx < 0) 2710 || disk_idx < 0)
2700 continue; 2711 continue;
2701 if (test_bit(Replacement, &rdev->flags)) 2712 if (test_bit(Replacement, &rdev->flags))
2702 disk = conf->mirrors + conf->raid_disks + disk_idx; 2713 disk = conf->mirrors + mddev->raid_disks + disk_idx;
2703 else 2714 else
2704 disk = conf->mirrors + disk_idx; 2715 disk = conf->mirrors + disk_idx;
2705 2716
@@ -2783,6 +2794,7 @@ static int run(struct mddev *mddev)
2783 int i; 2794 int i;
2784 struct md_rdev *rdev; 2795 struct md_rdev *rdev;
2785 int ret; 2796 int ret;
2797 bool discard_supported = false;
2786 2798
2787 if (mddev->level != 1) { 2799 if (mddev->level != 1) {
2788 printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n", 2800 printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
@@ -2812,6 +2824,8 @@ static int run(struct mddev *mddev)
2812 continue; 2824 continue;
2813 disk_stack_limits(mddev->gendisk, rdev->bdev, 2825 disk_stack_limits(mddev->gendisk, rdev->bdev,
2814 rdev->data_offset << 9); 2826 rdev->data_offset << 9);
2827 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
2828 discard_supported = true;
2815 } 2829 }
2816 2830
2817 mddev->degraded = 0; 2831 mddev->degraded = 0;
@@ -2846,6 +2860,13 @@ static int run(struct mddev *mddev)
2846 mddev->queue->backing_dev_info.congested_fn = raid1_congested; 2860 mddev->queue->backing_dev_info.congested_fn = raid1_congested;
2847 mddev->queue->backing_dev_info.congested_data = mddev; 2861 mddev->queue->backing_dev_info.congested_data = mddev;
2848 blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec); 2862 blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);
2863
2864 if (discard_supported)
2865 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
2866 mddev->queue);
2867 else
2868 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
2869 mddev->queue);
2849 } 2870 }
2850 2871
2851 ret = md_integrity_register(mddev); 2872 ret = md_integrity_register(mddev);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 0138a727c1f3..d1295aff4173 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -911,7 +911,12 @@ static void flush_pending_writes(struct r10conf *conf)
911 while (bio) { /* submit pending writes */ 911 while (bio) { /* submit pending writes */
912 struct bio *next = bio->bi_next; 912 struct bio *next = bio->bi_next;
913 bio->bi_next = NULL; 913 bio->bi_next = NULL;
914 generic_make_request(bio); 914 if (unlikely((bio->bi_rw & REQ_DISCARD) &&
915 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
916 /* Just ignore it */
917 bio_endio(bio, 0);
918 else
919 generic_make_request(bio);
915 bio = next; 920 bio = next;
916 } 921 }
917 } else 922 } else
@@ -1050,6 +1055,44 @@ static sector_t choose_data_offset(struct r10bio *r10_bio,
1050 return rdev->new_data_offset; 1055 return rdev->new_data_offset;
1051} 1056}
1052 1057
1058struct raid10_plug_cb {
1059 struct blk_plug_cb cb;
1060 struct bio_list pending;
1061 int pending_cnt;
1062};
1063
1064static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1065{
1066 struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
1067 cb);
1068 struct mddev *mddev = plug->cb.data;
1069 struct r10conf *conf = mddev->private;
1070 struct bio *bio;
1071
1072 if (from_schedule) {
1073 spin_lock_irq(&conf->device_lock);
1074 bio_list_merge(&conf->pending_bio_list, &plug->pending);
1075 conf->pending_count += plug->pending_cnt;
1076 spin_unlock_irq(&conf->device_lock);
1077 md_wakeup_thread(mddev->thread);
1078 kfree(plug);
1079 return;
1080 }
1081
1082 /* we aren't scheduling, so we can do the write-out directly. */
1083 bio = bio_list_get(&plug->pending);
1084 bitmap_unplug(mddev->bitmap);
1085 wake_up(&conf->wait_barrier);
1086
1087 while (bio) { /* submit pending writes */
1088 struct bio *next = bio->bi_next;
1089 bio->bi_next = NULL;
1090 generic_make_request(bio);
1091 bio = next;
1092 }
1093 kfree(plug);
1094}
1095
1053static void make_request(struct mddev *mddev, struct bio * bio) 1096static void make_request(struct mddev *mddev, struct bio * bio)
1054{ 1097{
1055 struct r10conf *conf = mddev->private; 1098 struct r10conf *conf = mddev->private;
@@ -1061,8 +1104,12 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1061 const int rw = bio_data_dir(bio); 1104 const int rw = bio_data_dir(bio);
1062 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); 1105 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
1063 const unsigned long do_fua = (bio->bi_rw & REQ_FUA); 1106 const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
1107 const unsigned long do_discard = (bio->bi_rw
1108 & (REQ_DISCARD | REQ_SECURE));
1064 unsigned long flags; 1109 unsigned long flags;
1065 struct md_rdev *blocked_rdev; 1110 struct md_rdev *blocked_rdev;
1111 struct blk_plug_cb *cb;
1112 struct raid10_plug_cb *plug = NULL;
1066 int sectors_handled; 1113 int sectors_handled;
1067 int max_sectors; 1114 int max_sectors;
1068 int sectors; 1115 int sectors;
@@ -1081,7 +1128,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1081 || conf->prev.near_copies < conf->prev.raid_disks))) { 1128 || conf->prev.near_copies < conf->prev.raid_disks))) {
1082 struct bio_pair *bp; 1129 struct bio_pair *bp;
1083 /* Sanity check -- queue functions should prevent this happening */ 1130 /* Sanity check -- queue functions should prevent this happening */
1084 if (bio->bi_vcnt != 1 || 1131 if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
1085 bio->bi_idx != 0) 1132 bio->bi_idx != 0)
1086 goto bad_map; 1133 goto bad_map;
1087 /* This is a one page bio that upper layers 1134 /* This is a one page bio that upper layers
@@ -1410,15 +1457,26 @@ retry_write:
1410 conf->mirrors[d].rdev)); 1457 conf->mirrors[d].rdev));
1411 mbio->bi_bdev = conf->mirrors[d].rdev->bdev; 1458 mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
1412 mbio->bi_end_io = raid10_end_write_request; 1459 mbio->bi_end_io = raid10_end_write_request;
1413 mbio->bi_rw = WRITE | do_sync | do_fua; 1460 mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
1414 mbio->bi_private = r10_bio; 1461 mbio->bi_private = r10_bio;
1415 1462
1416 atomic_inc(&r10_bio->remaining); 1463 atomic_inc(&r10_bio->remaining);
1464
1465 cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
1466 if (cb)
1467 plug = container_of(cb, struct raid10_plug_cb, cb);
1468 else
1469 plug = NULL;
1417 spin_lock_irqsave(&conf->device_lock, flags); 1470 spin_lock_irqsave(&conf->device_lock, flags);
1418 bio_list_add(&conf->pending_bio_list, mbio); 1471 if (plug) {
1419 conf->pending_count++; 1472 bio_list_add(&plug->pending, mbio);
1473 plug->pending_cnt++;
1474 } else {
1475 bio_list_add(&conf->pending_bio_list, mbio);
1476 conf->pending_count++;
1477 }
1420 spin_unlock_irqrestore(&conf->device_lock, flags); 1478 spin_unlock_irqrestore(&conf->device_lock, flags);
1421 if (!mddev_check_plugged(mddev)) 1479 if (!plug)
1422 md_wakeup_thread(mddev->thread); 1480 md_wakeup_thread(mddev->thread);
1423 1481
1424 if (!r10_bio->devs[i].repl_bio) 1482 if (!r10_bio->devs[i].repl_bio)
@@ -1439,7 +1497,7 @@ retry_write:
1439 conf->mirrors[d].replacement)); 1497 conf->mirrors[d].replacement));
1440 mbio->bi_bdev = conf->mirrors[d].replacement->bdev; 1498 mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
1441 mbio->bi_end_io = raid10_end_write_request; 1499 mbio->bi_end_io = raid10_end_write_request;
1442 mbio->bi_rw = WRITE | do_sync | do_fua; 1500 mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
1443 mbio->bi_private = r10_bio; 1501 mbio->bi_private = r10_bio;
1444 1502
1445 atomic_inc(&r10_bio->remaining); 1503 atomic_inc(&r10_bio->remaining);
@@ -1638,7 +1696,7 @@ static int raid10_spare_active(struct mddev *mddev)
1638 && !test_bit(Faulty, &tmp->rdev->flags) 1696 && !test_bit(Faulty, &tmp->rdev->flags)
1639 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 1697 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1640 count++; 1698 count++;
1641 sysfs_notify_dirent(tmp->rdev->sysfs_state); 1699 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
1642 } 1700 }
1643 } 1701 }
1644 spin_lock_irqsave(&conf->device_lock, flags); 1702 spin_lock_irqsave(&conf->device_lock, flags);
@@ -1725,6 +1783,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1725 clear_bit(Unmerged, &rdev->flags); 1783 clear_bit(Unmerged, &rdev->flags);
1726 } 1784 }
1727 md_integrity_add_rdev(rdev, mddev); 1785 md_integrity_add_rdev(rdev, mddev);
1786 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1787 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
1788
1728 print_conf(conf); 1789 print_conf(conf);
1729 return err; 1790 return err;
1730} 1791}
@@ -1952,7 +2013,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
1952 break; 2013 break;
1953 if (j == vcnt) 2014 if (j == vcnt)
1954 continue; 2015 continue;
1955 mddev->resync_mismatches += r10_bio->sectors; 2016 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
1956 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 2017 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
1957 /* Don't fix anything. */ 2018 /* Don't fix anything. */
1958 continue; 2019 continue;
@@ -2673,8 +2734,9 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2673 } 2734 }
2674} 2735}
2675 2736
2676static void raid10d(struct mddev *mddev) 2737static void raid10d(struct md_thread *thread)
2677{ 2738{
2739 struct mddev *mddev = thread->mddev;
2678 struct r10bio *r10_bio; 2740 struct r10bio *r10_bio;
2679 unsigned long flags; 2741 unsigned long flags;
2680 struct r10conf *conf = mddev->private; 2742 struct r10conf *conf = mddev->private;
@@ -3158,7 +3220,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3158 else { 3220 else {
3159 bad_sectors -= (sector - first_bad); 3221 bad_sectors -= (sector - first_bad);
3160 if (max_sync > bad_sectors) 3222 if (max_sync > bad_sectors)
3161 max_sync = max_sync; 3223 max_sync = bad_sectors;
3162 continue; 3224 continue;
3163 } 3225 }
3164 } 3226 }
@@ -3482,6 +3544,7 @@ static int run(struct mddev *mddev)
3482 sector_t size; 3544 sector_t size;
3483 sector_t min_offset_diff = 0; 3545 sector_t min_offset_diff = 0;
3484 int first = 1; 3546 int first = 1;
3547 bool discard_supported = false;
3485 3548
3486 if (mddev->private == NULL) { 3549 if (mddev->private == NULL) {
3487 conf = setup_conf(mddev); 3550 conf = setup_conf(mddev);
@@ -3498,6 +3561,8 @@ static int run(struct mddev *mddev)
3498 3561
3499 chunk_size = mddev->chunk_sectors << 9; 3562 chunk_size = mddev->chunk_sectors << 9;
3500 if (mddev->queue) { 3563 if (mddev->queue) {
3564 blk_queue_max_discard_sectors(mddev->queue,
3565 mddev->chunk_sectors);
3501 blk_queue_io_min(mddev->queue, chunk_size); 3566 blk_queue_io_min(mddev->queue, chunk_size);
3502 if (conf->geo.raid_disks % conf->geo.near_copies) 3567 if (conf->geo.raid_disks % conf->geo.near_copies)
3503 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); 3568 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
@@ -3543,8 +3608,19 @@ static int run(struct mddev *mddev)
3543 rdev->data_offset << 9); 3608 rdev->data_offset << 9);
3544 3609
3545 disk->head_position = 0; 3610 disk->head_position = 0;
3611
3612 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3613 discard_supported = true;
3546 } 3614 }
3547 3615
3616 if (mddev->queue) {
3617 if (discard_supported)
3618 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
3619 mddev->queue);
3620 else
3621 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
3622 mddev->queue);
3623 }
3548 /* need to check that every block has at least one working mirror */ 3624 /* need to check that every block has at least one working mirror */
3549 if (!enough(conf, -1)) { 3625 if (!enough(conf, -1)) {
3550 printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n", 3626 printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 0689173fd9f5..c5439dce0295 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -551,6 +551,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
551 rw = WRITE_FUA; 551 rw = WRITE_FUA;
552 else 552 else
553 rw = WRITE; 553 rw = WRITE;
554 if (test_bit(R5_Discard, &sh->dev[i].flags))
555 rw |= REQ_DISCARD;
554 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 556 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
555 rw = READ; 557 rw = READ;
556 else if (test_and_clear_bit(R5_WantReplace, 558 else if (test_and_clear_bit(R5_WantReplace,
@@ -1174,8 +1176,11 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
1174 set_bit(R5_WantFUA, &dev->flags); 1176 set_bit(R5_WantFUA, &dev->flags);
1175 if (wbi->bi_rw & REQ_SYNC) 1177 if (wbi->bi_rw & REQ_SYNC)
1176 set_bit(R5_SyncIO, &dev->flags); 1178 set_bit(R5_SyncIO, &dev->flags);
1177 tx = async_copy_data(1, wbi, dev->page, 1179 if (wbi->bi_rw & REQ_DISCARD)
1178 dev->sector, tx); 1180 set_bit(R5_Discard, &dev->flags);
1181 else
1182 tx = async_copy_data(1, wbi, dev->page,
1183 dev->sector, tx);
1179 wbi = r5_next_bio(wbi, dev->sector); 1184 wbi = r5_next_bio(wbi, dev->sector);
1180 } 1185 }
1181 } 1186 }
@@ -1191,7 +1196,7 @@ static void ops_complete_reconstruct(void *stripe_head_ref)
1191 int pd_idx = sh->pd_idx; 1196 int pd_idx = sh->pd_idx;
1192 int qd_idx = sh->qd_idx; 1197 int qd_idx = sh->qd_idx;
1193 int i; 1198 int i;
1194 bool fua = false, sync = false; 1199 bool fua = false, sync = false, discard = false;
1195 1200
1196 pr_debug("%s: stripe %llu\n", __func__, 1201 pr_debug("%s: stripe %llu\n", __func__,
1197 (unsigned long long)sh->sector); 1202 (unsigned long long)sh->sector);
@@ -1199,13 +1204,15 @@ static void ops_complete_reconstruct(void *stripe_head_ref)
1199 for (i = disks; i--; ) { 1204 for (i = disks; i--; ) {
1200 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); 1205 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
1201 sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); 1206 sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
1207 discard |= test_bit(R5_Discard, &sh->dev[i].flags);
1202 } 1208 }
1203 1209
1204 for (i = disks; i--; ) { 1210 for (i = disks; i--; ) {
1205 struct r5dev *dev = &sh->dev[i]; 1211 struct r5dev *dev = &sh->dev[i];
1206 1212
1207 if (dev->written || i == pd_idx || i == qd_idx) { 1213 if (dev->written || i == pd_idx || i == qd_idx) {
1208 set_bit(R5_UPTODATE, &dev->flags); 1214 if (!discard)
1215 set_bit(R5_UPTODATE, &dev->flags);
1209 if (fua) 1216 if (fua)
1210 set_bit(R5_WantFUA, &dev->flags); 1217 set_bit(R5_WantFUA, &dev->flags);
1211 if (sync) 1218 if (sync)
@@ -1241,6 +1248,18 @@ ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
1241 pr_debug("%s: stripe %llu\n", __func__, 1248 pr_debug("%s: stripe %llu\n", __func__,
1242 (unsigned long long)sh->sector); 1249 (unsigned long long)sh->sector);
1243 1250
1251 for (i = 0; i < sh->disks; i++) {
1252 if (pd_idx == i)
1253 continue;
1254 if (!test_bit(R5_Discard, &sh->dev[i].flags))
1255 break;
1256 }
1257 if (i >= sh->disks) {
1258 atomic_inc(&sh->count);
1259 set_bit(R5_Discard, &sh->dev[pd_idx].flags);
1260 ops_complete_reconstruct(sh);
1261 return;
1262 }
1244 /* check if prexor is active which means only process blocks 1263 /* check if prexor is active which means only process blocks
1245 * that are part of a read-modify-write (written) 1264 * that are part of a read-modify-write (written)
1246 */ 1265 */
@@ -1285,10 +1304,24 @@ ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
1285{ 1304{
1286 struct async_submit_ctl submit; 1305 struct async_submit_ctl submit;
1287 struct page **blocks = percpu->scribble; 1306 struct page **blocks = percpu->scribble;
1288 int count; 1307 int count, i;
1289 1308
1290 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); 1309 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
1291 1310
1311 for (i = 0; i < sh->disks; i++) {
1312 if (sh->pd_idx == i || sh->qd_idx == i)
1313 continue;
1314 if (!test_bit(R5_Discard, &sh->dev[i].flags))
1315 break;
1316 }
1317 if (i >= sh->disks) {
1318 atomic_inc(&sh->count);
1319 set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
1320 set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
1321 ops_complete_reconstruct(sh);
1322 return;
1323 }
1324
1292 count = set_syndrome_sources(blocks, sh); 1325 count = set_syndrome_sources(blocks, sh);
1293 1326
1294 atomic_inc(&sh->count); 1327 atomic_inc(&sh->count);
@@ -2408,11 +2441,11 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
2408 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) 2441 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
2409 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); 2442 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
2410 } 2443 }
2411 spin_unlock_irq(&sh->stripe_lock);
2412 2444
2413 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 2445 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2414 (unsigned long long)(*bip)->bi_sector, 2446 (unsigned long long)(*bip)->bi_sector,
2415 (unsigned long long)sh->sector, dd_idx); 2447 (unsigned long long)sh->sector, dd_idx);
2448 spin_unlock_irq(&sh->stripe_lock);
2416 2449
2417 if (conf->mddev->bitmap && firstwrite) { 2450 if (conf->mddev->bitmap && firstwrite) {
2418 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 2451 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
@@ -2479,10 +2512,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
2479 bi = sh->dev[i].towrite; 2512 bi = sh->dev[i].towrite;
2480 sh->dev[i].towrite = NULL; 2513 sh->dev[i].towrite = NULL;
2481 spin_unlock_irq(&sh->stripe_lock); 2514 spin_unlock_irq(&sh->stripe_lock);
2482 if (bi) { 2515 if (bi)
2483 s->to_write--;
2484 bitmap_end = 1; 2516 bitmap_end = 1;
2485 }
2486 2517
2487 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2518 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2488 wake_up(&conf->wait_for_overlap); 2519 wake_up(&conf->wait_for_overlap);
@@ -2524,11 +2555,12 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
2524 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && 2555 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
2525 (!test_bit(R5_Insync, &sh->dev[i].flags) || 2556 (!test_bit(R5_Insync, &sh->dev[i].flags) ||
2526 test_bit(R5_ReadError, &sh->dev[i].flags))) { 2557 test_bit(R5_ReadError, &sh->dev[i].flags))) {
2558 spin_lock_irq(&sh->stripe_lock);
2527 bi = sh->dev[i].toread; 2559 bi = sh->dev[i].toread;
2528 sh->dev[i].toread = NULL; 2560 sh->dev[i].toread = NULL;
2561 spin_unlock_irq(&sh->stripe_lock);
2529 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2562 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2530 wake_up(&conf->wait_for_overlap); 2563 wake_up(&conf->wait_for_overlap);
2531 if (bi) s->to_read--;
2532 while (bi && bi->bi_sector < 2564 while (bi && bi->bi_sector <
2533 sh->dev[i].sector + STRIPE_SECTORS) { 2565 sh->dev[i].sector + STRIPE_SECTORS) {
2534 struct bio *nextbi = 2566 struct bio *nextbi =
@@ -2741,7 +2773,8 @@ static void handle_stripe_clean_event(struct r5conf *conf,
2741 if (sh->dev[i].written) { 2773 if (sh->dev[i].written) {
2742 dev = &sh->dev[i]; 2774 dev = &sh->dev[i];
2743 if (!test_bit(R5_LOCKED, &dev->flags) && 2775 if (!test_bit(R5_LOCKED, &dev->flags) &&
2744 test_bit(R5_UPTODATE, &dev->flags)) { 2776 (test_bit(R5_UPTODATE, &dev->flags) ||
2777 test_and_clear_bit(R5_Discard, &dev->flags))) {
2745 /* We can return any write requests */ 2778 /* We can return any write requests */
2746 struct bio *wbi, *wbi2; 2779 struct bio *wbi, *wbi2;
2747 pr_debug("Return write for disc %d\n", i); 2780 pr_debug("Return write for disc %d\n", i);
@@ -2775,12 +2808,25 @@ static void handle_stripe_dirtying(struct r5conf *conf,
2775 int disks) 2808 int disks)
2776{ 2809{
2777 int rmw = 0, rcw = 0, i; 2810 int rmw = 0, rcw = 0, i;
2778 if (conf->max_degraded == 2) { 2811 sector_t recovery_cp = conf->mddev->recovery_cp;
2779 /* RAID6 requires 'rcw' in current implementation 2812
2780 * Calculate the real rcw later - for now fake it 2813 /* RAID6 requires 'rcw' in current implementation.
2814 * Otherwise, check whether resync is now happening or should start.
2815 * If yes, then the array is dirty (after unclean shutdown or
2816 * initial creation), so parity in some stripes might be inconsistent.
2817 * In this case, we need to always do reconstruct-write, to ensure
2818 * that in case of drive failure or read-error correction, we
2819 * generate correct data from the parity.
2820 */
2821 if (conf->max_degraded == 2 ||
2822 (recovery_cp < MaxSector && sh->sector >= recovery_cp)) {
2823 /* Calculate the real rcw later - for now make it
2781 * look like rcw is cheaper 2824 * look like rcw is cheaper
2782 */ 2825 */
2783 rcw = 1; rmw = 2; 2826 rcw = 1; rmw = 2;
2827 pr_debug("force RCW max_degraded=%u, recovery_cp=%llu sh->sector=%llu\n",
2828 conf->max_degraded, (unsigned long long)recovery_cp,
2829 (unsigned long long)sh->sector);
2784 } else for (i = disks; i--; ) { 2830 } else for (i = disks; i--; ) {
2785 /* would I have to read this buffer for read_modify_write */ 2831 /* would I have to read this buffer for read_modify_write */
2786 struct r5dev *dev = &sh->dev[i]; 2832 struct r5dev *dev = &sh->dev[i];
@@ -2932,7 +2978,7 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
2932 */ 2978 */
2933 set_bit(STRIPE_INSYNC, &sh->state); 2979 set_bit(STRIPE_INSYNC, &sh->state);
2934 else { 2980 else {
2935 conf->mddev->resync_mismatches += STRIPE_SECTORS; 2981 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
2936 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2982 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2937 /* don't try to repair!! */ 2983 /* don't try to repair!! */
2938 set_bit(STRIPE_INSYNC, &sh->state); 2984 set_bit(STRIPE_INSYNC, &sh->state);
@@ -3084,7 +3130,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
3084 */ 3130 */
3085 } 3131 }
3086 } else { 3132 } else {
3087 conf->mddev->resync_mismatches += STRIPE_SECTORS; 3133 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
3088 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 3134 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
3089 /* don't try to repair!! */ 3135 /* don't try to repair!! */
3090 set_bit(STRIPE_INSYNC, &sh->state); 3136 set_bit(STRIPE_INSYNC, &sh->state);
@@ -3459,10 +3505,12 @@ static void handle_stripe(struct stripe_head *sh)
3459 if (s.written && 3505 if (s.written &&
3460 (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) 3506 (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
3461 && !test_bit(R5_LOCKED, &pdev->flags) 3507 && !test_bit(R5_LOCKED, &pdev->flags)
3462 && test_bit(R5_UPTODATE, &pdev->flags)))) && 3508 && (test_bit(R5_UPTODATE, &pdev->flags) ||
3509 test_bit(R5_Discard, &pdev->flags))))) &&
3463 (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 3510 (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
3464 && !test_bit(R5_LOCKED, &qdev->flags) 3511 && !test_bit(R5_LOCKED, &qdev->flags)
3465 && test_bit(R5_UPTODATE, &qdev->flags))))) 3512 && (test_bit(R5_UPTODATE, &qdev->flags) ||
3513 test_bit(R5_Discard, &qdev->flags))))))
3466 handle_stripe_clean_event(conf, sh, disks, &s.return_bi); 3514 handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
3467 3515
3468 /* Now we might consider reading some blocks, either to check/generate 3516 /* Now we might consider reading some blocks, either to check/generate
@@ -3489,9 +3537,11 @@ static void handle_stripe(struct stripe_head *sh)
3489 /* All the 'written' buffers and the parity block are ready to 3537 /* All the 'written' buffers and the parity block are ready to
3490 * be written back to disk 3538 * be written back to disk
3491 */ 3539 */
3492 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); 3540 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) &&
3541 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags));
3493 BUG_ON(sh->qd_idx >= 0 && 3542 BUG_ON(sh->qd_idx >= 0 &&
3494 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags)); 3543 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) &&
3544 !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags));
3495 for (i = disks; i--; ) { 3545 for (i = disks; i--; ) {
3496 struct r5dev *dev = &sh->dev[i]; 3546 struct r5dev *dev = &sh->dev[i];
3497 if (test_bit(R5_LOCKED, &dev->flags) && 3547 if (test_bit(R5_LOCKED, &dev->flags) &&
@@ -4072,6 +4122,88 @@ static void release_stripe_plug(struct mddev *mddev,
4072 release_stripe(sh); 4122 release_stripe(sh);
4073} 4123}
4074 4124
4125static void make_discard_request(struct mddev *mddev, struct bio *bi)
4126{
4127 struct r5conf *conf = mddev->private;
4128 sector_t logical_sector, last_sector;
4129 struct stripe_head *sh;
4130 int remaining;
4131 int stripe_sectors;
4132
4133 if (mddev->reshape_position != MaxSector)
4134 /* Skip discard while reshape is happening */
4135 return;
4136
4137 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4138 last_sector = bi->bi_sector + (bi->bi_size>>9);
4139
4140 bi->bi_next = NULL;
4141 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
4142
4143 stripe_sectors = conf->chunk_sectors *
4144 (conf->raid_disks - conf->max_degraded);
4145 logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector,
4146 stripe_sectors);
4147 sector_div(last_sector, stripe_sectors);
4148
4149 logical_sector *= conf->chunk_sectors;
4150 last_sector *= conf->chunk_sectors;
4151
4152 for (; logical_sector < last_sector;
4153 logical_sector += STRIPE_SECTORS) {
4154 DEFINE_WAIT(w);
4155 int d;
4156 again:
4157 sh = get_active_stripe(conf, logical_sector, 0, 0, 0);
4158 prepare_to_wait(&conf->wait_for_overlap, &w,
4159 TASK_UNINTERRUPTIBLE);
4160 spin_lock_irq(&sh->stripe_lock);
4161 for (d = 0; d < conf->raid_disks; d++) {
4162 if (d == sh->pd_idx || d == sh->qd_idx)
4163 continue;
4164 if (sh->dev[d].towrite || sh->dev[d].toread) {
4165 set_bit(R5_Overlap, &sh->dev[d].flags);
4166 spin_unlock_irq(&sh->stripe_lock);
4167 release_stripe(sh);
4168 schedule();
4169 goto again;
4170 }
4171 }
4172 finish_wait(&conf->wait_for_overlap, &w);
4173 for (d = 0; d < conf->raid_disks; d++) {
4174 if (d == sh->pd_idx || d == sh->qd_idx)
4175 continue;
4176 sh->dev[d].towrite = bi;
4177 set_bit(R5_OVERWRITE, &sh->dev[d].flags);
4178 raid5_inc_bi_active_stripes(bi);
4179 }
4180 spin_unlock_irq(&sh->stripe_lock);
4181 if (conf->mddev->bitmap) {
4182 for (d = 0;
4183 d < conf->raid_disks - conf->max_degraded;
4184 d++)
4185 bitmap_startwrite(mddev->bitmap,
4186 sh->sector,
4187 STRIPE_SECTORS,
4188 0);
4189 sh->bm_seq = conf->seq_flush + 1;
4190 set_bit(STRIPE_BIT_DELAY, &sh->state);
4191 }
4192
4193 set_bit(STRIPE_HANDLE, &sh->state);
4194 clear_bit(STRIPE_DELAYED, &sh->state);
4195 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
4196 atomic_inc(&conf->preread_active_stripes);
4197 release_stripe_plug(mddev, sh);
4198 }
4199
4200 remaining = raid5_dec_bi_active_stripes(bi);
4201 if (remaining == 0) {
4202 md_write_end(mddev);
4203 bio_endio(bi, 0);
4204 }
4205}
4206
4075static void make_request(struct mddev *mddev, struct bio * bi) 4207static void make_request(struct mddev *mddev, struct bio * bi)
4076{ 4208{
4077 struct r5conf *conf = mddev->private; 4209 struct r5conf *conf = mddev->private;
@@ -4094,6 +4226,11 @@ static void make_request(struct mddev *mddev, struct bio * bi)
4094 chunk_aligned_read(mddev,bi)) 4226 chunk_aligned_read(mddev,bi))
4095 return; 4227 return;
4096 4228
4229 if (unlikely(bi->bi_rw & REQ_DISCARD)) {
4230 make_discard_request(mddev, bi);
4231 return;
4232 }
4233
4097 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 4234 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4098 last_sector = bi->bi_sector + (bi->bi_size>>9); 4235 last_sector = bi->bi_sector + (bi->bi_size>>9);
4099 bi->bi_next = NULL; 4236 bi->bi_next = NULL;
@@ -4630,8 +4767,9 @@ static int handle_active_stripes(struct r5conf *conf)
4630 * During the scan, completed stripes are saved for us by the interrupt 4767 * During the scan, completed stripes are saved for us by the interrupt
4631 * handler, so that they will not have to wait for our next wakeup. 4768 * handler, so that they will not have to wait for our next wakeup.
4632 */ 4769 */
4633static void raid5d(struct mddev *mddev) 4770static void raid5d(struct md_thread *thread)
4634{ 4771{
4772 struct mddev *mddev = thread->mddev;
4635 struct r5conf *conf = mddev->private; 4773 struct r5conf *conf = mddev->private;
4636 int handled; 4774 int handled;
4637 struct blk_plug plug; 4775 struct blk_plug plug;
@@ -5366,6 +5504,7 @@ static int run(struct mddev *mddev)
5366 5504
5367 if (mddev->queue) { 5505 if (mddev->queue) {
5368 int chunk_size; 5506 int chunk_size;
5507 bool discard_supported = true;
5369 /* read-ahead size must cover two whole stripes, which 5508 /* read-ahead size must cover two whole stripes, which
5370 * is 2 * (datadisks) * chunksize where 'n' is the 5509 * is 2 * (datadisks) * chunksize where 'n' is the
5371 * number of raid devices 5510 * number of raid devices
@@ -5385,13 +5524,48 @@ static int run(struct mddev *mddev)
5385 blk_queue_io_min(mddev->queue, chunk_size); 5524 blk_queue_io_min(mddev->queue, chunk_size);
5386 blk_queue_io_opt(mddev->queue, chunk_size * 5525 blk_queue_io_opt(mddev->queue, chunk_size *
5387 (conf->raid_disks - conf->max_degraded)); 5526 (conf->raid_disks - conf->max_degraded));
5527 /*
5528 * We can only discard a whole stripe. It doesn't make sense to
5529 * discard data disk but write parity disk
5530 */
5531 stripe = stripe * PAGE_SIZE;
5532 mddev->queue->limits.discard_alignment = stripe;
5533 mddev->queue->limits.discard_granularity = stripe;
5534 /*
5535 * unaligned part of discard request will be ignored, so can't
5536 * guarantee discard_zerors_data
5537 */
5538 mddev->queue->limits.discard_zeroes_data = 0;
5388 5539
5389 rdev_for_each(rdev, mddev) { 5540 rdev_for_each(rdev, mddev) {
5390 disk_stack_limits(mddev->gendisk, rdev->bdev, 5541 disk_stack_limits(mddev->gendisk, rdev->bdev,
5391 rdev->data_offset << 9); 5542 rdev->data_offset << 9);
5392 disk_stack_limits(mddev->gendisk, rdev->bdev, 5543 disk_stack_limits(mddev->gendisk, rdev->bdev,
5393 rdev->new_data_offset << 9); 5544 rdev->new_data_offset << 9);
5545 /*
5546 * discard_zeroes_data is required, otherwise data
5547 * could be lost. Consider a scenario: discard a stripe
5548 * (the stripe could be inconsistent if
5549 * discard_zeroes_data is 0); write one disk of the
5550 * stripe (the stripe could be inconsistent again
5551 * depending on which disks are used to calculate
5552 * parity); the disk is broken; The stripe data of this
5553 * disk is lost.
5554 */
5555 if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) ||
5556 !bdev_get_queue(rdev->bdev)->
5557 limits.discard_zeroes_data)
5558 discard_supported = false;
5394 } 5559 }
5560
5561 if (discard_supported &&
5562 mddev->queue->limits.max_discard_sectors >= stripe &&
5563 mddev->queue->limits.discard_granularity >= stripe)
5564 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
5565 mddev->queue);
5566 else
5567 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
5568 mddev->queue);
5395 } 5569 }
5396 5570
5397 return 0; 5571 return 0;
@@ -5702,7 +5876,8 @@ static int check_reshape(struct mddev *mddev)
5702 if (!check_stripe_cache(mddev)) 5876 if (!check_stripe_cache(mddev))
5703 return -ENOSPC; 5877 return -ENOSPC;
5704 5878
5705 return resize_stripes(conf, conf->raid_disks + mddev->delta_disks); 5879 return resize_stripes(conf, (conf->previous_raid_disks
5880 + mddev->delta_disks));
5706} 5881}
5707 5882
5708static int raid5_start_reshape(struct mddev *mddev) 5883static int raid5_start_reshape(struct mddev *mddev)
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index a9fc24901eda..18b2c4a8a1fd 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -298,6 +298,7 @@ enum r5dev_flags {
298 R5_WantReplace, /* We need to update the replacement, we have read 298 R5_WantReplace, /* We need to update the replacement, we have read
299 * data in, and now is a good time to write it out. 299 * data in, and now is a good time to write it out.
300 */ 300 */
301 R5_Discard, /* Discard the stripe */
301}; 302};
302 303
303/* 304/*
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index dd13e3a4c272..4ef0d80b57f4 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -163,19 +163,21 @@ source "drivers/media/common/Kconfig"
163# 163#
164 164
165config MEDIA_SUBDRV_AUTOSELECT 165config MEDIA_SUBDRV_AUTOSELECT
166 bool "Autoselect analog and hybrid tuner modules to build" 166 bool "Autoselect tuners and i2c modules to build"
167 depends on MEDIA_TUNER 167 depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_CAMERA_SUPPORT
168 default y 168 default y
169 help 169 help
170 By default, a TV driver auto-selects all possible tuners 170 By default, a media driver auto-selects all possible i2c
171 thar could be used by the driver. 171 devices that are used by any of the supported devices.
172 172
173 This is generally the right thing to do, except when there 173 This is generally the right thing to do, except when there
174 are strict constraints with regards to the kernel size. 174 are strict constraints with regards to the kernel size,
175 like on embedded systems.
175 176
176 Use this option with care, as deselecting tuner drivers which 177 Use this option with care, as deselecting ancillary drivers which
177 are in fact necessary will result in TV devices which cannot 178 are, in fact, necessary will result in the lack of the needed
178 be tuned due to lack of the tuning driver. 179 functionality for your device (it may not tune or may not have
180 the need demodulers).
179 181
180 If unsure say Y. 182 If unsure say Y.
181 183
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 8f58f241c10d..7e92793260f0 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -966,6 +966,8 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe)
966 break; 966 break;
967 } 967 }
968 968
969 c->lna = LNA_AUTO;
970
969 return 0; 971 return 0;
970} 972}
971 973
@@ -1054,6 +1056,8 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
1054 _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_B, 0, 0), 1056 _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_B, 0, 0),
1055 _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_C, 0, 0), 1057 _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_C, 0, 0),
1056 _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_D, 0, 0), 1058 _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_D, 0, 0),
1059
1060 _DTV_CMD(DTV_LNA, 0, 0),
1057}; 1061};
1058 1062
1059static void dtv_property_dump(struct dvb_frontend *fe, struct dtv_property *tvp) 1063static void dtv_property_dump(struct dvb_frontend *fe, struct dtv_property *tvp)
@@ -1440,6 +1444,10 @@ static int dtv_property_process_get(struct dvb_frontend *fe,
1440 tvp->u.data = fe->dtv_property_cache.atscmh_sccc_code_mode_d; 1444 tvp->u.data = fe->dtv_property_cache.atscmh_sccc_code_mode_d;
1441 break; 1445 break;
1442 1446
1447 case DTV_LNA:
1448 tvp->u.data = c->lna;
1449 break;
1450
1443 default: 1451 default:
1444 return -EINVAL; 1452 return -EINVAL;
1445 } 1453 }
@@ -1731,10 +1739,6 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
1731 case DTV_INTERLEAVING: 1739 case DTV_INTERLEAVING:
1732 c->interleaving = tvp->u.data; 1740 c->interleaving = tvp->u.data;
1733 break; 1741 break;
1734 case DTV_LNA:
1735 if (fe->ops.set_lna)
1736 r = fe->ops.set_lna(fe, tvp->u.data);
1737 break;
1738 1742
1739 /* ISDB-T Support here */ 1743 /* ISDB-T Support here */
1740 case DTV_ISDBT_PARTIAL_RECEPTION: 1744 case DTV_ISDBT_PARTIAL_RECEPTION:
@@ -1806,6 +1810,12 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
1806 fe->dtv_property_cache.atscmh_rs_frame_ensemble = tvp->u.data; 1810 fe->dtv_property_cache.atscmh_rs_frame_ensemble = tvp->u.data;
1807 break; 1811 break;
1808 1812
1813 case DTV_LNA:
1814 c->lna = tvp->u.data;
1815 if (fe->ops.set_lna)
1816 r = fe->ops.set_lna(fe);
1817 break;
1818
1809 default: 1819 default:
1810 return -EINVAL; 1820 return -EINVAL;
1811 } 1821 }
@@ -2309,7 +2319,7 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
2309 fepriv->tune_mode_flags = (unsigned long) parg; 2319 fepriv->tune_mode_flags = (unsigned long) parg;
2310 err = 0; 2320 err = 0;
2311 break; 2321 break;
2312 }; 2322 }
2313 2323
2314 return err; 2324 return err;
2315} 2325}
diff --git a/drivers/media/dvb-core/dvb_frontend.h b/drivers/media/dvb-core/dvb_frontend.h
index 44a445cee74f..97112cd88a17 100644
--- a/drivers/media/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb-core/dvb_frontend.h
@@ -303,7 +303,7 @@ struct dvb_frontend_ops {
303 int (*dishnetwork_send_legacy_command)(struct dvb_frontend* fe, unsigned long cmd); 303 int (*dishnetwork_send_legacy_command)(struct dvb_frontend* fe, unsigned long cmd);
304 int (*i2c_gate_ctrl)(struct dvb_frontend* fe, int enable); 304 int (*i2c_gate_ctrl)(struct dvb_frontend* fe, int enable);
305 int (*ts_bus_ctrl)(struct dvb_frontend* fe, int acquire); 305 int (*ts_bus_ctrl)(struct dvb_frontend* fe, int acquire);
306 int (*set_lna)(struct dvb_frontend *, int); 306 int (*set_lna)(struct dvb_frontend *);
307 307
308 /* These callbacks are for devices that implement their own 308 /* These callbacks are for devices that implement their own
309 * tuning algorithms, rather than a simple swzigzag 309 * tuning algorithms, rather than a simple swzigzag
@@ -391,6 +391,8 @@ struct dtv_frontend_properties {
391 u8 atscmh_sccc_code_mode_b; 391 u8 atscmh_sccc_code_mode_b;
392 u8 atscmh_sccc_code_mode_c; 392 u8 atscmh_sccc_code_mode_c;
393 u8 atscmh_sccc_code_mode_d; 393 u8 atscmh_sccc_code_mode_d;
394
395 u32 lna;
394}; 396};
395 397
396struct dvb_frontend { 398struct dvb_frontend {
diff --git a/drivers/media/dvb-frontends/a8293.c b/drivers/media/dvb-frontends/a8293.c
index cff44a389b40..74fbb5d58bed 100644
--- a/drivers/media/dvb-frontends/a8293.c
+++ b/drivers/media/dvb-frontends/a8293.c
@@ -90,7 +90,7 @@ static int a8293_set_voltage(struct dvb_frontend *fe,
90 default: 90 default:
91 ret = -EINVAL; 91 ret = -EINVAL;
92 goto err; 92 goto err;
93 }; 93 }
94 94
95 ret = a8293_wr(priv, &priv->reg[0], 1); 95 ret = a8293_wr(priv, &priv->reg[0], 1);
96 if (ret) 96 if (ret)
diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
index e9f04a36577b..a204f2828820 100644
--- a/drivers/media/dvb-frontends/af9013.c
+++ b/drivers/media/dvb-frontends/af9013.c
@@ -241,7 +241,7 @@ static int af9013_set_gpio(struct af9013_state *state, u8 gpio, u8 gpioval)
241 KBUILD_MODNAME, gpio); 241 KBUILD_MODNAME, gpio);
242 ret = -EINVAL; 242 ret = -EINVAL;
243 goto err; 243 goto err;
244 }; 244 }
245 245
246 switch (gpio) { 246 switch (gpio) {
247 case 0: 247 case 0:
@@ -253,7 +253,7 @@ static int af9013_set_gpio(struct af9013_state *state, u8 gpio, u8 gpioval)
253 default: 253 default:
254 pos = 4; 254 pos = 4;
255 break; 255 break;
256 }; 256 }
257 257
258 ret = af9013_wr_reg_bits(state, addr, pos, 4, gpioval); 258 ret = af9013_wr_reg_bits(state, addr, pos, 4, gpioval);
259 if (ret) 259 if (ret)
@@ -726,7 +726,7 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
726 default: 726 default:
727 dev_dbg(&state->i2c->dev, "%s: invalid hierarchy\n", __func__); 727 dev_dbg(&state->i2c->dev, "%s: invalid hierarchy\n", __func__);
728 auto_mode = 1; 728 auto_mode = 1;
729 }; 729 }
730 730
731 switch (c->modulation) { 731 switch (c->modulation) {
732 case QAM_AUTO: 732 case QAM_AUTO:
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 8162d939c4b2..464ad878490b 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -408,7 +408,7 @@ static int af9033_set_frontend(struct dvb_frontend *fe)
408{ 408{
409 struct af9033_state *state = fe->demodulator_priv; 409 struct af9033_state *state = fe->demodulator_priv;
410 struct dtv_frontend_properties *c = &fe->dtv_property_cache; 410 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
411 int ret, i, spec_inv; 411 int ret, i, spec_inv, sampling_freq;
412 u8 tmp, buf[3], bandwidth_reg_val; 412 u8 tmp, buf[3], bandwidth_reg_val;
413 u32 if_frequency, freq_cw, adc_freq; 413 u32 if_frequency, freq_cw, adc_freq;
414 414
@@ -465,18 +465,20 @@ static int af9033_set_frontend(struct dvb_frontend *fe)
465 else 465 else
466 if_frequency = 0; 466 if_frequency = 0;
467 467
468 while (if_frequency > (adc_freq / 2)) 468 sampling_freq = if_frequency;
469 if_frequency -= adc_freq;
470 469
471 if (if_frequency >= 0) 470 while (sampling_freq > (adc_freq / 2))
471 sampling_freq -= adc_freq;
472
473 if (sampling_freq >= 0)
472 spec_inv *= -1; 474 spec_inv *= -1;
473 else 475 else
474 if_frequency *= -1; 476 sampling_freq *= -1;
475 477
476 freq_cw = af9033_div(state, if_frequency, adc_freq, 23ul); 478 freq_cw = af9033_div(state, sampling_freq, adc_freq, 23ul);
477 479
478 if (spec_inv == -1) 480 if (spec_inv == -1)
479 freq_cw *= -1; 481 freq_cw = 0x800000 - freq_cw;
480 482
481 /* get adc multiplies */ 483 /* get adc multiplies */
482 ret = af9033_rd_reg(state, 0x800045, &tmp); 484 ret = af9033_rd_reg(state, 0x800045, &tmp);
diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c
index 033cd7ad3ca2..1b77909c0c71 100644
--- a/drivers/media/dvb-frontends/bcm3510.c
+++ b/drivers/media/dvb-frontends/bcm3510.c
@@ -527,7 +527,7 @@ static int bcm3510_set_frontend(struct dvb_frontend *fe)
527 cmd.ACQUIRE1.IF_FREQ = 0x0; 527 cmd.ACQUIRE1.IF_FREQ = 0x0;
528 default: 528 default:
529 return -EINVAL; 529 return -EINVAL;
530 }; 530 }
531 cmd.ACQUIRE0.OFFSET = 0; 531 cmd.ACQUIRE0.OFFSET = 0;
532 cmd.ACQUIRE0.NTSCSWEEP = 1; 532 cmd.ACQUIRE0.NTSCSWEEP = 1;
533 cmd.ACQUIRE0.FA = 1; 533 cmd.ACQUIRE0.FA = 1;
diff --git a/drivers/media/dvb-frontends/cx24110.c b/drivers/media/dvb-frontends/cx24110.c
index 3180f5b2a6a6..0cd6927e654c 100644
--- a/drivers/media/dvb-frontends/cx24110.c
+++ b/drivers/media/dvb-frontends/cx24110.c
@@ -218,7 +218,7 @@ static int cx24110_set_fec (struct cx24110_state* state, fe_code_rate_t fec)
218 } else 218 } else
219 return -EOPNOTSUPP; 219 return -EOPNOTSUPP;
220/* fixme (low): which is the correct return code? */ 220/* fixme (low): which is the correct return code? */
221 }; 221 }
222 return 0; 222 return 0;
223} 223}
224 224
@@ -275,7 +275,7 @@ static int cx24110_set_symbolrate (struct cx24110_state* state, u32 srate)
275 cx24110_writereg(state,0x07,tmp|0x3); 275 cx24110_writereg(state,0x07,tmp|0x3);
276 cx24110_writereg(state,0x06,0x78); 276 cx24110_writereg(state,0x06,0x78);
277 fclk=90999000UL; 277 fclk=90999000UL;
278 }; 278 }
279 dprintk("cx24110 debug: fclk %d Hz\n",fclk); 279 dprintk("cx24110 debug: fclk %d Hz\n",fclk);
280 /* we need to divide two integers with approx. 27 bits in 32 bit 280 /* we need to divide two integers with approx. 27 bits in 32 bit
281 arithmetic giving a 25 bit result */ 281 arithmetic giving a 25 bit result */
@@ -362,7 +362,7 @@ static int cx24110_initfe(struct dvb_frontend* fe)
362 362
363 for(i = 0; i < ARRAY_SIZE(cx24110_regdata); i++) { 363 for(i = 0; i < ARRAY_SIZE(cx24110_regdata); i++) {
364 cx24110_writereg(state, cx24110_regdata[i].reg, cx24110_regdata[i].data); 364 cx24110_writereg(state, cx24110_regdata[i].reg, cx24110_regdata[i].data);
365 }; 365 }
366 366
367 return 0; 367 return 0;
368} 368}
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index 42648643693e..9b658c1cf39a 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -688,7 +688,7 @@ struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *cfg,
688{ 688{
689 struct cxd2820r_priv *priv; 689 struct cxd2820r_priv *priv;
690 int ret; 690 int ret;
691 u8 tmp, gpio[GPIO_COUNT]; 691 u8 tmp;
692 692
693 priv = kzalloc(sizeof(struct cxd2820r_priv), GFP_KERNEL); 693 priv = kzalloc(sizeof(struct cxd2820r_priv), GFP_KERNEL);
694 if (!priv) { 694 if (!priv) {
@@ -735,6 +735,7 @@ struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *cfg,
735 * Use static GPIO configuration if GPIOLIB is undefined. 735 * Use static GPIO configuration if GPIOLIB is undefined.
736 * This is fallback condition. 736 * This is fallback condition.
737 */ 737 */
738 u8 gpio[GPIO_COUNT];
738 gpio[0] = (*gpio_chip_base >> 0) & 0x07; 739 gpio[0] = (*gpio_chip_base >> 0) & 0x07;
739 gpio[1] = (*gpio_chip_base >> 3) & 0x07; 740 gpio[1] = (*gpio_chip_base >> 3) & 0x07;
740 gpio[2] = 0; 741 gpio[2] = 0;
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index f380eb43e9d5..6d9853750d2b 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -991,7 +991,7 @@ static int HI_Command(struct drxd_state *state, u16 cmd, u16 * pResult)
991 if (nrRetries > DRXD_MAX_RETRIES) { 991 if (nrRetries > DRXD_MAX_RETRIES) {
992 status = -1; 992 status = -1;
993 break; 993 break;
994 }; 994 }
995 status = Read16(state, HI_RA_RAM_SRV_CMD__A, &waitCmd, 0); 995 status = Read16(state, HI_RA_RAM_SRV_CMD__A, &waitCmd, 0);
996 } while (waitCmd != 0); 996 } while (waitCmd != 0);
997 997
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index 4c8ac2657c4a..5b639087ce45 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -30,6 +30,7 @@
30#include "ds3000.h" 30#include "ds3000.h"
31 31
32static int debug; 32static int debug;
33static int force_fw_upload;
33 34
34#define dprintk(args...) \ 35#define dprintk(args...) \
35 do { \ 36 do { \
@@ -392,11 +393,13 @@ static int ds3000_firmware_ondemand(struct dvb_frontend *fe)
392 393
393 dprintk("%s()\n", __func__); 394 dprintk("%s()\n", __func__);
394 395
395 if (ds3000_readreg(state, 0xb2) <= 0) 396 ret = ds3000_readreg(state, 0xb2);
397 if (ret < 0)
396 return ret; 398 return ret;
397 399
398 if (state->skip_fw_load) 400 if (state->skip_fw_load || !force_fw_upload)
399 return 0; 401 return 0; /* Firmware already uploaded, skipping */
402
400 /* Load firmware */ 403 /* Load firmware */
401 /* request the firmware, this will block until someone uploads it */ 404 /* request the firmware, this will block until someone uploads it */
402 printk(KERN_INFO "%s: Waiting for firmware upload (%s)...\n", __func__, 405 printk(KERN_INFO "%s: Waiting for firmware upload (%s)...\n", __func__,
@@ -1306,6 +1309,9 @@ static struct dvb_frontend_ops ds3000_ops = {
1306module_param(debug, int, 0644); 1309module_param(debug, int, 0644);
1307MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)"); 1310MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
1308 1311
1312module_param(force_fw_upload, int, 0644);
1313MODULE_PARM_DESC(force_fw_upload, "Force firmware upload (default:0)");
1314
1309MODULE_DESCRIPTION("DVB Frontend module for Montage Technology " 1315MODULE_DESCRIPTION("DVB Frontend module for Montage Technology "
1310 "DS3000/TS2020 hardware"); 1316 "DS3000/TS2020 hardware");
1311MODULE_AUTHOR("Konstantin Dimitrov"); 1317MODULE_AUTHOR("Konstantin Dimitrov");
diff --git a/drivers/media/dvb-frontends/dvb_dummy_fe.c b/drivers/media/dvb-frontends/dvb_dummy_fe.c
index dcfc902c8678..d5acc304786b 100644
--- a/drivers/media/dvb-frontends/dvb_dummy_fe.c
+++ b/drivers/media/dvb-frontends/dvb_dummy_fe.c
@@ -121,16 +121,13 @@ struct dvb_frontend* dvb_dummy_fe_ofdm_attach(void)
121 121
122 /* allocate memory for the internal state */ 122 /* allocate memory for the internal state */
123 state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL); 123 state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
124 if (state == NULL) goto error; 124 if (!state)
125 return NULL;
125 126
126 /* create dvb_frontend */ 127 /* create dvb_frontend */
127 memcpy(&state->frontend.ops, &dvb_dummy_fe_ofdm_ops, sizeof(struct dvb_frontend_ops)); 128 memcpy(&state->frontend.ops, &dvb_dummy_fe_ofdm_ops, sizeof(struct dvb_frontend_ops));
128 state->frontend.demodulator_priv = state; 129 state->frontend.demodulator_priv = state;
129 return &state->frontend; 130 return &state->frontend;
130
131error:
132 kfree(state);
133 return NULL;
134} 131}
135 132
136static struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops; 133static struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops;
@@ -141,16 +138,13 @@ struct dvb_frontend *dvb_dummy_fe_qpsk_attach(void)
141 138
142 /* allocate memory for the internal state */ 139 /* allocate memory for the internal state */
143 state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL); 140 state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
144 if (state == NULL) goto error; 141 if (!state)
142 return NULL;
145 143
146 /* create dvb_frontend */ 144 /* create dvb_frontend */
147 memcpy(&state->frontend.ops, &dvb_dummy_fe_qpsk_ops, sizeof(struct dvb_frontend_ops)); 145 memcpy(&state->frontend.ops, &dvb_dummy_fe_qpsk_ops, sizeof(struct dvb_frontend_ops));
148 state->frontend.demodulator_priv = state; 146 state->frontend.demodulator_priv = state;
149 return &state->frontend; 147 return &state->frontend;
150
151error:
152 kfree(state);
153 return NULL;
154} 148}
155 149
156static struct dvb_frontend_ops dvb_dummy_fe_qam_ops; 150static struct dvb_frontend_ops dvb_dummy_fe_qam_ops;
@@ -161,16 +155,13 @@ struct dvb_frontend *dvb_dummy_fe_qam_attach(void)
161 155
162 /* allocate memory for the internal state */ 156 /* allocate memory for the internal state */
163 state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL); 157 state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
164 if (state == NULL) goto error; 158 if (!state)
159 return NULL;
165 160
166 /* create dvb_frontend */ 161 /* create dvb_frontend */
167 memcpy(&state->frontend.ops, &dvb_dummy_fe_qam_ops, sizeof(struct dvb_frontend_ops)); 162 memcpy(&state->frontend.ops, &dvb_dummy_fe_qam_ops, sizeof(struct dvb_frontend_ops));
168 state->frontend.demodulator_priv = state; 163 state->frontend.demodulator_priv = state;
169 return &state->frontend; 164 return &state->frontend;
170
171error:
172 kfree(state);
173 return NULL;
174} 165}
175 166
176static struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops = { 167static struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops = {
diff --git a/drivers/media/dvb-frontends/isl6405.c b/drivers/media/dvb-frontends/isl6405.c
index 33d33f4d8867..0c642a5bf823 100644
--- a/drivers/media/dvb-frontends/isl6405.c
+++ b/drivers/media/dvb-frontends/isl6405.c
@@ -77,7 +77,7 @@ static int isl6405_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage
77 break; 77 break;
78 default: 78 default:
79 return -EINVAL; 79 return -EINVAL;
80 }; 80 }
81 } 81 }
82 isl6405->config |= isl6405->override_or; 82 isl6405->config |= isl6405->override_or;
83 isl6405->config &= isl6405->override_and; 83 isl6405->config &= isl6405->override_and;
diff --git a/drivers/media/dvb-frontends/isl6421.c b/drivers/media/dvb-frontends/isl6421.c
index 684c8ec166cb..0cb3f0f74c9c 100644
--- a/drivers/media/dvb-frontends/isl6421.c
+++ b/drivers/media/dvb-frontends/isl6421.c
@@ -63,7 +63,7 @@ static int isl6421_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage
63 break; 63 break;
64 default: 64 default:
65 return -EINVAL; 65 return -EINVAL;
66 }; 66 }
67 67
68 isl6421->config |= isl6421->override_or; 68 isl6421->config |= isl6421->override_or;
69 isl6421->config &= isl6421->override_and; 69 isl6421->config &= isl6421->override_and;
diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c
index 316457584fe7..c1c3400b2173 100644
--- a/drivers/media/dvb-frontends/itd1000.c
+++ b/drivers/media/dvb-frontends/itd1000.c
@@ -231,7 +231,7 @@ static void itd1000_set_lo(struct itd1000_state *state, u32 freq_khz)
231 state->frequency = ((plln * 1000) + (pllf * 1000)/1048576) * 2*FREF; 231 state->frequency = ((plln * 1000) + (pllf * 1000)/1048576) * 2*FREF;
232 itd_dbg("frequency: %dkHz (wanted) %dkHz (set), PLLF = %d, PLLN = %d\n", freq_khz, state->frequency, pllf, plln); 232 itd_dbg("frequency: %dkHz (wanted) %dkHz (set), PLLF = %d, PLLN = %d\n", freq_khz, state->frequency, pllf, plln);
233 233
234 itd1000_write_reg(state, PLLNH, 0x80); /* PLLNH */; 234 itd1000_write_reg(state, PLLNH, 0x80); /* PLLNH */
235 itd1000_write_reg(state, PLLNL, plln & 0xff); 235 itd1000_write_reg(state, PLLNL, plln & 0xff);
236 itd1000_write_reg(state, PLLFH, (itd1000_read_reg(state, PLLFH) & 0xf0) | ((pllf >> 16) & 0x0f)); 236 itd1000_write_reg(state, PLLFH, (itd1000_read_reg(state, PLLFH) & 0xf0) | ((pllf >> 16) & 0x0f));
237 itd1000_write_reg(state, PLLFM, (pllf >> 8) & 0xff); 237 itd1000_write_reg(state, PLLFM, (pllf >> 8) & 0xff);
diff --git a/drivers/media/dvb-frontends/lg2160.c b/drivers/media/dvb-frontends/lg2160.c
index cc11260e99df..5fd14f840ab0 100644
--- a/drivers/media/dvb-frontends/lg2160.c
+++ b/drivers/media/dvb-frontends/lg2160.c
@@ -1421,8 +1421,8 @@ struct dvb_frontend *lg2160_attach(const struct lg2160_config *config,
1421 config ? config->i2c_addr : 0); 1421 config ? config->i2c_addr : 0);
1422 1422
1423 state = kzalloc(sizeof(struct lg216x_state), GFP_KERNEL); 1423 state = kzalloc(sizeof(struct lg216x_state), GFP_KERNEL);
1424 if (state == NULL) 1424 if (!state)
1425 goto fail; 1425 return NULL;
1426 1426
1427 state->cfg = config; 1427 state->cfg = config;
1428 state->i2c_adap = i2c_adap; 1428 state->i2c_adap = i2c_adap;
@@ -1449,10 +1449,6 @@ struct dvb_frontend *lg2160_attach(const struct lg2160_config *config,
1449 state->frontend.dtv_property_cache.atscmh_parade_id = 1; 1449 state->frontend.dtv_property_cache.atscmh_parade_id = 1;
1450 1450
1451 return &state->frontend; 1451 return &state->frontend;
1452fail:
1453 lg_warn("unable to detect LG216x hardware\n");
1454 kfree(state);
1455 return NULL;
1456} 1452}
1457EXPORT_SYMBOL(lg2160_attach); 1453EXPORT_SYMBOL(lg2160_attach);
1458 1454
diff --git a/drivers/media/dvb-frontends/lnbp21.c b/drivers/media/dvb-frontends/lnbp21.c
index 13437259eeac..f3ba7b5faa2e 100644
--- a/drivers/media/dvb-frontends/lnbp21.c
+++ b/drivers/media/dvb-frontends/lnbp21.c
@@ -65,7 +65,7 @@ static int lnbp21_set_voltage(struct dvb_frontend *fe,
65 break; 65 break;
66 default: 66 default:
67 return -EINVAL; 67 return -EINVAL;
68 }; 68 }
69 69
70 lnbp21->config |= lnbp21->override_or; 70 lnbp21->config |= lnbp21->override_or;
71 lnbp21->config &= lnbp21->override_and; 71 lnbp21->config &= lnbp21->override_and;
@@ -108,7 +108,7 @@ static int lnbp21_set_tone(struct dvb_frontend *fe,
108 break; 108 break;
109 default: 109 default:
110 return -EINVAL; 110 return -EINVAL;
111 }; 111 }
112 112
113 lnbp21->config |= lnbp21->override_or; 113 lnbp21->config |= lnbp21->override_or;
114 lnbp21->config &= lnbp21->override_and; 114 lnbp21->config &= lnbp21->override_and;
diff --git a/drivers/media/dvb-frontends/lnbp22.c b/drivers/media/dvb-frontends/lnbp22.c
index 84ad0390a4a1..c463da7f6dcc 100644
--- a/drivers/media/dvb-frontends/lnbp22.c
+++ b/drivers/media/dvb-frontends/lnbp22.c
@@ -73,7 +73,7 @@ static int lnbp22_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
73 break; 73 break;
74 default: 74 default:
75 return -EINVAL; 75 return -EINVAL;
76 }; 76 }
77 77
78 dprintk(1, "%s: 0x%02x)\n", __func__, lnbp22->config[3]); 78 dprintk(1, "%s: 0x%02x)\n", __func__, lnbp22->config[3]);
79 return (i2c_transfer(lnbp22->i2c, &msg, 1) == 1) ? 0 : -EIO; 79 return (i2c_transfer(lnbp22->i2c, &msg, 1) == 1) ? 0 : -EIO;
diff --git a/drivers/media/dvb-frontends/s5h1432.c b/drivers/media/dvb-frontends/s5h1432.c
index 8352ce1c9556..6ec16a243741 100644
--- a/drivers/media/dvb-frontends/s5h1432.c
+++ b/drivers/media/dvb-frontends/s5h1432.c
@@ -351,8 +351,8 @@ struct dvb_frontend *s5h1432_attach(const struct s5h1432_config *config,
351 printk(KERN_INFO " Enter s5h1432_attach(). attach success!\n"); 351 printk(KERN_INFO " Enter s5h1432_attach(). attach success!\n");
352 /* allocate memory for the internal state */ 352 /* allocate memory for the internal state */
353 state = kmalloc(sizeof(struct s5h1432_state), GFP_KERNEL); 353 state = kmalloc(sizeof(struct s5h1432_state), GFP_KERNEL);
354 if (state == NULL) 354 if (!state)
355 goto error; 355 return NULL;
356 356
357 /* setup the state */ 357 /* setup the state */
358 state->config = config; 358 state->config = config;
@@ -367,10 +367,6 @@ struct dvb_frontend *s5h1432_attach(const struct s5h1432_config *config,
367 state->frontend.demodulator_priv = state; 367 state->frontend.demodulator_priv = state;
368 368
369 return &state->frontend; 369 return &state->frontend;
370
371error:
372 kfree(state);
373 return NULL;
374} 370}
375EXPORT_SYMBOL(s5h1432_attach); 371EXPORT_SYMBOL(s5h1432_attach);
376 372
diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c
index cd2288c07147..a271ac3eaec0 100644
--- a/drivers/media/dvb-frontends/s921.c
+++ b/drivers/media/dvb-frontends/s921.c
@@ -487,9 +487,9 @@ struct dvb_frontend *s921_attach(const struct s921_config *config,
487 kzalloc(sizeof(struct s921_state), GFP_KERNEL); 487 kzalloc(sizeof(struct s921_state), GFP_KERNEL);
488 488
489 dprintk("\n"); 489 dprintk("\n");
490 if (state == NULL) { 490 if (!state) {
491 rc("Unable to kzalloc\n"); 491 rc("Unable to kzalloc\n");
492 goto rcor; 492 return NULL;
493 } 493 }
494 494
495 /* setup the state */ 495 /* setup the state */
@@ -502,11 +502,6 @@ struct dvb_frontend *s921_attach(const struct s921_config *config,
502 state->frontend.demodulator_priv = state; 502 state->frontend.demodulator_priv = state;
503 503
504 return &state->frontend; 504 return &state->frontend;
505
506rcor:
507 kfree(state);
508
509 return NULL;
510} 505}
511EXPORT_SYMBOL(s921_attach); 506EXPORT_SYMBOL(s921_attach);
512 507
diff --git a/drivers/media/dvb-frontends/si21xx.c b/drivers/media/dvb-frontends/si21xx.c
index a68a64800df7..73b47cc6a13b 100644
--- a/drivers/media/dvb-frontends/si21xx.c
+++ b/drivers/media/dvb-frontends/si21xx.c
@@ -343,7 +343,7 @@ static int si21xx_wait_diseqc_idle(struct si21xx_state *state, int timeout)
343 return -ETIMEDOUT; 343 return -ETIMEDOUT;
344 } 344 }
345 msleep(10); 345 msleep(10);
346 }; 346 }
347 347
348 return 0; 348 return 0;
349} 349}
@@ -472,7 +472,7 @@ static int si21xx_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t volt)
472 break; 472 break;
473 default: 473 default:
474 return -EINVAL; 474 return -EINVAL;
475 }; 475 }
476} 476}
477 477
478static int si21xx_init(struct dvb_frontend *fe) 478static int si21xx_init(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/sp8870.c b/drivers/media/dvb-frontends/sp8870.c
index e37274c8f14e..2aa8ef76eba2 100644
--- a/drivers/media/dvb-frontends/sp8870.c
+++ b/drivers/media/dvb-frontends/sp8870.c
@@ -188,7 +188,7 @@ static int configure_reg0xc05 (struct dtv_frontend_properties *p, u16 *reg0xc05)
188 break; 188 break;
189 default: 189 default:
190 return -EINVAL; 190 return -EINVAL;
191 }; 191 }
192 192
193 switch (p->hierarchy) { 193 switch (p->hierarchy) {
194 case HIERARCHY_NONE: 194 case HIERARCHY_NONE:
@@ -207,7 +207,7 @@ static int configure_reg0xc05 (struct dtv_frontend_properties *p, u16 *reg0xc05)
207 break; 207 break;
208 default: 208 default:
209 return -EINVAL; 209 return -EINVAL;
210 }; 210 }
211 211
212 switch (p->code_rate_HP) { 212 switch (p->code_rate_HP) {
213 case FEC_1_2: 213 case FEC_1_2:
@@ -229,7 +229,7 @@ static int configure_reg0xc05 (struct dtv_frontend_properties *p, u16 *reg0xc05)
229 break; 229 break;
230 default: 230 default:
231 return -EINVAL; 231 return -EINVAL;
232 }; 232 }
233 233
234 if (known_parameters) 234 if (known_parameters)
235 *reg0xc05 |= (2 << 1); /* use specified parameters */ 235 *reg0xc05 |= (2 << 1); /* use specified parameters */
diff --git a/drivers/media/dvb-frontends/sp887x.c b/drivers/media/dvb-frontends/sp887x.c
index f4096ccb226e..1bb81b5ae6e0 100644
--- a/drivers/media/dvb-frontends/sp887x.c
+++ b/drivers/media/dvb-frontends/sp887x.c
@@ -229,7 +229,7 @@ static int configure_reg0xc05(struct dtv_frontend_properties *p, u16 *reg0xc05)
229 break; 229 break;
230 default: 230 default:
231 return -EINVAL; 231 return -EINVAL;
232 }; 232 }
233 233
234 switch (p->hierarchy) { 234 switch (p->hierarchy) {
235 case HIERARCHY_NONE: 235 case HIERARCHY_NONE:
@@ -248,7 +248,7 @@ static int configure_reg0xc05(struct dtv_frontend_properties *p, u16 *reg0xc05)
248 break; 248 break;
249 default: 249 default:
250 return -EINVAL; 250 return -EINVAL;
251 }; 251 }
252 252
253 switch (p->code_rate_HP) { 253 switch (p->code_rate_HP) {
254 case FEC_1_2: 254 case FEC_1_2:
@@ -270,7 +270,7 @@ static int configure_reg0xc05(struct dtv_frontend_properties *p, u16 *reg0xc05)
270 break; 270 break;
271 default: 271 default:
272 return -EINVAL; 272 return -EINVAL;
273 }; 273 }
274 274
275 if (known_parameters) 275 if (known_parameters)
276 *reg0xc05 |= (2 << 1); /* use specified parameters */ 276 *reg0xc05 |= (2 << 1); /* use specified parameters */
diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
index 2e93e65d2cdb..45f9523f968f 100644
--- a/drivers/media/dvb-frontends/stb6100.c
+++ b/drivers/media/dvb-frontends/stb6100.c
@@ -575,8 +575,8 @@ struct dvb_frontend *stb6100_attach(struct dvb_frontend *fe,
575 struct stb6100_state *state = NULL; 575 struct stb6100_state *state = NULL;
576 576
577 state = kzalloc(sizeof (struct stb6100_state), GFP_KERNEL); 577 state = kzalloc(sizeof (struct stb6100_state), GFP_KERNEL);
578 if (state == NULL) 578 if (!state)
579 goto error; 579 return NULL;
580 580
581 state->config = config; 581 state->config = config;
582 state->i2c = i2c; 582 state->i2c = i2c;
@@ -587,10 +587,6 @@ struct dvb_frontend *stb6100_attach(struct dvb_frontend *fe,
587 587
588 printk("%s: Attaching STB6100 \n", __func__); 588 printk("%s: Attaching STB6100 \n", __func__);
589 return fe; 589 return fe;
590
591error:
592 kfree(state);
593 return NULL;
594} 590}
595 591
596static int stb6100_release(struct dvb_frontend *fe) 592static int stb6100_release(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/stv0299.c b/drivers/media/dvb-frontends/stv0299.c
index 057b5f8effc0..92a6075cd82f 100644
--- a/drivers/media/dvb-frontends/stv0299.c
+++ b/drivers/media/dvb-frontends/stv0299.c
@@ -199,7 +199,7 @@ static int stv0299_wait_diseqc_fifo (struct stv0299_state* state, int timeout)
199 return -ETIMEDOUT; 199 return -ETIMEDOUT;
200 } 200 }
201 msleep(10); 201 msleep(10);
202 }; 202 }
203 203
204 return 0; 204 return 0;
205} 205}
@@ -216,7 +216,7 @@ static int stv0299_wait_diseqc_idle (struct stv0299_state* state, int timeout)
216 return -ETIMEDOUT; 216 return -ETIMEDOUT;
217 } 217 }
218 msleep(10); 218 msleep(10);
219 }; 219 }
220 220
221 return 0; 221 return 0;
222} 222}
@@ -387,7 +387,7 @@ static int stv0299_set_voltage (struct dvb_frontend* fe, fe_sec_voltage_t voltag
387 break; 387 break;
388 default: 388 default:
389 return -EINVAL; 389 return -EINVAL;
390 }; 390 }
391 391
392 if (state->config->op0_off) 392 if (state->config->op0_off)
393 reg0x0c &= ~0x10; 393 reg0x0c &= ~0x10;
diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c
index 7f1badaf0d03..262dfa503c2a 100644
--- a/drivers/media/dvb-frontends/stv0900_core.c
+++ b/drivers/media/dvb-frontends/stv0900_core.c
@@ -1552,8 +1552,8 @@ static int stv0900_status(struct stv0900_internal *intp,
1552 bitrate = (stv0900_get_mclk_freq(intp, intp->quartz)/1000000) 1552 bitrate = (stv0900_get_mclk_freq(intp, intp->quartz)/1000000)
1553 * (tsbitrate1_val << 8 | tsbitrate0_val); 1553 * (tsbitrate1_val << 8 | tsbitrate0_val);
1554 bitrate /= 16384; 1554 bitrate /= 16384;
1555 dprintk("TS bitrate = %d Mbit/sec \n", bitrate); 1555 dprintk("TS bitrate = %d Mbit/sec\n", bitrate);
1556 }; 1556 }
1557 1557
1558 return locked; 1558 return locked;
1559} 1559}
diff --git a/drivers/media/dvb-frontends/tda665x.c b/drivers/media/dvb-frontends/tda665x.c
index 2c1c759a4f42..63cc12378d9a 100644
--- a/drivers/media/dvb-frontends/tda665x.c
+++ b/drivers/media/dvb-frontends/tda665x.c
@@ -228,8 +228,8 @@ struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
228 struct dvb_tuner_info *info; 228 struct dvb_tuner_info *info;
229 229
230 state = kzalloc(sizeof(struct tda665x_state), GFP_KERNEL); 230 state = kzalloc(sizeof(struct tda665x_state), GFP_KERNEL);
231 if (state == NULL) 231 if (!state)
232 goto exit; 232 return NULL;
233 233
234 state->config = config; 234 state->config = config;
235 state->i2c = i2c; 235 state->i2c = i2c;
@@ -246,10 +246,6 @@ struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
246 printk(KERN_DEBUG "%s: Attaching TDA665x (%s) tuner\n", __func__, info->name); 246 printk(KERN_DEBUG "%s: Attaching TDA665x (%s) tuner\n", __func__, info->name);
247 247
248 return fe; 248 return fe;
249
250exit:
251 kfree(state);
252 return NULL;
253} 249}
254EXPORT_SYMBOL(tda665x_attach); 250EXPORT_SYMBOL(tda665x_attach);
255 251
diff --git a/drivers/media/dvb-frontends/tda8083.c b/drivers/media/dvb-frontends/tda8083.c
index 15912c96926a..9d08350fe4b0 100644
--- a/drivers/media/dvb-frontends/tda8083.c
+++ b/drivers/media/dvb-frontends/tda8083.c
@@ -175,7 +175,7 @@ static void tda8083_wait_diseqc_fifo (struct tda8083_state* state, int timeout)
175 !(tda8083_readreg(state, 0x02) & 0x80)) 175 !(tda8083_readreg(state, 0x02) & 0x80))
176 { 176 {
177 msleep(50); 177 msleep(50);
178 }; 178 }
179} 179}
180 180
181static int tda8083_set_tone (struct tda8083_state* state, fe_sec_tone_mode_t tone) 181static int tda8083_set_tone (struct tda8083_state* state, fe_sec_tone_mode_t tone)
@@ -215,7 +215,7 @@ static int tda8083_send_diseqc_burst (struct tda8083_state* state, fe_sec_mini_c
215 break; 215 break;
216 default: 216 default:
217 return -EINVAL; 217 return -EINVAL;
218 }; 218 }
219 219
220 tda8083_wait_diseqc_fifo (state, 100); 220 tda8083_wait_diseqc_fifo (state, 100);
221 221
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index d8eac3e30a7e..2cee69e34184 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -599,7 +599,7 @@ static void cx23885_initialize(struct i2c_client *client)
599 cx25840_write4(client, 0x114, 0x01bf0c9e); 599 cx25840_write4(client, 0x114, 0x01bf0c9e);
600 cx25840_write4(client, 0x110, 0x000a030c); 600 cx25840_write4(client, 0x110, 0x000a030c);
601 break; 601 break;
602 }; 602 }
603 603
604 /* ADC2 input select */ 604 /* ADC2 input select */
605 cx25840_write(client, 0x102, 0x10); 605 cx25840_write(client, 0x102, 0x10);
diff --git a/drivers/media/i2c/m5mols/m5mols.h b/drivers/media/i2c/m5mols/m5mols.h
index 86c815be348c..90a6c520f115 100644
--- a/drivers/media/i2c/m5mols/m5mols.h
+++ b/drivers/media/i2c/m5mols/m5mols.h
@@ -16,9 +16,17 @@
16#ifndef M5MOLS_H 16#ifndef M5MOLS_H
17#define M5MOLS_H 17#define M5MOLS_H
18 18
19#include <linux/sizes.h>
19#include <media/v4l2-subdev.h> 20#include <media/v4l2-subdev.h>
20#include "m5mols_reg.h" 21#include "m5mols_reg.h"
21 22
23
24/* An amount of data transmitted in addition to the value
25 * determined by CAPP_JPEG_SIZE_MAX register.
26 */
27#define M5MOLS_JPEG_TAGS_SIZE 0x20000
28#define M5MOLS_MAIN_JPEG_SIZE_MAX (5 * SZ_1M)
29
22extern int m5mols_debug; 30extern int m5mols_debug;
23 31
24enum m5mols_restype { 32enum m5mols_restype {
@@ -67,12 +75,14 @@ struct m5mols_exif {
67/** 75/**
68 * struct m5mols_capture - Structure for the capture capability 76 * struct m5mols_capture - Structure for the capture capability
69 * @exif: EXIF information 77 * @exif: EXIF information
78 * @buf_size: internal JPEG frame buffer size, in bytes
70 * @main: size in bytes of the main image 79 * @main: size in bytes of the main image
71 * @thumb: size in bytes of the thumb image, if it was accompanied 80 * @thumb: size in bytes of the thumb image, if it was accompanied
72 * @total: total size in bytes of the produced image 81 * @total: total size in bytes of the produced image
73 */ 82 */
74struct m5mols_capture { 83struct m5mols_capture {
75 struct m5mols_exif exif; 84 struct m5mols_exif exif;
85 unsigned int buf_size;
76 u32 main; 86 u32 main;
77 u32 thumb; 87 u32 thumb;
78 u32 total; 88 u32 total;
diff --git a/drivers/media/i2c/m5mols/m5mols_capture.c b/drivers/media/i2c/m5mols/m5mols_capture.c
index cb243bd278ce..ab34ccedf31e 100644
--- a/drivers/media/i2c/m5mols/m5mols_capture.c
+++ b/drivers/media/i2c/m5mols/m5mols_capture.c
@@ -105,6 +105,7 @@ static int m5mols_capture_info(struct m5mols_info *info)
105 105
106int m5mols_start_capture(struct m5mols_info *info) 106int m5mols_start_capture(struct m5mols_info *info)
107{ 107{
108 unsigned int framesize = info->cap.buf_size - M5MOLS_JPEG_TAGS_SIZE;
108 struct v4l2_subdev *sd = &info->sd; 109 struct v4l2_subdev *sd = &info->sd;
109 int ret; 110 int ret;
110 111
@@ -121,6 +122,8 @@ int m5mols_start_capture(struct m5mols_info *info)
121 if (!ret) 122 if (!ret)
122 ret = m5mols_write(sd, CAPP_MAIN_IMAGE_SIZE, info->resolution); 123 ret = m5mols_write(sd, CAPP_MAIN_IMAGE_SIZE, info->resolution);
123 if (!ret) 124 if (!ret)
125 ret = m5mols_write(sd, CAPP_JPEG_SIZE_MAX, framesize);
126 if (!ret)
124 ret = m5mols_set_mode(info, REG_CAPTURE); 127 ret = m5mols_set_mode(info, REG_CAPTURE);
125 if (!ret) 128 if (!ret)
126 /* Wait until a frame is captured to ISP internal memory */ 129 /* Wait until a frame is captured to ISP internal memory */
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index 2f490ef26c38..8131d651de9e 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -599,6 +599,51 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
599 return ret; 599 return ret;
600} 600}
601 601
602static int m5mols_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
603 struct v4l2_mbus_frame_desc *fd)
604{
605 struct m5mols_info *info = to_m5mols(sd);
606
607 if (pad != 0 || fd == NULL)
608 return -EINVAL;
609
610 mutex_lock(&info->lock);
611 /*
612 * .get_frame_desc is only used for compressed formats,
613 * thus we always return the capture frame parameters here.
614 */
615 fd->entry[0].length = info->cap.buf_size;
616 fd->entry[0].pixelcode = info->ffmt[M5MOLS_RESTYPE_CAPTURE].code;
617 mutex_unlock(&info->lock);
618
619 fd->entry[0].flags = V4L2_MBUS_FRAME_DESC_FL_LEN_MAX;
620 fd->num_entries = 1;
621
622 return 0;
623}
624
625static int m5mols_set_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
626 struct v4l2_mbus_frame_desc *fd)
627{
628 struct m5mols_info *info = to_m5mols(sd);
629 struct v4l2_mbus_framefmt *mf = &info->ffmt[M5MOLS_RESTYPE_CAPTURE];
630
631 if (pad != 0 || fd == NULL)
632 return -EINVAL;
633
634 fd->entry[0].flags = V4L2_MBUS_FRAME_DESC_FL_LEN_MAX;
635 fd->num_entries = 1;
636 fd->entry[0].length = clamp_t(u32, fd->entry[0].length,
637 mf->width * mf->height,
638 M5MOLS_MAIN_JPEG_SIZE_MAX);
639 mutex_lock(&info->lock);
640 info->cap.buf_size = fd->entry[0].length;
641 mutex_unlock(&info->lock);
642
643 return 0;
644}
645
646
602static int m5mols_enum_mbus_code(struct v4l2_subdev *sd, 647static int m5mols_enum_mbus_code(struct v4l2_subdev *sd,
603 struct v4l2_subdev_fh *fh, 648 struct v4l2_subdev_fh *fh,
604 struct v4l2_subdev_mbus_code_enum *code) 649 struct v4l2_subdev_mbus_code_enum *code)
@@ -615,6 +660,8 @@ static struct v4l2_subdev_pad_ops m5mols_pad_ops = {
615 .enum_mbus_code = m5mols_enum_mbus_code, 660 .enum_mbus_code = m5mols_enum_mbus_code,
616 .get_fmt = m5mols_get_fmt, 661 .get_fmt = m5mols_get_fmt,
617 .set_fmt = m5mols_set_fmt, 662 .set_fmt = m5mols_set_fmt,
663 .get_frame_desc = m5mols_get_frame_desc,
664 .set_frame_desc = m5mols_set_frame_desc,
618}; 665};
619 666
620/** 667/**
diff --git a/drivers/media/i2c/m5mols/m5mols_reg.h b/drivers/media/i2c/m5mols/m5mols_reg.h
index 14d4be72aeff..58d8027508df 100644
--- a/drivers/media/i2c/m5mols/m5mols_reg.h
+++ b/drivers/media/i2c/m5mols/m5mols_reg.h
@@ -310,6 +310,7 @@
310#define REG_JPEG 0x10 310#define REG_JPEG 0x10
311 311
312#define CAPP_MAIN_IMAGE_SIZE I2C_REG(CAT_CAPT_PARM, 0x01, 1) 312#define CAPP_MAIN_IMAGE_SIZE I2C_REG(CAT_CAPT_PARM, 0x01, 1)
313#define CAPP_JPEG_SIZE_MAX I2C_REG(CAT_CAPT_PARM, 0x0f, 4)
313#define CAPP_JPEG_RATIO I2C_REG(CAT_CAPT_PARM, 0x17, 1) 314#define CAPP_JPEG_RATIO I2C_REG(CAT_CAPT_PARM, 0x17, 1)
314 315
315#define CAPP_MCC_MODE I2C_REG(CAT_CAPT_PARM, 0x1d, 1) 316#define CAPP_MCC_MODE I2C_REG(CAT_CAPT_PARM, 0x1d, 1)
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 2c0f4077c491..e32833262d32 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -574,7 +574,6 @@ static int mt9p031_set_crop(struct v4l2_subdev *subdev,
574 * V4L2 subdev control operations 574 * V4L2 subdev control operations
575 */ 575 */
576 576
577#define V4L2_CID_TEST_PATTERN (V4L2_CID_USER_BASE | 0x1001)
578#define V4L2_CID_BLC_AUTO (V4L2_CID_USER_BASE | 0x1002) 577#define V4L2_CID_BLC_AUTO (V4L2_CID_USER_BASE | 0x1002)
579#define V4L2_CID_BLC_TARGET_LEVEL (V4L2_CID_USER_BASE | 0x1003) 578#define V4L2_CID_BLC_TARGET_LEVEL (V4L2_CID_USER_BASE | 0x1003)
580#define V4L2_CID_BLC_ANALOG_OFFSET (V4L2_CID_USER_BASE | 0x1004) 579#define V4L2_CID_BLC_ANALOG_OFFSET (V4L2_CID_USER_BASE | 0x1004)
@@ -740,18 +739,6 @@ static const char * const mt9p031_test_pattern_menu[] = {
740static const struct v4l2_ctrl_config mt9p031_ctrls[] = { 739static const struct v4l2_ctrl_config mt9p031_ctrls[] = {
741 { 740 {
742 .ops = &mt9p031_ctrl_ops, 741 .ops = &mt9p031_ctrl_ops,
743 .id = V4L2_CID_TEST_PATTERN,
744 .type = V4L2_CTRL_TYPE_MENU,
745 .name = "Test Pattern",
746 .min = 0,
747 .max = ARRAY_SIZE(mt9p031_test_pattern_menu) - 1,
748 .step = 0,
749 .def = 0,
750 .flags = 0,
751 .menu_skip_mask = 0,
752 .qmenu = mt9p031_test_pattern_menu,
753 }, {
754 .ops = &mt9p031_ctrl_ops,
755 .id = V4L2_CID_BLC_AUTO, 742 .id = V4L2_CID_BLC_AUTO,
756 .type = V4L2_CTRL_TYPE_BOOLEAN, 743 .type = V4L2_CTRL_TYPE_BOOLEAN,
757 .name = "BLC, Auto", 744 .name = "BLC, Auto",
@@ -950,7 +937,7 @@ static int mt9p031_probe(struct i2c_client *client,
950 mt9p031->model = did->driver_data; 937 mt9p031->model = did->driver_data;
951 mt9p031->reset = -1; 938 mt9p031->reset = -1;
952 939
953 v4l2_ctrl_handler_init(&mt9p031->ctrls, ARRAY_SIZE(mt9p031_ctrls) + 5); 940 v4l2_ctrl_handler_init(&mt9p031->ctrls, ARRAY_SIZE(mt9p031_ctrls) + 6);
954 941
955 v4l2_ctrl_new_std(&mt9p031->ctrls, &mt9p031_ctrl_ops, 942 v4l2_ctrl_new_std(&mt9p031->ctrls, &mt9p031_ctrl_ops,
956 V4L2_CID_EXPOSURE, MT9P031_SHUTTER_WIDTH_MIN, 943 V4L2_CID_EXPOSURE, MT9P031_SHUTTER_WIDTH_MIN,
@@ -966,6 +953,10 @@ static int mt9p031_probe(struct i2c_client *client,
966 v4l2_ctrl_new_std(&mt9p031->ctrls, &mt9p031_ctrl_ops, 953 v4l2_ctrl_new_std(&mt9p031->ctrls, &mt9p031_ctrl_ops,
967 V4L2_CID_PIXEL_RATE, pdata->target_freq, 954 V4L2_CID_PIXEL_RATE, pdata->target_freq,
968 pdata->target_freq, 1, pdata->target_freq); 955 pdata->target_freq, 1, pdata->target_freq);
956 v4l2_ctrl_new_std_menu_items(&mt9p031->ctrls, &mt9p031_ctrl_ops,
957 V4L2_CID_TEST_PATTERN,
958 ARRAY_SIZE(mt9p031_test_pattern_menu) - 1, 0,
959 0, mt9p031_test_pattern_menu);
969 960
970 for (i = 0; i < ARRAY_SIZE(mt9p031_ctrls); ++i) 961 for (i = 0; i < ARRAY_SIZE(mt9p031_ctrls); ++i)
971 v4l2_ctrl_new_custom(&mt9p031->ctrls, &mt9p031_ctrls[i], NULL); 962 v4l2_ctrl_new_custom(&mt9p031->ctrls, &mt9p031_ctrls[i], NULL);
diff --git a/drivers/media/i2c/mt9t001.c b/drivers/media/i2c/mt9t001.c
index 6d343adf891d..2e189d8b71bb 100644
--- a/drivers/media/i2c/mt9t001.c
+++ b/drivers/media/i2c/mt9t001.c
@@ -371,7 +371,7 @@ static int mt9t001_set_crop(struct v4l2_subdev *subdev,
371 * V4L2 subdev control operations 371 * V4L2 subdev control operations
372 */ 372 */
373 373
374#define V4L2_CID_TEST_PATTERN (V4L2_CID_USER_BASE | 0x1001) 374#define V4L2_CID_TEST_PATTERN_COLOR (V4L2_CID_USER_BASE | 0x1001)
375#define V4L2_CID_BLACK_LEVEL_AUTO (V4L2_CID_USER_BASE | 0x1002) 375#define V4L2_CID_BLACK_LEVEL_AUTO (V4L2_CID_USER_BASE | 0x1002)
376#define V4L2_CID_BLACK_LEVEL_OFFSET (V4L2_CID_USER_BASE | 0x1003) 376#define V4L2_CID_BLACK_LEVEL_OFFSET (V4L2_CID_USER_BASE | 0x1003)
377#define V4L2_CID_BLACK_LEVEL_CALIBRATE (V4L2_CID_USER_BASE | 0x1004) 377#define V4L2_CID_BLACK_LEVEL_CALIBRATE (V4L2_CID_USER_BASE | 0x1004)
@@ -487,12 +487,11 @@ static int mt9t001_s_ctrl(struct v4l2_ctrl *ctrl)
487 ctrl->val >> 16); 487 ctrl->val >> 16);
488 488
489 case V4L2_CID_TEST_PATTERN: 489 case V4L2_CID_TEST_PATTERN:
490 ret = mt9t001_set_output_control(mt9t001, 490 return mt9t001_set_output_control(mt9t001,
491 ctrl->val ? 0 : MT9T001_OUTPUT_CONTROL_TEST_DATA, 491 ctrl->val ? 0 : MT9T001_OUTPUT_CONTROL_TEST_DATA,
492 ctrl->val ? MT9T001_OUTPUT_CONTROL_TEST_DATA : 0); 492 ctrl->val ? MT9T001_OUTPUT_CONTROL_TEST_DATA : 0);
493 if (ret < 0)
494 return ret;
495 493
494 case V4L2_CID_TEST_PATTERN_COLOR:
496 return mt9t001_write(client, MT9T001_TEST_DATA, ctrl->val << 2); 495 return mt9t001_write(client, MT9T001_TEST_DATA, ctrl->val << 2);
497 496
498 case V4L2_CID_BLACK_LEVEL_AUTO: 497 case V4L2_CID_BLACK_LEVEL_AUTO:
@@ -533,12 +532,17 @@ static struct v4l2_ctrl_ops mt9t001_ctrl_ops = {
533 .s_ctrl = mt9t001_s_ctrl, 532 .s_ctrl = mt9t001_s_ctrl,
534}; 533};
535 534
535static const char * const mt9t001_test_pattern_menu[] = {
536 "Disabled",
537 "Enabled",
538};
539
536static const struct v4l2_ctrl_config mt9t001_ctrls[] = { 540static const struct v4l2_ctrl_config mt9t001_ctrls[] = {
537 { 541 {
538 .ops = &mt9t001_ctrl_ops, 542 .ops = &mt9t001_ctrl_ops,
539 .id = V4L2_CID_TEST_PATTERN, 543 .id = V4L2_CID_TEST_PATTERN_COLOR,
540 .type = V4L2_CTRL_TYPE_INTEGER, 544 .type = V4L2_CTRL_TYPE_INTEGER,
541 .name = "Test pattern", 545 .name = "Test Pattern Color",
542 .min = 0, 546 .min = 0,
543 .max = 1023, 547 .max = 1023,
544 .step = 1, 548 .step = 1,
@@ -741,7 +745,7 @@ static int mt9t001_probe(struct i2c_client *client,
741 return -ENOMEM; 745 return -ENOMEM;
742 746
743 v4l2_ctrl_handler_init(&mt9t001->ctrls, ARRAY_SIZE(mt9t001_ctrls) + 747 v4l2_ctrl_handler_init(&mt9t001->ctrls, ARRAY_SIZE(mt9t001_ctrls) +
744 ARRAY_SIZE(mt9t001_gains) + 3); 748 ARRAY_SIZE(mt9t001_gains) + 4);
745 749
746 v4l2_ctrl_new_std(&mt9t001->ctrls, &mt9t001_ctrl_ops, 750 v4l2_ctrl_new_std(&mt9t001->ctrls, &mt9t001_ctrl_ops,
747 V4L2_CID_EXPOSURE, MT9T001_SHUTTER_WIDTH_MIN, 751 V4L2_CID_EXPOSURE, MT9T001_SHUTTER_WIDTH_MIN,
@@ -752,6 +756,10 @@ static int mt9t001_probe(struct i2c_client *client,
752 v4l2_ctrl_new_std(&mt9t001->ctrls, &mt9t001_ctrl_ops, 756 v4l2_ctrl_new_std(&mt9t001->ctrls, &mt9t001_ctrl_ops,
753 V4L2_CID_PIXEL_RATE, pdata->ext_clk, pdata->ext_clk, 757 V4L2_CID_PIXEL_RATE, pdata->ext_clk, pdata->ext_clk,
754 1, pdata->ext_clk); 758 1, pdata->ext_clk);
759 v4l2_ctrl_new_std_menu_items(&mt9t001->ctrls, &mt9t001_ctrl_ops,
760 V4L2_CID_TEST_PATTERN,
761 ARRAY_SIZE(mt9t001_test_pattern_menu) - 1, 0,
762 0, mt9t001_test_pattern_menu);
755 763
756 for (i = 0; i < ARRAY_SIZE(mt9t001_ctrls); ++i) 764 for (i = 0; i < ARRAY_SIZE(mt9t001_ctrls); ++i)
757 v4l2_ctrl_new_custom(&mt9t001->ctrls, &mt9t001_ctrls[i], NULL); 765 v4l2_ctrl_new_custom(&mt9t001->ctrls, &mt9t001_ctrls[i], NULL);
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index e2177405dad2..3f356cb28256 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -141,6 +141,10 @@ struct mt9v032 {
141 u16 chip_control; 141 u16 chip_control;
142 u16 aec_agc; 142 u16 aec_agc;
143 u16 hblank; 143 u16 hblank;
144 struct {
145 struct v4l2_ctrl *test_pattern;
146 struct v4l2_ctrl *test_pattern_color;
147 };
144}; 148};
145 149
146static struct mt9v032 *to_mt9v032(struct v4l2_subdev *sd) 150static struct mt9v032 *to_mt9v032(struct v4l2_subdev *sd)
@@ -500,7 +504,7 @@ static int mt9v032_set_crop(struct v4l2_subdev *subdev,
500 * V4L2 subdev control operations 504 * V4L2 subdev control operations
501 */ 505 */
502 506
503#define V4L2_CID_TEST_PATTERN (V4L2_CID_USER_BASE | 0x1001) 507#define V4L2_CID_TEST_PATTERN_COLOR (V4L2_CID_USER_BASE | 0x1001)
504 508
505static int mt9v032_s_ctrl(struct v4l2_ctrl *ctrl) 509static int mt9v032_s_ctrl(struct v4l2_ctrl *ctrl)
506{ 510{
@@ -545,7 +549,7 @@ static int mt9v032_s_ctrl(struct v4l2_ctrl *ctrl)
545 break; 549 break;
546 550
547 case V4L2_CID_TEST_PATTERN: 551 case V4L2_CID_TEST_PATTERN:
548 switch (ctrl->val) { 552 switch (mt9v032->test_pattern->val) {
549 case 0: 553 case 0:
550 data = 0; 554 data = 0;
551 break; 555 break;
@@ -562,13 +566,13 @@ static int mt9v032_s_ctrl(struct v4l2_ctrl *ctrl)
562 | MT9V032_TEST_PATTERN_ENABLE; 566 | MT9V032_TEST_PATTERN_ENABLE;
563 break; 567 break;
564 default: 568 default:
565 data = (ctrl->val << MT9V032_TEST_PATTERN_DATA_SHIFT) 569 data = (mt9v032->test_pattern_color->val <<
570 MT9V032_TEST_PATTERN_DATA_SHIFT)
566 | MT9V032_TEST_PATTERN_USE_DATA 571 | MT9V032_TEST_PATTERN_USE_DATA
567 | MT9V032_TEST_PATTERN_ENABLE 572 | MT9V032_TEST_PATTERN_ENABLE
568 | MT9V032_TEST_PATTERN_FLIP; 573 | MT9V032_TEST_PATTERN_FLIP;
569 break; 574 break;
570 } 575 }
571
572 return mt9v032_write(client, MT9V032_TEST_PATTERN, data); 576 return mt9v032_write(client, MT9V032_TEST_PATTERN, data);
573 } 577 }
574 578
@@ -579,18 +583,24 @@ static struct v4l2_ctrl_ops mt9v032_ctrl_ops = {
579 .s_ctrl = mt9v032_s_ctrl, 583 .s_ctrl = mt9v032_s_ctrl,
580}; 584};
581 585
582static const struct v4l2_ctrl_config mt9v032_ctrls[] = { 586static const char * const mt9v032_test_pattern_menu[] = {
583 { 587 "Disabled",
584 .ops = &mt9v032_ctrl_ops, 588 "Gray Vertical Shade",
585 .id = V4L2_CID_TEST_PATTERN, 589 "Gray Horizontal Shade",
586 .type = V4L2_CTRL_TYPE_INTEGER, 590 "Gray Diagonal Shade",
587 .name = "Test pattern", 591 "Plain",
588 .min = 0, 592};
589 .max = 1023, 593
590 .step = 1, 594static const struct v4l2_ctrl_config mt9v032_test_pattern_color = {
591 .def = 0, 595 .ops = &mt9v032_ctrl_ops,
592 .flags = 0, 596 .id = V4L2_CID_TEST_PATTERN_COLOR,
593 } 597 .type = V4L2_CTRL_TYPE_INTEGER,
598 .name = "Test Pattern Color",
599 .min = 0,
600 .max = 1023,
601 .step = 1,
602 .def = 0,
603 .flags = 0,
594}; 604};
595 605
596/* ----------------------------------------------------------------------------- 606/* -----------------------------------------------------------------------------
@@ -741,7 +751,7 @@ static int mt9v032_probe(struct i2c_client *client,
741 mutex_init(&mt9v032->power_lock); 751 mutex_init(&mt9v032->power_lock);
742 mt9v032->pdata = pdata; 752 mt9v032->pdata = pdata;
743 753
744 v4l2_ctrl_handler_init(&mt9v032->ctrls, ARRAY_SIZE(mt9v032_ctrls) + 8); 754 v4l2_ctrl_handler_init(&mt9v032->ctrls, 10);
745 755
746 v4l2_ctrl_new_std(&mt9v032->ctrls, &mt9v032_ctrl_ops, 756 v4l2_ctrl_new_std(&mt9v032->ctrls, &mt9v032_ctrl_ops,
747 V4L2_CID_AUTOGAIN, 0, 1, 1, 1); 757 V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
@@ -763,6 +773,14 @@ static int mt9v032_probe(struct i2c_client *client,
763 V4L2_CID_VBLANK, MT9V032_VERTICAL_BLANKING_MIN, 773 V4L2_CID_VBLANK, MT9V032_VERTICAL_BLANKING_MIN,
764 MT9V032_VERTICAL_BLANKING_MAX, 1, 774 MT9V032_VERTICAL_BLANKING_MAX, 1,
765 MT9V032_VERTICAL_BLANKING_DEF); 775 MT9V032_VERTICAL_BLANKING_DEF);
776 mt9v032->test_pattern = v4l2_ctrl_new_std_menu_items(&mt9v032->ctrls,
777 &mt9v032_ctrl_ops, V4L2_CID_TEST_PATTERN,
778 ARRAY_SIZE(mt9v032_test_pattern_menu) - 1, 0, 0,
779 mt9v032_test_pattern_menu);
780 mt9v032->test_pattern_color = v4l2_ctrl_new_custom(&mt9v032->ctrls,
781 &mt9v032_test_pattern_color, NULL);
782
783 v4l2_ctrl_cluster(2, &mt9v032->test_pattern);
766 784
767 mt9v032->pixel_rate = 785 mt9v032->pixel_rate =
768 v4l2_ctrl_new_std(&mt9v032->ctrls, &mt9v032_ctrl_ops, 786 v4l2_ctrl_new_std(&mt9v032->ctrls, &mt9v032_ctrl_ops,
@@ -784,8 +802,6 @@ static int mt9v032_probe(struct i2c_client *client,
784 v4l2_ctrl_cluster(2, &mt9v032->link_freq); 802 v4l2_ctrl_cluster(2, &mt9v032->link_freq);
785 } 803 }
786 804
787 for (i = 0; i < ARRAY_SIZE(mt9v032_ctrls); ++i)
788 v4l2_ctrl_new_custom(&mt9v032->ctrls, &mt9v032_ctrls[i], NULL);
789 805
790 mt9v032->subdev.ctrl_handler = &mt9v032->ctrls; 806 mt9v032->subdev.ctrl_handler = &mt9v032->ctrls;
791 807
diff --git a/drivers/media/i2c/soc_camera/ov2640.c b/drivers/media/i2c/soc_camera/ov2640.c
index 78ac5744cb5d..d2d298b6354e 100644
--- a/drivers/media/i2c/soc_camera/ov2640.c
+++ b/drivers/media/i2c/soc_camera/ov2640.c
@@ -684,6 +684,11 @@ static int ov2640_s_ctrl(struct v4l2_ctrl *ctrl)
684 &container_of(ctrl->handler, struct ov2640_priv, hdl)->subdev; 684 &container_of(ctrl->handler, struct ov2640_priv, hdl)->subdev;
685 struct i2c_client *client = v4l2_get_subdevdata(sd); 685 struct i2c_client *client = v4l2_get_subdevdata(sd);
686 u8 val; 686 u8 val;
687 int ret;
688
689 ret = i2c_smbus_write_byte_data(client, BANK_SEL, BANK_SEL_SENS);
690 if (ret < 0)
691 return ret;
687 692
688 switch (ctrl->id) { 693 switch (ctrl->id) {
689 case V4L2_CID_VFLIP: 694 case V4L2_CID_VFLIP:
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index e5c0eedebc58..c31cc04fffd2 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -28,6 +28,18 @@
28#include <media/v4l2-subdev.h> 28#include <media/v4l2-subdev.h>
29#include <media/v4l2-chip-ident.h> 29#include <media/v4l2-chip-ident.h>
30 30
31#define THS7303_CHANNEL_1 1
32#define THS7303_CHANNEL_2 2
33#define THS7303_CHANNEL_3 3
34
35enum ths7303_filter_mode {
36 THS7303_FILTER_MODE_480I_576I,
37 THS7303_FILTER_MODE_480P_576P,
38 THS7303_FILTER_MODE_720P_1080I,
39 THS7303_FILTER_MODE_1080P,
40 THS7303_FILTER_MODE_DISABLE
41};
42
31MODULE_DESCRIPTION("TI THS7303 video amplifier driver"); 43MODULE_DESCRIPTION("TI THS7303 video amplifier driver");
32MODULE_AUTHOR("Chaithrika U S"); 44MODULE_AUTHOR("Chaithrika U S");
33MODULE_LICENSE("GPL"); 45MODULE_LICENSE("GPL");
@@ -37,35 +49,96 @@ module_param(debug, int, 0644);
37MODULE_PARM_DESC(debug, "Debug level 0-1"); 49MODULE_PARM_DESC(debug, "Debug level 0-1");
38 50
39/* following function is used to set ths7303 */ 51/* following function is used to set ths7303 */
40static int ths7303_setvalue(struct v4l2_subdev *sd, v4l2_std_id std) 52int ths7303_setval(struct v4l2_subdev *sd, enum ths7303_filter_mode mode)
41{ 53{
54 u8 input_bias_chroma = 3;
55 u8 input_bias_luma = 3;
56 int disable = 0;
42 int err = 0; 57 int err = 0;
43 u8 val; 58 u8 val = 0;
44 struct i2c_client *client; 59 u8 temp;
45 60
46 client = v4l2_get_subdevdata(sd); 61 struct i2c_client *client = v4l2_get_subdevdata(sd);
47 62
48 if (std & (V4L2_STD_ALL & ~V4L2_STD_SECAM)) { 63 if (!client)
49 val = 0x02; 64 return -EINVAL;
50 v4l2_dbg(1, debug, sd, "setting value for SDTV format\n"); 65
51 } else { 66 switch (mode) {
52 val = 0x00; 67 case THS7303_FILTER_MODE_1080P:
53 v4l2_dbg(1, debug, sd, "disabling all channels\n"); 68 val = (3 << 6);
69 val |= (3 << 3);
70 break;
71 case THS7303_FILTER_MODE_720P_1080I:
72 val = (2 << 6);
73 val |= (2 << 3);
74 break;
75 case THS7303_FILTER_MODE_480P_576P:
76 val = (1 << 6);
77 val |= (1 << 3);
78 break;
79 case THS7303_FILTER_MODE_480I_576I:
80 break;
81 case THS7303_FILTER_MODE_DISABLE:
82 pr_info("mode disabled\n");
83 /* disable all channels */
84 disable = 1;
85 default:
86 /* disable all channels */
87 disable = 1;
54 } 88 }
89 /* Setup channel 2 - Luma - Green */
90 temp = val;
91 if (!disable)
92 val |= input_bias_luma;
93 err = i2c_smbus_write_byte_data(client, THS7303_CHANNEL_2, val);
94 if (err)
95 goto out;
55 96
56 err |= i2c_smbus_write_byte_data(client, 0x01, val); 97 /* setup two chroma channels */
57 err |= i2c_smbus_write_byte_data(client, 0x02, val); 98 if (!disable)
58 err |= i2c_smbus_write_byte_data(client, 0x03, val); 99 temp |= input_bias_chroma;
59 100
101 err = i2c_smbus_write_byte_data(client, THS7303_CHANNEL_1, temp);
60 if (err) 102 if (err)
61 v4l2_err(sd, "write failed\n"); 103 goto out;
62 104
105 err = i2c_smbus_write_byte_data(client, THS7303_CHANNEL_3, temp);
106 if (err)
107 goto out;
108 return err;
109out:
110 pr_info("write byte data failed\n");
63 return err; 111 return err;
64} 112}
65 113
66static int ths7303_s_std_output(struct v4l2_subdev *sd, v4l2_std_id norm) 114static int ths7303_s_std_output(struct v4l2_subdev *sd, v4l2_std_id norm)
67{ 115{
68 return ths7303_setvalue(sd, norm); 116 if (norm & (V4L2_STD_ALL & ~V4L2_STD_SECAM))
117 return ths7303_setval(sd, THS7303_FILTER_MODE_480I_576I);
118 else
119 return ths7303_setval(sd, THS7303_FILTER_MODE_DISABLE);
120}
121
122/* for setting filter for HD output */
123static int ths7303_s_dv_timings(struct v4l2_subdev *sd,
124 struct v4l2_dv_timings *dv_timings)
125{
126 u32 height = dv_timings->bt.height;
127 int interlaced = dv_timings->bt.interlaced;
128 int res = 0;
129
130 if (height == 1080 && !interlaced)
131 res = ths7303_setval(sd, THS7303_FILTER_MODE_1080P);
132 else if ((height == 720 && !interlaced) ||
133 (height == 1080 && interlaced))
134 res = ths7303_setval(sd, THS7303_FILTER_MODE_720P_1080I);
135 else if ((height == 480 || height == 576) && !interlaced)
136 res = ths7303_setval(sd, THS7303_FILTER_MODE_480P_576P);
137 else
138 /* disable all channels */
139 res = ths7303_setval(sd, THS7303_FILTER_MODE_DISABLE);
140
141 return res;
69} 142}
70 143
71static int ths7303_g_chip_ident(struct v4l2_subdev *sd, 144static int ths7303_g_chip_ident(struct v4l2_subdev *sd,
@@ -78,6 +151,7 @@ static int ths7303_g_chip_ident(struct v4l2_subdev *sd,
78 151
79static const struct v4l2_subdev_video_ops ths7303_video_ops = { 152static const struct v4l2_subdev_video_ops ths7303_video_ops = {
80 .s_std_output = ths7303_s_std_output, 153 .s_std_output = ths7303_s_std_output,
154 .s_dv_timings = ths7303_s_dv_timings,
81}; 155};
82 156
83static const struct v4l2_subdev_core_ops ths7303_core_ops = { 157static const struct v4l2_subdev_core_ops ths7303_core_ops = {
@@ -107,7 +181,7 @@ static int ths7303_probe(struct i2c_client *client,
107 181
108 v4l2_i2c_subdev_init(sd, client, &ths7303_ops); 182 v4l2_i2c_subdev_init(sd, client, &ths7303_ops);
109 183
110 return ths7303_setvalue(sd, std_id); 184 return ths7303_s_std_output(sd, std_id);
111} 185}
112 186
113static int ths7303_remove(struct i2c_client *client) 187static int ths7303_remove(struct i2c_client *client)
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index 1f3943bb87d5..d5e10215a28f 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -519,6 +519,12 @@ static int tvp514x_querystd(struct v4l2_subdev *sd, v4l2_std_id *std_id)
519 519
520 *std_id = V4L2_STD_UNKNOWN; 520 *std_id = V4L2_STD_UNKNOWN;
521 521
522 /* To query the standard the TVP514x must power on the ADCs. */
523 if (!decoder->streaming) {
524 tvp514x_s_stream(sd, 1);
525 msleep(LOCK_RETRY_DELAY);
526 }
527
522 /* query the current standard */ 528 /* query the current standard */
523 current_std = tvp514x_query_current_std(sd); 529 current_std = tvp514x_query_current_std(sd);
524 if (current_std == STD_INVALID) 530 if (current_std == STD_INVALID)
@@ -625,25 +631,12 @@ static int tvp514x_s_routing(struct v4l2_subdev *sd,
625 int err; 631 int err;
626 enum tvp514x_input input_sel; 632 enum tvp514x_input input_sel;
627 enum tvp514x_output output_sel; 633 enum tvp514x_output output_sel;
628 u8 sync_lock_status, lock_mask;
629 int try_count = LOCK_RETRY_COUNT;
630 634
631 if ((input >= INPUT_INVALID) || 635 if ((input >= INPUT_INVALID) ||
632 (output >= OUTPUT_INVALID)) 636 (output >= OUTPUT_INVALID))
633 /* Index out of bound */ 637 /* Index out of bound */
634 return -EINVAL; 638 return -EINVAL;
635 639
636 /*
637 * For the sequence streamon -> streamoff and again s_input
638 * it fails to lock the signal, since streamoff puts TVP514x
639 * into power off state which leads to failure in sub-sequent s_input.
640 *
641 * So power up the TVP514x device here, since it is important to lock
642 * the signal at this stage.
643 */
644 if (!decoder->streaming)
645 tvp514x_s_stream(sd, 1);
646
647 input_sel = input; 640 input_sel = input;
648 output_sel = output; 641 output_sel = output;
649 642
@@ -660,64 +653,6 @@ static int tvp514x_s_routing(struct v4l2_subdev *sd,
660 653
661 decoder->tvp514x_regs[REG_INPUT_SEL].val = input_sel; 654 decoder->tvp514x_regs[REG_INPUT_SEL].val = input_sel;
662 decoder->tvp514x_regs[REG_OUTPUT_FORMATTER1].val = output_sel; 655 decoder->tvp514x_regs[REG_OUTPUT_FORMATTER1].val = output_sel;
663
664 /* Clear status */
665 msleep(LOCK_RETRY_DELAY);
666 err =
667 tvp514x_write_reg(sd, REG_CLEAR_LOST_LOCK, 0x01);
668 if (err)
669 return err;
670
671 switch (input_sel) {
672 case INPUT_CVBS_VI1A:
673 case INPUT_CVBS_VI1B:
674 case INPUT_CVBS_VI1C:
675 case INPUT_CVBS_VI2A:
676 case INPUT_CVBS_VI2B:
677 case INPUT_CVBS_VI2C:
678 case INPUT_CVBS_VI3A:
679 case INPUT_CVBS_VI3B:
680 case INPUT_CVBS_VI3C:
681 case INPUT_CVBS_VI4A:
682 lock_mask = STATUS_CLR_SUBCAR_LOCK_BIT |
683 STATUS_HORZ_SYNC_LOCK_BIT |
684 STATUS_VIRT_SYNC_LOCK_BIT;
685 break;
686
687 case INPUT_SVIDEO_VI2A_VI1A:
688 case INPUT_SVIDEO_VI2B_VI1B:
689 case INPUT_SVIDEO_VI2C_VI1C:
690 case INPUT_SVIDEO_VI2A_VI3A:
691 case INPUT_SVIDEO_VI2B_VI3B:
692 case INPUT_SVIDEO_VI2C_VI3C:
693 case INPUT_SVIDEO_VI4A_VI1A:
694 case INPUT_SVIDEO_VI4A_VI1B:
695 case INPUT_SVIDEO_VI4A_VI1C:
696 case INPUT_SVIDEO_VI4A_VI3A:
697 case INPUT_SVIDEO_VI4A_VI3B:
698 case INPUT_SVIDEO_VI4A_VI3C:
699 lock_mask = STATUS_HORZ_SYNC_LOCK_BIT |
700 STATUS_VIRT_SYNC_LOCK_BIT;
701 break;
702 /* Need to add other interfaces*/
703 default:
704 return -EINVAL;
705 }
706
707 while (try_count-- > 0) {
708 /* Allow decoder to sync up with new input */
709 msleep(LOCK_RETRY_DELAY);
710
711 sync_lock_status = tvp514x_read_reg(sd,
712 REG_STATUS1);
713 if (lock_mask == (sync_lock_status & lock_mask))
714 /* Input detected */
715 break;
716 }
717
718 if (try_count < 0)
719 return -EINVAL;
720
721 decoder->input = input; 656 decoder->input = input;
722 decoder->output = output; 657 decoder->output = output;
723 658
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index b68918c97f66..56c6c77793d7 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -668,6 +668,12 @@ static const struct v4l2_queryctrl bttv_ctls[] = {
668 .default_value = 32768, 668 .default_value = 32768,
669 .type = V4L2_CTRL_TYPE_INTEGER, 669 .type = V4L2_CTRL_TYPE_INTEGER,
670 },{ 670 },{
671 .id = V4L2_CID_COLOR_KILLER,
672 .name = "Color killer",
673 .minimum = 0,
674 .maximum = 1,
675 .type = V4L2_CTRL_TYPE_BOOLEAN,
676 }, {
671 .id = V4L2_CID_HUE, 677 .id = V4L2_CID_HUE,
672 .name = "Hue", 678 .name = "Hue",
673 .minimum = 0, 679 .minimum = 0,
@@ -1474,6 +1480,9 @@ static int bttv_g_ctrl(struct file *file, void *priv,
1474 case V4L2_CID_SATURATION: 1480 case V4L2_CID_SATURATION:
1475 c->value = btv->saturation; 1481 c->value = btv->saturation;
1476 break; 1482 break;
1483 case V4L2_CID_COLOR_KILLER:
1484 c->value = btv->opt_color_killer;
1485 break;
1477 1486
1478 case V4L2_CID_AUDIO_MUTE: 1487 case V4L2_CID_AUDIO_MUTE:
1479 case V4L2_CID_AUDIO_VOLUME: 1488 case V4L2_CID_AUDIO_VOLUME:
@@ -1526,7 +1535,6 @@ static int bttv_s_ctrl(struct file *file, void *f,
1526 struct v4l2_control *c) 1535 struct v4l2_control *c)
1527{ 1536{
1528 int err; 1537 int err;
1529 int val;
1530 struct bttv_fh *fh = f; 1538 struct bttv_fh *fh = f;
1531 struct bttv *btv = fh->btv; 1539 struct bttv *btv = fh->btv;
1532 1540
@@ -1547,6 +1555,16 @@ static int bttv_s_ctrl(struct file *file, void *f,
1547 case V4L2_CID_SATURATION: 1555 case V4L2_CID_SATURATION:
1548 bt848_sat(btv, c->value); 1556 bt848_sat(btv, c->value);
1549 break; 1557 break;
1558 case V4L2_CID_COLOR_KILLER:
1559 btv->opt_color_killer = c->value;
1560 if (btv->opt_color_killer) {
1561 btor(BT848_SCLOOP_CKILL, BT848_E_SCLOOP);
1562 btor(BT848_SCLOOP_CKILL, BT848_O_SCLOOP);
1563 } else {
1564 btand(~BT848_SCLOOP_CKILL, BT848_E_SCLOOP);
1565 btand(~BT848_SCLOOP_CKILL, BT848_O_SCLOOP);
1566 }
1567 break;
1550 case V4L2_CID_AUDIO_MUTE: 1568 case V4L2_CID_AUDIO_MUTE:
1551 audio_mute(btv, c->value); 1569 audio_mute(btv, c->value);
1552 /* fall through */ 1570 /* fall through */
@@ -1564,9 +1582,13 @@ static int bttv_s_ctrl(struct file *file, void *f,
1564 1582
1565 case V4L2_CID_PRIVATE_CHROMA_AGC: 1583 case V4L2_CID_PRIVATE_CHROMA_AGC:
1566 btv->opt_chroma_agc = c->value; 1584 btv->opt_chroma_agc = c->value;
1567 val = btv->opt_chroma_agc ? BT848_SCLOOP_CAGC : 0; 1585 if (btv->opt_chroma_agc) {
1568 btwrite(val, BT848_E_SCLOOP); 1586 btor(BT848_SCLOOP_CAGC, BT848_E_SCLOOP);
1569 btwrite(val, BT848_O_SCLOOP); 1587 btor(BT848_SCLOOP_CAGC, BT848_O_SCLOOP);
1588 } else {
1589 btand(~BT848_SCLOOP_CAGC, BT848_E_SCLOOP);
1590 btand(~BT848_SCLOOP_CAGC, BT848_O_SCLOOP);
1591 }
1570 break; 1592 break;
1571 case V4L2_CID_PRIVATE_COMBFILTER: 1593 case V4L2_CID_PRIVATE_COMBFILTER:
1572 btv->opt_combfilter = c->value; 1594 btv->opt_combfilter = c->value;
diff --git a/drivers/media/pci/bt8xx/bttvp.h b/drivers/media/pci/bt8xx/bttvp.h
index 70fd4f23f605..9ec0adba236c 100644
--- a/drivers/media/pci/bt8xx/bttvp.h
+++ b/drivers/media/pci/bt8xx/bttvp.h
@@ -429,6 +429,7 @@ struct bttv {
429 int opt_lumafilter; 429 int opt_lumafilter;
430 int opt_automute; 430 int opt_automute;
431 int opt_chroma_agc; 431 int opt_chroma_agc;
432 int opt_color_killer;
432 int opt_adc_crush; 433 int opt_adc_crush;
433 int opt_vcr_hack; 434 int opt_vcr_hack;
434 int opt_whitecrush_upper; 435 int opt_whitecrush_upper;
diff --git a/drivers/media/pci/bt8xx/dst_ca.c b/drivers/media/pci/bt8xx/dst_ca.c
index ee3884fbc9ce..7d96fab7d246 100644
--- a/drivers/media/pci/bt8xx/dst_ca.c
+++ b/drivers/media/pci/bt8xx/dst_ca.c
@@ -646,7 +646,7 @@ static long dst_ca_ioctl(struct file *file, unsigned int cmd, unsigned long ioct
646 dprintk(verbose, DST_CA_INFO, 1, " -->CA_SET_PID Success !"); 646 dprintk(verbose, DST_CA_INFO, 1, " -->CA_SET_PID Success !");
647 default: 647 default:
648 result = -EOPNOTSUPP; 648 result = -EOPNOTSUPP;
649 }; 649 }
650 free_mem_and_exit: 650 free_mem_and_exit:
651 kfree (p_ca_message); 651 kfree (p_ca_message);
652 kfree (p_ca_slot_info); 652 kfree (p_ca_slot_info);
diff --git a/drivers/media/pci/cx23885/altera-ci.c b/drivers/media/pci/cx23885/altera-ci.c
index aee7f0dacff1..495781ee4711 100644
--- a/drivers/media/pci/cx23885/altera-ci.c
+++ b/drivers/media/pci/cx23885/altera-ci.c
@@ -416,7 +416,7 @@ static void netup_read_ci_status(struct work_struct *work)
416 DVB_CA_EN50221_POLL_CAM_READY : 0); 416 DVB_CA_EN50221_POLL_CAM_READY : 0);
417 ci_dbg_print("%s: setting CI[1] status = 0x%x\n", 417 ci_dbg_print("%s: setting CI[1] status = 0x%x\n",
418 __func__, inter->state[1]->status); 418 __func__, inter->state[1]->status);
419 }; 419 }
420 420
421 if (inter->state[0] != NULL) { 421 if (inter->state[0] != NULL) {
422 inter->state[0]->status = 422 inter->state[0]->status =
@@ -425,7 +425,7 @@ static void netup_read_ci_status(struct work_struct *work)
425 DVB_CA_EN50221_POLL_CAM_READY : 0); 425 DVB_CA_EN50221_POLL_CAM_READY : 0);
426 ci_dbg_print("%s: setting CI[0] status = 0x%x\n", 426 ci_dbg_print("%s: setting CI[0] status = 0x%x\n",
427 __func__, inter->state[0]->status); 427 __func__, inter->state[0]->status);
428 }; 428 }
429} 429}
430 430
431/* CI irq handler */ 431/* CI irq handler */
diff --git a/drivers/media/pci/cx23885/cimax2.c b/drivers/media/pci/cx23885/cimax2.c
index c9f15d6dec40..6617774a326a 100644
--- a/drivers/media/pci/cx23885/cimax2.c
+++ b/drivers/media/pci/cx23885/cimax2.c
@@ -193,7 +193,7 @@ int netup_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
193 0, &store, 1); 193 0, &store, 1);
194 if (ret != 0) 194 if (ret != 0)
195 return ret; 195 return ret;
196 }; 196 }
197 state->current_ci_flag = flag; 197 state->current_ci_flag = flag;
198 198
199 mutex_lock(&dev->gpio_lock); 199 mutex_lock(&dev->gpio_lock);
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 39a4a4b9ed7e..5acdf954ff6b 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -542,11 +542,13 @@ struct cx23885_board cx23885_boards[] = {
542 { 542 {
543 .type = CX23885_VMUX_COMPOSITE1, 543 .type = CX23885_VMUX_COMPOSITE1,
544 .vmux = CX25840_COMPOSITE8, 544 .vmux = CX25840_COMPOSITE8,
545 .amux = CX25840_AUDIO7,
545 }, 546 },
546 { 547 {
547 .type = CX23885_VMUX_SVIDEO, 548 .type = CX23885_VMUX_SVIDEO,
548 .vmux = CX25840_SVIDEO_LUMA3 | 549 .vmux = CX25840_SVIDEO_LUMA3 |
549 CX25840_SVIDEO_CHROMA4, 550 CX25840_SVIDEO_CHROMA4,
551 .amux = CX25840_AUDIO7,
550 }, 552 },
551 { 553 {
552 .type = CX23885_VMUX_COMPONENT, 554 .type = CX23885_VMUX_COMPONENT,
@@ -554,6 +556,7 @@ struct cx23885_board cx23885_boards[] = {
554 CX25840_VIN1_CH1 | 556 CX25840_VIN1_CH1 |
555 CX25840_VIN6_CH2 | 557 CX25840_VIN6_CH2 |
556 CX25840_VIN7_CH3, 558 CX25840_VIN7_CH3,
559 .amux = CX25840_AUDIO7,
557 }, 560 },
558 }, 561 },
559 }, 562 },
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 8c4a9a5f9a50..1a21926ca412 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -508,7 +508,8 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
508 (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1250) || 508 (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1250) ||
509 (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255) || 509 (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255) ||
510 (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255_22111) || 510 (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255_22111) ||
511 (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850)) { 511 (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850) ||
512 (dev->board == CX23885_BOARD_MYGICA_X8507)) {
512 /* Configure audio routing */ 513 /* Configure audio routing */
513 v4l2_subdev_call(dev->sd_cx25840, audio, s_routing, 514 v4l2_subdev_call(dev->sd_cx25840, audio, s_routing,
514 INPUT(input)->amux, 0, 0); 515 INPUT(input)->amux, 0, 0);
diff --git a/drivers/media/pci/cx25821/cx25821-video-upstream-ch2.c b/drivers/media/pci/cx25821/cx25821-video-upstream-ch2.c
index c8c94fbf5d8d..d33fc1a23030 100644
--- a/drivers/media/pci/cx25821/cx25821-video-upstream-ch2.c
+++ b/drivers/media/pci/cx25821/cx25821-video-upstream-ch2.c
@@ -761,7 +761,7 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
761 } 761 }
762 762
763 /* Default if filename is empty string */ 763 /* Default if filename is empty string */
764 if (strcmp(dev->input_filename_ch2, "") == 0) { 764 if (strcmp(dev->_filename_ch2, "") == 0) {
765 if (dev->_isNTSC_ch2) { 765 if (dev->_isNTSC_ch2) {
766 dev->_filename_ch2 = (dev->_pixel_format_ch2 == 766 dev->_filename_ch2 = (dev->_pixel_format_ch2 ==
767 PIXEL_FRMT_411) ? "/root/vid411.yuv" : 767 PIXEL_FRMT_411) ? "/root/vid411.yuv" :
diff --git a/drivers/media/pci/cx25821/cx25821-video-upstream.c b/drivers/media/pci/cx25821/cx25821-video-upstream.c
index 52c13e0b6492..6759fff8eb64 100644
--- a/drivers/media/pci/cx25821/cx25821-video-upstream.c
+++ b/drivers/media/pci/cx25821/cx25821-video-upstream.c
@@ -808,7 +808,7 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
808 } 808 }
809 809
810 /* Default if filename is empty string */ 810 /* Default if filename is empty string */
811 if (strcmp(dev->input_filename, "") == 0) { 811 if (strcmp(dev->_filename, "") == 0) {
812 if (dev->_isNTSC) { 812 if (dev->_isNTSC) {
813 dev->_filename = 813 dev->_filename =
814 (dev->_pixel_format == PIXEL_FRMT_411) ? 814 (dev->_pixel_format == PIXEL_FRMT_411) ?
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index def363fb71c0..62184eb919e5 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -721,7 +721,7 @@ static int vidioc_g_fmt_vid_cap (struct file *file, void *priv,
721 721
722 f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG; 722 f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
723 f->fmt.pix.bytesperline = 0; 723 f->fmt.pix.bytesperline = 0;
724 f->fmt.pix.sizeimage = 188 * 4 * mpegbufs; /* 188 * 4 * 1024; */; 724 f->fmt.pix.sizeimage = 188 * 4 * mpegbufs; /* 188 * 4 * 1024; */
725 f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; 725 f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
726 f->fmt.pix.width = dev->width; 726 f->fmt.pix.width = dev->width;
727 f->fmt.pix.height = dev->height; 727 f->fmt.pix.height = dev->height;
@@ -739,7 +739,7 @@ static int vidioc_try_fmt_vid_cap (struct file *file, void *priv,
739 739
740 f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG; 740 f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
741 f->fmt.pix.bytesperline = 0; 741 f->fmt.pix.bytesperline = 0;
742 f->fmt.pix.sizeimage = 188 * 4 * mpegbufs; /* 188 * 4 * 1024; */; 742 f->fmt.pix.sizeimage = 188 * 4 * mpegbufs; /* 188 * 4 * 1024; */
743 f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; 743 f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
744 dprintk(1, "VIDIOC_TRY_FMT: w: %d, h: %d, f: %d\n", 744 dprintk(1, "VIDIOC_TRY_FMT: w: %d, h: %d, f: %d\n",
745 dev->width, dev->height, fh->mpegq.field ); 745 dev->width, dev->height, fh->mpegq.field );
@@ -755,7 +755,7 @@ static int vidioc_s_fmt_vid_cap (struct file *file, void *priv,
755 755
756 f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG; 756 f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
757 f->fmt.pix.bytesperline = 0; 757 f->fmt.pix.bytesperline = 0;
758 f->fmt.pix.sizeimage = 188 * 4 * mpegbufs; /* 188 * 4 * 1024; */; 758 f->fmt.pix.sizeimage = 188 * 4 * mpegbufs; /* 188 * 4 * 1024; */
759 f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; 759 f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
760 dev->width = f->fmt.pix.width; 760 dev->width = f->fmt.pix.width;
761 dev->height = f->fmt.pix.height; 761 dev->height = f->fmt.pix.height;
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index d803bba09525..666f83b2f3c0 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -896,7 +896,7 @@ static int samsung_smt_7020_set_voltage(struct dvb_frontend *fe,
896 break; 896 break;
897 default: 897 default:
898 return -EINVAL; 898 return -EINVAL;
899 }; 899 }
900 900
901 return (i2c_transfer(&dev->core->i2c_adap, &msg, 1) == 1) ? 0 : -EIO; 901 return (i2c_transfer(&dev->core->i2c_adap, &msg, 1) == 1) ? 0 : -EIO;
902} 902}
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index c04fb618e10b..d154bc197356 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -450,7 +450,7 @@ static irqreturn_t cx8802_irq(int irq, void *dev_id)
450 cx88_core_irq(core,status); 450 cx88_core_irq(core,status);
451 if (status & PCI_INT_TSINT) 451 if (status & PCI_INT_TSINT)
452 cx8802_mpeg_irq(dev); 452 cx8802_mpeg_irq(dev);
453 }; 453 }
454 if (MAX_IRQ_LOOP == loop) { 454 if (MAX_IRQ_LOOP == loop) {
455 dprintk( 0, "clearing mask\n" ); 455 dprintk( 0, "clearing mask\n" );
456 printk(KERN_WARNING "%s/0: irq loop -- clearing mask\n", 456 printk(KERN_WARNING "%s/0: irq loop -- clearing mask\n",
diff --git a/drivers/media/pci/cx88/cx88-tvaudio.c b/drivers/media/pci/cx88/cx88-tvaudio.c
index 770ec05b5e9b..424fd97495dc 100644
--- a/drivers/media/pci/cx88/cx88-tvaudio.c
+++ b/drivers/media/pci/cx88/cx88-tvaudio.c
@@ -373,7 +373,7 @@ static void set_audio_standard_NICAM(struct cx88_core *core, u32 mode)
373 set_audio_registers(core, nicam_bgdki_common); 373 set_audio_registers(core, nicam_bgdki_common);
374 set_audio_registers(core, nicam_default); 374 set_audio_registers(core, nicam_default);
375 break; 375 break;
376 }; 376 }
377 377
378 mode |= EN_DMTRX_LR | EN_DMTRX_BYPASS; 378 mode |= EN_DMTRX_LR | EN_DMTRX_BYPASS;
379 set_audio_finish(core, mode); 379 set_audio_finish(core, mode);
@@ -639,7 +639,7 @@ static void set_audio_standard_A2(struct cx88_core *core, u32 mode)
639 dprintk("%s Warning: wrong value\n", __func__); 639 dprintk("%s Warning: wrong value\n", __func__);
640 return; 640 return;
641 break; 641 break;
642 }; 642 }
643 643
644 mode |= EN_FMRADIO_EN_RDS | EN_DMTRX_SUMDIFF; 644 mode |= EN_FMRADIO_EN_RDS | EN_DMTRX_SUMDIFF;
645 set_audio_finish(core, mode); 645 set_audio_finish(core, mode);
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index a146d50d7795..05171457bf28 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1535,7 +1535,7 @@ static irqreturn_t cx8800_irq(int irq, void *dev_id)
1535 cx88_core_irq(core,status); 1535 cx88_core_irq(core,status);
1536 if (status & PCI_INT_VIDINT) 1536 if (status & PCI_INT_VIDINT)
1537 cx8800_vid_irq(dev); 1537 cx8800_vid_irq(dev);
1538 }; 1538 }
1539 if (10 == loop) { 1539 if (10 == loop) {
1540 printk(KERN_WARNING "%s/0: irq loop -- clearing mask\n", 1540 printk(KERN_WARNING "%s/0: irq loop -- clearing mask\n",
1541 core->name); 1541 core->name);
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 22f8758d047f..4a77124ee70e 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1204,7 +1204,7 @@ int saa7134_s_ctrl_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, str
1204 break; 1204 break;
1205 default: 1205 default:
1206 /* nothing */; 1206 /* nothing */;
1207 }; 1207 }
1208 switch (c->id) { 1208 switch (c->id) {
1209 case V4L2_CID_BRIGHTNESS: 1209 case V4L2_CID_BRIGHTNESS:
1210 dev->ctl_bright = c->value; 1210 dev->ctl_bright = c->value;
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index f588d6296c76..181c7686e412 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -165,12 +165,12 @@ config VIDEO_SAMSUNG_S5P_JPEG
165 This is a v4l2 driver for Samsung S5P and EXYNOS4 JPEG codec 165 This is a v4l2 driver for Samsung S5P and EXYNOS4 JPEG codec
166 166
167config VIDEO_SAMSUNG_S5P_MFC 167config VIDEO_SAMSUNG_S5P_MFC
168 tristate "Samsung S5P MFC 5.1 Video Codec" 168 tristate "Samsung S5P MFC Video Codec"
169 depends on VIDEO_DEV && VIDEO_V4L2 && PLAT_S5P 169 depends on VIDEO_DEV && VIDEO_V4L2 && PLAT_S5P
170 select VIDEOBUF2_DMA_CONTIG 170 select VIDEOBUF2_DMA_CONTIG
171 default n 171 default n
172 help 172 help
173 MFC 5.1 driver for V4L2. 173 MFC 5.1 and 6.x driver for V4L2
174 174
175config VIDEO_MX2_EMMAPRP 175config VIDEO_MX2_EMMAPRP
176 tristate "MX2 eMMa-PrP support" 176 tristate "MX2 eMMa-PrP support"
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index c4a82a1a8a97..69d7a58c92c3 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -174,26 +174,6 @@ static int vpbe_get_current_mode_info(struct vpbe_device *vpbe_dev,
174 return 0; 174 return 0;
175} 175}
176 176
177static int vpbe_get_dv_preset_info(struct vpbe_device *vpbe_dev,
178 unsigned int dv_preset)
179{
180 struct vpbe_config *cfg = vpbe_dev->cfg;
181 struct vpbe_enc_mode_info var;
182 int curr_output = vpbe_dev->current_out_index;
183 int i;
184
185 for (i = 0; i < vpbe_dev->cfg->outputs[curr_output].num_modes; i++) {
186 var = cfg->outputs[curr_output].modes[i];
187 if ((var.timings_type & VPBE_ENC_DV_PRESET) &&
188 (var.timings.dv_preset == dv_preset)) {
189 vpbe_dev->current_timings = var;
190 return 0;
191 }
192 }
193
194 return -EINVAL;
195}
196
197/* Get std by std id */ 177/* Get std by std id */
198static int vpbe_get_std_info(struct vpbe_device *vpbe_dev, 178static int vpbe_get_std_info(struct vpbe_device *vpbe_dev,
199 v4l2_std_id std_id) 179 v4l2_std_id std_id)
@@ -206,7 +186,7 @@ static int vpbe_get_std_info(struct vpbe_device *vpbe_dev,
206 for (i = 0; i < vpbe_dev->cfg->outputs[curr_output].num_modes; i++) { 186 for (i = 0; i < vpbe_dev->cfg->outputs[curr_output].num_modes; i++) {
207 var = cfg->outputs[curr_output].modes[i]; 187 var = cfg->outputs[curr_output].modes[i];
208 if ((var.timings_type & VPBE_ENC_STD) && 188 if ((var.timings_type & VPBE_ENC_STD) &&
209 (var.timings.std_id & std_id)) { 189 (var.std_id & std_id)) {
210 vpbe_dev->current_timings = var; 190 vpbe_dev->current_timings = var;
211 return 0; 191 return 0;
212 } 192 }
@@ -344,38 +324,42 @@ static unsigned int vpbe_get_output(struct vpbe_device *vpbe_dev)
344} 324}
345 325
346/** 326/**
347 * vpbe_s_dv_preset - Set the given preset timings in the encoder 327 * vpbe_s_dv_timings - Set the given preset timings in the encoder
348 * 328 *
349 * Sets the preset if supported by the current encoder. Return the status. 329 * Sets the timings if supported by the current encoder. Return the status.
350 * 0 - success & -EINVAL on error 330 * 0 - success & -EINVAL on error
351 */ 331 */
352static int vpbe_s_dv_preset(struct vpbe_device *vpbe_dev, 332static int vpbe_s_dv_timings(struct vpbe_device *vpbe_dev,
353 struct v4l2_dv_preset *dv_preset) 333 struct v4l2_dv_timings *dv_timings)
354{ 334{
355 struct vpbe_config *cfg = vpbe_dev->cfg; 335 struct vpbe_config *cfg = vpbe_dev->cfg;
356 int out_index = vpbe_dev->current_out_index; 336 int out_index = vpbe_dev->current_out_index;
337 struct vpbe_output *output = &cfg->outputs[out_index];
357 int sd_index = vpbe_dev->current_sd_index; 338 int sd_index = vpbe_dev->current_sd_index;
358 int ret; 339 int ret, i;
359 340
360 341
361 if (!(cfg->outputs[out_index].output.capabilities & 342 if (!(cfg->outputs[out_index].output.capabilities &
362 V4L2_OUT_CAP_PRESETS)) 343 V4L2_OUT_CAP_DV_TIMINGS))
363 return -EINVAL; 344 return -EINVAL;
364 345
365 ret = vpbe_get_dv_preset_info(vpbe_dev, dv_preset->preset); 346 for (i = 0; i < output->num_modes; i++) {
366 347 if (output->modes[i].timings_type == VPBE_ENC_CUSTOM_TIMINGS &&
367 if (ret) 348 !memcmp(&output->modes[i].dv_timings,
368 return ret; 349 dv_timings, sizeof(*dv_timings)))
369 350 break;
351 }
352 if (i >= output->num_modes)
353 return -EINVAL;
354 vpbe_dev->current_timings = output->modes[i];
370 mutex_lock(&vpbe_dev->lock); 355 mutex_lock(&vpbe_dev->lock);
371 356
372
373 ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video, 357 ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
374 s_dv_preset, dv_preset); 358 s_dv_timings, dv_timings);
375 if (!ret && (vpbe_dev->amp != NULL)) { 359 if (!ret && (vpbe_dev->amp != NULL)) {
376 /* Call amplifier subdevice */ 360 /* Call amplifier subdevice */
377 ret = v4l2_subdev_call(vpbe_dev->amp, video, 361 ret = v4l2_subdev_call(vpbe_dev->amp, video,
378 s_dv_preset, dv_preset); 362 s_dv_timings, dv_timings);
379 } 363 }
380 /* set the lcd controller output for the given mode */ 364 /* set the lcd controller output for the given mode */
381 if (!ret) { 365 if (!ret) {
@@ -392,17 +376,17 @@ static int vpbe_s_dv_preset(struct vpbe_device *vpbe_dev,
392} 376}
393 377
394/** 378/**
395 * vpbe_g_dv_preset - Get the preset in the current encoder 379 * vpbe_g_dv_timings - Get the timings in the current encoder
396 * 380 *
397 * Get the preset in the current encoder. Return the status. 0 - success 381 * Get the timings in the current encoder. Return the status. 0 - success
398 * -EINVAL on error 382 * -EINVAL on error
399 */ 383 */
400static int vpbe_g_dv_preset(struct vpbe_device *vpbe_dev, 384static int vpbe_g_dv_timings(struct vpbe_device *vpbe_dev,
401 struct v4l2_dv_preset *dv_preset) 385 struct v4l2_dv_timings *dv_timings)
402{ 386{
403 if (vpbe_dev->current_timings.timings_type & 387 if (vpbe_dev->current_timings.timings_type &
404 VPBE_ENC_DV_PRESET) { 388 VPBE_ENC_CUSTOM_TIMINGS) {
405 dv_preset->preset = vpbe_dev->current_timings.timings.dv_preset; 389 *dv_timings = vpbe_dev->current_timings.dv_timings;
406 return 0; 390 return 0;
407 } 391 }
408 392
@@ -410,13 +394,13 @@ static int vpbe_g_dv_preset(struct vpbe_device *vpbe_dev,
410} 394}
411 395
412/** 396/**
413 * vpbe_enum_dv_presets - Enumerate the dv presets in the current encoder 397 * vpbe_enum_dv_timings - Enumerate the dv timings in the current encoder
414 * 398 *
415 * Get the preset in the current encoder. Return the status. 0 - success 399 * Get the timings in the current encoder. Return the status. 0 - success
416 * -EINVAL on error 400 * -EINVAL on error
417 */ 401 */
418static int vpbe_enum_dv_presets(struct vpbe_device *vpbe_dev, 402static int vpbe_enum_dv_timings(struct vpbe_device *vpbe_dev,
419 struct v4l2_dv_enum_preset *preset_info) 403 struct v4l2_enum_dv_timings *timings)
420{ 404{
421 struct vpbe_config *cfg = vpbe_dev->cfg; 405 struct vpbe_config *cfg = vpbe_dev->cfg;
422 int out_index = vpbe_dev->current_out_index; 406 int out_index = vpbe_dev->current_out_index;
@@ -424,12 +408,12 @@ static int vpbe_enum_dv_presets(struct vpbe_device *vpbe_dev,
424 int j = 0; 408 int j = 0;
425 int i; 409 int i;
426 410
427 if (!(output->output.capabilities & V4L2_OUT_CAP_PRESETS)) 411 if (!(output->output.capabilities & V4L2_OUT_CAP_DV_TIMINGS))
428 return -EINVAL; 412 return -EINVAL;
429 413
430 for (i = 0; i < output->num_modes; i++) { 414 for (i = 0; i < output->num_modes; i++) {
431 if (output->modes[i].timings_type == VPBE_ENC_DV_PRESET) { 415 if (output->modes[i].timings_type == VPBE_ENC_CUSTOM_TIMINGS) {
432 if (j == preset_info->index) 416 if (j == timings->index)
433 break; 417 break;
434 j++; 418 j++;
435 } 419 }
@@ -437,9 +421,8 @@ static int vpbe_enum_dv_presets(struct vpbe_device *vpbe_dev,
437 421
438 if (i == output->num_modes) 422 if (i == output->num_modes)
439 return -EINVAL; 423 return -EINVAL;
440 424 timings->timings = output->modes[i].dv_timings;
441 return v4l_fill_dv_preset_info(output->modes[i].timings.dv_preset, 425 return 0;
442 preset_info);
443} 426}
444 427
445/** 428/**
@@ -489,10 +472,10 @@ static int vpbe_s_std(struct vpbe_device *vpbe_dev, v4l2_std_id *std_id)
489 */ 472 */
490static int vpbe_g_std(struct vpbe_device *vpbe_dev, v4l2_std_id *std_id) 473static int vpbe_g_std(struct vpbe_device *vpbe_dev, v4l2_std_id *std_id)
491{ 474{
492 struct vpbe_enc_mode_info cur_timings = vpbe_dev->current_timings; 475 struct vpbe_enc_mode_info *cur_timings = &vpbe_dev->current_timings;
493 476
494 if (cur_timings.timings_type & VPBE_ENC_STD) { 477 if (cur_timings->timings_type & VPBE_ENC_STD) {
495 *std_id = cur_timings.timings.std_id; 478 *std_id = cur_timings->std_id;
496 return 0; 479 return 0;
497 } 480 }
498 481
@@ -511,7 +494,7 @@ static int vpbe_set_mode(struct vpbe_device *vpbe_dev,
511{ 494{
512 struct vpbe_enc_mode_info *preset_mode = NULL; 495 struct vpbe_enc_mode_info *preset_mode = NULL;
513 struct vpbe_config *cfg = vpbe_dev->cfg; 496 struct vpbe_config *cfg = vpbe_dev->cfg;
514 struct v4l2_dv_preset dv_preset; 497 struct v4l2_dv_timings dv_timings;
515 struct osd_state *osd_device; 498 struct osd_state *osd_device;
516 int out_index = vpbe_dev->current_out_index; 499 int out_index = vpbe_dev->current_out_index;
517 int ret = 0; 500 int ret = 0;
@@ -530,11 +513,12 @@ static int vpbe_set_mode(struct vpbe_device *vpbe_dev,
530 */ 513 */
531 if (preset_mode->timings_type & VPBE_ENC_STD) 514 if (preset_mode->timings_type & VPBE_ENC_STD)
532 return vpbe_s_std(vpbe_dev, 515 return vpbe_s_std(vpbe_dev,
533 &preset_mode->timings.std_id); 516 &preset_mode->std_id);
534 if (preset_mode->timings_type & VPBE_ENC_DV_PRESET) { 517 if (preset_mode->timings_type &
535 dv_preset.preset = 518 VPBE_ENC_CUSTOM_TIMINGS) {
536 preset_mode->timings.dv_preset; 519 dv_timings =
537 return vpbe_s_dv_preset(vpbe_dev, &dv_preset); 520 preset_mode->dv_timings;
521 return vpbe_s_dv_timings(vpbe_dev, &dv_timings);
538 } 522 }
539 } 523 }
540 } 524 }
@@ -626,11 +610,11 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
626 vpbe_dev->dac_clk = clk_get(vpbe_dev->pdev, "vpss_dac"); 610 vpbe_dev->dac_clk = clk_get(vpbe_dev->pdev, "vpss_dac");
627 if (IS_ERR(vpbe_dev->dac_clk)) { 611 if (IS_ERR(vpbe_dev->dac_clk)) {
628 ret = PTR_ERR(vpbe_dev->dac_clk); 612 ret = PTR_ERR(vpbe_dev->dac_clk);
629 goto vpbe_unlock; 613 goto fail_mutex_unlock;
630 } 614 }
631 if (clk_enable(vpbe_dev->dac_clk)) { 615 if (clk_enable(vpbe_dev->dac_clk)) {
632 ret = -ENODEV; 616 ret = -ENODEV;
633 goto vpbe_unlock; 617 goto fail_mutex_unlock;
634 } 618 }
635 } 619 }
636 620
@@ -642,7 +626,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
642 if (ret) { 626 if (ret) {
643 v4l2_err(dev->driver, 627 v4l2_err(dev->driver,
644 "Unable to register v4l2 device.\n"); 628 "Unable to register v4l2 device.\n");
645 goto vpbe_fail_clock; 629 goto fail_clk_put;
646 } 630 }
647 v4l2_info(&vpbe_dev->v4l2_dev, "vpbe v4l2 device registered\n"); 631 v4l2_info(&vpbe_dev->v4l2_dev, "vpbe v4l2 device registered\n");
648 632
@@ -658,7 +642,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
658 v4l2_err(&vpbe_dev->v4l2_dev, 642 v4l2_err(&vpbe_dev->v4l2_dev,
659 "vpbe unable to init venc sub device\n"); 643 "vpbe unable to init venc sub device\n");
660 ret = -ENODEV; 644 ret = -ENODEV;
661 goto vpbe_fail_v4l2_device; 645 goto fail_dev_unregister;
662 } 646 }
663 /* initialize osd device */ 647 /* initialize osd device */
664 osd_device = vpbe_dev->osd_device; 648 osd_device = vpbe_dev->osd_device;
@@ -669,7 +653,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
669 v4l2_err(&vpbe_dev->v4l2_dev, 653 v4l2_err(&vpbe_dev->v4l2_dev,
670 "unable to initialize the OSD device"); 654 "unable to initialize the OSD device");
671 err = -ENOMEM; 655 err = -ENOMEM;
672 goto vpbe_fail_v4l2_device; 656 goto fail_dev_unregister;
673 } 657 }
674 } 658 }
675 659
@@ -685,7 +669,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
685 v4l2_err(&vpbe_dev->v4l2_dev, 669 v4l2_err(&vpbe_dev->v4l2_dev,
686 "unable to allocate memory for encoders sub devices"); 670 "unable to allocate memory for encoders sub devices");
687 ret = -ENOMEM; 671 ret = -ENOMEM;
688 goto vpbe_fail_v4l2_device; 672 goto fail_dev_unregister;
689 } 673 }
690 674
691 i2c_adap = i2c_get_adapter(vpbe_dev->cfg->i2c_adapter_id); 675 i2c_adap = i2c_get_adapter(vpbe_dev->cfg->i2c_adapter_id);
@@ -711,7 +695,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
711 " failed to register", 695 " failed to register",
712 enc_info->module_name); 696 enc_info->module_name);
713 ret = -ENODEV; 697 ret = -ENODEV;
714 goto vpbe_fail_sd_register; 698 goto fail_kfree_encoders;
715 } 699 }
716 } else 700 } else
717 v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c encoders" 701 v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c encoders"
@@ -730,7 +714,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
730 "amplifier %s failed to register", 714 "amplifier %s failed to register",
731 amp_info->module_name); 715 amp_info->module_name);
732 ret = -ENODEV; 716 ret = -ENODEV;
733 goto vpbe_fail_amp_register; 717 goto fail_kfree_encoders;
734 } 718 }
735 v4l2_info(&vpbe_dev->v4l2_dev, 719 v4l2_info(&vpbe_dev->v4l2_dev,
736 "v4l2 sub device %s registered\n", 720 "v4l2 sub device %s registered\n",
@@ -770,16 +754,14 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
770 /* TBD handling of bootargs for default output and mode */ 754 /* TBD handling of bootargs for default output and mode */
771 return 0; 755 return 0;
772 756
773vpbe_fail_amp_register: 757fail_kfree_encoders:
774 kfree(vpbe_dev->amp);
775vpbe_fail_sd_register:
776 kfree(vpbe_dev->encoders); 758 kfree(vpbe_dev->encoders);
777vpbe_fail_v4l2_device: 759fail_dev_unregister:
778 v4l2_device_unregister(&vpbe_dev->v4l2_dev); 760 v4l2_device_unregister(&vpbe_dev->v4l2_dev);
779vpbe_fail_clock: 761fail_clk_put:
780 if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) 762 if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0)
781 clk_put(vpbe_dev->dac_clk); 763 clk_put(vpbe_dev->dac_clk);
782vpbe_unlock: 764fail_mutex_unlock:
783 mutex_unlock(&vpbe_dev->lock); 765 mutex_unlock(&vpbe_dev->lock);
784 return ret; 766 return ret;
785} 767}
@@ -810,9 +792,9 @@ static struct vpbe_device_ops vpbe_dev_ops = {
810 .enum_outputs = vpbe_enum_outputs, 792 .enum_outputs = vpbe_enum_outputs,
811 .set_output = vpbe_set_output, 793 .set_output = vpbe_set_output,
812 .get_output = vpbe_get_output, 794 .get_output = vpbe_get_output,
813 .s_dv_preset = vpbe_s_dv_preset, 795 .s_dv_timings = vpbe_s_dv_timings,
814 .g_dv_preset = vpbe_g_dv_preset, 796 .g_dv_timings = vpbe_g_dv_timings,
815 .enum_dv_presets = vpbe_enum_dv_presets, 797 .enum_dv_timings = vpbe_enum_dv_timings,
816 .s_std = vpbe_s_std, 798 .s_std = vpbe_s_std,
817 .g_std = vpbe_g_std, 799 .g_std = vpbe_g_std,
818 .initialize = vpbe_initialize, 800 .initialize = vpbe_initialize,
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index 239f37bfa313..161c77650e2f 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -393,7 +393,7 @@ vpbe_disp_calculate_scale_factor(struct vpbe_display *disp_dev,
393 int h_scale; 393 int h_scale;
394 int v_scale; 394 int v_scale;
395 395
396 v4l2_std_id standard_id = vpbe_dev->current_timings.timings.std_id; 396 v4l2_std_id standard_id = vpbe_dev->current_timings.std_id;
397 397
398 /* 398 /*
399 * Application initially set the image format. Current display 399 * Application initially set the image format. Current display
@@ -637,7 +637,7 @@ static int vpbe_display_s_crop(struct file *file, void *priv,
637 struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev; 637 struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
638 struct osd_layer_config *cfg = &layer->layer_info.config; 638 struct osd_layer_config *cfg = &layer->layer_info.config;
639 struct osd_state *osd_device = disp_dev->osd_device; 639 struct osd_state *osd_device = disp_dev->osd_device;
640 struct v4l2_rect *rect = &crop->c; 640 struct v4l2_rect rect = crop->c;
641 int ret; 641 int ret;
642 642
643 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, 643 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
@@ -648,21 +648,21 @@ static int vpbe_display_s_crop(struct file *file, void *priv,
648 return -EINVAL; 648 return -EINVAL;
649 } 649 }
650 650
651 if (rect->top < 0) 651 if (rect.top < 0)
652 rect->top = 0; 652 rect.top = 0;
653 if (rect->left < 0) 653 if (rect.left < 0)
654 rect->left = 0; 654 rect.left = 0;
655 655
656 vpbe_disp_check_window_params(disp_dev, rect); 656 vpbe_disp_check_window_params(disp_dev, &rect);
657 657
658 osd_device->ops.get_layer_config(osd_device, 658 osd_device->ops.get_layer_config(osd_device,
659 layer->layer_info.id, cfg); 659 layer->layer_info.id, cfg);
660 660
661 vpbe_disp_calculate_scale_factor(disp_dev, layer, 661 vpbe_disp_calculate_scale_factor(disp_dev, layer,
662 rect->width, 662 rect.width,
663 rect->height); 663 rect.height);
664 vpbe_disp_adj_position(disp_dev, layer, rect->top, 664 vpbe_disp_adj_position(disp_dev, layer, rect.top,
665 rect->left); 665 rect.left);
666 ret = osd_device->ops.set_layer_config(osd_device, 666 ret = osd_device->ops.set_layer_config(osd_device,
667 layer->layer_info.id, cfg); 667 layer->layer_info.id, cfg);
668 if (ret < 0) { 668 if (ret < 0) {
@@ -943,7 +943,7 @@ static int vpbe_display_g_std(struct file *file, void *priv,
943 943
944 /* Get the standard from the current encoder */ 944 /* Get the standard from the current encoder */
945 if (vpbe_dev->current_timings.timings_type & VPBE_ENC_STD) { 945 if (vpbe_dev->current_timings.timings_type & VPBE_ENC_STD) {
946 *std_id = vpbe_dev->current_timings.timings.std_id; 946 *std_id = vpbe_dev->current_timings.std_id;
947 return 0; 947 return 0;
948 } 948 }
949 949
@@ -1029,29 +1029,29 @@ static int vpbe_display_g_output(struct file *file, void *priv,
1029} 1029}
1030 1030
1031/** 1031/**
1032 * vpbe_display_enum_dv_presets - Enumerate the dv presets 1032 * vpbe_display_enum_dv_timings - Enumerate the dv timings
1033 * 1033 *
1034 * enum the preset in the current encoder. Return the status. 0 - success 1034 * enum the timings in the current encoder. Return the status. 0 - success
1035 * -EINVAL on error 1035 * -EINVAL on error
1036 */ 1036 */
1037static int 1037static int
1038vpbe_display_enum_dv_presets(struct file *file, void *priv, 1038vpbe_display_enum_dv_timings(struct file *file, void *priv,
1039 struct v4l2_dv_enum_preset *preset) 1039 struct v4l2_enum_dv_timings *timings)
1040{ 1040{
1041 struct vpbe_fh *fh = priv; 1041 struct vpbe_fh *fh = priv;
1042 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev; 1042 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1043 int ret; 1043 int ret;
1044 1044
1045 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_DV_PRESETS\n"); 1045 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_DV_TIMINGS\n");
1046 1046
1047 /* Enumerate outputs */ 1047 /* Enumerate outputs */
1048 if (NULL == vpbe_dev->ops.enum_dv_presets) 1048 if (NULL == vpbe_dev->ops.enum_dv_timings)
1049 return -EINVAL; 1049 return -EINVAL;
1050 1050
1051 ret = vpbe_dev->ops.enum_dv_presets(vpbe_dev, preset); 1051 ret = vpbe_dev->ops.enum_dv_timings(vpbe_dev, timings);
1052 if (ret) { 1052 if (ret) {
1053 v4l2_err(&vpbe_dev->v4l2_dev, 1053 v4l2_err(&vpbe_dev->v4l2_dev,
1054 "Failed to enumerate dv presets info\n"); 1054 "Failed to enumerate dv timings info\n");
1055 return -EINVAL; 1055 return -EINVAL;
1056 } 1056 }
1057 1057
@@ -1059,21 +1059,21 @@ vpbe_display_enum_dv_presets(struct file *file, void *priv,
1059} 1059}
1060 1060
1061/** 1061/**
1062 * vpbe_display_s_dv_preset - Set the dv presets 1062 * vpbe_display_s_dv_timings - Set the dv timings
1063 * 1063 *
1064 * Set the preset in the current encoder. Return the status. 0 - success 1064 * Set the timings in the current encoder. Return the status. 0 - success
1065 * -EINVAL on error 1065 * -EINVAL on error
1066 */ 1066 */
1067static int 1067static int
1068vpbe_display_s_dv_preset(struct file *file, void *priv, 1068vpbe_display_s_dv_timings(struct file *file, void *priv,
1069 struct v4l2_dv_preset *preset) 1069 struct v4l2_dv_timings *timings)
1070{ 1070{
1071 struct vpbe_fh *fh = priv; 1071 struct vpbe_fh *fh = priv;
1072 struct vpbe_layer *layer = fh->layer; 1072 struct vpbe_layer *layer = fh->layer;
1073 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev; 1073 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1074 int ret; 1074 int ret;
1075 1075
1076 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_DV_PRESETS\n"); 1076 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_DV_TIMINGS\n");
1077 1077
1078 1078
1079 /* If streaming is started, return error */ 1079 /* If streaming is started, return error */
@@ -1083,13 +1083,13 @@ vpbe_display_s_dv_preset(struct file *file, void *priv,
1083 } 1083 }
1084 1084
1085 /* Set the given standard in the encoder */ 1085 /* Set the given standard in the encoder */
1086 if (!vpbe_dev->ops.s_dv_preset) 1086 if (!vpbe_dev->ops.s_dv_timings)
1087 return -EINVAL; 1087 return -EINVAL;
1088 1088
1089 ret = vpbe_dev->ops.s_dv_preset(vpbe_dev, preset); 1089 ret = vpbe_dev->ops.s_dv_timings(vpbe_dev, timings);
1090 if (ret) { 1090 if (ret) {
1091 v4l2_err(&vpbe_dev->v4l2_dev, 1091 v4l2_err(&vpbe_dev->v4l2_dev,
1092 "Failed to set the dv presets info\n"); 1092 "Failed to set the dv timings info\n");
1093 return -EINVAL; 1093 return -EINVAL;
1094 } 1094 }
1095 /* set the current norm to zero to be consistent. If STD is used 1095 /* set the current norm to zero to be consistent. If STD is used
@@ -1101,26 +1101,25 @@ vpbe_display_s_dv_preset(struct file *file, void *priv,
1101} 1101}
1102 1102
1103/** 1103/**
1104 * vpbe_display_g_dv_preset - Set the dv presets 1104 * vpbe_display_g_dv_timings - Set the dv timings
1105 * 1105 *
1106 * Get the preset in the current encoder. Return the status. 0 - success 1106 * Get the timings in the current encoder. Return the status. 0 - success
1107 * -EINVAL on error 1107 * -EINVAL on error
1108 */ 1108 */
1109static int 1109static int
1110vpbe_display_g_dv_preset(struct file *file, void *priv, 1110vpbe_display_g_dv_timings(struct file *file, void *priv,
1111 struct v4l2_dv_preset *dv_preset) 1111 struct v4l2_dv_timings *dv_timings)
1112{ 1112{
1113 struct vpbe_fh *fh = priv; 1113 struct vpbe_fh *fh = priv;
1114 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev; 1114 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1115 1115
1116 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_DV_PRESETS\n"); 1116 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_DV_TIMINGS\n");
1117 1117
1118 /* Get the given standard in the encoder */ 1118 /* Get the given standard in the encoder */
1119 1119
1120 if (vpbe_dev->current_timings.timings_type & 1120 if (vpbe_dev->current_timings.timings_type &
1121 VPBE_ENC_DV_PRESET) { 1121 VPBE_ENC_CUSTOM_TIMINGS) {
1122 dv_preset->preset = 1122 *dv_timings = vpbe_dev->current_timings.dv_timings;
1123 vpbe_dev->current_timings.timings.dv_preset;
1124 } else { 1123 } else {
1125 return -EINVAL; 1124 return -EINVAL;
1126 } 1125 }
@@ -1572,9 +1571,9 @@ static const struct v4l2_ioctl_ops vpbe_ioctl_ops = {
1572 .vidioc_enum_output = vpbe_display_enum_output, 1571 .vidioc_enum_output = vpbe_display_enum_output,
1573 .vidioc_s_output = vpbe_display_s_output, 1572 .vidioc_s_output = vpbe_display_s_output,
1574 .vidioc_g_output = vpbe_display_g_output, 1573 .vidioc_g_output = vpbe_display_g_output,
1575 .vidioc_s_dv_preset = vpbe_display_s_dv_preset, 1574 .vidioc_s_dv_timings = vpbe_display_s_dv_timings,
1576 .vidioc_g_dv_preset = vpbe_display_g_dv_preset, 1575 .vidioc_g_dv_timings = vpbe_display_g_dv_timings,
1577 .vidioc_enum_dv_presets = vpbe_display_enum_dv_presets, 1576 .vidioc_enum_dv_timings = vpbe_display_enum_dv_timings,
1578#ifdef CONFIG_VIDEO_ADV_DEBUG 1577#ifdef CONFIG_VIDEO_ADV_DEBUG
1579 .vidioc_g_register = vpbe_display_g_register, 1578 .vidioc_g_register = vpbe_display_g_register,
1580 .vidioc_s_register = vpbe_display_s_register, 1579 .vidioc_s_register = vpbe_display_s_register,
@@ -1639,8 +1638,7 @@ static __devinit int init_vpbe_layer(int i, struct vpbe_display *disp_dev,
1639 VPBE_ENC_STD) { 1638 VPBE_ENC_STD) {
1640 vbd->tvnorms = (V4L2_STD_525_60 | V4L2_STD_625_50); 1639 vbd->tvnorms = (V4L2_STD_525_60 | V4L2_STD_625_50);
1641 vbd->current_norm = 1640 vbd->current_norm =
1642 disp_dev->vpbe_dev-> 1641 disp_dev->vpbe_dev->current_timings.std_id;
1643 current_timings.timings.std_id;
1644 } else 1642 } else
1645 vbd->current_norm = 0; 1643 vbd->current_norm = 0;
1646 1644
diff --git a/drivers/media/platform/davinci/vpbe_venc.c b/drivers/media/platform/davinci/vpbe_venc.c
index 0302669622d6..aed7369b962a 100644
--- a/drivers/media/platform/davinci/vpbe_venc.c
+++ b/drivers/media/platform/davinci/vpbe_venc.c
@@ -298,7 +298,7 @@ static int venc_set_480p59_94(struct v4l2_subdev *sd)
298 return -EINVAL; 298 return -EINVAL;
299 299
300 /* Setup clock at VPSS & VENC for SD */ 300 /* Setup clock at VPSS & VENC for SD */
301 if (pdata->setup_clock(VPBE_ENC_DV_PRESET, V4L2_DV_480P59_94) < 0) 301 if (pdata->setup_clock(VPBE_ENC_CUSTOM_TIMINGS, 27000000) < 0)
302 return -EINVAL; 302 return -EINVAL;
303 303
304 venc_enabledigitaloutput(sd, 0); 304 venc_enabledigitaloutput(sd, 0);
@@ -345,7 +345,7 @@ static int venc_set_576p50(struct v4l2_subdev *sd)
345 (pdata->venc_type != VPBE_VERSION_2)) 345 (pdata->venc_type != VPBE_VERSION_2))
346 return -EINVAL; 346 return -EINVAL;
347 /* Setup clock at VPSS & VENC for SD */ 347 /* Setup clock at VPSS & VENC for SD */
348 if (pdata->setup_clock(VPBE_ENC_DV_PRESET, V4L2_DV_576P50) < 0) 348 if (pdata->setup_clock(VPBE_ENC_CUSTOM_TIMINGS, 27000000) < 0)
349 return -EINVAL; 349 return -EINVAL;
350 350
351 venc_enabledigitaloutput(sd, 0); 351 venc_enabledigitaloutput(sd, 0);
@@ -385,7 +385,7 @@ static int venc_set_720p60_internal(struct v4l2_subdev *sd)
385 struct venc_state *venc = to_state(sd); 385 struct venc_state *venc = to_state(sd);
386 struct venc_platform_data *pdata = venc->pdata; 386 struct venc_platform_data *pdata = venc->pdata;
387 387
388 if (pdata->setup_clock(VPBE_ENC_DV_PRESET, V4L2_DV_720P60) < 0) 388 if (pdata->setup_clock(VPBE_ENC_CUSTOM_TIMINGS, 74250000) < 0)
389 return -EINVAL; 389 return -EINVAL;
390 390
391 venc_enabledigitaloutput(sd, 0); 391 venc_enabledigitaloutput(sd, 0);
@@ -413,7 +413,7 @@ static int venc_set_1080i30_internal(struct v4l2_subdev *sd)
413 struct venc_state *venc = to_state(sd); 413 struct venc_state *venc = to_state(sd);
414 struct venc_platform_data *pdata = venc->pdata; 414 struct venc_platform_data *pdata = venc->pdata;
415 415
416 if (pdata->setup_clock(VPBE_ENC_DV_PRESET, V4L2_DV_1080P30) < 0) 416 if (pdata->setup_clock(VPBE_ENC_CUSTOM_TIMINGS, 74250000) < 0)
417 return -EINVAL; 417 return -EINVAL;
418 418
419 venc_enabledigitaloutput(sd, 0); 419 venc_enabledigitaloutput(sd, 0);
@@ -446,26 +446,27 @@ static int venc_s_std_output(struct v4l2_subdev *sd, v4l2_std_id norm)
446 return -EINVAL; 446 return -EINVAL;
447} 447}
448 448
449static int venc_s_dv_preset(struct v4l2_subdev *sd, 449static int venc_s_dv_timings(struct v4l2_subdev *sd,
450 struct v4l2_dv_preset *dv_preset) 450 struct v4l2_dv_timings *dv_timings)
451{ 451{
452 struct venc_state *venc = to_state(sd); 452 struct venc_state *venc = to_state(sd);
453 u32 height = dv_timings->bt.height;
453 int ret; 454 int ret;
454 455
455 v4l2_dbg(debug, 1, sd, "venc_s_dv_preset\n"); 456 v4l2_dbg(debug, 1, sd, "venc_s_dv_timings\n");
456 457
457 if (dv_preset->preset == V4L2_DV_576P50) 458 if (height == 576)
458 return venc_set_576p50(sd); 459 return venc_set_576p50(sd);
459 else if (dv_preset->preset == V4L2_DV_480P59_94) 460 else if (height == 480)
460 return venc_set_480p59_94(sd); 461 return venc_set_480p59_94(sd);
461 else if ((dv_preset->preset == V4L2_DV_720P60) && 462 else if ((height == 720) &&
462 (venc->pdata->venc_type == VPBE_VERSION_2)) { 463 (venc->pdata->venc_type == VPBE_VERSION_2)) {
463 /* TBD setup internal 720p mode here */ 464 /* TBD setup internal 720p mode here */
464 ret = venc_set_720p60_internal(sd); 465 ret = venc_set_720p60_internal(sd);
465 /* for DM365 VPBE, there is DAC inside */ 466 /* for DM365 VPBE, there is DAC inside */
466 vdaccfg_write(sd, VDAC_CONFIG_HD_V2); 467 vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
467 return ret; 468 return ret;
468 } else if ((dv_preset->preset == V4L2_DV_1080I30) && 469 } else if ((height == 1080) &&
469 (venc->pdata->venc_type == VPBE_VERSION_2)) { 470 (venc->pdata->venc_type == VPBE_VERSION_2)) {
470 /* TBD setup internal 1080i mode here */ 471 /* TBD setup internal 1080i mode here */
471 ret = venc_set_1080i30_internal(sd); 472 ret = venc_set_1080i30_internal(sd);
@@ -518,7 +519,7 @@ static const struct v4l2_subdev_core_ops venc_core_ops = {
518static const struct v4l2_subdev_video_ops venc_video_ops = { 519static const struct v4l2_subdev_video_ops venc_video_ops = {
519 .s_routing = venc_s_routing, 520 .s_routing = venc_s_routing,
520 .s_std_output = venc_s_std_output, 521 .s_std_output = venc_s_std_output,
521 .s_dv_preset = venc_s_dv_preset, 522 .s_dv_timings = venc_s_dv_timings,
522}; 523};
523 524
524static const struct v4l2_subdev_ops venc_ops = { 525static const struct v4l2_subdev_ops venc_ops = {
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 48052cbffc2b..8be492cd8ed4 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -1669,6 +1669,7 @@ static int vpfe_s_crop(struct file *file, void *priv,
1669 const struct v4l2_crop *crop) 1669 const struct v4l2_crop *crop)
1670{ 1670{
1671 struct vpfe_device *vpfe_dev = video_drvdata(file); 1671 struct vpfe_device *vpfe_dev = video_drvdata(file);
1672 struct v4l2_rect rect = crop->c;
1672 int ret = 0; 1673 int ret = 0;
1673 1674
1674 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_crop\n"); 1675 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_crop\n");
@@ -1684,7 +1685,7 @@ static int vpfe_s_crop(struct file *file, void *priv,
1684 if (ret) 1685 if (ret)
1685 return ret; 1686 return ret;
1686 1687
1687 if (crop->c.top < 0 || crop->c.left < 0) { 1688 if (rect.top < 0 || rect.left < 0) {
1688 v4l2_err(&vpfe_dev->v4l2_dev, 1689 v4l2_err(&vpfe_dev->v4l2_dev,
1689 "doesn't support negative values for top & left\n"); 1690 "doesn't support negative values for top & left\n");
1690 ret = -EINVAL; 1691 ret = -EINVAL;
@@ -1692,26 +1693,26 @@ static int vpfe_s_crop(struct file *file, void *priv,
1692 } 1693 }
1693 1694
1694 /* adjust the width to 16 pixel boundary */ 1695 /* adjust the width to 16 pixel boundary */
1695 crop->c.width = ((crop->c.width + 15) & ~0xf); 1696 rect.width = ((rect.width + 15) & ~0xf);
1696 1697
1697 /* make sure parameters are valid */ 1698 /* make sure parameters are valid */
1698 if ((crop->c.left + crop->c.width > 1699 if ((rect.left + rect.width >
1699 vpfe_dev->std_info.active_pixels) || 1700 vpfe_dev->std_info.active_pixels) ||
1700 (crop->c.top + crop->c.height > 1701 (rect.top + rect.height >
1701 vpfe_dev->std_info.active_lines)) { 1702 vpfe_dev->std_info.active_lines)) {
1702 v4l2_err(&vpfe_dev->v4l2_dev, "Error in S_CROP params\n"); 1703 v4l2_err(&vpfe_dev->v4l2_dev, "Error in S_CROP params\n");
1703 ret = -EINVAL; 1704 ret = -EINVAL;
1704 goto unlock_out; 1705 goto unlock_out;
1705 } 1706 }
1706 ccdc_dev->hw_ops.set_image_window(&crop->c); 1707 ccdc_dev->hw_ops.set_image_window(&rect);
1707 vpfe_dev->fmt.fmt.pix.width = crop->c.width; 1708 vpfe_dev->fmt.fmt.pix.width = rect.width;
1708 vpfe_dev->fmt.fmt.pix.height = crop->c.height; 1709 vpfe_dev->fmt.fmt.pix.height = rect.height;
1709 vpfe_dev->fmt.fmt.pix.bytesperline = 1710 vpfe_dev->fmt.fmt.pix.bytesperline =
1710 ccdc_dev->hw_ops.get_line_length(); 1711 ccdc_dev->hw_ops.get_line_length();
1711 vpfe_dev->fmt.fmt.pix.sizeimage = 1712 vpfe_dev->fmt.fmt.pix.sizeimage =
1712 vpfe_dev->fmt.fmt.pix.bytesperline * 1713 vpfe_dev->fmt.fmt.pix.bytesperline *
1713 vpfe_dev->fmt.fmt.pix.height; 1714 vpfe_dev->fmt.fmt.pix.height;
1714 vpfe_dev->crop = crop->c; 1715 vpfe_dev->crop = rect;
1715unlock_out: 1716unlock_out:
1716 mutex_unlock(&vpfe_dev->lock); 1717 mutex_unlock(&vpfe_dev->lock);
1717 return ret; 1718 return ret;
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 0bafecac4923..fcabc023885d 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -311,12 +311,13 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
311 } 311 }
312 312
313 /* configure 1 or 2 channel mode */ 313 /* configure 1 or 2 channel mode */
314 ret = vpif_config_data->setup_input_channel_mode 314 if (vpif_config_data->setup_input_channel_mode) {
315 (vpif->std_info.ycmux_mode); 315 ret = vpif_config_data->
316 316 setup_input_channel_mode(vpif->std_info.ycmux_mode);
317 if (ret < 0) { 317 if (ret < 0) {
318 vpif_dbg(1, debug, "can't set vpif channel mode\n"); 318 vpif_dbg(1, debug, "can't set vpif channel mode\n");
319 return ret; 319 return ret;
320 }
320 } 321 }
321 322
322 /* Call vpif_set_params function to set the parameters and addresses */ 323 /* Call vpif_set_params function to set the parameters and addresses */
@@ -863,13 +864,11 @@ static unsigned int vpif_poll(struct file *filep, poll_table * wait)
863 */ 864 */
864static int vpif_open(struct file *filep) 865static int vpif_open(struct file *filep)
865{ 866{
866 struct vpif_capture_config *config = vpif_dev->platform_data;
867 struct video_device *vdev = video_devdata(filep); 867 struct video_device *vdev = video_devdata(filep);
868 struct common_obj *common; 868 struct common_obj *common;
869 struct video_obj *vid_ch; 869 struct video_obj *vid_ch;
870 struct channel_obj *ch; 870 struct channel_obj *ch;
871 struct vpif_fh *fh; 871 struct vpif_fh *fh;
872 int i;
873 872
874 vpif_dbg(2, debug, "vpif_open\n"); 873 vpif_dbg(2, debug, "vpif_open\n");
875 874
@@ -878,26 +877,6 @@ static int vpif_open(struct file *filep)
878 vid_ch = &ch->video; 877 vid_ch = &ch->video;
879 common = &ch->common[VPIF_VIDEO_INDEX]; 878 common = &ch->common[VPIF_VIDEO_INDEX];
880 879
881 if (NULL == ch->curr_subdev_info) {
882 /**
883 * search through the sub device to see a registered
884 * sub device and make it as current sub device
885 */
886 for (i = 0; i < config->subdev_count; i++) {
887 if (vpif_obj.sd[i]) {
888 /* the sub device is registered */
889 ch->curr_subdev_info = &config->subdev_info[i];
890 /* make first input as the current input */
891 vid_ch->input_idx = 0;
892 break;
893 }
894 }
895 if (i == config->subdev_count) {
896 vpif_err("No sub device registered\n");
897 return -ENOENT;
898 }
899 }
900
901 /* Allocate memory for the file handle object */ 880 /* Allocate memory for the file handle object */
902 fh = kzalloc(sizeof(struct vpif_fh), GFP_KERNEL); 881 fh = kzalloc(sizeof(struct vpif_fh), GFP_KERNEL);
903 if (NULL == fh) { 882 if (NULL == fh) {
@@ -997,6 +976,7 @@ static int vpif_reqbufs(struct file *file, void *priv,
997 struct common_obj *common; 976 struct common_obj *common;
998 u8 index = 0; 977 u8 index = 0;
999 struct vb2_queue *q; 978 struct vb2_queue *q;
979 int ret;
1000 980
1001 vpif_dbg(2, debug, "vpif_reqbufs\n"); 981 vpif_dbg(2, debug, "vpif_reqbufs\n");
1002 982
@@ -1036,8 +1016,12 @@ static int vpif_reqbufs(struct file *file, void *priv,
1036 q->mem_ops = &vb2_dma_contig_memops; 1016 q->mem_ops = &vb2_dma_contig_memops;
1037 q->buf_struct_size = sizeof(struct vpif_cap_buffer); 1017 q->buf_struct_size = sizeof(struct vpif_cap_buffer);
1038 1018
1039 vb2_queue_init(q); 1019 ret = vb2_queue_init(q);
1040 1020 if (ret) {
1021 vpif_err("vpif_capture: vb2_queue_init() failed\n");
1022 vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
1023 return ret;
1024 }
1041 /* Set io allowed member of file handle to TRUE */ 1025 /* Set io allowed member of file handle to TRUE */
1042 fh->io_allowed[index] = 1; 1026 fh->io_allowed[index] = 1;
1043 /* Increment io usrs member of channel object to 1 */ 1027 /* Increment io usrs member of channel object to 1 */
@@ -1175,10 +1159,9 @@ static int vpif_streamon(struct file *file, void *priv,
1175 return ret; 1159 return ret;
1176 1160
1177 /* Enable streamon on the sub device */ 1161 /* Enable streamon on the sub device */
1178 ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video, 1162 ret = v4l2_subdev_call(ch->sd, video, s_stream, 1);
1179 s_stream, 1);
1180 1163
1181 if (ret && (ret != -ENOIOCTLCMD)) { 1164 if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) {
1182 vpif_dbg(1, debug, "stream on failed in subdev\n"); 1165 vpif_dbg(1, debug, "stream on failed in subdev\n");
1183 return ret; 1166 return ret;
1184 } 1167 }
@@ -1238,73 +1221,105 @@ static int vpif_streamoff(struct file *file, void *priv,
1238 1221
1239 common->started = 0; 1222 common->started = 0;
1240 1223
1241 ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video, 1224 ret = v4l2_subdev_call(ch->sd, video, s_stream, 0);
1242 s_stream, 0);
1243 1225
1244 if (ret && (ret != -ENOIOCTLCMD)) 1226 if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
1245 vpif_dbg(1, debug, "stream off failed in subdev\n"); 1227 vpif_dbg(1, debug, "stream off failed in subdev\n");
1246 1228
1247 return vb2_streamoff(&common->buffer_queue, buftype); 1229 return vb2_streamoff(&common->buffer_queue, buftype);
1248} 1230}
1249 1231
1250/** 1232/**
1251 * vpif_map_sub_device_to_input() - Maps sub device to input 1233 * vpif_input_to_subdev() - Maps input to sub device
1252 * @ch - ptr to channel 1234 * @vpif_cfg - global config ptr
1253 * @config - ptr to capture configuration 1235 * @chan_cfg - channel config ptr
1254 * @input_index - Given input index from application 1236 * @input_index - Given input index from application
1255 * @sub_device_index - index into sd table
1256 * 1237 *
1257 * lookup the sub device information for a given input index. 1238 * lookup the sub device information for a given input index.
1258 * we report all the inputs to application. inputs table also 1239 * we report all the inputs to application. inputs table also
1259 * has sub device name for the each input 1240 * has sub device name for the each input
1260 */ 1241 */
1261static struct vpif_subdev_info *vpif_map_sub_device_to_input( 1242static int vpif_input_to_subdev(
1262 struct channel_obj *ch, 1243 struct vpif_capture_config *vpif_cfg,
1263 struct vpif_capture_config *vpif_cfg, 1244 struct vpif_capture_chan_config *chan_cfg,
1264 int input_index, 1245 int input_index)
1265 int *sub_device_index)
1266{ 1246{
1267 struct vpif_capture_chan_config *chan_cfg; 1247 struct vpif_subdev_info *subdev_info;
1268 struct vpif_subdev_info *subdev_info = NULL; 1248 const char *subdev_name;
1269 const char *subdev_name = NULL;
1270 int i; 1249 int i;
1271 1250
1272 vpif_dbg(2, debug, "vpif_map_sub_device_to_input\n"); 1251 vpif_dbg(2, debug, "vpif_input_to_subdev\n");
1273 1252
1274 chan_cfg = &vpif_cfg->chan_config[ch->channel_id]; 1253 subdev_name = chan_cfg->inputs[input_index].subdev_name;
1275 1254 if (subdev_name == NULL)
1276 /** 1255 return -1;
1277 * search through the inputs to find the sub device supporting
1278 * the input
1279 */
1280 for (i = 0; i < chan_cfg->input_count; i++) {
1281 /* For each sub device, loop through input */
1282 if (i == input_index) {
1283 subdev_name = chan_cfg->inputs[i].subdev_name;
1284 break;
1285 }
1286 }
1287
1288 /* if reached maximum. return null */
1289 if (i == chan_cfg->input_count || (NULL == subdev_name))
1290 return subdev_info;
1291 1256
1292 /* loop through the sub device list to get the sub device info */ 1257 /* loop through the sub device list to get the sub device info */
1293 for (i = 0; i < vpif_cfg->subdev_count; i++) { 1258 for (i = 0; i < vpif_cfg->subdev_count; i++) {
1294 subdev_info = &vpif_cfg->subdev_info[i]; 1259 subdev_info = &vpif_cfg->subdev_info[i];
1295 if (!strcmp(subdev_info->name, subdev_name)) 1260 if (!strcmp(subdev_info->name, subdev_name))
1296 break; 1261 return i;
1262 }
1263 return -1;
1264}
1265
1266/**
1267 * vpif_set_input() - Select an input
1268 * @vpif_cfg - global config ptr
1269 * @ch - channel
1270 * @_index - Given input index from application
1271 *
1272 * Select the given input.
1273 */
1274static int vpif_set_input(
1275 struct vpif_capture_config *vpif_cfg,
1276 struct channel_obj *ch,
1277 int index)
1278{
1279 struct vpif_capture_chan_config *chan_cfg =
1280 &vpif_cfg->chan_config[ch->channel_id];
1281 struct vpif_subdev_info *subdev_info = NULL;
1282 struct v4l2_subdev *sd = NULL;
1283 u32 input = 0, output = 0;
1284 int sd_index;
1285 int ret;
1286
1287 sd_index = vpif_input_to_subdev(vpif_cfg, chan_cfg, index);
1288 if (sd_index >= 0) {
1289 sd = vpif_obj.sd[sd_index];
1290 subdev_info = &vpif_cfg->subdev_info[sd_index];
1297 } 1291 }
1298 1292
1299 if (i == vpif_cfg->subdev_count) 1293 /* first setup input path from sub device to vpif */
1300 return subdev_info; 1294 if (sd && vpif_cfg->setup_input_path) {
1295 ret = vpif_cfg->setup_input_path(ch->channel_id,
1296 subdev_info->name);
1297 if (ret < 0) {
1298 vpif_dbg(1, debug, "couldn't setup input path for the" \
1299 " sub device %s, for input index %d\n",
1300 subdev_info->name, index);
1301 return ret;
1302 }
1303 }
1301 1304
1302 /* check if the sub device is registered */ 1305 if (sd) {
1303 if (NULL == vpif_obj.sd[i]) 1306 input = chan_cfg->inputs[index].input_route;
1304 return NULL; 1307 output = chan_cfg->inputs[index].output_route;
1308 ret = v4l2_subdev_call(sd, video, s_routing,
1309 input, output, 0);
1310 if (ret < 0 && ret != -ENOIOCTLCMD) {
1311 vpif_dbg(1, debug, "Failed to set input\n");
1312 return ret;
1313 }
1314 }
1315 ch->input_idx = index;
1316 ch->sd = sd;
1317 /* copy interface parameters to vpif */
1318 ch->vpifparams.iface = chan_cfg->vpif_if;
1305 1319
1306 *sub_device_index = i; 1320 /* update tvnorms from the sub device input info */
1307 return subdev_info; 1321 ch->video_dev->tvnorms = chan_cfg->inputs[index].input.std;
1322 return 0;
1308} 1323}
1309 1324
1310/** 1325/**
@@ -1324,12 +1339,16 @@ static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
1324 vpif_dbg(2, debug, "vpif_querystd\n"); 1339 vpif_dbg(2, debug, "vpif_querystd\n");
1325 1340
1326 /* Call querystd function of decoder device */ 1341 /* Call querystd function of decoder device */
1327 ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video, 1342 ret = v4l2_subdev_call(ch->sd, video, querystd, std_id);
1328 querystd, std_id);
1329 if (ret < 0)
1330 vpif_dbg(1, debug, "Failed to set standard for sub devices\n");
1331 1343
1332 return ret; 1344 if (ret == -ENOIOCTLCMD || ret == -ENODEV)
1345 return -ENODATA;
1346 if (ret) {
1347 vpif_dbg(1, debug, "Failed to query standard for sub devices\n");
1348 return ret;
1349 }
1350
1351 return 0;
1333} 1352}
1334 1353
1335/** 1354/**
@@ -1397,11 +1416,12 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
1397 vpif_config_format(ch); 1416 vpif_config_format(ch);
1398 1417
1399 /* set standard in the sub device */ 1418 /* set standard in the sub device */
1400 ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core, 1419 ret = v4l2_subdev_call(ch->sd, core, s_std, *std_id);
1401 s_std, *std_id); 1420 if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) {
1402 if (ret < 0)
1403 vpif_dbg(1, debug, "Failed to set standard for sub devices\n"); 1421 vpif_dbg(1, debug, "Failed to set standard for sub devices\n");
1404 return ret; 1422 return ret;
1423 }
1424 return 0;
1405} 1425}
1406 1426
1407/** 1427/**
@@ -1441,10 +1461,8 @@ static int vpif_g_input(struct file *file, void *priv, unsigned int *index)
1441{ 1461{
1442 struct vpif_fh *fh = priv; 1462 struct vpif_fh *fh = priv;
1443 struct channel_obj *ch = fh->channel; 1463 struct channel_obj *ch = fh->channel;
1444 struct video_obj *vid_ch = &ch->video;
1445
1446 *index = vid_ch->input_idx;
1447 1464
1465 *index = ch->input_idx;
1448 return 0; 1466 return 0;
1449} 1467}
1450 1468
@@ -1461,13 +1479,13 @@ static int vpif_s_input(struct file *file, void *priv, unsigned int index)
1461 struct vpif_fh *fh = priv; 1479 struct vpif_fh *fh = priv;
1462 struct channel_obj *ch = fh->channel; 1480 struct channel_obj *ch = fh->channel;
1463 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; 1481 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
1464 struct video_obj *vid_ch = &ch->video; 1482 int ret;
1465 struct vpif_subdev_info *subdev_info;
1466 int ret = 0, sd_index = 0;
1467 u32 input = 0, output = 0;
1468 1483
1469 chan_cfg = &config->chan_config[ch->channel_id]; 1484 chan_cfg = &config->chan_config[ch->channel_id];
1470 1485
1486 if (index >= chan_cfg->input_count)
1487 return -EINVAL;
1488
1471 if (common->started) { 1489 if (common->started) {
1472 vpif_err("Streaming in progress\n"); 1490 vpif_err("Streaming in progress\n");
1473 return -EBUSY; 1491 return -EBUSY;
@@ -1486,45 +1504,7 @@ static int vpif_s_input(struct file *file, void *priv, unsigned int index)
1486 return ret; 1504 return ret;
1487 1505
1488 fh->initialized = 1; 1506 fh->initialized = 1;
1489 subdev_info = vpif_map_sub_device_to_input(ch, config, index, 1507 return vpif_set_input(config, ch, index);
1490 &sd_index);
1491 if (NULL == subdev_info) {
1492 vpif_dbg(1, debug,
1493 "couldn't lookup sub device for the input index\n");
1494 return -EINVAL;
1495 }
1496
1497 /* first setup input path from sub device to vpif */
1498 if (config->setup_input_path) {
1499 ret = config->setup_input_path(ch->channel_id,
1500 subdev_info->name);
1501 if (ret < 0) {
1502 vpif_dbg(1, debug, "couldn't setup input path for the"
1503 " sub device %s, for input index %d\n",
1504 subdev_info->name, index);
1505 return ret;
1506 }
1507 }
1508
1509 if (subdev_info->can_route) {
1510 input = subdev_info->input;
1511 output = subdev_info->output;
1512 ret = v4l2_subdev_call(vpif_obj.sd[sd_index], video, s_routing,
1513 input, output, 0);
1514 if (ret < 0) {
1515 vpif_dbg(1, debug, "Failed to set input\n");
1516 return ret;
1517 }
1518 }
1519 vid_ch->input_idx = index;
1520 ch->curr_subdev_info = subdev_info;
1521 ch->curr_sd_index = sd_index;
1522 /* copy interface parameters to vpif */
1523 ch->vpifparams.iface = subdev_info->vpif_if;
1524
1525 /* update tvnorms from the sub device input info */
1526 ch->video_dev->tvnorms = chan_cfg->inputs[index].input.std;
1527 return ret;
1528} 1508}
1529 1509
1530/** 1510/**
@@ -1655,9 +1635,11 @@ static int vpif_querycap(struct file *file, void *priv,
1655{ 1635{
1656 struct vpif_capture_config *config = vpif_dev->platform_data; 1636 struct vpif_capture_config *config = vpif_dev->platform_data;
1657 1637
1658 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; 1638 cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
1659 strlcpy(cap->driver, "vpif capture", sizeof(cap->driver)); 1639 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
1660 strlcpy(cap->bus_info, "VPIF Platform", sizeof(cap->bus_info)); 1640 snprintf(cap->driver, sizeof(cap->driver), "%s", dev_name(vpif_dev));
1641 snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
1642 dev_name(vpif_dev));
1661 strlcpy(cap->card, config->card_name, sizeof(cap->card)); 1643 strlcpy(cap->card, config->card_name, sizeof(cap->card));
1662 1644
1663 return 0; 1645 return 0;
@@ -1730,9 +1712,12 @@ vpif_enum_dv_timings(struct file *file, void *priv,
1730{ 1712{
1731 struct vpif_fh *fh = priv; 1713 struct vpif_fh *fh = priv;
1732 struct channel_obj *ch = fh->channel; 1714 struct channel_obj *ch = fh->channel;
1715 int ret;
1733 1716
1734 return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], 1717 ret = v4l2_subdev_call(ch->sd, video, enum_dv_timings, timings);
1735 video, enum_dv_timings, timings); 1718 if (ret == -ENOIOCTLCMD && ret == -ENODEV)
1719 return -EINVAL;
1720 return ret;
1736} 1721}
1737 1722
1738/** 1723/**
@@ -1747,9 +1732,12 @@ vpif_query_dv_timings(struct file *file, void *priv,
1747{ 1732{
1748 struct vpif_fh *fh = priv; 1733 struct vpif_fh *fh = priv;
1749 struct channel_obj *ch = fh->channel; 1734 struct channel_obj *ch = fh->channel;
1735 int ret;
1750 1736
1751 return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], 1737 ret = v4l2_subdev_call(ch->sd, video, query_dv_timings, timings);
1752 video, query_dv_timings, timings); 1738 if (ret == -ENOIOCTLCMD && ret == -ENODEV)
1739 return -ENODATA;
1740 return ret;
1753} 1741}
1754 1742
1755/** 1743/**
@@ -1775,13 +1763,9 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
1775 } 1763 }
1776 1764
1777 /* Configure subdevice timings, if any */ 1765 /* Configure subdevice timings, if any */
1778 ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], 1766 ret = v4l2_subdev_call(ch->sd, video, s_dv_timings, timings);
1779 video, s_dv_timings, timings); 1767 if (ret == -ENOIOCTLCMD || ret == -ENODEV)
1780 if (ret == -ENOIOCTLCMD) { 1768 ret = 0;
1781 vpif_dbg(2, debug, "Custom DV timings not supported by "
1782 "subdevice\n");
1783 return -EINVAL;
1784 }
1785 if (ret < 0) { 1769 if (ret < 0) {
1786 vpif_dbg(2, debug, "Error setting custom DV timings\n"); 1770 vpif_dbg(2, debug, "Error setting custom DV timings\n");
1787 return ret; 1771 return ret;
@@ -1906,8 +1890,7 @@ static int vpif_dbg_g_register(struct file *file, void *priv,
1906 struct vpif_fh *fh = priv; 1890 struct vpif_fh *fh = priv;
1907 struct channel_obj *ch = fh->channel; 1891 struct channel_obj *ch = fh->channel;
1908 1892
1909 return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core, 1893 return v4l2_subdev_call(ch->sd, core, g_register, reg);
1910 g_register, reg);
1911} 1894}
1912 1895
1913/* 1896/*
@@ -1924,8 +1907,7 @@ static int vpif_dbg_s_register(struct file *file, void *priv,
1924 struct vpif_fh *fh = priv; 1907 struct vpif_fh *fh = priv;
1925 struct channel_obj *ch = fh->channel; 1908 struct channel_obj *ch = fh->channel;
1926 1909
1927 return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core, 1910 return v4l2_subdev_call(ch->sd, core, s_register, reg);
1928 s_register, reg);
1929} 1911}
1930#endif 1912#endif
1931 1913
@@ -2063,7 +2045,8 @@ static __init int vpif_probe(struct platform_device *pdev)
2063{ 2045{
2064 struct vpif_subdev_info *subdevdata; 2046 struct vpif_subdev_info *subdevdata;
2065 struct vpif_capture_config *config; 2047 struct vpif_capture_config *config;
2066 int i, j, k, m, q, err; 2048 int i, j, k, err;
2049 int res_idx = 0;
2067 struct i2c_adapter *i2c_adap; 2050 struct i2c_adapter *i2c_adap;
2068 struct channel_obj *ch; 2051 struct channel_obj *ch;
2069 struct common_obj *common; 2052 struct common_obj *common;
@@ -2086,18 +2069,19 @@ static __init int vpif_probe(struct platform_device *pdev)
2086 return err; 2069 return err;
2087 } 2070 }
2088 2071
2089 k = 0; 2072 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) {
2090 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, k))) {
2091 for (i = res->start; i <= res->end; i++) { 2073 for (i = res->start; i <= res->end; i++) {
2092 if (request_irq(i, vpif_channel_isr, IRQF_SHARED, 2074 if (request_irq(i, vpif_channel_isr, IRQF_SHARED,
2093 "VPIF_Capture", 2075 "VPIF_Capture", (void *)
2094 (void *)(&vpif_obj.dev[k]->channel_id))) { 2076 (&vpif_obj.dev[res_idx]->channel_id))) {
2095 err = -EBUSY; 2077 err = -EBUSY;
2096 i--; 2078 for (j = 0; j < i; j++)
2079 free_irq(j, (void *)
2080 (&vpif_obj.dev[res_idx]->channel_id));
2097 goto vpif_int_err; 2081 goto vpif_int_err;
2098 } 2082 }
2099 } 2083 }
2100 k++; 2084 res_idx++;
2101 } 2085 }
2102 2086
2103 for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { 2087 for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
@@ -2111,7 +2095,7 @@ static __init int vpif_probe(struct platform_device *pdev)
2111 video_device_release(ch->video_dev); 2095 video_device_release(ch->video_dev);
2112 } 2096 }
2113 err = -ENOMEM; 2097 err = -ENOMEM;
2114 goto vpif_dev_alloc_err; 2098 goto vpif_int_err;
2115 } 2099 }
2116 2100
2117 /* Initialize field of video device */ 2101 /* Initialize field of video device */
@@ -2142,24 +2126,6 @@ static __init int vpif_probe(struct platform_device *pdev)
2142 } 2126 }
2143 } 2127 }
2144 2128
2145 for (j = 0; j < VPIF_CAPTURE_MAX_DEVICES; j++) {
2146 ch = vpif_obj.dev[j];
2147 ch->channel_id = j;
2148 common = &(ch->common[VPIF_VIDEO_INDEX]);
2149 spin_lock_init(&common->irqlock);
2150 mutex_init(&common->lock);
2151 ch->video_dev->lock = &common->lock;
2152 /* Initialize prio member of channel object */
2153 v4l2_prio_init(&ch->prio);
2154 err = video_register_device(ch->video_dev,
2155 VFL_TYPE_GRABBER, (j ? 1 : 0));
2156 if (err)
2157 goto probe_out;
2158
2159 video_set_drvdata(ch->video_dev, ch);
2160
2161 }
2162
2163 i2c_adap = i2c_get_adapter(1); 2129 i2c_adap = i2c_get_adapter(1);
2164 config = pdev->dev.platform_data; 2130 config = pdev->dev.platform_data;
2165 2131
@@ -2169,7 +2135,7 @@ static __init int vpif_probe(struct platform_device *pdev)
2169 if (vpif_obj.sd == NULL) { 2135 if (vpif_obj.sd == NULL) {
2170 vpif_err("unable to allocate memory for subdevice pointers\n"); 2136 vpif_err("unable to allocate memory for subdevice pointers\n");
2171 err = -ENOMEM; 2137 err = -ENOMEM;
2172 goto probe_out; 2138 goto vpif_sd_error;
2173 } 2139 }
2174 2140
2175 for (i = 0; i < subdev_count; i++) { 2141 for (i = 0; i < subdev_count; i++) {
@@ -2186,19 +2152,32 @@ static __init int vpif_probe(struct platform_device *pdev)
2186 } 2152 }
2187 v4l2_info(&vpif_obj.v4l2_dev, "registered sub device %s\n", 2153 v4l2_info(&vpif_obj.v4l2_dev, "registered sub device %s\n",
2188 subdevdata->name); 2154 subdevdata->name);
2189
2190 if (vpif_obj.sd[i])
2191 vpif_obj.sd[i]->grp_id = 1 << i;
2192 } 2155 }
2193 2156
2157 for (j = 0; j < VPIF_CAPTURE_MAX_DEVICES; j++) {
2158 ch = vpif_obj.dev[j];
2159 ch->channel_id = j;
2160 common = &(ch->common[VPIF_VIDEO_INDEX]);
2161 spin_lock_init(&common->irqlock);
2162 mutex_init(&common->lock);
2163 ch->video_dev->lock = &common->lock;
2164 /* Initialize prio member of channel object */
2165 v4l2_prio_init(&ch->prio);
2166 video_set_drvdata(ch->video_dev, ch);
2167
2168 /* select input 0 */
2169 err = vpif_set_input(config, ch, 0);
2170 if (err)
2171 goto probe_out;
2172
2173 err = video_register_device(ch->video_dev,
2174 VFL_TYPE_GRABBER, (j ? 1 : 0));
2175 if (err)
2176 goto probe_out;
2177 }
2194 v4l2_info(&vpif_obj.v4l2_dev, "VPIF capture driver initialized\n"); 2178 v4l2_info(&vpif_obj.v4l2_dev, "VPIF capture driver initialized\n");
2195 return 0; 2179 return 0;
2196 2180
2197probe_subdev_out:
2198 /* free sub devices memory */
2199 kfree(vpif_obj.sd);
2200
2201 j = VPIF_CAPTURE_MAX_DEVICES;
2202probe_out: 2181probe_out:
2203 for (k = 0; k < j; k++) { 2182 for (k = 0; k < j; k++) {
2204 /* Get the pointer to the channel object */ 2183 /* Get the pointer to the channel object */
@@ -2206,22 +2185,23 @@ probe_out:
2206 /* Unregister video device */ 2185 /* Unregister video device */
2207 video_unregister_device(ch->video_dev); 2186 video_unregister_device(ch->video_dev);
2208 } 2187 }
2188probe_subdev_out:
2189 /* free sub devices memory */
2190 kfree(vpif_obj.sd);
2209 2191
2210vpif_dev_alloc_err: 2192vpif_sd_error:
2211 k = VPIF_CAPTURE_MAX_DEVICES-1; 2193 for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
2212 res = platform_get_resource(pdev, IORESOURCE_IRQ, k); 2194 ch = vpif_obj.dev[i];
2213 i = res->end; 2195 /* Note: does nothing if ch->video_dev == NULL */
2214 2196 video_device_release(ch->video_dev);
2215vpif_int_err:
2216 for (q = k; q >= 0; q--) {
2217 for (m = i; m >= (int)res->start; m--)
2218 free_irq(m, (void *)(&vpif_obj.dev[q]->channel_id));
2219
2220 res = platform_get_resource(pdev, IORESOURCE_IRQ, q-1);
2221 if (res)
2222 i = res->end;
2223 } 2197 }
2198vpif_int_err:
2224 v4l2_device_unregister(&vpif_obj.v4l2_dev); 2199 v4l2_device_unregister(&vpif_obj.v4l2_dev);
2200 for (i = 0; i < res_idx; i++) {
2201 res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
2202 for (j = res->start; j <= res->end; j++)
2203 free_irq(j, (void *)(&vpif_obj.dev[i]->channel_id));
2204 }
2225 return err; 2205 return err;
2226} 2206}
2227 2207
diff --git a/drivers/media/platform/davinci/vpif_capture.h b/drivers/media/platform/davinci/vpif_capture.h
index d24efc17e4c8..3d3c1e5cd5d4 100644
--- a/drivers/media/platform/davinci/vpif_capture.h
+++ b/drivers/media/platform/davinci/vpif_capture.h
@@ -54,8 +54,6 @@ struct video_obj {
54 /* Currently selected or default standard */ 54 /* Currently selected or default standard */
55 v4l2_std_id stdid; 55 v4l2_std_id stdid;
56 struct v4l2_dv_timings dv_timings; 56 struct v4l2_dv_timings dv_timings;
57 /* This is to track the last input that is passed to application */
58 u32 input_idx;
59}; 57};
60 58
61struct vpif_cap_buffer { 59struct vpif_cap_buffer {
@@ -119,10 +117,10 @@ struct channel_obj {
119 u8 initialized; 117 u8 initialized;
120 /* Identifies channel */ 118 /* Identifies channel */
121 enum vpif_channel_id channel_id; 119 enum vpif_channel_id channel_id;
122 /* index into sd table */ 120 /* Current input */
123 int curr_sd_index; 121 u32 input_idx;
124 /* ptr to current sub device information */ 122 /* subdev corresponding to the current input, may be NULL */
125 struct vpif_subdev_info *curr_subdev_info; 123 struct v4l2_subdev *sd;
126 /* vpif configuration params */ 124 /* vpif configuration params */
127 struct vpif_params vpifparams; 125 struct vpif_params vpifparams;
128 /* common object array */ 126 /* common object array */
@@ -159,10 +157,6 @@ struct vpif_config_params {
159 u32 video_limit[VPIF_CAPTURE_NUM_CHANNELS]; 157 u32 video_limit[VPIF_CAPTURE_NUM_CHANNELS];
160 u8 max_device_type; 158 u8 max_device_type;
161}; 159};
162/* Struct which keeps track of the line numbers for the sliced vbi service */ 160
163struct vpif_service_line {
164 u16 service_id;
165 u16 service_line[2];
166};
167#endif /* End of __KERNEL__ */ 161#endif /* End of __KERNEL__ */
168#endif /* VPIF_CAPTURE_H */ 162#endif /* VPIF_CAPTURE_H */
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index a5b88689abad..b716fbd4241f 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -280,12 +280,13 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
280 } 280 }
281 281
282 /* clock settings */ 282 /* clock settings */
283 ret = 283 if (vpif_config_data->set_clock) {
284 vpif_config_data->set_clock(ch->vpifparams.std_info.ycmux_mode, 284 ret = vpif_config_data->set_clock(ch->vpifparams.std_info.
285 ch->vpifparams.std_info.hd_sd); 285 ycmux_mode, ch->vpifparams.std_info.hd_sd);
286 if (ret < 0) { 286 if (ret < 0) {
287 vpif_err("can't set clock\n"); 287 vpif_err("can't set clock\n");
288 return ret; 288 return ret;
289 }
289 } 290 }
290 291
291 /* set the parameters and addresses */ 292 /* set the parameters and addresses */
@@ -307,7 +308,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
307 channel2_intr_assert(); 308 channel2_intr_assert();
308 channel2_intr_enable(1); 309 channel2_intr_enable(1);
309 enable_channel2(1); 310 enable_channel2(1);
310 if (vpif_config_data->ch2_clip_en) 311 if (vpif_config_data->chan_config[VPIF_CHANNEL2_VIDEO].clip_en)
311 channel2_clipping_enable(1); 312 channel2_clipping_enable(1);
312 } 313 }
313 314
@@ -316,7 +317,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
316 channel3_intr_assert(); 317 channel3_intr_assert();
317 channel3_intr_enable(1); 318 channel3_intr_enable(1);
318 enable_channel3(1); 319 enable_channel3(1);
319 if (vpif_config_data->ch3_clip_en) 320 if (vpif_config_data->chan_config[VPIF_CHANNEL3_VIDEO].clip_en)
320 channel3_clipping_enable(1); 321 channel3_clipping_enable(1);
321 } 322 }
322 323
@@ -826,9 +827,11 @@ static int vpif_querycap(struct file *file, void *priv,
826{ 827{
827 struct vpif_display_config *config = vpif_dev->platform_data; 828 struct vpif_display_config *config = vpif_dev->platform_data;
828 829
829 cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; 830 cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
830 strlcpy(cap->driver, "vpif display", sizeof(cap->driver)); 831 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
831 strlcpy(cap->bus_info, "Platform", sizeof(cap->bus_info)); 832 snprintf(cap->driver, sizeof(cap->driver), "%s", dev_name(vpif_dev));
833 snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
834 dev_name(vpif_dev));
832 strlcpy(cap->card, config->card_name, sizeof(cap->card)); 835 strlcpy(cap->card, config->card_name, sizeof(cap->card));
833 836
834 return 0; 837 return 0;
@@ -935,6 +938,7 @@ static int vpif_reqbufs(struct file *file, void *priv,
935 enum v4l2_field field; 938 enum v4l2_field field;
936 struct vb2_queue *q; 939 struct vb2_queue *q;
937 u8 index = 0; 940 u8 index = 0;
941 int ret;
938 942
939 /* This file handle has not initialized the channel, 943 /* This file handle has not initialized the channel,
940 It is not allowed to do settings */ 944 It is not allowed to do settings */
@@ -980,8 +984,12 @@ static int vpif_reqbufs(struct file *file, void *priv,
980 q->mem_ops = &vb2_dma_contig_memops; 984 q->mem_ops = &vb2_dma_contig_memops;
981 q->buf_struct_size = sizeof(struct vpif_disp_buffer); 985 q->buf_struct_size = sizeof(struct vpif_disp_buffer);
982 986
983 vb2_queue_init(q); 987 ret = vb2_queue_init(q);
984 988 if (ret) {
989 vpif_err("vpif_display: vb2_queue_init() failed\n");
990 vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
991 return ret;
992 }
985 /* Set io allowed member of file handle to TRUE */ 993 /* Set io allowed member of file handle to TRUE */
986 fh->io_allowed[index] = 1; 994 fh->io_allowed[index] = 1;
987 /* Increment io usrs member of channel object to 1 */ 995 /* Increment io usrs member of channel object to 1 */
@@ -1173,14 +1181,16 @@ static int vpif_streamoff(struct file *file, void *priv,
1173 if (buftype == V4L2_BUF_TYPE_VIDEO_OUTPUT) { 1181 if (buftype == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
1174 /* disable channel */ 1182 /* disable channel */
1175 if (VPIF_CHANNEL2_VIDEO == ch->channel_id) { 1183 if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
1176 if (vpif_config_data->ch2_clip_en) 1184 if (vpif_config_data->
1185 chan_config[VPIF_CHANNEL2_VIDEO].clip_en)
1177 channel2_clipping_enable(0); 1186 channel2_clipping_enable(0);
1178 enable_channel2(0); 1187 enable_channel2(0);
1179 channel2_intr_enable(0); 1188 channel2_intr_enable(0);
1180 } 1189 }
1181 if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) || 1190 if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) ||
1182 (2 == common->started)) { 1191 (2 == common->started)) {
1183 if (vpif_config_data->ch3_clip_en) 1192 if (vpif_config_data->
1193 chan_config[VPIF_CHANNEL3_VIDEO].clip_en)
1184 channel3_clipping_enable(0); 1194 channel3_clipping_enable(0);
1185 enable_channel3(0); 1195 enable_channel3(0);
1186 channel3_intr_enable(0); 1196 channel3_intr_enable(0);
@@ -1213,49 +1223,126 @@ static int vpif_enum_output(struct file *file, void *fh,
1213{ 1223{
1214 1224
1215 struct vpif_display_config *config = vpif_dev->platform_data; 1225 struct vpif_display_config *config = vpif_dev->platform_data;
1226 struct vpif_display_chan_config *chan_cfg;
1227 struct vpif_fh *vpif_handler = fh;
1228 struct channel_obj *ch = vpif_handler->channel;
1216 1229
1217 if (output->index >= config->output_count) { 1230 chan_cfg = &config->chan_config[ch->channel_id];
1231 if (output->index >= chan_cfg->output_count) {
1218 vpif_dbg(1, debug, "Invalid output index\n"); 1232 vpif_dbg(1, debug, "Invalid output index\n");
1219 return -EINVAL; 1233 return -EINVAL;
1220 } 1234 }
1221 1235
1222 strcpy(output->name, config->output[output->index]); 1236 *output = chan_cfg->outputs[output->index].output;
1223 output->type = V4L2_OUTPUT_TYPE_ANALOG; 1237 return 0;
1224 output->std = VPIF_V4L2_STD; 1238}
1239
1240/**
1241 * vpif_output_to_subdev() - Maps output to sub device
1242 * @vpif_cfg - global config ptr
1243 * @chan_cfg - channel config ptr
1244 * @index - Given output index from application
1245 *
1246 * lookup the sub device information for a given output index.
1247 * we report all the output to application. output table also
1248 * has sub device name for the each output
1249 */
1250static int
1251vpif_output_to_subdev(struct vpif_display_config *vpif_cfg,
1252 struct vpif_display_chan_config *chan_cfg, int index)
1253{
1254 struct vpif_subdev_info *subdev_info;
1255 const char *subdev_name;
1256 int i;
1257
1258 vpif_dbg(2, debug, "vpif_output_to_subdev\n");
1259
1260 if (chan_cfg->outputs == NULL)
1261 return -1;
1262
1263 subdev_name = chan_cfg->outputs[index].subdev_name;
1264 if (subdev_name == NULL)
1265 return -1;
1266
1267 /* loop through the sub device list to get the sub device info */
1268 for (i = 0; i < vpif_cfg->subdev_count; i++) {
1269 subdev_info = &vpif_cfg->subdevinfo[i];
1270 if (!strcmp(subdev_info->name, subdev_name))
1271 return i;
1272 }
1273 return -1;
1274}
1275
1276/**
1277 * vpif_set_output() - Select an output
1278 * @vpif_cfg - global config ptr
1279 * @ch - channel
1280 * @index - Given output index from application
1281 *
1282 * Select the given output.
1283 */
1284static int vpif_set_output(struct vpif_display_config *vpif_cfg,
1285 struct channel_obj *ch, int index)
1286{
1287 struct vpif_display_chan_config *chan_cfg =
1288 &vpif_cfg->chan_config[ch->channel_id];
1289 struct vpif_subdev_info *subdev_info = NULL;
1290 struct v4l2_subdev *sd = NULL;
1291 u32 input = 0, output = 0;
1292 int sd_index;
1293 int ret;
1294
1295 sd_index = vpif_output_to_subdev(vpif_cfg, chan_cfg, index);
1296 if (sd_index >= 0) {
1297 sd = vpif_obj.sd[sd_index];
1298 subdev_info = &vpif_cfg->subdevinfo[sd_index];
1299 }
1300
1301 if (sd) {
1302 input = chan_cfg->outputs[index].input_route;
1303 output = chan_cfg->outputs[index].output_route;
1304 ret = v4l2_subdev_call(sd, video, s_routing, input, output, 0);
1305 if (ret < 0 && ret != -ENOIOCTLCMD) {
1306 vpif_err("Failed to set output\n");
1307 return ret;
1308 }
1225 1309
1310 }
1311 ch->output_idx = index;
1312 ch->sd = sd;
1313 if (chan_cfg->outputs != NULL)
1314 /* update tvnorms from the sub device output info */
1315 ch->video_dev->tvnorms = chan_cfg->outputs[index].output.std;
1226 return 0; 1316 return 0;
1227} 1317}
1228 1318
1229static int vpif_s_output(struct file *file, void *priv, unsigned int i) 1319static int vpif_s_output(struct file *file, void *priv, unsigned int i)
1230{ 1320{
1321 struct vpif_display_config *config = vpif_dev->platform_data;
1322 struct vpif_display_chan_config *chan_cfg;
1231 struct vpif_fh *fh = priv; 1323 struct vpif_fh *fh = priv;
1232 struct channel_obj *ch = fh->channel; 1324 struct channel_obj *ch = fh->channel;
1233 struct video_obj *vid_ch = &ch->video;
1234 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; 1325 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
1235 int ret = 0; 1326
1327 chan_cfg = &config->chan_config[ch->channel_id];
1328
1329 if (i >= chan_cfg->output_count)
1330 return -EINVAL;
1236 1331
1237 if (common->started) { 1332 if (common->started) {
1238 vpif_err("Streaming in progress\n"); 1333 vpif_err("Streaming in progress\n");
1239 return -EBUSY; 1334 return -EBUSY;
1240 } 1335 }
1241 1336
1242 ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, video, 1337 return vpif_set_output(config, ch, i);
1243 s_routing, 0, i, 0);
1244
1245 if (ret < 0)
1246 vpif_err("Failed to set output standard\n");
1247
1248 vid_ch->output_id = i;
1249 return ret;
1250} 1338}
1251 1339
1252static int vpif_g_output(struct file *file, void *priv, unsigned int *i) 1340static int vpif_g_output(struct file *file, void *priv, unsigned int *i)
1253{ 1341{
1254 struct vpif_fh *fh = priv; 1342 struct vpif_fh *fh = priv;
1255 struct channel_obj *ch = fh->channel; 1343 struct channel_obj *ch = fh->channel;
1256 struct video_obj *vid_ch = &ch->video;
1257 1344
1258 *i = vid_ch->output_id; 1345 *i = ch->output_idx;
1259 1346
1260 return 0; 1347 return 0;
1261} 1348}
@@ -1290,10 +1377,12 @@ vpif_enum_dv_timings(struct file *file, void *priv,
1290{ 1377{
1291 struct vpif_fh *fh = priv; 1378 struct vpif_fh *fh = priv;
1292 struct channel_obj *ch = fh->channel; 1379 struct channel_obj *ch = fh->channel;
1293 struct video_obj *vid_ch = &ch->video; 1380 int ret;
1294 1381
1295 return v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id], 1382 ret = v4l2_subdev_call(ch->sd, video, enum_dv_timings, timings);
1296 video, enum_dv_timings, timings); 1383 if (ret == -ENOIOCTLCMD && ret == -ENODEV)
1384 return -EINVAL;
1385 return ret;
1297} 1386}
1298 1387
1299/** 1388/**
@@ -1319,13 +1408,9 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
1319 } 1408 }
1320 1409
1321 /* Configure subdevice timings, if any */ 1410 /* Configure subdevice timings, if any */
1322 ret = v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id], 1411 ret = v4l2_subdev_call(ch->sd, video, s_dv_timings, timings);
1323 video, s_dv_timings, timings); 1412 if (ret == -ENOIOCTLCMD || ret == -ENODEV)
1324 if (ret == -ENOIOCTLCMD) { 1413 ret = 0;
1325 vpif_dbg(2, debug, "Custom DV timings not supported by "
1326 "subdevice\n");
1327 return -EINVAL;
1328 }
1329 if (ret < 0) { 1414 if (ret < 0) {
1330 vpif_dbg(2, debug, "Error setting custom DV timings\n"); 1415 vpif_dbg(2, debug, "Error setting custom DV timings\n");
1331 return ret; 1416 return ret;
@@ -1450,10 +1535,8 @@ static int vpif_dbg_g_register(struct file *file, void *priv,
1450 struct v4l2_dbg_register *reg){ 1535 struct v4l2_dbg_register *reg){
1451 struct vpif_fh *fh = priv; 1536 struct vpif_fh *fh = priv;
1452 struct channel_obj *ch = fh->channel; 1537 struct channel_obj *ch = fh->channel;
1453 struct video_obj *vid_ch = &ch->video;
1454 1538
1455 return v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id], core, 1539 return v4l2_subdev_call(ch->sd, core, g_register, reg);
1456 g_register, reg);
1457} 1540}
1458 1541
1459/* 1542/*
@@ -1469,10 +1552,8 @@ static int vpif_dbg_s_register(struct file *file, void *priv,
1469 struct v4l2_dbg_register *reg){ 1552 struct v4l2_dbg_register *reg){
1470 struct vpif_fh *fh = priv; 1553 struct vpif_fh *fh = priv;
1471 struct channel_obj *ch = fh->channel; 1554 struct channel_obj *ch = fh->channel;
1472 struct video_obj *vid_ch = &ch->video;
1473 1555
1474 return v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id], core, 1556 return v4l2_subdev_call(ch->sd, core, s_register, reg);
1475 s_register, reg);
1476} 1557}
1477#endif 1558#endif
1478 1559
@@ -1536,9 +1617,6 @@ static struct video_device vpif_video_template = {
1536 .name = "vpif", 1617 .name = "vpif",
1537 .fops = &vpif_fops, 1618 .fops = &vpif_fops,
1538 .ioctl_ops = &vpif_ioctl_ops, 1619 .ioctl_ops = &vpif_ioctl_ops,
1539 .tvnorms = VPIF_V4L2_STD,
1540 .current_norm = V4L2_STD_625_50,
1541
1542}; 1620};
1543 1621
1544/*Configure the channels, buffer sizei, request irq */ 1622/*Configure the channels, buffer sizei, request irq */
@@ -1611,7 +1689,8 @@ static __init int vpif_probe(struct platform_device *pdev)
1611{ 1689{
1612 struct vpif_subdev_info *subdevdata; 1690 struct vpif_subdev_info *subdevdata;
1613 struct vpif_display_config *config; 1691 struct vpif_display_config *config;
1614 int i, j = 0, k, q, m, err = 0; 1692 int i, j = 0, k, err = 0;
1693 int res_idx = 0;
1615 struct i2c_adapter *i2c_adap; 1694 struct i2c_adapter *i2c_adap;
1616 struct common_obj *common; 1695 struct common_obj *common;
1617 struct channel_obj *ch; 1696 struct channel_obj *ch;
@@ -1634,21 +1713,22 @@ static __init int vpif_probe(struct platform_device *pdev)
1634 return err; 1713 return err;
1635 } 1714 }
1636 1715
1637 k = 0; 1716 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) {
1638 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, k))) {
1639 for (i = res->start; i <= res->end; i++) { 1717 for (i = res->start; i <= res->end; i++) {
1640 if (request_irq(i, vpif_channel_isr, IRQF_SHARED, 1718 if (request_irq(i, vpif_channel_isr, IRQF_SHARED,
1641 "VPIF_Display", 1719 "VPIF_Display", (void *)
1642 (void *)(&vpif_obj.dev[k]->channel_id))) { 1720 (&vpif_obj.dev[res_idx]->channel_id))) {
1643 err = -EBUSY; 1721 err = -EBUSY;
1722 for (j = 0; j < i; j++)
1723 free_irq(j, (void *)
1724 (&vpif_obj.dev[res_idx]->channel_id));
1644 goto vpif_int_err; 1725 goto vpif_int_err;
1645 } 1726 }
1646 } 1727 }
1647 k++; 1728 res_idx++;
1648 } 1729 }
1649 1730
1650 for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) { 1731 for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
1651
1652 /* Get the pointer to the channel object */ 1732 /* Get the pointer to the channel object */
1653 ch = vpif_obj.dev[i]; 1733 ch = vpif_obj.dev[i];
1654 1734
@@ -1694,6 +1774,32 @@ static __init int vpif_probe(struct platform_device *pdev)
1694 } 1774 }
1695 } 1775 }
1696 1776
1777 i2c_adap = i2c_get_adapter(1);
1778 config = pdev->dev.platform_data;
1779 subdev_count = config->subdev_count;
1780 subdevdata = config->subdevinfo;
1781 vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count,
1782 GFP_KERNEL);
1783 if (vpif_obj.sd == NULL) {
1784 vpif_err("unable to allocate memory for subdevice pointers\n");
1785 err = -ENOMEM;
1786 goto vpif_sd_error;
1787 }
1788
1789 for (i = 0; i < subdev_count; i++) {
1790 vpif_obj.sd[i] = v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
1791 i2c_adap,
1792 &subdevdata[i].board_info,
1793 NULL);
1794 if (!vpif_obj.sd[i]) {
1795 vpif_err("Error registering v4l2 subdevice\n");
1796 goto probe_subdev_out;
1797 }
1798
1799 if (vpif_obj.sd[i])
1800 vpif_obj.sd[i]->grp_id = 1 << i;
1801 }
1802
1697 for (j = 0; j < VPIF_DISPLAY_MAX_DEVICES; j++) { 1803 for (j = 0; j < VPIF_DISPLAY_MAX_DEVICES; j++) {
1698 ch = vpif_obj.dev[j]; 1804 ch = vpif_obj.dev[j];
1699 /* Initialize field of the channel objects */ 1805 /* Initialize field of the channel objects */
@@ -1715,6 +1821,8 @@ static __init int vpif_probe(struct platform_device *pdev)
1715 1821
1716 } 1822 }
1717 ch->initialized = 0; 1823 ch->initialized = 0;
1824 if (subdev_count)
1825 ch->sd = vpif_obj.sd[0];
1718 ch->channel_id = j; 1826 ch->channel_id = j;
1719 if (j < 2) 1827 if (j < 2)
1720 ch->common[VPIF_VIDEO_INDEX].numbuffers = 1828 ch->common[VPIF_VIDEO_INDEX].numbuffers =
@@ -1729,6 +1837,12 @@ static __init int vpif_probe(struct platform_device *pdev)
1729 ch->common[VPIF_VIDEO_INDEX].fmt.type = 1837 ch->common[VPIF_VIDEO_INDEX].fmt.type =
1730 V4L2_BUF_TYPE_VIDEO_OUTPUT; 1838 V4L2_BUF_TYPE_VIDEO_OUTPUT;
1731 ch->video_dev->lock = &common->lock; 1839 ch->video_dev->lock = &common->lock;
1840 video_set_drvdata(ch->video_dev, ch);
1841
1842 /* select output 0 */
1843 err = vpif_set_output(config, ch, 0);
1844 if (err)
1845 goto probe_out;
1732 1846
1733 /* register video device */ 1847 /* register video device */
1734 vpif_dbg(1, debug, "channel=%x,channel->video_dev=%x\n", 1848 vpif_dbg(1, debug, "channel=%x,channel->video_dev=%x\n",
@@ -1738,42 +1852,12 @@ static __init int vpif_probe(struct platform_device *pdev)
1738 VFL_TYPE_GRABBER, (j ? 3 : 2)); 1852 VFL_TYPE_GRABBER, (j ? 3 : 2));
1739 if (err < 0) 1853 if (err < 0)
1740 goto probe_out; 1854 goto probe_out;
1741
1742 video_set_drvdata(ch->video_dev, ch);
1743 }
1744
1745 i2c_adap = i2c_get_adapter(1);
1746 config = pdev->dev.platform_data;
1747 subdev_count = config->subdev_count;
1748 subdevdata = config->subdevinfo;
1749 vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count,
1750 GFP_KERNEL);
1751 if (vpif_obj.sd == NULL) {
1752 vpif_err("unable to allocate memory for subdevice pointers\n");
1753 err = -ENOMEM;
1754 goto probe_out;
1755 }
1756
1757 for (i = 0; i < subdev_count; i++) {
1758 vpif_obj.sd[i] = v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
1759 i2c_adap,
1760 &subdevdata[i].board_info,
1761 NULL);
1762 if (!vpif_obj.sd[i]) {
1763 vpif_err("Error registering v4l2 subdevice\n");
1764 goto probe_subdev_out;
1765 }
1766
1767 if (vpif_obj.sd[i])
1768 vpif_obj.sd[i]->grp_id = 1 << i;
1769 } 1855 }
1770 1856
1771 v4l2_info(&vpif_obj.v4l2_dev, 1857 v4l2_info(&vpif_obj.v4l2_dev,
1772 " VPIF display driver initialized\n"); 1858 " VPIF display driver initialized\n");
1773 return 0; 1859 return 0;
1774 1860
1775probe_subdev_out:
1776 kfree(vpif_obj.sd);
1777probe_out: 1861probe_out:
1778 for (k = 0; k < j; k++) { 1862 for (k = 0; k < j; k++) {
1779 ch = vpif_obj.dev[k]; 1863 ch = vpif_obj.dev[k];
@@ -1781,14 +1865,21 @@ probe_out:
1781 video_device_release(ch->video_dev); 1865 video_device_release(ch->video_dev);
1782 ch->video_dev = NULL; 1866 ch->video_dev = NULL;
1783 } 1867 }
1868probe_subdev_out:
1869 kfree(vpif_obj.sd);
1870vpif_sd_error:
1871 for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
1872 ch = vpif_obj.dev[i];
1873 /* Note: does nothing if ch->video_dev == NULL */
1874 video_device_release(ch->video_dev);
1875 }
1784vpif_int_err: 1876vpif_int_err:
1785 v4l2_device_unregister(&vpif_obj.v4l2_dev); 1877 v4l2_device_unregister(&vpif_obj.v4l2_dev);
1786 vpif_err("VPIF IRQ request failed\n"); 1878 vpif_err("VPIF IRQ request failed\n");
1787 for (q = k; k >= 0; k--) { 1879 for (i = 0; i < res_idx; i++) {
1788 for (m = i; m >= res->start; m--) 1880 res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
1789 free_irq(m, (void *)(&vpif_obj.dev[k]->channel_id)); 1881 for (j = res->start; j <= res->end; j++)
1790 res = platform_get_resource(pdev, IORESOURCE_IRQ, k-1); 1882 free_irq(j, (void *)(&vpif_obj.dev[i]->channel_id));
1791 m = res->end;
1792 } 1883 }
1793 1884
1794 return err; 1885 return err;
diff --git a/drivers/media/platform/davinci/vpif_display.h b/drivers/media/platform/davinci/vpif_display.h
index f628ebcf3674..a5a18f74395c 100644
--- a/drivers/media/platform/davinci/vpif_display.h
+++ b/drivers/media/platform/davinci/vpif_display.h
@@ -62,13 +62,6 @@ struct video_obj {
62 v4l2_std_id stdid; /* Currently selected or default 62 v4l2_std_id stdid; /* Currently selected or default
63 * standard */ 63 * standard */
64 struct v4l2_dv_timings dv_timings; 64 struct v4l2_dv_timings dv_timings;
65 u32 output_id; /* Current output id */
66};
67
68struct vbi_obj {
69 int num_services;
70 struct vpif_vbi_params vbiparams; /* vpif parameters for the raw
71 * vbi data */
72}; 65};
73 66
74struct vpif_disp_buffer { 67struct vpif_disp_buffer {
@@ -131,12 +124,13 @@ struct channel_obj {
131 * which is being displayed */ 124 * which is being displayed */
132 u8 initialized; /* flag to indicate whether 125 u8 initialized; /* flag to indicate whether
133 * encoder is initialized */ 126 * encoder is initialized */
127 u32 output_idx; /* Current output index */
128 struct v4l2_subdev *sd; /* Current output subdev(may be NULL) */
134 129
135 enum vpif_channel_id channel_id;/* Identifies channel */ 130 enum vpif_channel_id channel_id;/* Identifies channel */
136 struct vpif_params vpifparams; 131 struct vpif_params vpifparams;
137 struct common_obj common[VPIF_NUMOBJECTS]; 132 struct common_obj common[VPIF_NUMOBJECTS];
138 struct video_obj video; 133 struct video_obj video;
139 struct vbi_obj vbi;
140}; 134};
141 135
142/* File handle structure */ 136/* File handle structure */
@@ -168,12 +162,4 @@ struct vpif_config_params {
168 u8 min_numbuffers; 162 u8 min_numbuffers;
169}; 163};
170 164
171/* Struct which keeps track of the line numbers for the sliced vbi service */
172struct vpif_service_line {
173 u16 service_id;
174 u16 service_line[2];
175 u16 enc_service_id;
176 u8 bytestowrite;
177};
178
179#endif /* DAVINCIHD_DISPLAY_H */ 165#endif /* DAVINCIHD_DISPLAY_H */
diff --git a/drivers/media/platform/exynos-gsc/gsc-regs.c b/drivers/media/platform/exynos-gsc/gsc-regs.c
index 0d8625f03a32..0146b354dc22 100644
--- a/drivers/media/platform/exynos-gsc/gsc-regs.c
+++ b/drivers/media/platform/exynos-gsc/gsc-regs.c
@@ -212,7 +212,7 @@ void gsc_hw_set_in_image_format(struct gsc_ctx *ctx)
212 else 212 else
213 cfg |= GSC_IN_YUV422_3P; 213 cfg |= GSC_IN_YUV422_3P;
214 break; 214 break;
215 }; 215 }
216 216
217 writel(cfg, dev->regs + GSC_IN_CON); 217 writel(cfg, dev->regs + GSC_IN_CON);
218} 218}
@@ -332,7 +332,7 @@ void gsc_hw_set_out_image_format(struct gsc_ctx *ctx)
332 case 3: 332 case 3:
333 cfg |= GSC_OUT_YUV420_3P; 333 cfg |= GSC_OUT_YUV420_3P;
334 break; 334 break;
335 }; 335 }
336 336
337end_set: 337end_set:
338 writel(cfg, dev->regs + GSC_OUT_CON); 338 writel(cfg, dev->regs + GSC_OUT_CON);
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index 897250b88647..31ac4dc69247 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -864,7 +864,7 @@ int vidioc_s_fbuf(struct file *file, void *priv, const struct v4l2_framebuffer *
864{ 864{
865 struct viu_fh *fh = priv; 865 struct viu_fh *fh = priv;
866 struct viu_dev *dev = fh->dev; 866 struct viu_dev *dev = fh->dev;
867 struct v4l2_framebuffer *fb = arg; 867 const struct v4l2_framebuffer *fb = arg;
868 struct viu_fmt *fmt; 868 struct viu_fmt *fmt;
869 869
870 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO)) 870 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
diff --git a/drivers/media/platform/mem2mem_testdev.c b/drivers/media/platform/mem2mem_testdev.c
index d03637537118..2e2121e98133 100644
--- a/drivers/media/platform/mem2mem_testdev.c
+++ b/drivers/media/platform/mem2mem_testdev.c
@@ -397,8 +397,7 @@ static void device_isr(unsigned long priv)
397 curr_ctx = v4l2_m2m_get_curr_priv(m2mtest_dev->m2m_dev); 397 curr_ctx = v4l2_m2m_get_curr_priv(m2mtest_dev->m2m_dev);
398 398
399 if (NULL == curr_ctx) { 399 if (NULL == curr_ctx) {
400 printk(KERN_ERR 400 pr_err("Instance released before the end of transaction\n");
401 "Instance released before the end of transaction\n");
402 return; 401 return;
403 } 402 }
404 403
@@ -894,7 +893,7 @@ static int m2mtest_open(struct file *file)
894 893
895 if (mutex_lock_interruptible(&dev->dev_mutex)) 894 if (mutex_lock_interruptible(&dev->dev_mutex))
896 return -ERESTARTSYS; 895 return -ERESTARTSYS;
897 ctx = kzalloc(sizeof *ctx, GFP_KERNEL); 896 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
898 if (!ctx) { 897 if (!ctx) {
899 rc = -ENOMEM; 898 rc = -ENOMEM;
900 goto open_unlock; 899 goto open_unlock;
@@ -1020,7 +1019,7 @@ static int m2mtest_probe(struct platform_device *pdev)
1020 struct video_device *vfd; 1019 struct video_device *vfd;
1021 int ret; 1020 int ret;
1022 1021
1023 dev = kzalloc(sizeof *dev, GFP_KERNEL); 1022 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1024 if (!dev) 1023 if (!dev)
1025 return -ENOMEM; 1024 return -ENOMEM;
1026 1025
@@ -1028,7 +1027,7 @@ static int m2mtest_probe(struct platform_device *pdev)
1028 1027
1029 ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); 1028 ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
1030 if (ret) 1029 if (ret)
1031 goto free_dev; 1030 return ret;
1032 1031
1033 atomic_set(&dev->num_inst, 0); 1032 atomic_set(&dev->num_inst, 0);
1034 mutex_init(&dev->dev_mutex); 1033 mutex_init(&dev->dev_mutex);
@@ -1067,15 +1066,13 @@ static int m2mtest_probe(struct platform_device *pdev)
1067 1066
1068 return 0; 1067 return 0;
1069 1068
1070 v4l2_m2m_release(dev->m2m_dev);
1071err_m2m: 1069err_m2m:
1070 v4l2_m2m_release(dev->m2m_dev);
1072 video_unregister_device(dev->vfd); 1071 video_unregister_device(dev->vfd);
1073rel_vdev: 1072rel_vdev:
1074 video_device_release(vfd); 1073 video_device_release(vfd);
1075unreg_dev: 1074unreg_dev:
1076 v4l2_device_unregister(&dev->v4l2_dev); 1075 v4l2_device_unregister(&dev->v4l2_dev);
1077free_dev:
1078 kfree(dev);
1079 1076
1080 return ret; 1077 return ret;
1081} 1078}
@@ -1090,7 +1087,6 @@ static int m2mtest_remove(struct platform_device *pdev)
1090 del_timer_sync(&dev->timer); 1087 del_timer_sync(&dev->timer);
1091 video_unregister_device(dev->vfd); 1088 video_unregister_device(dev->vfd);
1092 v4l2_device_unregister(&dev->v4l2_dev); 1089 v4l2_device_unregister(&dev->v4l2_dev);
1093 kfree(dev);
1094 1090
1095 return 0; 1091 return 0;
1096} 1092}
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 134016f0e660..a3b1a34c896d 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -455,11 +455,15 @@ static int omapvid_init(struct omap_vout_device *vout, u32 addr)
455 455
456 win = &vout->win; 456 win = &vout->win;
457 for (i = 0; i < ovid->num_overlays; i++) { 457 for (i = 0; i < ovid->num_overlays; i++) {
458 struct omap_dss_device *dssdev;
459
458 ovl = ovid->overlays[i]; 460 ovl = ovid->overlays[i];
459 if (!ovl->manager || !ovl->manager->device) 461 dssdev = ovl->get_device(ovl);
462
463 if (!dssdev)
460 return -EINVAL; 464 return -EINVAL;
461 465
462 timing = &ovl->manager->device->panel.timings; 466 timing = &dssdev->panel.timings;
463 467
464 outw = win->w.width; 468 outw = win->w.width;
465 outh = win->w.height; 469 outh = win->w.height;
@@ -516,8 +520,11 @@ static int omapvid_apply_changes(struct omap_vout_device *vout)
516 struct omapvideo_info *ovid = &vout->vid_info; 520 struct omapvideo_info *ovid = &vout->vid_info;
517 521
518 for (i = 0; i < ovid->num_overlays; i++) { 522 for (i = 0; i < ovid->num_overlays; i++) {
523 struct omap_dss_device *dssdev;
524
519 ovl = ovid->overlays[i]; 525 ovl = ovid->overlays[i];
520 if (!ovl->manager || !ovl->manager->device) 526 dssdev = ovl->get_device(ovl);
527 if (!dssdev)
521 return -EINVAL; 528 return -EINVAL;
522 ovl->manager->apply(ovl->manager); 529 ovl->manager->apply(ovl->manager);
523 } 530 }
@@ -580,12 +587,14 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus)
580 587
581 ovid = &vout->vid_info; 588 ovid = &vout->vid_info;
582 ovl = ovid->overlays[0]; 589 ovl = ovid->overlays[0];
583 /* get the display device attached to the overlay */
584 if (!ovl->manager || !ovl->manager->device)
585 return;
586 590
587 mgr_id = ovl->manager->id; 591 mgr_id = ovl->manager->id;
588 cur_display = ovl->manager->device; 592
593 /* get the display device attached to the overlay */
594 cur_display = ovl->get_device(ovl);
595
596 if (!cur_display)
597 return;
589 598
590 spin_lock(&vout->vbq_lock); 599 spin_lock(&vout->vbq_lock);
591 do_gettimeofday(&timevalue); 600 do_gettimeofday(&timevalue);
@@ -949,7 +958,9 @@ static int omap_vout_release(struct file *file)
949 /* Disable all the overlay managers connected with this interface */ 958 /* Disable all the overlay managers connected with this interface */
950 for (i = 0; i < ovid->num_overlays; i++) { 959 for (i = 0; i < ovid->num_overlays; i++) {
951 struct omap_overlay *ovl = ovid->overlays[i]; 960 struct omap_overlay *ovl = ovid->overlays[i];
952 if (ovl->manager && ovl->manager->device) 961 struct omap_dss_device *dssdev = ovl->get_device(ovl);
962
963 if (dssdev)
953 ovl->disable(ovl); 964 ovl->disable(ovl);
954 } 965 }
955 /* Turn off the pipeline */ 966 /* Turn off the pipeline */
@@ -1082,14 +1093,17 @@ static int vidioc_try_fmt_vid_out(struct file *file, void *fh,
1082 struct omapvideo_info *ovid; 1093 struct omapvideo_info *ovid;
1083 struct omap_video_timings *timing; 1094 struct omap_video_timings *timing;
1084 struct omap_vout_device *vout = fh; 1095 struct omap_vout_device *vout = fh;
1096 struct omap_dss_device *dssdev;
1085 1097
1086 ovid = &vout->vid_info; 1098 ovid = &vout->vid_info;
1087 ovl = ovid->overlays[0]; 1099 ovl = ovid->overlays[0];
1100 /* get the display device attached to the overlay */
1101 dssdev = ovl->get_device(ovl);
1088 1102
1089 if (!ovl->manager || !ovl->manager->device) 1103 if (!dssdev)
1090 return -EINVAL; 1104 return -EINVAL;
1091 /* get the display device attached to the overlay */ 1105
1092 timing = &ovl->manager->device->panel.timings; 1106 timing = &dssdev->panel.timings;
1093 1107
1094 vout->fbuf.fmt.height = timing->y_res; 1108 vout->fbuf.fmt.height = timing->y_res;
1095 vout->fbuf.fmt.width = timing->x_res; 1109 vout->fbuf.fmt.width = timing->x_res;
@@ -1106,6 +1120,7 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
1106 struct omapvideo_info *ovid; 1120 struct omapvideo_info *ovid;
1107 struct omap_video_timings *timing; 1121 struct omap_video_timings *timing;
1108 struct omap_vout_device *vout = fh; 1122 struct omap_vout_device *vout = fh;
1123 struct omap_dss_device *dssdev;
1109 1124
1110 if (vout->streaming) 1125 if (vout->streaming)
1111 return -EBUSY; 1126 return -EBUSY;
@@ -1114,13 +1129,14 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
1114 1129
1115 ovid = &vout->vid_info; 1130 ovid = &vout->vid_info;
1116 ovl = ovid->overlays[0]; 1131 ovl = ovid->overlays[0];
1132 dssdev = ovl->get_device(ovl);
1117 1133
1118 /* get the display device attached to the overlay */ 1134 /* get the display device attached to the overlay */
1119 if (!ovl->manager || !ovl->manager->device) { 1135 if (!dssdev) {
1120 ret = -EINVAL; 1136 ret = -EINVAL;
1121 goto s_fmt_vid_out_exit; 1137 goto s_fmt_vid_out_exit;
1122 } 1138 }
1123 timing = &ovl->manager->device->panel.timings; 1139 timing = &dssdev->panel.timings;
1124 1140
1125 /* We dont support RGB24-packed mode if vrfb rotation 1141 /* We dont support RGB24-packed mode if vrfb rotation
1126 * is enabled*/ 1142 * is enabled*/
@@ -1299,6 +1315,7 @@ static int vidioc_s_crop(struct file *file, void *fh, const struct v4l2_crop *cr
1299 struct omapvideo_info *ovid; 1315 struct omapvideo_info *ovid;
1300 struct omap_overlay *ovl; 1316 struct omap_overlay *ovl;
1301 struct omap_video_timings *timing; 1317 struct omap_video_timings *timing;
1318 struct omap_dss_device *dssdev;
1302 1319
1303 if (vout->streaming) 1320 if (vout->streaming)
1304 return -EBUSY; 1321 return -EBUSY;
@@ -1306,13 +1323,15 @@ static int vidioc_s_crop(struct file *file, void *fh, const struct v4l2_crop *cr
1306 mutex_lock(&vout->lock); 1323 mutex_lock(&vout->lock);
1307 ovid = &vout->vid_info; 1324 ovid = &vout->vid_info;
1308 ovl = ovid->overlays[0]; 1325 ovl = ovid->overlays[0];
1326 /* get the display device attached to the overlay */
1327 dssdev = ovl->get_device(ovl);
1309 1328
1310 if (!ovl->manager || !ovl->manager->device) { 1329 if (!dssdev) {
1311 ret = -EINVAL; 1330 ret = -EINVAL;
1312 goto s_crop_err; 1331 goto s_crop_err;
1313 } 1332 }
1314 /* get the display device attached to the overlay */ 1333
1315 timing = &ovl->manager->device->panel.timings; 1334 timing = &dssdev->panel.timings;
1316 1335
1317 if (is_rotation_90_or_270(vout)) { 1336 if (is_rotation_90_or_270(vout)) {
1318 vout->fbuf.fmt.height = timing->x_res; 1337 vout->fbuf.fmt.height = timing->x_res;
@@ -1668,7 +1687,7 @@ static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
1668 for (j = 0; j < ovid->num_overlays; j++) { 1687 for (j = 0; j < ovid->num_overlays; j++) {
1669 struct omap_overlay *ovl = ovid->overlays[j]; 1688 struct omap_overlay *ovl = ovid->overlays[j];
1670 1689
1671 if (ovl->manager && ovl->manager->device) { 1690 if (ovl->get_device(ovl)) {
1672 struct omap_overlay_info info; 1691 struct omap_overlay_info info;
1673 ovl->get_overlay_info(ovl, &info); 1692 ovl->get_overlay_info(ovl, &info);
1674 info.paddr = addr; 1693 info.paddr = addr;
@@ -1691,8 +1710,9 @@ static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
1691 1710
1692 for (j = 0; j < ovid->num_overlays; j++) { 1711 for (j = 0; j < ovid->num_overlays; j++) {
1693 struct omap_overlay *ovl = ovid->overlays[j]; 1712 struct omap_overlay *ovl = ovid->overlays[j];
1713 struct omap_dss_device *dssdev = ovl->get_device(ovl);
1694 1714
1695 if (ovl->manager && ovl->manager->device) { 1715 if (dssdev) {
1696 ret = ovl->enable(ovl); 1716 ret = ovl->enable(ovl);
1697 if (ret) 1717 if (ret)
1698 goto streamon_err1; 1718 goto streamon_err1;
@@ -1727,8 +1747,9 @@ static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
1727 1747
1728 for (j = 0; j < ovid->num_overlays; j++) { 1748 for (j = 0; j < ovid->num_overlays; j++) {
1729 struct omap_overlay *ovl = ovid->overlays[j]; 1749 struct omap_overlay *ovl = ovid->overlays[j];
1750 struct omap_dss_device *dssdev = ovl->get_device(ovl);
1730 1751
1731 if (ovl->manager && ovl->manager->device) 1752 if (dssdev)
1732 ovl->disable(ovl); 1753 ovl->disable(ovl);
1733 } 1754 }
1734 1755
@@ -1891,8 +1912,8 @@ static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
1891 struct video_device *vfd; 1912 struct video_device *vfd;
1892 struct v4l2_pix_format *pix; 1913 struct v4l2_pix_format *pix;
1893 struct v4l2_control *control; 1914 struct v4l2_control *control;
1894 struct omap_dss_device *display = 1915 struct omap_overlay *ovl = vout->vid_info.overlays[0];
1895 vout->vid_info.overlays[0]->manager->device; 1916 struct omap_dss_device *display = ovl->get_device(ovl);
1896 1917
1897 /* set the default pix */ 1918 /* set the default pix */
1898 pix = &vout->pix; 1919 pix = &vout->pix;
@@ -2207,8 +2228,10 @@ static int __init omap_vout_probe(struct platform_device *pdev)
2207 */ 2228 */
2208 for (i = 1; i < vid_dev->num_overlays; i++) { 2229 for (i = 1; i < vid_dev->num_overlays; i++) {
2209 ovl = omap_dss_get_overlay(i); 2230 ovl = omap_dss_get_overlay(i);
2210 if (ovl->manager && ovl->manager->device) { 2231 dssdev = ovl->get_device(ovl);
2211 def_display = ovl->manager->device; 2232
2233 if (dssdev) {
2234 def_display = dssdev;
2212 } else { 2235 } else {
2213 dev_warn(&pdev->dev, "cannot find display\n"); 2236 dev_warn(&pdev->dev, "cannot find display\n");
2214 def_display = NULL; 2237 def_display = NULL;
@@ -2255,8 +2278,10 @@ probe_err1:
2255 for (i = 1; i < vid_dev->num_overlays; i++) { 2278 for (i = 1; i < vid_dev->num_overlays; i++) {
2256 def_display = NULL; 2279 def_display = NULL;
2257 ovl = omap_dss_get_overlay(i); 2280 ovl = omap_dss_get_overlay(i);
2258 if (ovl->manager && ovl->manager->device) 2281 dssdev = ovl->get_device(ovl);
2259 def_display = ovl->manager->device; 2282
2283 if (dssdev)
2284 def_display = dssdev;
2260 2285
2261 if (def_display && def_display->driver) 2286 if (def_display && def_display->driver)
2262 def_display->driver->disable(def_display); 2287 def_display->driver->disable(def_display);
diff --git a/drivers/media/platform/omap3isp/ispreg.h b/drivers/media/platform/omap3isp/ispreg.h
index 084ea77d65a7..e2c57f334c5d 100644
--- a/drivers/media/platform/omap3isp/ispreg.h
+++ b/drivers/media/platform/omap3isp/ispreg.h
@@ -27,13 +27,13 @@
27#ifndef OMAP3_ISP_REG_H 27#ifndef OMAP3_ISP_REG_H
28#define OMAP3_ISP_REG_H 28#define OMAP3_ISP_REG_H
29 29
30#include <plat/omap34xx.h>
31
32
33#define CM_CAM_MCLK_HZ 172800000 /* Hz */ 30#define CM_CAM_MCLK_HZ 172800000 /* Hz */
34 31
35/* ISP Submodules offset */ 32/* ISP Submodules offset */
36 33
34#define L4_34XX_BASE 0x48000000
35#define OMAP3430_ISP_BASE (L4_34XX_BASE + 0xBC000)
36
37#define OMAP3ISP_REG_BASE OMAP3430_ISP_BASE 37#define OMAP3ISP_REG_BASE OMAP3430_ISP_BASE
38#define OMAP3ISP_REG(offset) (OMAP3ISP_REG_BASE + (offset)) 38#define OMAP3ISP_REG(offset) (OMAP3ISP_REG_BASE + (offset))
39 39
diff --git a/drivers/media/platform/s5p-fimc/fimc-capture.c b/drivers/media/platform/s5p-fimc/fimc-capture.c
index dded98815220..367efd164d0f 100644
--- a/drivers/media/platform/s5p-fimc/fimc-capture.c
+++ b/drivers/media/platform/s5p-fimc/fimc-capture.c
@@ -177,7 +177,9 @@ static int fimc_capture_config_update(struct fimc_ctx *ctx)
177 177
178void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf) 178void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf)
179{ 179{
180 struct v4l2_subdev *csis = fimc->pipeline.subdevs[IDX_CSIS];
180 struct fimc_vid_cap *cap = &fimc->vid_cap; 181 struct fimc_vid_cap *cap = &fimc->vid_cap;
182 struct fimc_frame *f = &cap->ctx->d_frame;
181 struct fimc_vid_buffer *v_buf; 183 struct fimc_vid_buffer *v_buf;
182 struct timeval *tv; 184 struct timeval *tv;
183 struct timespec ts; 185 struct timespec ts;
@@ -216,6 +218,25 @@ void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf)
216 if (++cap->buf_index >= FIMC_MAX_OUT_BUFS) 218 if (++cap->buf_index >= FIMC_MAX_OUT_BUFS)
217 cap->buf_index = 0; 219 cap->buf_index = 0;
218 } 220 }
221 /*
222 * Set up a buffer at MIPI-CSIS if current image format
223 * requires the frame embedded data capture.
224 */
225 if (f->fmt->mdataplanes && !list_empty(&cap->active_buf_q)) {
226 unsigned int plane = ffs(f->fmt->mdataplanes) - 1;
227 unsigned int size = f->payload[plane];
228 s32 index = fimc_hw_get_frame_index(fimc);
229 void *vaddr;
230
231 list_for_each_entry(v_buf, &cap->active_buf_q, list) {
232 if (v_buf->index != index)
233 continue;
234 vaddr = vb2_plane_vaddr(&v_buf->vb, plane);
235 v4l2_subdev_call(csis, video, s_rx_buffer,
236 vaddr, &size);
237 break;
238 }
239 }
219 240
220 if (cap->active_buf_cnt == 0) { 241 if (cap->active_buf_cnt == 0) {
221 if (deq_buf) 242 if (deq_buf)
@@ -351,6 +372,8 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
351 unsigned int size = (wh * fmt->depth[i]) / 8; 372 unsigned int size = (wh * fmt->depth[i]) / 8;
352 if (pixm) 373 if (pixm)
353 sizes[i] = max(size, pixm->plane_fmt[i].sizeimage); 374 sizes[i] = max(size, pixm->plane_fmt[i].sizeimage);
375 else if (fimc_fmt_is_user_defined(fmt->color))
376 sizes[i] = frame->payload[i];
354 else 377 else
355 sizes[i] = max_t(u32, size, frame->payload[i]); 378 sizes[i] = max_t(u32, size, frame->payload[i]);
356 379
@@ -611,10 +634,10 @@ static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
611 u32 mask = FMT_FLAGS_CAM; 634 u32 mask = FMT_FLAGS_CAM;
612 struct fimc_fmt *ffmt; 635 struct fimc_fmt *ffmt;
613 636
614 /* Color conversion from/to JPEG is not supported */ 637 /* Conversion from/to JPEG or User Defined format is not supported */
615 if (code && ctx->s_frame.fmt && pad == FIMC_SD_PAD_SOURCE && 638 if (code && ctx->s_frame.fmt && pad == FIMC_SD_PAD_SOURCE &&
616 fimc_fmt_is_jpeg(ctx->s_frame.fmt->color)) 639 fimc_fmt_is_user_defined(ctx->s_frame.fmt->color))
617 *code = V4L2_MBUS_FMT_JPEG_1X8; 640 *code = ctx->s_frame.fmt->mbus_code;
618 641
619 if (fourcc && *fourcc != V4L2_PIX_FMT_JPEG && pad != FIMC_SD_PAD_SINK) 642 if (fourcc && *fourcc != V4L2_PIX_FMT_JPEG && pad != FIMC_SD_PAD_SINK)
620 mask |= FMT_FLAGS_M2M; 643 mask |= FMT_FLAGS_M2M;
@@ -628,18 +651,19 @@ static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
628 *fourcc = ffmt->fourcc; 651 *fourcc = ffmt->fourcc;
629 652
630 if (pad == FIMC_SD_PAD_SINK) { 653 if (pad == FIMC_SD_PAD_SINK) {
631 max_w = fimc_fmt_is_jpeg(ffmt->color) ? 654 max_w = fimc_fmt_is_user_defined(ffmt->color) ?
632 pl->scaler_dis_w : pl->scaler_en_w; 655 pl->scaler_dis_w : pl->scaler_en_w;
633 /* Apply the camera input interface pixel constraints */ 656 /* Apply the camera input interface pixel constraints */
634 v4l_bound_align_image(width, max_t(u32, *width, 32), max_w, 4, 657 v4l_bound_align_image(width, max_t(u32, *width, 32), max_w, 4,
635 height, max_t(u32, *height, 32), 658 height, max_t(u32, *height, 32),
636 FIMC_CAMIF_MAX_HEIGHT, 659 FIMC_CAMIF_MAX_HEIGHT,
637 fimc_fmt_is_jpeg(ffmt->color) ? 3 : 1, 660 fimc_fmt_is_user_defined(ffmt->color) ?
661 3 : 1,
638 0); 662 0);
639 return ffmt; 663 return ffmt;
640 } 664 }
641 /* Can't scale or crop in transparent (JPEG) transfer mode */ 665 /* Can't scale or crop in transparent (JPEG) transfer mode */
642 if (fimc_fmt_is_jpeg(ffmt->color)) { 666 if (fimc_fmt_is_user_defined(ffmt->color)) {
643 *width = ctx->s_frame.f_width; 667 *width = ctx->s_frame.f_width;
644 *height = ctx->s_frame.f_height; 668 *height = ctx->s_frame.f_height;
645 return ffmt; 669 return ffmt;
@@ -684,7 +708,7 @@ static void fimc_capture_try_selection(struct fimc_ctx *ctx,
684 u32 max_sc_h, max_sc_v; 708 u32 max_sc_h, max_sc_v;
685 709
686 /* In JPEG transparent transfer mode cropping is not supported */ 710 /* In JPEG transparent transfer mode cropping is not supported */
687 if (fimc_fmt_is_jpeg(ctx->d_frame.fmt->color)) { 711 if (fimc_fmt_is_user_defined(ctx->d_frame.fmt->color)) {
688 r->width = sink->f_width; 712 r->width = sink->f_width;
689 r->height = sink->f_height; 713 r->height = sink->f_height;
690 r->left = r->top = 0; 714 r->left = r->top = 0;
@@ -847,6 +871,48 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
847 return 0; 871 return 0;
848} 872}
849 873
874/**
875 * fimc_get_sensor_frame_desc - query the sensor for media bus frame parameters
876 * @sensor: pointer to the sensor subdev
877 * @plane_fmt: provides plane sizes corresponding to the frame layout entries
878 * @try: true to set the frame parameters, false to query only
879 *
880 * This function is used by this driver only for compressed/blob data formats.
881 */
882static int fimc_get_sensor_frame_desc(struct v4l2_subdev *sensor,
883 struct v4l2_plane_pix_format *plane_fmt,
884 unsigned int num_planes, bool try)
885{
886 struct v4l2_mbus_frame_desc fd;
887 int i, ret;
888
889 for (i = 0; i < num_planes; i++)
890 fd.entry[i].length = plane_fmt[i].sizeimage;
891
892 if (try)
893 ret = v4l2_subdev_call(sensor, pad, set_frame_desc, 0, &fd);
894 else
895 ret = v4l2_subdev_call(sensor, pad, get_frame_desc, 0, &fd);
896
897 if (ret < 0)
898 return ret;
899
900 if (num_planes != fd.num_entries)
901 return -EINVAL;
902
903 for (i = 0; i < num_planes; i++)
904 plane_fmt[i].sizeimage = fd.entry[i].length;
905
906 if (fd.entry[0].length > FIMC_MAX_JPEG_BUF_SIZE) {
907 v4l2_err(sensor->v4l2_dev, "Unsupported buffer size: %u\n",
908 fd.entry[0].length);
909
910 return -EINVAL;
911 }
912
913 return 0;
914}
915
850static int fimc_cap_g_fmt_mplane(struct file *file, void *fh, 916static int fimc_cap_g_fmt_mplane(struct file *file, void *fh,
851 struct v4l2_format *f) 917 struct v4l2_format *f)
852{ 918{
@@ -865,7 +931,7 @@ static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
865 struct v4l2_mbus_framefmt mf; 931 struct v4l2_mbus_framefmt mf;
866 struct fimc_fmt *ffmt = NULL; 932 struct fimc_fmt *ffmt = NULL;
867 933
868 if (pix->pixelformat == V4L2_PIX_FMT_JPEG) { 934 if (fimc_jpeg_fourcc(pix->pixelformat)) {
869 fimc_capture_try_format(ctx, &pix->width, &pix->height, 935 fimc_capture_try_format(ctx, &pix->width, &pix->height,
870 NULL, &pix->pixelformat, 936 NULL, &pix->pixelformat,
871 FIMC_SD_PAD_SINK); 937 FIMC_SD_PAD_SINK);
@@ -879,25 +945,32 @@ static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
879 return -EINVAL; 945 return -EINVAL;
880 946
881 if (!fimc->vid_cap.user_subdev_api) { 947 if (!fimc->vid_cap.user_subdev_api) {
882 mf.width = pix->width; 948 mf.width = pix->width;
883 mf.height = pix->height; 949 mf.height = pix->height;
884 mf.code = ffmt->mbus_code; 950 mf.code = ffmt->mbus_code;
885 fimc_md_graph_lock(fimc); 951 fimc_md_graph_lock(fimc);
886 fimc_pipeline_try_format(ctx, &mf, &ffmt, false); 952 fimc_pipeline_try_format(ctx, &mf, &ffmt, false);
887 fimc_md_graph_unlock(fimc); 953 fimc_md_graph_unlock(fimc);
888 954 pix->width = mf.width;
889 pix->width = mf.width; 955 pix->height = mf.height;
890 pix->height = mf.height;
891 if (ffmt) 956 if (ffmt)
892 pix->pixelformat = ffmt->fourcc; 957 pix->pixelformat = ffmt->fourcc;
893 } 958 }
894 959
895 fimc_adjust_mplane_format(ffmt, pix->width, pix->height, pix); 960 fimc_adjust_mplane_format(ffmt, pix->width, pix->height, pix);
961
962 if (ffmt->flags & FMT_FLAGS_COMPRESSED)
963 fimc_get_sensor_frame_desc(fimc->pipeline.subdevs[IDX_SENSOR],
964 pix->plane_fmt, ffmt->memplanes, true);
965
896 return 0; 966 return 0;
897} 967}
898 968
899static void fimc_capture_mark_jpeg_xfer(struct fimc_ctx *ctx, bool jpeg) 969static void fimc_capture_mark_jpeg_xfer(struct fimc_ctx *ctx,
970 enum fimc_color_fmt color)
900{ 971{
972 bool jpeg = fimc_fmt_is_user_defined(color);
973
901 ctx->scaler.enabled = !jpeg; 974 ctx->scaler.enabled = !jpeg;
902 fimc_ctrls_activate(ctx, !jpeg); 975 fimc_ctrls_activate(ctx, !jpeg);
903 976
@@ -920,7 +993,7 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
920 return -EBUSY; 993 return -EBUSY;
921 994
922 /* Pre-configure format at camera interface input, for JPEG only */ 995 /* Pre-configure format at camera interface input, for JPEG only */
923 if (pix->pixelformat == V4L2_PIX_FMT_JPEG) { 996 if (fimc_jpeg_fourcc(pix->pixelformat)) {
924 fimc_capture_try_format(ctx, &pix->width, &pix->height, 997 fimc_capture_try_format(ctx, &pix->width, &pix->height,
925 NULL, &pix->pixelformat, 998 NULL, &pix->pixelformat,
926 FIMC_SD_PAD_SINK); 999 FIMC_SD_PAD_SINK);
@@ -953,7 +1026,16 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
953 } 1026 }
954 1027
955 fimc_adjust_mplane_format(ff->fmt, pix->width, pix->height, pix); 1028 fimc_adjust_mplane_format(ff->fmt, pix->width, pix->height, pix);
956 for (i = 0; i < ff->fmt->colplanes; i++) 1029
1030 if (ff->fmt->flags & FMT_FLAGS_COMPRESSED) {
1031 ret = fimc_get_sensor_frame_desc(fimc->pipeline.subdevs[IDX_SENSOR],
1032 pix->plane_fmt, ff->fmt->memplanes,
1033 true);
1034 if (ret < 0)
1035 return ret;
1036 }
1037
1038 for (i = 0; i < ff->fmt->memplanes; i++)
957 ff->payload[i] = pix->plane_fmt[i].sizeimage; 1039 ff->payload[i] = pix->plane_fmt[i].sizeimage;
958 1040
959 set_frame_bounds(ff, pix->width, pix->height); 1041 set_frame_bounds(ff, pix->width, pix->height);
@@ -961,7 +1043,7 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
961 if (!(ctx->state & FIMC_COMPOSE)) 1043 if (!(ctx->state & FIMC_COMPOSE))
962 set_frame_crop(ff, 0, 0, pix->width, pix->height); 1044 set_frame_crop(ff, 0, 0, pix->width, pix->height);
963 1045
964 fimc_capture_mark_jpeg_xfer(ctx, fimc_fmt_is_jpeg(ff->fmt->color)); 1046 fimc_capture_mark_jpeg_xfer(ctx, ff->fmt->color);
965 1047
966 /* Reset cropping and set format at the camera interface input */ 1048 /* Reset cropping and set format at the camera interface input */
967 if (!fimc->vid_cap.user_subdev_api) { 1049 if (!fimc->vid_cap.user_subdev_api) {
@@ -1063,6 +1145,23 @@ static int fimc_pipeline_validate(struct fimc_dev *fimc)
1063 src_fmt.format.height != sink_fmt.format.height || 1145 src_fmt.format.height != sink_fmt.format.height ||
1064 src_fmt.format.code != sink_fmt.format.code) 1146 src_fmt.format.code != sink_fmt.format.code)
1065 return -EPIPE; 1147 return -EPIPE;
1148
1149 if (sd == fimc->pipeline.subdevs[IDX_SENSOR] &&
1150 fimc_user_defined_mbus_fmt(src_fmt.format.code)) {
1151 struct v4l2_plane_pix_format plane_fmt[FIMC_MAX_PLANES];
1152 struct fimc_frame *frame = &vid_cap->ctx->d_frame;
1153 unsigned int i;
1154
1155 ret = fimc_get_sensor_frame_desc(sd, plane_fmt,
1156 frame->fmt->memplanes,
1157 false);
1158 if (ret < 0)
1159 return -EPIPE;
1160
1161 for (i = 0; i < frame->fmt->memplanes; i++)
1162 if (frame->payload[i] < plane_fmt[i].sizeimage)
1163 return -EPIPE;
1164 }
1066 } 1165 }
1067 return 0; 1166 return 0;
1068} 1167}
@@ -1424,7 +1523,7 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
1424 /* Update RGB Alpha control state and value range */ 1523 /* Update RGB Alpha control state and value range */
1425 fimc_alpha_ctrl_update(ctx); 1524 fimc_alpha_ctrl_update(ctx);
1426 1525
1427 fimc_capture_mark_jpeg_xfer(ctx, fimc_fmt_is_jpeg(ffmt->color)); 1526 fimc_capture_mark_jpeg_xfer(ctx, ffmt->color);
1428 1527
1429 ff = fmt->pad == FIMC_SD_PAD_SINK ? 1528 ff = fmt->pad == FIMC_SD_PAD_SINK ?
1430 &ctx->s_frame : &ctx->d_frame; 1529 &ctx->s_frame : &ctx->d_frame;
diff --git a/drivers/media/platform/s5p-fimc/fimc-core.c b/drivers/media/platform/s5p-fimc/fimc-core.c
index 1a445404e73d..8d0d2b94a135 100644
--- a/drivers/media/platform/s5p-fimc/fimc-core.c
+++ b/drivers/media/platform/s5p-fimc/fimc-core.c
@@ -184,7 +184,17 @@ static struct fimc_fmt fimc_formats[] = {
184 .memplanes = 1, 184 .memplanes = 1,
185 .colplanes = 1, 185 .colplanes = 1,
186 .mbus_code = V4L2_MBUS_FMT_JPEG_1X8, 186 .mbus_code = V4L2_MBUS_FMT_JPEG_1X8,
187 .flags = FMT_FLAGS_CAM, 187 .flags = FMT_FLAGS_CAM | FMT_FLAGS_COMPRESSED,
188 }, {
189 .name = "S5C73MX interleaved UYVY/JPEG",
190 .fourcc = V4L2_PIX_FMT_S5C_UYVY_JPG,
191 .color = FIMC_FMT_YUYV_JPEG,
192 .depth = { 8 },
193 .memplanes = 2,
194 .colplanes = 1,
195 .mdataplanes = 0x2, /* plane 1 holds frame meta data */
196 .mbus_code = V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8,
197 .flags = FMT_FLAGS_CAM | FMT_FLAGS_COMPRESSED,
188 }, 198 },
189}; 199};
190 200
@@ -371,7 +381,7 @@ int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
371 default: 381 default:
372 return -EINVAL; 382 return -EINVAL;
373 } 383 }
374 } else { 384 } else if (!frame->fmt->mdataplanes) {
375 if (frame->fmt->memplanes >= 2) 385 if (frame->fmt->memplanes >= 2)
376 paddr->cb = vb2_dma_contig_plane_dma_addr(vb, 1); 386 paddr->cb = vb2_dma_contig_plane_dma_addr(vb, 1);
377 387
@@ -698,6 +708,11 @@ int fimc_fill_format(struct fimc_frame *frame, struct v4l2_format *f)
698 if (frame->fmt->colplanes == 1) /* packed formats */ 708 if (frame->fmt->colplanes == 1) /* packed formats */
699 bpl = (bpl * frame->fmt->depth[0]) / 8; 709 bpl = (bpl * frame->fmt->depth[0]) / 8;
700 pixm->plane_fmt[i].bytesperline = bpl; 710 pixm->plane_fmt[i].bytesperline = bpl;
711
712 if (frame->fmt->flags & FMT_FLAGS_COMPRESSED) {
713 pixm->plane_fmt[i].sizeimage = frame->payload[i];
714 continue;
715 }
701 pixm->plane_fmt[i].sizeimage = (frame->o_width * 716 pixm->plane_fmt[i].sizeimage = (frame->o_width *
702 frame->o_height * frame->fmt->depth[i]) / 8; 717 frame->o_height * frame->fmt->depth[i]) / 8;
703 } 718 }
diff --git a/drivers/media/platform/s5p-fimc/fimc-core.h b/drivers/media/platform/s5p-fimc/fimc-core.h
index cd716ba6015f..c0040d792499 100644
--- a/drivers/media/platform/s5p-fimc/fimc-core.h
+++ b/drivers/media/platform/s5p-fimc/fimc-core.h
@@ -40,6 +40,8 @@
40#define SCALER_MAX_VRATIO 64 40#define SCALER_MAX_VRATIO 64
41#define DMA_MIN_SIZE 8 41#define DMA_MIN_SIZE 8
42#define FIMC_CAMIF_MAX_HEIGHT 0x2000 42#define FIMC_CAMIF_MAX_HEIGHT 0x2000
43#define FIMC_MAX_JPEG_BUF_SIZE (10 * SZ_1M)
44#define FIMC_MAX_PLANES 3
43 45
44/* indices to the clocks array */ 46/* indices to the clocks array */
45enum { 47enum {
@@ -83,7 +85,7 @@ enum fimc_datapath {
83}; 85};
84 86
85enum fimc_color_fmt { 87enum fimc_color_fmt {
86 FIMC_FMT_RGB444 = 0x10, 88 FIMC_FMT_RGB444 = 0x10,
87 FIMC_FMT_RGB555, 89 FIMC_FMT_RGB555,
88 FIMC_FMT_RGB565, 90 FIMC_FMT_RGB565,
89 FIMC_FMT_RGB666, 91 FIMC_FMT_RGB666,
@@ -95,14 +97,15 @@ enum fimc_color_fmt {
95 FIMC_FMT_CBYCRY422, 97 FIMC_FMT_CBYCRY422,
96 FIMC_FMT_CRYCBY422, 98 FIMC_FMT_CRYCBY422,
97 FIMC_FMT_YCBCR444_LOCAL, 99 FIMC_FMT_YCBCR444_LOCAL,
98 FIMC_FMT_JPEG = 0x40, 100 FIMC_FMT_RAW8 = 0x40,
99 FIMC_FMT_RAW8 = 0x80,
100 FIMC_FMT_RAW10, 101 FIMC_FMT_RAW10,
101 FIMC_FMT_RAW12, 102 FIMC_FMT_RAW12,
103 FIMC_FMT_JPEG = 0x80,
104 FIMC_FMT_YUYV_JPEG = 0x100,
102}; 105};
103 106
107#define fimc_fmt_is_user_defined(x) (!!((x) & 0x180))
104#define fimc_fmt_is_rgb(x) (!!((x) & 0x10)) 108#define fimc_fmt_is_rgb(x) (!!((x) & 0x10))
105#define fimc_fmt_is_jpeg(x) (!!((x) & 0x40))
106 109
107#define IS_M2M(__strt) ((__strt) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE || \ 110#define IS_M2M(__strt) ((__strt) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE || \
108 __strt == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) 111 __strt == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
@@ -139,6 +142,7 @@ enum fimc_color_fmt {
139 * @memplanes: number of physically non-contiguous data planes 142 * @memplanes: number of physically non-contiguous data planes
140 * @colplanes: number of physically contiguous data planes 143 * @colplanes: number of physically contiguous data planes
141 * @depth: per plane driver's private 'number of bits per pixel' 144 * @depth: per plane driver's private 'number of bits per pixel'
145 * @mdataplanes: bitmask indicating meta data plane(s), (1 << plane_no)
142 * @flags: flags indicating which operation mode format applies to 146 * @flags: flags indicating which operation mode format applies to
143 */ 147 */
144struct fimc_fmt { 148struct fimc_fmt {
@@ -149,12 +153,14 @@ struct fimc_fmt {
149 u16 memplanes; 153 u16 memplanes;
150 u16 colplanes; 154 u16 colplanes;
151 u8 depth[VIDEO_MAX_PLANES]; 155 u8 depth[VIDEO_MAX_PLANES];
156 u16 mdataplanes;
152 u16 flags; 157 u16 flags;
153#define FMT_FLAGS_CAM (1 << 0) 158#define FMT_FLAGS_CAM (1 << 0)
154#define FMT_FLAGS_M2M_IN (1 << 1) 159#define FMT_FLAGS_M2M_IN (1 << 1)
155#define FMT_FLAGS_M2M_OUT (1 << 2) 160#define FMT_FLAGS_M2M_OUT (1 << 2)
156#define FMT_FLAGS_M2M (1 << 1 | 1 << 2) 161#define FMT_FLAGS_M2M (1 << 1 | 1 << 2)
157#define FMT_HAS_ALPHA (1 << 3) 162#define FMT_HAS_ALPHA (1 << 3)
163#define FMT_FLAGS_COMPRESSED (1 << 4)
158}; 164};
159 165
160/** 166/**
@@ -272,7 +278,7 @@ struct fimc_frame {
272 u32 offs_v; 278 u32 offs_v;
273 u32 width; 279 u32 width;
274 u32 height; 280 u32 height;
275 unsigned long payload[VIDEO_MAX_PLANES]; 281 unsigned int payload[VIDEO_MAX_PLANES];
276 struct fimc_addr paddr; 282 struct fimc_addr paddr;
277 struct fimc_dma_offset dma_offset; 283 struct fimc_dma_offset dma_offset;
278 struct fimc_fmt *fmt; 284 struct fimc_fmt *fmt;
@@ -577,6 +583,18 @@ static inline int tiled_fmt(struct fimc_fmt *fmt)
577 return fmt->fourcc == V4L2_PIX_FMT_NV12MT; 583 return fmt->fourcc == V4L2_PIX_FMT_NV12MT;
578} 584}
579 585
586static inline bool fimc_jpeg_fourcc(u32 pixelformat)
587{
588 return (pixelformat == V4L2_PIX_FMT_JPEG ||
589 pixelformat == V4L2_PIX_FMT_S5C_UYVY_JPG);
590}
591
592static inline bool fimc_user_defined_mbus_fmt(u32 code)
593{
594 return (code == V4L2_MBUS_FMT_JPEG_1X8 ||
595 code == V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8);
596}
597
580/* Return the alpha component bit mask */ 598/* Return the alpha component bit mask */
581static inline int fimc_get_alpha_mask(struct fimc_fmt *fmt) 599static inline int fimc_get_alpha_mask(struct fimc_fmt *fmt)
582{ 600{
diff --git a/drivers/media/platform/s5p-fimc/fimc-m2m.c b/drivers/media/platform/s5p-fimc/fimc-m2m.c
index 6b71d953fd15..4500e44f6857 100644
--- a/drivers/media/platform/s5p-fimc/fimc-m2m.c
+++ b/drivers/media/platform/s5p-fimc/fimc-m2m.c
@@ -551,30 +551,31 @@ static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
551 return 0; 551 return 0;
552} 552}
553 553
554static int fimc_m2m_s_crop(struct file *file, void *fh, const struct v4l2_crop *cr) 554static int fimc_m2m_s_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
555{ 555{
556 struct fimc_ctx *ctx = fh_to_ctx(fh); 556 struct fimc_ctx *ctx = fh_to_ctx(fh);
557 struct fimc_dev *fimc = ctx->fimc_dev; 557 struct fimc_dev *fimc = ctx->fimc_dev;
558 struct v4l2_crop cr = *crop;
558 struct fimc_frame *f; 559 struct fimc_frame *f;
559 int ret; 560 int ret;
560 561
561 ret = fimc_m2m_try_crop(ctx, cr); 562 ret = fimc_m2m_try_crop(ctx, &cr);
562 if (ret) 563 if (ret)
563 return ret; 564 return ret;
564 565
565 f = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ? 566 f = (cr.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
566 &ctx->s_frame : &ctx->d_frame; 567 &ctx->s_frame : &ctx->d_frame;
567 568
568 /* Check to see if scaling ratio is within supported range */ 569 /* Check to see if scaling ratio is within supported range */
569 if (fimc_ctx_state_is_set(FIMC_DST_FMT | FIMC_SRC_FMT, ctx)) { 570 if (fimc_ctx_state_is_set(FIMC_DST_FMT | FIMC_SRC_FMT, ctx)) {
570 if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { 571 if (cr.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
571 ret = fimc_check_scaler_ratio(ctx, cr->c.width, 572 ret = fimc_check_scaler_ratio(ctx, cr.c.width,
572 cr->c.height, ctx->d_frame.width, 573 cr.c.height, ctx->d_frame.width,
573 ctx->d_frame.height, ctx->rotation); 574 ctx->d_frame.height, ctx->rotation);
574 } else { 575 } else {
575 ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width, 576 ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
576 ctx->s_frame.height, cr->c.width, 577 ctx->s_frame.height, cr.c.width,
577 cr->c.height, ctx->rotation); 578 cr.c.height, ctx->rotation);
578 } 579 }
579 if (ret) { 580 if (ret) {
580 v4l2_err(&fimc->m2m.vfd, "Out of scaler range\n"); 581 v4l2_err(&fimc->m2m.vfd, "Out of scaler range\n");
@@ -582,10 +583,10 @@ static int fimc_m2m_s_crop(struct file *file, void *fh, const struct v4l2_crop *
582 } 583 }
583 } 584 }
584 585
585 f->offs_h = cr->c.left; 586 f->offs_h = cr.c.left;
586 f->offs_v = cr->c.top; 587 f->offs_v = cr.c.top;
587 f->width = cr->c.width; 588 f->width = cr.c.width;
588 f->height = cr->c.height; 589 f->height = cr.c.height;
589 590
590 fimc_ctx_state_set(FIMC_PARAMS, ctx); 591 fimc_ctx_state_set(FIMC_PARAMS, ctx);
591 592
diff --git a/drivers/media/platform/s5p-fimc/fimc-reg.c b/drivers/media/platform/s5p-fimc/fimc-reg.c
index 783408fd7d56..2c9d0c06c9e8 100644
--- a/drivers/media/platform/s5p-fimc/fimc-reg.c
+++ b/drivers/media/platform/s5p-fimc/fimc-reg.c
@@ -625,7 +625,7 @@ int fimc_hw_set_camera_source(struct fimc_dev *fimc,
625 cfg |= FIMC_REG_CISRCFMT_ITU601_16BIT; 625 cfg |= FIMC_REG_CISRCFMT_ITU601_16BIT;
626 } /* else defaults to ITU-R BT.656 8-bit */ 626 } /* else defaults to ITU-R BT.656 8-bit */
627 } else if (cam->bus_type == FIMC_MIPI_CSI2) { 627 } else if (cam->bus_type == FIMC_MIPI_CSI2) {
628 if (fimc_fmt_is_jpeg(f->fmt->color)) 628 if (fimc_fmt_is_user_defined(f->fmt->color))
629 cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT; 629 cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT;
630 } 630 }
631 631
@@ -680,6 +680,7 @@ int fimc_hw_set_camera_type(struct fimc_dev *fimc,
680 tmp = FIMC_REG_CSIIMGFMT_YCBCR422_8BIT; 680 tmp = FIMC_REG_CSIIMGFMT_YCBCR422_8BIT;
681 break; 681 break;
682 case V4L2_MBUS_FMT_JPEG_1X8: 682 case V4L2_MBUS_FMT_JPEG_1X8:
683 case V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8:
683 tmp = FIMC_REG_CSIIMGFMT_USER(1); 684 tmp = FIMC_REG_CSIIMGFMT_USER(1);
684 cfg |= FIMC_REG_CIGCTRL_CAM_JPEG; 685 cfg |= FIMC_REG_CIGCTRL_CAM_JPEG;
685 break; 686 break;
@@ -744,13 +745,13 @@ void fimc_hw_dis_capture(struct fimc_dev *dev)
744} 745}
745 746
746/* Return an index to the buffer actually being written. */ 747/* Return an index to the buffer actually being written. */
747u32 fimc_hw_get_frame_index(struct fimc_dev *dev) 748s32 fimc_hw_get_frame_index(struct fimc_dev *dev)
748{ 749{
749 u32 reg; 750 s32 reg;
750 751
751 if (dev->variant->has_cistatus2) { 752 if (dev->variant->has_cistatus2) {
752 reg = readl(dev->regs + FIMC_REG_CISTATUS2) & 0x3F; 753 reg = readl(dev->regs + FIMC_REG_CISTATUS2) & 0x3f;
753 return reg > 0 ? --reg : reg; 754 return reg - 1;
754 } 755 }
755 756
756 reg = readl(dev->regs + FIMC_REG_CISTATUS); 757 reg = readl(dev->regs + FIMC_REG_CISTATUS);
@@ -759,6 +760,18 @@ u32 fimc_hw_get_frame_index(struct fimc_dev *dev)
759 FIMC_REG_CISTATUS_FRAMECNT_SHIFT; 760 FIMC_REG_CISTATUS_FRAMECNT_SHIFT;
760} 761}
761 762
763/* Return an index to the buffer being written previously. */
764s32 fimc_hw_get_prev_frame_index(struct fimc_dev *dev)
765{
766 s32 reg;
767
768 if (!dev->variant->has_cistatus2)
769 return -1;
770
771 reg = readl(dev->regs + FIMC_REG_CISTATUS2);
772 return ((reg >> 7) & 0x3f) - 1;
773}
774
762/* Locking: the caller holds fimc->slock */ 775/* Locking: the caller holds fimc->slock */
763void fimc_activate_capture(struct fimc_ctx *ctx) 776void fimc_activate_capture(struct fimc_ctx *ctx)
764{ 777{
diff --git a/drivers/media/platform/s5p-fimc/fimc-reg.h b/drivers/media/platform/s5p-fimc/fimc-reg.h
index 579ac8ac03de..b6abfc7b72ac 100644
--- a/drivers/media/platform/s5p-fimc/fimc-reg.h
+++ b/drivers/media/platform/s5p-fimc/fimc-reg.h
@@ -307,7 +307,8 @@ void fimc_hw_clear_irq(struct fimc_dev *dev);
307void fimc_hw_enable_scaler(struct fimc_dev *dev, bool on); 307void fimc_hw_enable_scaler(struct fimc_dev *dev, bool on);
308void fimc_hw_activate_input_dma(struct fimc_dev *dev, bool on); 308void fimc_hw_activate_input_dma(struct fimc_dev *dev, bool on);
309void fimc_hw_dis_capture(struct fimc_dev *dev); 309void fimc_hw_dis_capture(struct fimc_dev *dev);
310u32 fimc_hw_get_frame_index(struct fimc_dev *dev); 310s32 fimc_hw_get_frame_index(struct fimc_dev *dev);
311s32 fimc_hw_get_prev_frame_index(struct fimc_dev *dev);
311void fimc_activate_capture(struct fimc_ctx *ctx); 312void fimc_activate_capture(struct fimc_ctx *ctx);
312void fimc_deactivate_capture(struct fimc_dev *fimc); 313void fimc_deactivate_capture(struct fimc_dev *fimc);
313 314
diff --git a/drivers/media/platform/s5p-fimc/mipi-csis.c b/drivers/media/platform/s5p-fimc/mipi-csis.c
index e92236ac5cfe..4c961b1b68e6 100644
--- a/drivers/media/platform/s5p-fimc/mipi-csis.c
+++ b/drivers/media/platform/s5p-fimc/mipi-csis.c
@@ -2,7 +2,7 @@
2 * Samsung S5P/EXYNOS4 SoC series MIPI-CSI receiver driver 2 * Samsung S5P/EXYNOS4 SoC series MIPI-CSI receiver driver
3 * 3 *
4 * Copyright (C) 2011 - 2012 Samsung Electronics Co., Ltd. 4 * Copyright (C) 2011 - 2012 Samsung Electronics Co., Ltd.
5 * Sylwester Nawrocki, <s.nawrocki@samsung.com> 5 * Sylwester Nawrocki <s.nawrocki@samsung.com>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -98,6 +98,11 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)");
98#define CSIS_MAX_PIX_WIDTH 0xffff 98#define CSIS_MAX_PIX_WIDTH 0xffff
99#define CSIS_MAX_PIX_HEIGHT 0xffff 99#define CSIS_MAX_PIX_HEIGHT 0xffff
100 100
101/* Non-image packet data buffers */
102#define S5PCSIS_PKTDATA_ODD 0x2000
103#define S5PCSIS_PKTDATA_EVEN 0x3000
104#define S5PCSIS_PKTDATA_SIZE SZ_4K
105
101enum { 106enum {
102 CSIS_CLK_MUX, 107 CSIS_CLK_MUX,
103 CSIS_CLK_GATE, 108 CSIS_CLK_GATE,
@@ -110,8 +115,8 @@ static char *csi_clock_name[] = {
110#define NUM_CSIS_CLOCKS ARRAY_SIZE(csi_clock_name) 115#define NUM_CSIS_CLOCKS ARRAY_SIZE(csi_clock_name)
111 116
112static const char * const csis_supply_name[] = { 117static const char * const csis_supply_name[] = {
113 "vdd11", /* 1.1V or 1.2V (s5pc100) MIPI CSI suppply */ 118 "vddcore", /* CSIS Core (1.0V, 1.1V or 1.2V) suppply */
114 "vdd18", /* VDD 1.8V and MIPI CSI PLL supply */ 119 "vddio", /* CSIS I/O and PLL (1.8V) supply */
115}; 120};
116#define CSIS_NUM_SUPPLIES ARRAY_SIZE(csis_supply_name) 121#define CSIS_NUM_SUPPLIES ARRAY_SIZE(csis_supply_name)
117 122
@@ -144,12 +149,18 @@ static const struct s5pcsis_event s5pcsis_events[] = {
144}; 149};
145#define S5PCSIS_NUM_EVENTS ARRAY_SIZE(s5pcsis_events) 150#define S5PCSIS_NUM_EVENTS ARRAY_SIZE(s5pcsis_events)
146 151
152struct csis_pktbuf {
153 u32 *data;
154 unsigned int len;
155};
156
147/** 157/**
148 * struct csis_state - the driver's internal state data structure 158 * struct csis_state - the driver's internal state data structure
149 * @lock: mutex serializing the subdev and power management operations, 159 * @lock: mutex serializing the subdev and power management operations,
150 * protecting @format and @flags members 160 * protecting @format and @flags members
151 * @pads: CSIS pads array 161 * @pads: CSIS pads array
152 * @sd: v4l2_subdev associated with CSIS device instance 162 * @sd: v4l2_subdev associated with CSIS device instance
163 * @index: the hardware instance index
153 * @pdev: CSIS platform device 164 * @pdev: CSIS platform device
154 * @regs: mmaped I/O registers memory 165 * @regs: mmaped I/O registers memory
155 * @supplies: CSIS regulator supplies 166 * @supplies: CSIS regulator supplies
@@ -159,12 +170,14 @@ static const struct s5pcsis_event s5pcsis_events[] = {
159 * @csis_fmt: current CSIS pixel format 170 * @csis_fmt: current CSIS pixel format
160 * @format: common media bus format for the source and sink pad 171 * @format: common media bus format for the source and sink pad
161 * @slock: spinlock protecting structure members below 172 * @slock: spinlock protecting structure members below
173 * @pkt_buf: the frame embedded (non-image) data buffer
162 * @events: MIPI-CSIS event (error) counters 174 * @events: MIPI-CSIS event (error) counters
163 */ 175 */
164struct csis_state { 176struct csis_state {
165 struct mutex lock; 177 struct mutex lock;
166 struct media_pad pads[CSIS_PADS_NUM]; 178 struct media_pad pads[CSIS_PADS_NUM];
167 struct v4l2_subdev sd; 179 struct v4l2_subdev sd;
180 u8 index;
168 struct platform_device *pdev; 181 struct platform_device *pdev;
169 void __iomem *regs; 182 void __iomem *regs;
170 struct regulator_bulk_data supplies[CSIS_NUM_SUPPLIES]; 183 struct regulator_bulk_data supplies[CSIS_NUM_SUPPLIES];
@@ -175,6 +188,7 @@ struct csis_state {
175 struct v4l2_mbus_framefmt format; 188 struct v4l2_mbus_framefmt format;
176 189
177 struct spinlock slock; 190 struct spinlock slock;
191 struct csis_pktbuf pkt_buf;
178 struct s5pcsis_event events[S5PCSIS_NUM_EVENTS]; 192 struct s5pcsis_event events[S5PCSIS_NUM_EVENTS];
179}; 193};
180 194
@@ -202,7 +216,11 @@ static const struct csis_pix_format s5pcsis_formats[] = {
202 .code = V4L2_MBUS_FMT_JPEG_1X8, 216 .code = V4L2_MBUS_FMT_JPEG_1X8,
203 .fmt_reg = S5PCSIS_CFG_FMT_USER(1), 217 .fmt_reg = S5PCSIS_CFG_FMT_USER(1),
204 .data_alignment = 32, 218 .data_alignment = 32,
205 }, 219 }, {
220 .code = V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8,
221 .fmt_reg = S5PCSIS_CFG_FMT_USER(1),
222 .data_alignment = 32,
223 }
206}; 224};
207 225
208#define s5pcsis_write(__csis, __r, __v) writel(__v, __csis->regs + __r) 226#define s5pcsis_write(__csis, __r, __v) writel(__v, __csis->regs + __r)
@@ -266,7 +284,7 @@ static void __s5pcsis_set_format(struct csis_state *state)
266 struct v4l2_mbus_framefmt *mf = &state->format; 284 struct v4l2_mbus_framefmt *mf = &state->format;
267 u32 val; 285 u32 val;
268 286
269 v4l2_dbg(1, debug, &state->sd, "fmt: %d, %d x %d\n", 287 v4l2_dbg(1, debug, &state->sd, "fmt: %#x, %d x %d\n",
270 mf->code, mf->width, mf->height); 288 mf->code, mf->width, mf->height);
271 289
272 /* Color format */ 290 /* Color format */
@@ -304,8 +322,10 @@ static void s5pcsis_set_params(struct csis_state *state)
304 val |= S5PCSIS_CTRL_ALIGN_32BIT; 322 val |= S5PCSIS_CTRL_ALIGN_32BIT;
305 else /* 24-bits */ 323 else /* 24-bits */
306 val &= ~S5PCSIS_CTRL_ALIGN_32BIT; 324 val &= ~S5PCSIS_CTRL_ALIGN_32BIT;
307 /* Not using external clock. */ 325
308 val &= ~S5PCSIS_CTRL_WCLK_EXTCLK; 326 val &= ~S5PCSIS_CTRL_WCLK_EXTCLK;
327 if (pdata->wclk_source)
328 val |= S5PCSIS_CTRL_WCLK_EXTCLK;
309 s5pcsis_write(state, S5PCSIS_CTRL, val); 329 s5pcsis_write(state, S5PCSIS_CTRL, val);
310 330
311 /* Update the shadow register. */ 331 /* Update the shadow register. */
@@ -529,6 +549,22 @@ static int s5pcsis_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
529 return 0; 549 return 0;
530} 550}
531 551
552static int s5pcsis_s_rx_buffer(struct v4l2_subdev *sd, void *buf,
553 unsigned int *size)
554{
555 struct csis_state *state = sd_to_csis_state(sd);
556 unsigned long flags;
557
558 *size = min_t(unsigned int, *size, S5PCSIS_PKTDATA_SIZE);
559
560 spin_lock_irqsave(&state->slock, flags);
561 state->pkt_buf.data = buf;
562 state->pkt_buf.len = *size;
563 spin_unlock_irqrestore(&state->slock, flags);
564
565 return 0;
566}
567
532static int s5pcsis_log_status(struct v4l2_subdev *sd) 568static int s5pcsis_log_status(struct v4l2_subdev *sd)
533{ 569{
534 struct csis_state *state = sd_to_csis_state(sd); 570 struct csis_state *state = sd_to_csis_state(sd);
@@ -566,6 +602,7 @@ static struct v4l2_subdev_pad_ops s5pcsis_pad_ops = {
566}; 602};
567 603
568static struct v4l2_subdev_video_ops s5pcsis_video_ops = { 604static struct v4l2_subdev_video_ops s5pcsis_video_ops = {
605 .s_rx_buffer = s5pcsis_s_rx_buffer,
569 .s_stream = s5pcsis_s_stream, 606 .s_stream = s5pcsis_s_stream,
570}; 607};
571 608
@@ -578,13 +615,26 @@ static struct v4l2_subdev_ops s5pcsis_subdev_ops = {
578static irqreturn_t s5pcsis_irq_handler(int irq, void *dev_id) 615static irqreturn_t s5pcsis_irq_handler(int irq, void *dev_id)
579{ 616{
580 struct csis_state *state = dev_id; 617 struct csis_state *state = dev_id;
618 struct csis_pktbuf *pktbuf = &state->pkt_buf;
581 unsigned long flags; 619 unsigned long flags;
582 u32 status; 620 u32 status;
583 621
584 status = s5pcsis_read(state, S5PCSIS_INTSRC); 622 status = s5pcsis_read(state, S5PCSIS_INTSRC);
585
586 spin_lock_irqsave(&state->slock, flags); 623 spin_lock_irqsave(&state->slock, flags);
587 624
625 if ((status & S5PCSIS_INTSRC_NON_IMAGE_DATA) && pktbuf->data) {
626 u32 offset;
627
628 if (status & S5PCSIS_INTSRC_EVEN)
629 offset = S5PCSIS_PKTDATA_EVEN;
630 else
631 offset = S5PCSIS_PKTDATA_ODD;
632
633 memcpy(pktbuf->data, state->regs + offset, pktbuf->len);
634 pktbuf->data = NULL;
635 rmb();
636 }
637
588 /* Update the event/error counters */ 638 /* Update the event/error counters */
589 if ((status & S5PCSIS_INTSRC_ERRORS) || debug) { 639 if ((status & S5PCSIS_INTSRC_ERRORS) || debug) {
590 int i; 640 int i;
@@ -620,14 +670,15 @@ static int __devinit s5pcsis_probe(struct platform_device *pdev)
620 spin_lock_init(&state->slock); 670 spin_lock_init(&state->slock);
621 671
622 state->pdev = pdev; 672 state->pdev = pdev;
673 state->index = max(0, pdev->id);
623 674
624 pdata = pdev->dev.platform_data; 675 pdata = pdev->dev.platform_data;
625 if (pdata == NULL || pdata->phy_enable == NULL) { 676 if (pdata == NULL) {
626 dev_err(&pdev->dev, "Platform data not fully specified\n"); 677 dev_err(&pdev->dev, "Platform data not fully specified\n");
627 return -EINVAL; 678 return -EINVAL;
628 } 679 }
629 680
630 if ((pdev->id == 1 && pdata->lanes > CSIS1_MAX_LANES) || 681 if ((state->index == 1 && pdata->lanes > CSIS1_MAX_LANES) ||
631 pdata->lanes > CSIS0_MAX_LANES) { 682 pdata->lanes > CSIS0_MAX_LANES) {
632 dev_err(&pdev->dev, "Unsupported number of data lanes: %d\n", 683 dev_err(&pdev->dev, "Unsupported number of data lanes: %d\n",
633 pdata->lanes); 684 pdata->lanes);
@@ -710,7 +761,6 @@ e_clkput:
710 761
711static int s5pcsis_pm_suspend(struct device *dev, bool runtime) 762static int s5pcsis_pm_suspend(struct device *dev, bool runtime)
712{ 763{
713 struct s5p_platform_mipi_csis *pdata = dev->platform_data;
714 struct platform_device *pdev = to_platform_device(dev); 764 struct platform_device *pdev = to_platform_device(dev);
715 struct v4l2_subdev *sd = platform_get_drvdata(pdev); 765 struct v4l2_subdev *sd = platform_get_drvdata(pdev);
716 struct csis_state *state = sd_to_csis_state(sd); 766 struct csis_state *state = sd_to_csis_state(sd);
@@ -722,7 +772,7 @@ static int s5pcsis_pm_suspend(struct device *dev, bool runtime)
722 mutex_lock(&state->lock); 772 mutex_lock(&state->lock);
723 if (state->flags & ST_POWERED) { 773 if (state->flags & ST_POWERED) {
724 s5pcsis_stop_stream(state); 774 s5pcsis_stop_stream(state);
725 ret = pdata->phy_enable(state->pdev, false); 775 ret = s5p_csis_phy_enable(state->index, false);
726 if (ret) 776 if (ret)
727 goto unlock; 777 goto unlock;
728 ret = regulator_bulk_disable(CSIS_NUM_SUPPLIES, 778 ret = regulator_bulk_disable(CSIS_NUM_SUPPLIES,
@@ -741,7 +791,6 @@ static int s5pcsis_pm_suspend(struct device *dev, bool runtime)
741 791
742static int s5pcsis_pm_resume(struct device *dev, bool runtime) 792static int s5pcsis_pm_resume(struct device *dev, bool runtime)
743{ 793{
744 struct s5p_platform_mipi_csis *pdata = dev->platform_data;
745 struct platform_device *pdev = to_platform_device(dev); 794 struct platform_device *pdev = to_platform_device(dev);
746 struct v4l2_subdev *sd = platform_get_drvdata(pdev); 795 struct v4l2_subdev *sd = platform_get_drvdata(pdev);
747 struct csis_state *state = sd_to_csis_state(sd); 796 struct csis_state *state = sd_to_csis_state(sd);
@@ -759,7 +808,7 @@ static int s5pcsis_pm_resume(struct device *dev, bool runtime)
759 state->supplies); 808 state->supplies);
760 if (ret) 809 if (ret)
761 goto unlock; 810 goto unlock;
762 ret = pdata->phy_enable(state->pdev, true); 811 ret = s5p_csis_phy_enable(state->index, true);
763 if (!ret) { 812 if (!ret) {
764 state->flags |= ST_POWERED; 813 state->flags |= ST_POWERED;
765 } else { 814 } else {
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 1e3b9dd014c0..1bfbc325836b 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -507,7 +507,7 @@ static int vidioc_g_crop(struct file *file, void *prv, struct v4l2_crop *cr)
507 return 0; 507 return 0;
508} 508}
509 509
510static int vidioc_try_crop(struct file *file, void *prv, struct v4l2_crop *cr) 510static int vidioc_try_crop(struct file *file, void *prv, const struct v4l2_crop *cr)
511{ 511{
512 struct g2d_ctx *ctx = prv; 512 struct g2d_ctx *ctx = prv;
513 struct g2d_dev *dev = ctx->dev; 513 struct g2d_dev *dev = ctx->dev;
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 394775ae5774..17983c4c9a9a 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1353,7 +1353,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
1353 return ret; 1353 return ret;
1354 } 1354 }
1355 dev_dbg(&pdev->dev, "clock source %p\n", jpeg->clk); 1355 dev_dbg(&pdev->dev, "clock source %p\n", jpeg->clk);
1356 clk_enable(jpeg->clk); 1356 clk_prepare_enable(jpeg->clk);
1357 1357
1358 /* v4l2 device */ 1358 /* v4l2 device */
1359 ret = v4l2_device_register(&pdev->dev, &jpeg->v4l2_dev); 1359 ret = v4l2_device_register(&pdev->dev, &jpeg->v4l2_dev);
@@ -1460,7 +1460,7 @@ device_register_rollback:
1460 v4l2_device_unregister(&jpeg->v4l2_dev); 1460 v4l2_device_unregister(&jpeg->v4l2_dev);
1461 1461
1462clk_get_rollback: 1462clk_get_rollback:
1463 clk_disable(jpeg->clk); 1463 clk_disable_unprepare(jpeg->clk);
1464 clk_put(jpeg->clk); 1464 clk_put(jpeg->clk);
1465 1465
1466 return ret; 1466 return ret;
@@ -1480,7 +1480,7 @@ static int s5p_jpeg_remove(struct platform_device *pdev)
1480 v4l2_m2m_release(jpeg->m2m_dev); 1480 v4l2_m2m_release(jpeg->m2m_dev);
1481 v4l2_device_unregister(&jpeg->v4l2_dev); 1481 v4l2_device_unregister(&jpeg->v4l2_dev);
1482 1482
1483 clk_disable(jpeg->clk); 1483 clk_disable_unprepare(jpeg->clk);
1484 clk_put(jpeg->clk); 1484 clk_put(jpeg->clk);
1485 1485
1486 return 0; 1486 return 0;
diff --git a/drivers/media/platform/s5p-mfc/Makefile b/drivers/media/platform/s5p-mfc/Makefile
index d0663409af00..379008c6d09a 100644
--- a/drivers/media/platform/s5p-mfc/Makefile
+++ b/drivers/media/platform/s5p-mfc/Makefile
@@ -1,5 +1,6 @@
1obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) := s5p-mfc.o 1obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) := s5p-mfc.o
2s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o s5p_mfc_opr.o 2s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o
3s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o 3s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o
4s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_cmd.o 4s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_pm.o
5s5p-mfc-y += s5p_mfc_pm.o s5p_mfc_shm.o 5s5p-mfc-y += s5p_mfc_opr.o s5p_mfc_opr_v5.o s5p_mfc_opr_v6.o
6s5p-mfc-y += s5p_mfc_cmd.o s5p_mfc_cmd_v5.o s5p_mfc_cmd_v6.o
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v6.h b/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
new file mode 100644
index 000000000000..363a97cc7681
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
@@ -0,0 +1,408 @@
1/*
2 * Register definition file for Samsung MFC V6.x Interface (FIMV) driver
3 *
4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _REGS_FIMV_V6_H
13#define _REGS_FIMV_V6_H
14
15#include <linux/kernel.h>
16#include <linux/sizes.h>
17
18#define S5P_FIMV_REG_SIZE_V6 (S5P_FIMV_END_ADDR - S5P_FIMV_START_ADDR)
19#define S5P_FIMV_REG_COUNT_V6 ((S5P_FIMV_END_ADDR - S5P_FIMV_START_ADDR) / 4)
20
21/* Number of bits that the buffer address should be shifted for particular
22 * MFC buffers. */
23#define S5P_FIMV_MEM_OFFSET_V6 0
24
25#define S5P_FIMV_START_ADDR_V6 0x0000
26#define S5P_FIMV_END_ADDR_V6 0xfd80
27
28#define S5P_FIMV_REG_CLEAR_BEGIN_V6 0xf000
29#define S5P_FIMV_REG_CLEAR_COUNT_V6 1024
30
31/* Codec Common Registers */
32#define S5P_FIMV_RISC_ON_V6 0x0000
33#define S5P_FIMV_RISC2HOST_INT_V6 0x003C
34#define S5P_FIMV_HOST2RISC_INT_V6 0x0044
35#define S5P_FIMV_RISC_BASE_ADDRESS_V6 0x0054
36
37#define S5P_FIMV_MFC_RESET_V6 0x1070
38
39#define S5P_FIMV_HOST2RISC_CMD_V6 0x1100
40#define S5P_FIMV_H2R_CMD_EMPTY_V6 0
41#define S5P_FIMV_H2R_CMD_SYS_INIT_V6 1
42#define S5P_FIMV_H2R_CMD_OPEN_INSTANCE_V6 2
43#define S5P_FIMV_CH_SEQ_HEADER_V6 3
44#define S5P_FIMV_CH_INIT_BUFS_V6 4
45#define S5P_FIMV_CH_FRAME_START_V6 5
46#define S5P_FIMV_H2R_CMD_CLOSE_INSTANCE_V6 6
47#define S5P_FIMV_H2R_CMD_SLEEP_V6 7
48#define S5P_FIMV_H2R_CMD_WAKEUP_V6 8
49#define S5P_FIMV_CH_LAST_FRAME_V6 9
50#define S5P_FIMV_H2R_CMD_FLUSH_V6 10
51/* RMVME: REALLOC used? */
52#define S5P_FIMV_CH_FRAME_START_REALLOC_V6 5
53
54#define S5P_FIMV_RISC2HOST_CMD_V6 0x1104
55#define S5P_FIMV_R2H_CMD_EMPTY_V6 0
56#define S5P_FIMV_R2H_CMD_SYS_INIT_RET_V6 1
57#define S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET_V6 2
58#define S5P_FIMV_R2H_CMD_SEQ_DONE_RET_V6 3
59#define S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET_V6 4
60
61#define S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET_V6 6
62#define S5P_FIMV_R2H_CMD_SLEEP_RET_V6 7
63#define S5P_FIMV_R2H_CMD_WAKEUP_RET_V6 8
64#define S5P_FIMV_R2H_CMD_COMPLETE_SEQ_RET_V6 9
65#define S5P_FIMV_R2H_CMD_DPB_FLUSH_RET_V6 10
66#define S5P_FIMV_R2H_CMD_NAL_ABORT_RET_V6 11
67#define S5P_FIMV_R2H_CMD_FW_STATUS_RET_V6 12
68#define S5P_FIMV_R2H_CMD_FRAME_DONE_RET_V6 13
69#define S5P_FIMV_R2H_CMD_FIELD_DONE_RET_V6 14
70#define S5P_FIMV_R2H_CMD_SLICE_DONE_RET_V6 15
71#define S5P_FIMV_R2H_CMD_ENC_BUFFER_FUL_RET_V6 16
72#define S5P_FIMV_R2H_CMD_ERR_RET_V6 32
73
74#define S5P_FIMV_FW_VERSION_V6 0xf000
75
76#define S5P_FIMV_INSTANCE_ID_V6 0xf008
77#define S5P_FIMV_CODEC_TYPE_V6 0xf00c
78#define S5P_FIMV_CONTEXT_MEM_ADDR_V6 0xf014
79#define S5P_FIMV_CONTEXT_MEM_SIZE_V6 0xf018
80#define S5P_FIMV_PIXEL_FORMAT_V6 0xf020
81
82#define S5P_FIMV_METADATA_ENABLE_V6 0xf024
83#define S5P_FIMV_DBG_BUFFER_ADDR_V6 0xf030
84#define S5P_FIMV_DBG_BUFFER_SIZE_V6 0xf034
85#define S5P_FIMV_RET_INSTANCE_ID_V6 0xf070
86
87#define S5P_FIMV_ERROR_CODE_V6 0xf074
88#define S5P_FIMV_ERR_WARNINGS_START_V6 160
89#define S5P_FIMV_ERR_DEC_MASK_V6 0xffff
90#define S5P_FIMV_ERR_DEC_SHIFT_V6 0
91#define S5P_FIMV_ERR_DSPL_MASK_V6 0xffff0000
92#define S5P_FIMV_ERR_DSPL_SHIFT_V6 16
93
94#define S5P_FIMV_DBG_BUFFER_OUTPUT_SIZE_V6 0xf078
95#define S5P_FIMV_METADATA_STATUS_V6 0xf07C
96#define S5P_FIMV_METADATA_ADDR_MB_INFO_V6 0xf080
97#define S5P_FIMV_METADATA_SIZE_MB_INFO_V6 0xf084
98
99/* Decoder Registers */
100#define S5P_FIMV_D_CRC_CTRL_V6 0xf0b0
101#define S5P_FIMV_D_DEC_OPTIONS_V6 0xf0b4
102#define S5P_FIMV_D_OPT_FMO_ASO_CTRL_MASK_V6 4
103#define S5P_FIMV_D_OPT_DDELAY_EN_SHIFT_V6 3
104#define S5P_FIMV_D_OPT_LF_CTRL_SHIFT_V6 1
105#define S5P_FIMV_D_OPT_LF_CTRL_MASK_V6 0x3
106#define S5P_FIMV_D_OPT_TILE_MODE_SHIFT_V6 0
107
108#define S5P_FIMV_D_DISPLAY_DELAY_V6 0xf0b8
109
110#define S5P_FIMV_D_SET_FRAME_WIDTH_V6 0xf0bc
111#define S5P_FIMV_D_SET_FRAME_HEIGHT_V6 0xf0c0
112
113#define S5P_FIMV_D_SEI_ENABLE_V6 0xf0c4
114
115/* Buffer setting registers */
116#define S5P_FIMV_D_MIN_NUM_DPB_V6 0xf0f0
117#define S5P_FIMV_D_MIN_LUMA_DPB_SIZE_V6 0xf0f4
118#define S5P_FIMV_D_MIN_CHROMA_DPB_SIZE_V6 0xf0f8
119#define S5P_FIMV_D_MVC_NUM_VIEWS_V6 0xf0fc
120#define S5P_FIMV_D_MIN_NUM_MV_V6 0xf100
121#define S5P_FIMV_D_NUM_DPB_V6 0xf130
122#define S5P_FIMV_D_LUMA_DPB_SIZE_V6 0xf134
123#define S5P_FIMV_D_CHROMA_DPB_SIZE_V6 0xf138
124#define S5P_FIMV_D_MV_BUFFER_SIZE_V6 0xf13c
125
126#define S5P_FIMV_D_LUMA_DPB_V6 0xf140
127#define S5P_FIMV_D_CHROMA_DPB_V6 0xf240
128#define S5P_FIMV_D_MV_BUFFER_V6 0xf340
129
130#define S5P_FIMV_D_SCRATCH_BUFFER_ADDR_V6 0xf440
131#define S5P_FIMV_D_SCRATCH_BUFFER_SIZE_V6 0xf444
132#define S5P_FIMV_D_METADATA_BUFFER_ADDR_V6 0xf448
133#define S5P_FIMV_D_METADATA_BUFFER_SIZE_V6 0xf44c
134#define S5P_FIMV_D_NUM_MV_V6 0xf478
135#define S5P_FIMV_D_CPB_BUFFER_ADDR_V6 0xf4b0
136#define S5P_FIMV_D_CPB_BUFFER_SIZE_V6 0xf4b4
137
138#define S5P_FIMV_D_AVAILABLE_DPB_FLAG_UPPER_V6 0xf4b8
139#define S5P_FIMV_D_AVAILABLE_DPB_FLAG_LOWER_V6 0xf4bc
140#define S5P_FIMV_D_CPB_BUFFER_OFFSET_V6 0xf4c0
141#define S5P_FIMV_D_SLICE_IF_ENABLE_V6 0xf4c4
142#define S5P_FIMV_D_PICTURE_TAG_V6 0xf4c8
143#define S5P_FIMV_D_STREAM_DATA_SIZE_V6 0xf4d0
144
145/* Display information register */
146#define S5P_FIMV_D_DISPLAY_FRAME_WIDTH_V6 0xf500
147#define S5P_FIMV_D_DISPLAY_FRAME_HEIGHT_V6 0xf504
148
149/* Display status */
150#define S5P_FIMV_D_DISPLAY_STATUS_V6 0xf508
151
152#define S5P_FIMV_D_DISPLAY_LUMA_ADDR_V6 0xf50c
153#define S5P_FIMV_D_DISPLAY_CHROMA_ADDR_V6 0xf510
154
155#define S5P_FIMV_D_DISPLAY_FRAME_TYPE_V6 0xf514
156
157#define S5P_FIMV_D_DISPLAY_CROP_INFO1_V6 0xf518
158#define S5P_FIMV_D_DISPLAY_CROP_INFO2_V6 0xf51c
159#define S5P_FIMV_D_DISPLAY_PICTURE_PROFILE_V6 0xf520
160#define S5P_FIMV_D_DISPLAY_LUMA_CRC_TOP_V6 0xf524
161#define S5P_FIMV_D_DISPLAY_CHROMA_CRC_TOP_V6 0xf528
162#define S5P_FIMV_D_DISPLAY_LUMA_CRC_BOT_V6 0xf52c
163#define S5P_FIMV_D_DISPLAY_CHROMA_CRC_BOT_V6 0xf530
164#define S5P_FIMV_D_DISPLAY_ASPECT_RATIO_V6 0xf534
165#define S5P_FIMV_D_DISPLAY_EXTENDED_AR_V6 0xf538
166
167/* Decoded picture information register */
168#define S5P_FIMV_D_DECODED_FRAME_WIDTH_V6 0xf53c
169#define S5P_FIMV_D_DECODED_FRAME_HEIGHT_V6 0xf540
170#define S5P_FIMV_D_DECODED_STATUS_V6 0xf544
171#define S5P_FIMV_DEC_CRC_GEN_MASK_V6 0x1
172#define S5P_FIMV_DEC_CRC_GEN_SHIFT_V6 6
173
174#define S5P_FIMV_D_DECODED_LUMA_ADDR_V6 0xf548
175#define S5P_FIMV_D_DECODED_CHROMA_ADDR_V6 0xf54c
176
177#define S5P_FIMV_D_DECODED_FRAME_TYPE_V6 0xf550
178#define S5P_FIMV_DECODE_FRAME_MASK_V6 7
179
180#define S5P_FIMV_D_DECODED_CROP_INFO1_V6 0xf554
181#define S5P_FIMV_D_DECODED_CROP_INFO2_V6 0xf558
182#define S5P_FIMV_D_DECODED_PICTURE_PROFILE_V6 0xf55c
183#define S5P_FIMV_D_DECODED_NAL_SIZE_V6 0xf560
184#define S5P_FIMV_D_DECODED_LUMA_CRC_TOP_V6 0xf564
185#define S5P_FIMV_D_DECODED_CHROMA_CRC_TOP_V6 0xf568
186#define S5P_FIMV_D_DECODED_LUMA_CRC_BOT_V6 0xf56c
187#define S5P_FIMV_D_DECODED_CHROMA_CRC_BOT_V6 0xf570
188
189/* Returned value register for specific setting */
190#define S5P_FIMV_D_RET_PICTURE_TAG_TOP_V6 0xf574
191#define S5P_FIMV_D_RET_PICTURE_TAG_BOT_V6 0xf578
192#define S5P_FIMV_D_RET_PICTURE_TIME_TOP_V6 0xf57c
193#define S5P_FIMV_D_RET_PICTURE_TIME_BOT_V6 0xf580
194#define S5P_FIMV_D_CHROMA_FORMAT_V6 0xf588
195#define S5P_FIMV_D_MPEG4_INFO_V6 0xf58c
196#define S5P_FIMV_D_H264_INFO_V6 0xf590
197
198#define S5P_FIMV_D_METADATA_ADDR_CONCEALED_MB_V6 0xf594
199#define S5P_FIMV_D_METADATA_SIZE_CONCEALED_MB_V6 0xf598
200#define S5P_FIMV_D_METADATA_ADDR_VC1_PARAM_V6 0xf59c
201#define S5P_FIMV_D_METADATA_SIZE_VC1_PARAM_V6 0xf5a0
202#define S5P_FIMV_D_METADATA_ADDR_SEI_NAL_V6 0xf5a4
203#define S5P_FIMV_D_METADATA_SIZE_SEI_NAL_V6 0xf5a8
204#define S5P_FIMV_D_METADATA_ADDR_VUI_V6 0xf5ac
205#define S5P_FIMV_D_METADATA_SIZE_VUI_V6 0xf5b0
206
207#define S5P_FIMV_D_MVC_VIEW_ID_V6 0xf5b4
208
209/* SEI related information */
210#define S5P_FIMV_D_FRAME_PACK_SEI_AVAIL_V6 0xf5f0
211#define S5P_FIMV_D_FRAME_PACK_ARRGMENT_ID_V6 0xf5f4
212#define S5P_FIMV_D_FRAME_PACK_SEI_INFO_V6 0xf5f8
213#define S5P_FIMV_D_FRAME_PACK_GRID_POS_V6 0xf5fc
214
215/* Encoder Registers */
216#define S5P_FIMV_E_FRAME_WIDTH_V6 0xf770
217#define S5P_FIMV_E_FRAME_HEIGHT_V6 0xf774
218#define S5P_FIMV_E_CROPPED_FRAME_WIDTH_V6 0xf778
219#define S5P_FIMV_E_CROPPED_FRAME_HEIGHT_V6 0xf77c
220#define S5P_FIMV_E_FRAME_CROP_OFFSET_V6 0xf780
221#define S5P_FIMV_E_ENC_OPTIONS_V6 0xf784
222#define S5P_FIMV_E_PICTURE_PROFILE_V6 0xf788
223#define S5P_FIMV_E_FIXED_PICTURE_QP_V6 0xf790
224
225#define S5P_FIMV_E_RC_CONFIG_V6 0xf794
226#define S5P_FIMV_E_RC_QP_BOUND_V6 0xf798
227#define S5P_FIMV_E_RC_RPARAM_V6 0xf79c
228#define S5P_FIMV_E_MB_RC_CONFIG_V6 0xf7a0
229#define S5P_FIMV_E_PADDING_CTRL_V6 0xf7a4
230#define S5P_FIMV_E_MV_HOR_RANGE_V6 0xf7ac
231#define S5P_FIMV_E_MV_VER_RANGE_V6 0xf7b0
232
233#define S5P_FIMV_E_VBV_BUFFER_SIZE_V6 0xf84c
234#define S5P_FIMV_E_VBV_INIT_DELAY_V6 0xf850
235#define S5P_FIMV_E_NUM_DPB_V6 0xf890
236#define S5P_FIMV_E_LUMA_DPB_V6 0xf8c0
237#define S5P_FIMV_E_CHROMA_DPB_V6 0xf904
238#define S5P_FIMV_E_ME_BUFFER_V6 0xf948
239
240#define S5P_FIMV_E_SCRATCH_BUFFER_ADDR_V6 0xf98c
241#define S5P_FIMV_E_SCRATCH_BUFFER_SIZE_V6 0xf990
242#define S5P_FIMV_E_TMV_BUFFER0_V6 0xf994
243#define S5P_FIMV_E_TMV_BUFFER1_V6 0xf998
244#define S5P_FIMV_E_SOURCE_LUMA_ADDR_V6 0xf9f0
245#define S5P_FIMV_E_SOURCE_CHROMA_ADDR_V6 0xf9f4
246#define S5P_FIMV_E_STREAM_BUFFER_ADDR_V6 0xf9f8
247#define S5P_FIMV_E_STREAM_BUFFER_SIZE_V6 0xf9fc
248#define S5P_FIMV_E_ROI_BUFFER_ADDR_V6 0xfA00
249
250#define S5P_FIMV_E_PARAM_CHANGE_V6 0xfa04
251#define S5P_FIMV_E_IR_SIZE_V6 0xfa08
252#define S5P_FIMV_E_GOP_CONFIG_V6 0xfa0c
253#define S5P_FIMV_E_MSLICE_MODE_V6 0xfa10
254#define S5P_FIMV_E_MSLICE_SIZE_MB_V6 0xfa14
255#define S5P_FIMV_E_MSLICE_SIZE_BITS_V6 0xfa18
256#define S5P_FIMV_E_FRAME_INSERTION_V6 0xfa1c
257
258#define S5P_FIMV_E_RC_FRAME_RATE_V6 0xfa20
259#define S5P_FIMV_E_RC_BIT_RATE_V6 0xfa24
260#define S5P_FIMV_E_RC_QP_OFFSET_V6 0xfa28
261#define S5P_FIMV_E_RC_ROI_CTRL_V6 0xfa2c
262#define S5P_FIMV_E_PICTURE_TAG_V6 0xfa30
263#define S5P_FIMV_E_BIT_COUNT_ENABLE_V6 0xfa34
264#define S5P_FIMV_E_MAX_BIT_COUNT_V6 0xfa38
265#define S5P_FIMV_E_MIN_BIT_COUNT_V6 0xfa3c
266
267#define S5P_FIMV_E_METADATA_BUFFER_ADDR_V6 0xfa40
268#define S5P_FIMV_E_METADATA_BUFFER_SIZE_V6 0xfa44
269#define S5P_FIMV_E_STREAM_SIZE_V6 0xfa80
270#define S5P_FIMV_E_SLICE_TYPE_V6 0xfa84
271#define S5P_FIMV_E_PICTURE_COUNT_V6 0xfa88
272#define S5P_FIMV_E_RET_PICTURE_TAG_V6 0xfa8c
273#define S5P_FIMV_E_STREAM_BUFFER_WRITE_POINTER_V6 0xfa90
274
275#define S5P_FIMV_E_ENCODED_SOURCE_LUMA_ADDR_V6 0xfa94
276#define S5P_FIMV_E_ENCODED_SOURCE_CHROMA_ADDR_V6 0xfa98
277#define S5P_FIMV_E_RECON_LUMA_DPB_ADDR_V6 0xfa9c
278#define S5P_FIMV_E_RECON_CHROMA_DPB_ADDR_V6 0xfaa0
279#define S5P_FIMV_E_METADATA_ADDR_ENC_SLICE_V6 0xfaa4
280#define S5P_FIMV_E_METADATA_SIZE_ENC_SLICE_V6 0xfaa8
281
282#define S5P_FIMV_E_MPEG4_OPTIONS_V6 0xfb10
283#define S5P_FIMV_E_MPEG4_HEC_PERIOD_V6 0xfb14
284#define S5P_FIMV_E_ASPECT_RATIO_V6 0xfb50
285#define S5P_FIMV_E_EXTENDED_SAR_V6 0xfb54
286
287#define S5P_FIMV_E_H264_OPTIONS_V6 0xfb58
288#define S5P_FIMV_E_H264_LF_ALPHA_OFFSET_V6 0xfb5c
289#define S5P_FIMV_E_H264_LF_BETA_OFFSET_V6 0xfb60
290#define S5P_FIMV_E_H264_I_PERIOD_V6 0xfb64
291
292#define S5P_FIMV_E_H264_FMO_SLICE_GRP_MAP_TYPE_V6 0xfb68
293#define S5P_FIMV_E_H264_FMO_NUM_SLICE_GRP_MINUS1_V6 0xfb6c
294#define S5P_FIMV_E_H264_FMO_SLICE_GRP_CHANGE_DIR_V6 0xfb70
295#define S5P_FIMV_E_H264_FMO_SLICE_GRP_CHANGE_RATE_MINUS1_V6 0xfb74
296#define S5P_FIMV_E_H264_FMO_RUN_LENGTH_MINUS1_0_V6 0xfb78
297#define S5P_FIMV_E_H264_FMO_RUN_LENGTH_MINUS1_1_V6 0xfb7c
298#define S5P_FIMV_E_H264_FMO_RUN_LENGTH_MINUS1_2_V6 0xfb80
299#define S5P_FIMV_E_H264_FMO_RUN_LENGTH_MINUS1_3_V6 0xfb84
300
301#define S5P_FIMV_E_H264_ASO_SLICE_ORDER_0_V6 0xfb88
302#define S5P_FIMV_E_H264_ASO_SLICE_ORDER_1_V6 0xfb8c
303#define S5P_FIMV_E_H264_ASO_SLICE_ORDER_2_V6 0xfb90
304#define S5P_FIMV_E_H264_ASO_SLICE_ORDER_3_V6 0xfb94
305#define S5P_FIMV_E_H264_ASO_SLICE_ORDER_4_V6 0xfb98
306#define S5P_FIMV_E_H264_ASO_SLICE_ORDER_5_V6 0xfb9c
307#define S5P_FIMV_E_H264_ASO_SLICE_ORDER_6_V6 0xfba0
308#define S5P_FIMV_E_H264_ASO_SLICE_ORDER_7_V6 0xfba4
309
310#define S5P_FIMV_E_H264_CHROMA_QP_OFFSET_V6 0xfba8
311#define S5P_FIMV_E_H264_NUM_T_LAYER_V6 0xfbac
312
313#define S5P_FIMV_E_H264_HIERARCHICAL_QP_LAYER0_V6 0xfbb0
314#define S5P_FIMV_E_H264_HIERARCHICAL_QP_LAYER1_V6 0xfbb4
315#define S5P_FIMV_E_H264_HIERARCHICAL_QP_LAYER2_V6 0xfbb8
316#define S5P_FIMV_E_H264_HIERARCHICAL_QP_LAYER3_V6 0xfbbc
317#define S5P_FIMV_E_H264_HIERARCHICAL_QP_LAYER4_V6 0xfbc0
318#define S5P_FIMV_E_H264_HIERARCHICAL_QP_LAYER5_V6 0xfbc4
319#define S5P_FIMV_E_H264_HIERARCHICAL_QP_LAYER6_V6 0xfbc8
320
321#define S5P_FIMV_E_H264_FRAME_PACKING_SEI_INFO_V6 0xfc4c
322#define S5P_FIMV_ENC_FP_ARRANGEMENT_TYPE_SIDE_BY_SIDE_V6 0
323#define S5P_FIMV_ENC_FP_ARRANGEMENT_TYPE_TOP_BOTTOM_V6 1
324#define S5P_FIMV_ENC_FP_ARRANGEMENT_TYPE_TEMPORAL_V6 2
325
326#define S5P_FIMV_E_MVC_FRAME_QP_VIEW1_V6 0xfd40
327#define S5P_FIMV_E_MVC_RC_FRAME_RATE_VIEW1_V6 0xfd44
328#define S5P_FIMV_E_MVC_RC_BIT_RATE_VIEW1_V6 0xfd48
329#define S5P_FIMV_E_MVC_RC_QBOUND_VIEW1_V6 0xfd4c
330#define S5P_FIMV_E_MVC_RC_RPARA_VIEW1_V6 0xfd50
331#define S5P_FIMV_E_MVC_INTER_VIEW_PREDICTION_ON_V6 0xfd80
332
333/* Codec numbers */
334#define S5P_FIMV_CODEC_NONE_V6 -1
335
336
337#define S5P_FIMV_CODEC_H264_DEC_V6 0
338#define S5P_FIMV_CODEC_H264_MVC_DEC_V6 1
339
340#define S5P_FIMV_CODEC_MPEG4_DEC_V6 3
341#define S5P_FIMV_CODEC_FIMV1_DEC_V6 4
342#define S5P_FIMV_CODEC_FIMV2_DEC_V6 5
343#define S5P_FIMV_CODEC_FIMV3_DEC_V6 6
344#define S5P_FIMV_CODEC_FIMV4_DEC_V6 7
345#define S5P_FIMV_CODEC_H263_DEC_V6 8
346#define S5P_FIMV_CODEC_VC1RCV_DEC_V6 9
347#define S5P_FIMV_CODEC_VC1_DEC_V6 10
348/* FIXME: Add 11~12 */
349#define S5P_FIMV_CODEC_MPEG2_DEC_V6 13
350#define S5P_FIMV_CODEC_VP8_DEC_V6 14
351/* FIXME: Add 15~16 */
352#define S5P_FIMV_CODEC_H264_ENC_V6 20
353#define S5P_FIMV_CODEC_H264_MVC_ENC_V6 21
354
355#define S5P_FIMV_CODEC_MPEG4_ENC_V6 23
356#define S5P_FIMV_CODEC_H263_ENC_V6 24
357
358#define S5P_FIMV_NV12M_HALIGN_V6 16
359#define S5P_FIMV_NV12MT_HALIGN_V6 16
360#define S5P_FIMV_NV12MT_VALIGN_V6 16
361
362#define S5P_FIMV_TMV_BUFFER_ALIGN_V6 16
363#define S5P_FIMV_LUMA_DPB_BUFFER_ALIGN_V6 256
364#define S5P_FIMV_CHROMA_DPB_BUFFER_ALIGN_V6 256
365#define S5P_FIMV_ME_BUFFER_ALIGN_V6 256
366#define S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6 256
367
368#define S5P_FIMV_LUMA_MB_TO_PIXEL_V6 256
369#define S5P_FIMV_CHROMA_MB_TO_PIXEL_V6 128
370#define S5P_FIMV_NUM_TMV_BUFFERS_V6 2
371
372#define S5P_FIMV_MAX_FRAME_SIZE_V6 (2 * SZ_1M)
373#define S5P_FIMV_NUM_PIXELS_IN_MB_ROW_V6 16
374#define S5P_FIMV_NUM_PIXELS_IN_MB_COL_V6 16
375
376/* Buffer size requirements defined by hardware */
377#define S5P_FIMV_TMV_BUFFER_SIZE_V6(w, h) (((w) + 1) * ((h) + 1) * 8)
378#define S5P_FIMV_ME_BUFFER_SIZE_V6(imw, imh, mbw, mbh) \
379 ((DIV_ROUND_UP(imw, 64) * DIV_ROUND_UP(imh, 64) * 256) + \
380 (DIV_ROUND_UP((mbw) * (mbh), 32) * 16))
381#define S5P_FIMV_SCRATCH_BUF_SIZE_H264_DEC_V6(w, h) (((w) * 192) + 64)
382#define S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_DEC_V6(w, h) \
383 ((w) * ((h) * 64 + 144) + (2048/16 * (h) * 64) + \
384 (2048/16 * 256 + 8320))
385#define S5P_FIMV_SCRATCH_BUF_SIZE_VC1_DEC_V6(w, h) \
386 (2096 * ((w) + (h) + 1))
387#define S5P_FIMV_SCRATCH_BUF_SIZE_H263_DEC_V6(w, h) ((w) * 400)
388#define S5P_FIMV_SCRATCH_BUF_SIZE_VP8_DEC_V6(w, h) \
389 ((w) * 32 + (h) * 128 + (((w) + 1) / 2) * 64 + 2112)
390#define S5P_FIMV_SCRATCH_BUF_SIZE_H264_ENC_V6(w, h) \
391 (((w) * 64) + (((w) + 1) * 16) + (4096 * 16))
392#define S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_ENC_V6(w, h) \
393 (((w) * 16) + (((w) + 1) * 16))
394
395/* MFC Context buffer sizes */
396#define MFC_CTX_BUF_SIZE_V6 (28 * SZ_1K) /* 28KB */
397#define MFC_H264_DEC_CTX_BUF_SIZE_V6 (2 * SZ_1M) /* 2MB */
398#define MFC_OTHER_DEC_CTX_BUF_SIZE_V6 (20 * SZ_1K) /* 20KB */
399#define MFC_H264_ENC_CTX_BUF_SIZE_V6 (100 * SZ_1K) /* 100KB */
400#define MFC_OTHER_ENC_CTX_BUF_SIZE_V6 (12 * SZ_1K) /* 12KB */
401
402/* MFCv6 variant defines */
403#define MAX_FW_SIZE_V6 (SZ_1M) /* 1MB */
404#define MAX_CPB_SIZE_V6 (3 * SZ_1M) /* 3MB */
405#define MFC_VERSION_V6 0x61
406#define MFC_NUM_PORTS_V6 1
407
408#endif /* _REGS_FIMV_V6_H */
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc.h b/drivers/media/platform/s5p-mfc/regs-mfc.h
index a19bece41ba9..9319e93599ae 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc.h
+++ b/drivers/media/platform/s5p-mfc/regs-mfc.h
@@ -12,6 +12,9 @@
12#ifndef _REGS_FIMV_H 12#ifndef _REGS_FIMV_H
13#define _REGS_FIMV_H 13#define _REGS_FIMV_H
14 14
15#include <linux/kernel.h>
16#include <linux/sizes.h>
17
15#define S5P_FIMV_REG_SIZE (S5P_FIMV_END_ADDR - S5P_FIMV_START_ADDR) 18#define S5P_FIMV_REG_SIZE (S5P_FIMV_END_ADDR - S5P_FIMV_START_ADDR)
16#define S5P_FIMV_REG_COUNT ((S5P_FIMV_END_ADDR - S5P_FIMV_START_ADDR) / 4) 19#define S5P_FIMV_REG_COUNT ((S5P_FIMV_END_ADDR - S5P_FIMV_START_ADDR) / 4)
17 20
@@ -144,6 +147,7 @@
144#define S5P_FIMV_ENC_PROFILE_H264_MAIN 0 147#define S5P_FIMV_ENC_PROFILE_H264_MAIN 0
145#define S5P_FIMV_ENC_PROFILE_H264_HIGH 1 148#define S5P_FIMV_ENC_PROFILE_H264_HIGH 1
146#define S5P_FIMV_ENC_PROFILE_H264_BASELINE 2 149#define S5P_FIMV_ENC_PROFILE_H264_BASELINE 2
150#define S5P_FIMV_ENC_PROFILE_H264_CONSTRAINED_BASELINE 3
147#define S5P_FIMV_ENC_PROFILE_MPEG4_SIMPLE 0 151#define S5P_FIMV_ENC_PROFILE_MPEG4_SIMPLE 0
148#define S5P_FIMV_ENC_PROFILE_MPEG4_ADVANCED_SIMPLE 1 152#define S5P_FIMV_ENC_PROFILE_MPEG4_ADVANCED_SIMPLE 1
149#define S5P_FIMV_ENC_PIC_STRUCT 0x083c /* picture field/frame flag */ 153#define S5P_FIMV_ENC_PIC_STRUCT 0x083c /* picture field/frame flag */
@@ -213,6 +217,7 @@
213#define S5P_FIMV_DEC_STATUS_RESOLUTION_MASK (3<<4) 217#define S5P_FIMV_DEC_STATUS_RESOLUTION_MASK (3<<4)
214#define S5P_FIMV_DEC_STATUS_RESOLUTION_INC (1<<4) 218#define S5P_FIMV_DEC_STATUS_RESOLUTION_INC (1<<4)
215#define S5P_FIMV_DEC_STATUS_RESOLUTION_DEC (2<<4) 219#define S5P_FIMV_DEC_STATUS_RESOLUTION_DEC (2<<4)
220#define S5P_FIMV_DEC_STATUS_RESOLUTION_SHIFT 4
216 221
217/* Decode frame address */ 222/* Decode frame address */
218#define S5P_FIMV_DECODE_Y_ADR 0x2024 223#define S5P_FIMV_DECODE_Y_ADR 0x2024
@@ -377,6 +382,16 @@
377#define S5P_FIMV_R2H_CMD_EDFU_INIT_RET 16 382#define S5P_FIMV_R2H_CMD_EDFU_INIT_RET 16
378#define S5P_FIMV_R2H_CMD_ERR_RET 32 383#define S5P_FIMV_R2H_CMD_ERR_RET 32
379 384
385/* Dummy definition for MFCv6 compatibilty */
386#define S5P_FIMV_CODEC_H264_MVC_DEC -1
387#define S5P_FIMV_R2H_CMD_FIELD_DONE_RET -1
388#define S5P_FIMV_MFC_RESET -1
389#define S5P_FIMV_RISC_ON -1
390#define S5P_FIMV_RISC_BASE_ADDRESS -1
391#define S5P_FIMV_CODEC_VP8_DEC -1
392#define S5P_FIMV_REG_CLEAR_BEGIN 0
393#define S5P_FIMV_REG_CLEAR_COUNT 0
394
380/* Error handling defines */ 395/* Error handling defines */
381#define S5P_FIMV_ERR_WARNINGS_START 145 396#define S5P_FIMV_ERR_WARNINGS_START 145
382#define S5P_FIMV_ERR_DEC_MASK 0xFFFF 397#define S5P_FIMV_ERR_DEC_MASK 0xFFFF
@@ -414,5 +429,31 @@
414#define S5P_FIMV_SHARED_EXTENDED_SAR 0x0078 429#define S5P_FIMV_SHARED_EXTENDED_SAR 0x0078
415#define S5P_FIMV_SHARED_H264_I_PERIOD 0x009C 430#define S5P_FIMV_SHARED_H264_I_PERIOD 0x009C
416#define S5P_FIMV_SHARED_RC_CONTROL_CONFIG 0x00A0 431#define S5P_FIMV_SHARED_RC_CONTROL_CONFIG 0x00A0
432#define S5P_FIMV_SHARED_DISP_FRAME_TYPE_SHIFT 2
433
434/* Offset used by the hardware to store addresses */
435#define MFC_OFFSET_SHIFT 11
436
437#define FIRMWARE_ALIGN (128 * SZ_1K) /* 128KB */
438#define MFC_H264_CTX_BUF_SIZE (600 * SZ_1K) /* 600KB per H264 instance */
439#define MFC_CTX_BUF_SIZE (10 * SZ_1K) /* 10KB per instance */
440#define DESC_BUF_SIZE (128 * SZ_1K) /* 128KB for DESC buffer */
441#define SHARED_BUF_SIZE (8 * SZ_1K) /* 8KB for shared buffer */
442
443#define DEF_CPB_SIZE (256 * SZ_1K) /* 256KB */
444#define MAX_CPB_SIZE (4 * SZ_1M) /* 4MB */
445#define MAX_FW_SIZE (384 * SZ_1K)
446
447#define MFC_VERSION 0x51
448#define MFC_NUM_PORTS 2
449
450#define S5P_FIMV_SHARED_FRAME_PACK_SEI_AVAIL 0x16C
451#define S5P_FIMV_SHARED_FRAME_PACK_ARRGMENT_ID 0x170
452#define S5P_FIMV_SHARED_FRAME_PACK_SEI_INFO 0x174
453#define S5P_FIMV_SHARED_FRAME_PACK_GRID_POS 0x178
454
455/* Values for resolution change in display status */
456#define S5P_FIMV_RES_INCREASE 1
457#define S5P_FIMV_RES_DECREASE 2
417 458
418#endif /* _REGS_FIMV_H */ 459#endif /* _REGS_FIMV_H */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 5587ef15ca4f..130f4ac8649e 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -22,15 +22,15 @@
22#include <media/v4l2-event.h> 22#include <media/v4l2-event.h>
23#include <linux/workqueue.h> 23#include <linux/workqueue.h>
24#include <media/videobuf2-core.h> 24#include <media/videobuf2-core.h>
25#include "regs-mfc.h" 25#include "s5p_mfc_common.h"
26#include "s5p_mfc_ctrl.h" 26#include "s5p_mfc_ctrl.h"
27#include "s5p_mfc_debug.h" 27#include "s5p_mfc_debug.h"
28#include "s5p_mfc_dec.h" 28#include "s5p_mfc_dec.h"
29#include "s5p_mfc_enc.h" 29#include "s5p_mfc_enc.h"
30#include "s5p_mfc_intr.h" 30#include "s5p_mfc_intr.h"
31#include "s5p_mfc_opr.h" 31#include "s5p_mfc_opr.h"
32#include "s5p_mfc_cmd.h"
32#include "s5p_mfc_pm.h" 33#include "s5p_mfc_pm.h"
33#include "s5p_mfc_shm.h"
34 34
35#define S5P_MFC_NAME "s5p-mfc" 35#define S5P_MFC_NAME "s5p-mfc"
36#define S5P_MFC_DEC_NAME "s5p-mfc-dec" 36#define S5P_MFC_DEC_NAME "s5p-mfc-dec"
@@ -149,10 +149,12 @@ static void s5p_mfc_watchdog_worker(struct work_struct *work)
149 if (!ctx) 149 if (!ctx)
150 continue; 150 continue;
151 ctx->state = MFCINST_ERROR; 151 ctx->state = MFCINST_ERROR;
152 s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst); 152 s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->dst_queue,
153 s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src); 153 &ctx->vq_dst);
154 s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->src_queue,
155 &ctx->vq_src);
154 clear_work_bit(ctx); 156 clear_work_bit(ctx);
155 wake_up_ctx(ctx, S5P_FIMV_R2H_CMD_ERR_RET, 0); 157 wake_up_ctx(ctx, S5P_MFC_R2H_CMD_ERR_RET, 0);
156 } 158 }
157 clear_bit(0, &dev->hw_lock); 159 clear_bit(0, &dev->hw_lock);
158 spin_unlock_irqrestore(&dev->irqlock, flags); 160 spin_unlock_irqrestore(&dev->irqlock, flags);
@@ -199,6 +201,7 @@ static void s5p_mfc_clear_int_flags(struct s5p_mfc_dev *dev)
199static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx) 201static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
200{ 202{
201 struct s5p_mfc_buf *dst_buf; 203 struct s5p_mfc_buf *dst_buf;
204 struct s5p_mfc_dev *dev = ctx->dev;
202 205
203 ctx->state = MFCINST_FINISHED; 206 ctx->state = MFCINST_FINISHED;
204 ctx->sequence++; 207 ctx->sequence++;
@@ -213,8 +216,8 @@ static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
213 ctx->dst_queue_cnt--; 216 ctx->dst_queue_cnt--;
214 dst_buf->b->v4l2_buf.sequence = (ctx->sequence++); 217 dst_buf->b->v4l2_buf.sequence = (ctx->sequence++);
215 218
216 if (s5p_mfc_read_shm(ctx, PIC_TIME_TOP) == 219 if (s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_top, ctx) ==
217 s5p_mfc_read_shm(ctx, PIC_TIME_BOT)) 220 s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_bot, ctx))
218 dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE; 221 dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
219 else 222 else
220 dst_buf->b->v4l2_buf.field = V4L2_FIELD_INTERLACED; 223 dst_buf->b->v4l2_buf.field = V4L2_FIELD_INTERLACED;
@@ -228,8 +231,11 @@ static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
228{ 231{
229 struct s5p_mfc_dev *dev = ctx->dev; 232 struct s5p_mfc_dev *dev = ctx->dev;
230 struct s5p_mfc_buf *dst_buf, *src_buf; 233 struct s5p_mfc_buf *dst_buf, *src_buf;
231 size_t dec_y_addr = s5p_mfc_get_dec_y_adr(); 234 size_t dec_y_addr;
232 unsigned int frame_type = s5p_mfc_get_frame_type(); 235 unsigned int frame_type;
236
237 dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
238 frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
233 239
234 /* Copy timestamp / timecode from decoded src to dst and set 240 /* Copy timestamp / timecode from decoded src to dst and set
235 appropraite flags */ 241 appropraite flags */
@@ -265,10 +271,13 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
265{ 271{
266 struct s5p_mfc_dev *dev = ctx->dev; 272 struct s5p_mfc_dev *dev = ctx->dev;
267 struct s5p_mfc_buf *dst_buf; 273 struct s5p_mfc_buf *dst_buf;
268 size_t dspl_y_addr = s5p_mfc_get_dspl_y_adr(); 274 size_t dspl_y_addr;
269 unsigned int frame_type = s5p_mfc_get_frame_type(); 275 unsigned int frame_type;
270 unsigned int index; 276 unsigned int index;
271 277
278 dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
279 frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
280
272 /* If frame is same as previous then skip and do not dequeue */ 281 /* If frame is same as previous then skip and do not dequeue */
273 if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) { 282 if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) {
274 if (!ctx->after_packed_pb) 283 if (!ctx->after_packed_pb)
@@ -285,8 +294,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
285 list_del(&dst_buf->list); 294 list_del(&dst_buf->list);
286 ctx->dst_queue_cnt--; 295 ctx->dst_queue_cnt--;
287 dst_buf->b->v4l2_buf.sequence = ctx->sequence; 296 dst_buf->b->v4l2_buf.sequence = ctx->sequence;
288 if (s5p_mfc_read_shm(ctx, PIC_TIME_TOP) == 297 if (s5p_mfc_hw_call(dev->mfc_ops,
289 s5p_mfc_read_shm(ctx, PIC_TIME_BOT)) 298 get_pic_type_top, ctx) ==
299 s5p_mfc_hw_call(dev->mfc_ops,
300 get_pic_type_bot, ctx))
290 dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE; 301 dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
291 else 302 else
292 dst_buf->b->v4l2_buf.field = 303 dst_buf->b->v4l2_buf.field =
@@ -317,21 +328,23 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
317 328
318 unsigned int index; 329 unsigned int index;
319 330
320 dst_frame_status = s5p_mfc_get_dspl_status() 331 dst_frame_status = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_status, dev)
321 & S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK; 332 & S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK;
322 res_change = s5p_mfc_get_dspl_status() 333 res_change = (s5p_mfc_hw_call(dev->mfc_ops, get_dspl_status, dev)
323 & S5P_FIMV_DEC_STATUS_RESOLUTION_MASK; 334 & S5P_FIMV_DEC_STATUS_RESOLUTION_MASK)
335 >> S5P_FIMV_DEC_STATUS_RESOLUTION_SHIFT;
324 mfc_debug(2, "Frame Status: %x\n", dst_frame_status); 336 mfc_debug(2, "Frame Status: %x\n", dst_frame_status);
325 if (ctx->state == MFCINST_RES_CHANGE_INIT) 337 if (ctx->state == MFCINST_RES_CHANGE_INIT)
326 ctx->state = MFCINST_RES_CHANGE_FLUSH; 338 ctx->state = MFCINST_RES_CHANGE_FLUSH;
327 if (res_change) { 339 if (res_change == S5P_FIMV_RES_INCREASE ||
340 res_change == S5P_FIMV_RES_DECREASE) {
328 ctx->state = MFCINST_RES_CHANGE_INIT; 341 ctx->state = MFCINST_RES_CHANGE_INIT;
329 s5p_mfc_clear_int_flags(dev); 342 s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
330 wake_up_ctx(ctx, reason, err); 343 wake_up_ctx(ctx, reason, err);
331 if (test_and_clear_bit(0, &dev->hw_lock) == 0) 344 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
332 BUG(); 345 BUG();
333 s5p_mfc_clock_off(); 346 s5p_mfc_clock_off();
334 s5p_mfc_try_run(dev); 347 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
335 return; 348 return;
336 } 349 }
337 if (ctx->dpb_flush_flag) 350 if (ctx->dpb_flush_flag)
@@ -365,9 +378,12 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
365 && !list_empty(&ctx->src_queue)) { 378 && !list_empty(&ctx->src_queue)) {
366 src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, 379 src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
367 list); 380 list);
368 ctx->consumed_stream += s5p_mfc_get_consumed_stream(); 381 ctx->consumed_stream += s5p_mfc_hw_call(dev->mfc_ops,
369 if (ctx->codec_mode != S5P_FIMV_CODEC_H264_DEC && 382 get_consumed_stream, dev);
370 s5p_mfc_get_frame_type() == S5P_FIMV_DECODE_FRAME_P_FRAME 383 if (ctx->codec_mode != S5P_MFC_CODEC_H264_DEC &&
384 s5p_mfc_hw_call(dev->mfc_ops,
385 get_dec_frame_type, dev) ==
386 S5P_FIMV_DECODE_FRAME_P_FRAME
371 && ctx->consumed_stream + STUFF_BYTE < 387 && ctx->consumed_stream + STUFF_BYTE <
372 src_buf->b->v4l2_planes[0].bytesused) { 388 src_buf->b->v4l2_planes[0].bytesused) {
373 /* Run MFC again on the same buffer */ 389 /* Run MFC again on the same buffer */
@@ -379,7 +395,7 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
379 ctx->consumed_stream = 0; 395 ctx->consumed_stream = 0;
380 list_del(&src_buf->list); 396 list_del(&src_buf->list);
381 ctx->src_queue_cnt--; 397 ctx->src_queue_cnt--;
382 if (s5p_mfc_err_dec(err) > 0) 398 if (s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) > 0)
383 vb2_buffer_done(src_buf->b, VB2_BUF_STATE_ERROR); 399 vb2_buffer_done(src_buf->b, VB2_BUF_STATE_ERROR);
384 else 400 else
385 vb2_buffer_done(src_buf->b, VB2_BUF_STATE_DONE); 401 vb2_buffer_done(src_buf->b, VB2_BUF_STATE_DONE);
@@ -390,12 +406,12 @@ leave_handle_frame:
390 if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING) 406 if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
391 || ctx->dst_queue_cnt < ctx->dpb_count) 407 || ctx->dst_queue_cnt < ctx->dpb_count)
392 clear_work_bit(ctx); 408 clear_work_bit(ctx);
393 s5p_mfc_clear_int_flags(dev); 409 s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
394 wake_up_ctx(ctx, reason, err); 410 wake_up_ctx(ctx, reason, err);
395 if (test_and_clear_bit(0, &dev->hw_lock) == 0) 411 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
396 BUG(); 412 BUG();
397 s5p_mfc_clock_off(); 413 s5p_mfc_clock_off();
398 s5p_mfc_try_run(dev); 414 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
399} 415}
400 416
401/* Error handling for interrupt */ 417/* Error handling for interrupt */
@@ -412,7 +428,7 @@ static void s5p_mfc_handle_error(struct s5p_mfc_ctx *ctx,
412 428
413 dev = ctx->dev; 429 dev = ctx->dev;
414 mfc_err("Interrupt Error: %08x\n", err); 430 mfc_err("Interrupt Error: %08x\n", err);
415 s5p_mfc_clear_int_flags(dev); 431 s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
416 wake_up_dev(dev, reason, err); 432 wake_up_dev(dev, reason, err);
417 433
418 /* Error recovery is dependent on the state of context */ 434 /* Error recovery is dependent on the state of context */
@@ -441,9 +457,11 @@ static void s5p_mfc_handle_error(struct s5p_mfc_ctx *ctx,
441 ctx->state = MFCINST_ERROR; 457 ctx->state = MFCINST_ERROR;
442 /* Mark all dst buffers as having an error */ 458 /* Mark all dst buffers as having an error */
443 spin_lock_irqsave(&dev->irqlock, flags); 459 spin_lock_irqsave(&dev->irqlock, flags);
444 s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst); 460 s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->dst_queue,
461 &ctx->vq_dst);
445 /* Mark all src buffers as having an error */ 462 /* Mark all src buffers as having an error */
446 s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src); 463 s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->src_queue,
464 &ctx->vq_src);
447 spin_unlock_irqrestore(&dev->irqlock, flags); 465 spin_unlock_irqrestore(&dev->irqlock, flags);
448 if (test_and_clear_bit(0, &dev->hw_lock) == 0) 466 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
449 BUG(); 467 BUG();
@@ -461,7 +479,6 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
461 unsigned int reason, unsigned int err) 479 unsigned int reason, unsigned int err)
462{ 480{
463 struct s5p_mfc_dev *dev; 481 struct s5p_mfc_dev *dev;
464 unsigned int guard_width, guard_height;
465 482
466 if (ctx == NULL) 483 if (ctx == NULL)
467 return; 484 return;
@@ -470,55 +487,44 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
470 if (ctx->c_ops->post_seq_start(ctx)) 487 if (ctx->c_ops->post_seq_start(ctx))
471 mfc_err("post_seq_start() failed\n"); 488 mfc_err("post_seq_start() failed\n");
472 } else { 489 } else {
473 ctx->img_width = s5p_mfc_get_img_width(); 490 ctx->img_width = s5p_mfc_hw_call(dev->mfc_ops, get_img_width,
474 ctx->img_height = s5p_mfc_get_img_height(); 491 dev);
475 492 ctx->img_height = s5p_mfc_hw_call(dev->mfc_ops, get_img_height,
476 ctx->buf_width = ALIGN(ctx->img_width, 493 dev);
477 S5P_FIMV_NV12MT_HALIGN); 494
478 ctx->buf_height = ALIGN(ctx->img_height, 495 s5p_mfc_hw_call(dev->mfc_ops, dec_calc_dpb_size, ctx);
479 S5P_FIMV_NV12MT_VALIGN); 496
480 mfc_debug(2, "SEQ Done: Movie dimensions %dx%d, " 497 ctx->dpb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count,
481 "buffer dimensions: %dx%d\n", ctx->img_width, 498 dev);
482 ctx->img_height, ctx->buf_width, 499 ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count,
483 ctx->buf_height); 500 dev);
484 if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC) {
485 ctx->luma_size = ALIGN(ctx->buf_width *
486 ctx->buf_height, S5P_FIMV_DEC_BUF_ALIGN);
487 ctx->chroma_size = ALIGN(ctx->buf_width *
488 ALIGN((ctx->img_height >> 1),
489 S5P_FIMV_NV12MT_VALIGN),
490 S5P_FIMV_DEC_BUF_ALIGN);
491 ctx->mv_size = ALIGN(ctx->buf_width *
492 ALIGN((ctx->buf_height >> 2),
493 S5P_FIMV_NV12MT_VALIGN),
494 S5P_FIMV_DEC_BUF_ALIGN);
495 } else {
496 guard_width = ALIGN(ctx->img_width + 24,
497 S5P_FIMV_NV12MT_HALIGN);
498 guard_height = ALIGN(ctx->img_height + 16,
499 S5P_FIMV_NV12MT_VALIGN);
500 ctx->luma_size = ALIGN(guard_width *
501 guard_height, S5P_FIMV_DEC_BUF_ALIGN);
502 guard_width = ALIGN(ctx->img_width + 16,
503 S5P_FIMV_NV12MT_HALIGN);
504 guard_height = ALIGN((ctx->img_height >> 1) + 4,
505 S5P_FIMV_NV12MT_VALIGN);
506 ctx->chroma_size = ALIGN(guard_width *
507 guard_height, S5P_FIMV_DEC_BUF_ALIGN);
508 ctx->mv_size = 0;
509 }
510 ctx->dpb_count = s5p_mfc_get_dpb_count();
511 if (ctx->img_width == 0 || ctx->img_height == 0) 501 if (ctx->img_width == 0 || ctx->img_height == 0)
512 ctx->state = MFCINST_ERROR; 502 ctx->state = MFCINST_ERROR;
513 else 503 else
514 ctx->state = MFCINST_HEAD_PARSED; 504 ctx->state = MFCINST_HEAD_PARSED;
505
506 if ((ctx->codec_mode == S5P_MFC_CODEC_H264_DEC ||
507 ctx->codec_mode == S5P_MFC_CODEC_H264_MVC_DEC) &&
508 !list_empty(&ctx->src_queue)) {
509 struct s5p_mfc_buf *src_buf;
510 src_buf = list_entry(ctx->src_queue.next,
511 struct s5p_mfc_buf, list);
512 if (s5p_mfc_hw_call(dev->mfc_ops, get_consumed_stream,
513 dev) <
514 src_buf->b->v4l2_planes[0].bytesused)
515 ctx->head_processed = 0;
516 else
517 ctx->head_processed = 1;
518 } else {
519 ctx->head_processed = 1;
520 }
515 } 521 }
516 s5p_mfc_clear_int_flags(dev); 522 s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
517 clear_work_bit(ctx); 523 clear_work_bit(ctx);
518 if (test_and_clear_bit(0, &dev->hw_lock) == 0) 524 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
519 BUG(); 525 BUG();
520 s5p_mfc_clock_off(); 526 s5p_mfc_clock_off();
521 s5p_mfc_try_run(dev); 527 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
522 wake_up_ctx(ctx, reason, err); 528 wake_up_ctx(ctx, reason, err);
523} 529}
524 530
@@ -533,14 +539,14 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
533 if (ctx == NULL) 539 if (ctx == NULL)
534 return; 540 return;
535 dev = ctx->dev; 541 dev = ctx->dev;
536 s5p_mfc_clear_int_flags(dev); 542 s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
537 ctx->int_type = reason; 543 ctx->int_type = reason;
538 ctx->int_err = err; 544 ctx->int_err = err;
539 ctx->int_cond = 1; 545 ctx->int_cond = 1;
540 clear_work_bit(ctx); 546 clear_work_bit(ctx);
541 if (err == 0) { 547 if (err == 0) {
542 ctx->state = MFCINST_RUNNING; 548 ctx->state = MFCINST_RUNNING;
543 if (!ctx->dpb_flush_flag) { 549 if (!ctx->dpb_flush_flag && ctx->head_processed) {
544 spin_lock_irqsave(&dev->irqlock, flags); 550 spin_lock_irqsave(&dev->irqlock, flags);
545 if (!list_empty(&ctx->src_queue)) { 551 if (!list_empty(&ctx->src_queue)) {
546 src_buf = list_entry(ctx->src_queue.next, 552 src_buf = list_entry(ctx->src_queue.next,
@@ -560,7 +566,7 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
560 s5p_mfc_clock_off(); 566 s5p_mfc_clock_off();
561 567
562 wake_up(&ctx->queue); 568 wake_up(&ctx->queue);
563 s5p_mfc_try_run(dev); 569 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
564 } else { 570 } else {
565 if (test_and_clear_bit(0, &dev->hw_lock) == 0) 571 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
566 BUG(); 572 BUG();
@@ -602,7 +608,7 @@ static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx,
602 608
603 s5p_mfc_clock_off(); 609 s5p_mfc_clock_off();
604 wake_up(&ctx->queue); 610 wake_up(&ctx->queue);
605 s5p_mfc_try_run(dev); 611 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
606} 612}
607 613
608/* Interrupt processing */ 614/* Interrupt processing */
@@ -618,81 +624,83 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
618 atomic_set(&dev->watchdog_cnt, 0); 624 atomic_set(&dev->watchdog_cnt, 0);
619 ctx = dev->ctx[dev->curr_ctx]; 625 ctx = dev->ctx[dev->curr_ctx];
620 /* Get the reason of interrupt and the error code */ 626 /* Get the reason of interrupt and the error code */
621 reason = s5p_mfc_get_int_reason(); 627 reason = s5p_mfc_hw_call(dev->mfc_ops, get_int_reason, dev);
622 err = s5p_mfc_get_int_err(); 628 err = s5p_mfc_hw_call(dev->mfc_ops, get_int_err, dev);
623 mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err); 629 mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err);
624 switch (reason) { 630 switch (reason) {
625 case S5P_FIMV_R2H_CMD_ERR_RET: 631 case S5P_MFC_R2H_CMD_ERR_RET:
626 /* An error has occured */ 632 /* An error has occured */
627 if (ctx->state == MFCINST_RUNNING && 633 if (ctx->state == MFCINST_RUNNING &&
628 s5p_mfc_err_dec(err) >= S5P_FIMV_ERR_WARNINGS_START) 634 s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >=
635 dev->warn_start)
629 s5p_mfc_handle_frame(ctx, reason, err); 636 s5p_mfc_handle_frame(ctx, reason, err);
630 else 637 else
631 s5p_mfc_handle_error(ctx, reason, err); 638 s5p_mfc_handle_error(ctx, reason, err);
632 clear_bit(0, &dev->enter_suspend); 639 clear_bit(0, &dev->enter_suspend);
633 break; 640 break;
634 641
635 case S5P_FIMV_R2H_CMD_SLICE_DONE_RET: 642 case S5P_MFC_R2H_CMD_SLICE_DONE_RET:
636 case S5P_FIMV_R2H_CMD_FRAME_DONE_RET: 643 case S5P_MFC_R2H_CMD_FIELD_DONE_RET:
644 case S5P_MFC_R2H_CMD_FRAME_DONE_RET:
637 if (ctx->c_ops->post_frame_start) { 645 if (ctx->c_ops->post_frame_start) {
638 if (ctx->c_ops->post_frame_start(ctx)) 646 if (ctx->c_ops->post_frame_start(ctx))
639 mfc_err("post_frame_start() failed\n"); 647 mfc_err("post_frame_start() failed\n");
640 s5p_mfc_clear_int_flags(dev); 648 s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
641 wake_up_ctx(ctx, reason, err); 649 wake_up_ctx(ctx, reason, err);
642 if (test_and_clear_bit(0, &dev->hw_lock) == 0) 650 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
643 BUG(); 651 BUG();
644 s5p_mfc_clock_off(); 652 s5p_mfc_clock_off();
645 s5p_mfc_try_run(dev); 653 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
646 } else { 654 } else {
647 s5p_mfc_handle_frame(ctx, reason, err); 655 s5p_mfc_handle_frame(ctx, reason, err);
648 } 656 }
649 break; 657 break;
650 658
651 case S5P_FIMV_R2H_CMD_SEQ_DONE_RET: 659 case S5P_MFC_R2H_CMD_SEQ_DONE_RET:
652 s5p_mfc_handle_seq_done(ctx, reason, err); 660 s5p_mfc_handle_seq_done(ctx, reason, err);
653 break; 661 break;
654 662
655 case S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET: 663 case S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET:
656 ctx->inst_no = s5p_mfc_get_inst_no(); 664 ctx->inst_no = s5p_mfc_hw_call(dev->mfc_ops, get_inst_no, dev);
657 ctx->state = MFCINST_GOT_INST; 665 ctx->state = MFCINST_GOT_INST;
658 clear_work_bit(ctx); 666 clear_work_bit(ctx);
659 wake_up(&ctx->queue); 667 wake_up(&ctx->queue);
660 goto irq_cleanup_hw; 668 goto irq_cleanup_hw;
661 669
662 case S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET: 670 case S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET:
663 clear_work_bit(ctx); 671 clear_work_bit(ctx);
664 ctx->state = MFCINST_FREE; 672 ctx->state = MFCINST_FREE;
665 wake_up(&ctx->queue); 673 wake_up(&ctx->queue);
666 goto irq_cleanup_hw; 674 goto irq_cleanup_hw;
667 675
668 case S5P_FIMV_R2H_CMD_SYS_INIT_RET: 676 case S5P_MFC_R2H_CMD_SYS_INIT_RET:
669 case S5P_FIMV_R2H_CMD_FW_STATUS_RET: 677 case S5P_MFC_R2H_CMD_FW_STATUS_RET:
670 case S5P_FIMV_R2H_CMD_SLEEP_RET: 678 case S5P_MFC_R2H_CMD_SLEEP_RET:
671 case S5P_FIMV_R2H_CMD_WAKEUP_RET: 679 case S5P_MFC_R2H_CMD_WAKEUP_RET:
672 if (ctx) 680 if (ctx)
673 clear_work_bit(ctx); 681 clear_work_bit(ctx);
674 s5p_mfc_clear_int_flags(dev); 682 s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
675 wake_up_dev(dev, reason, err); 683 wake_up_dev(dev, reason, err);
676 clear_bit(0, &dev->hw_lock); 684 clear_bit(0, &dev->hw_lock);
677 clear_bit(0, &dev->enter_suspend); 685 clear_bit(0, &dev->enter_suspend);
678 break; 686 break;
679 687
680 case S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET: 688 case S5P_MFC_R2H_CMD_INIT_BUFFERS_RET:
681 s5p_mfc_handle_init_buffers(ctx, reason, err); 689 s5p_mfc_handle_init_buffers(ctx, reason, err);
682 break; 690 break;
683 691
684 case S5P_FIMV_R2H_CMD_ENC_COMPLETE_RET: 692 case S5P_MFC_R2H_CMD_COMPLETE_SEQ_RET:
685 s5p_mfc_handle_stream_complete(ctx, reason, err); 693 s5p_mfc_handle_stream_complete(ctx, reason, err);
686 break; 694 break;
687 695
688 default: 696 default:
689 mfc_debug(2, "Unknown int reason\n"); 697 mfc_debug(2, "Unknown int reason\n");
690 s5p_mfc_clear_int_flags(dev); 698 s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
691 } 699 }
692 mfc_debug_leave(); 700 mfc_debug_leave();
693 return IRQ_HANDLED; 701 return IRQ_HANDLED;
694irq_cleanup_hw: 702irq_cleanup_hw:
695 s5p_mfc_clear_int_flags(dev); 703 s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
696 ctx->int_type = reason; 704 ctx->int_type = reason;
697 ctx->int_err = err; 705 ctx->int_err = err;
698 ctx->int_cond = 1; 706 ctx->int_cond = 1;
@@ -701,7 +709,7 @@ irq_cleanup_hw:
701 709
702 s5p_mfc_clock_off(); 710 s5p_mfc_clock_off();
703 711
704 s5p_mfc_try_run(dev); 712 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
705 mfc_debug(2, "Exit via irq_cleanup_hw\n"); 713 mfc_debug(2, "Exit via irq_cleanup_hw\n");
706 return IRQ_HANDLED; 714 return IRQ_HANDLED;
707} 715}
@@ -749,6 +757,7 @@ static int s5p_mfc_open(struct file *file)
749 if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) { 757 if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
750 ctx->type = MFCINST_DECODER; 758 ctx->type = MFCINST_DECODER;
751 ctx->c_ops = get_dec_codec_ops(); 759 ctx->c_ops = get_dec_codec_ops();
760 s5p_mfc_dec_init(ctx);
752 /* Setup ctrl handler */ 761 /* Setup ctrl handler */
753 ret = s5p_mfc_dec_ctrls_setup(ctx); 762 ret = s5p_mfc_dec_ctrls_setup(ctx);
754 if (ret) { 763 if (ret) {
@@ -761,6 +770,7 @@ static int s5p_mfc_open(struct file *file)
761 /* only for encoder */ 770 /* only for encoder */
762 INIT_LIST_HEAD(&ctx->ref_queue); 771 INIT_LIST_HEAD(&ctx->ref_queue);
763 ctx->ref_queue_cnt = 0; 772 ctx->ref_queue_cnt = 0;
773 s5p_mfc_enc_init(ctx);
764 /* Setup ctrl handler */ 774 /* Setup ctrl handler */
765 ret = s5p_mfc_enc_ctrls_setup(ctx); 775 ret = s5p_mfc_enc_ctrls_setup(ctx);
766 if (ret) { 776 if (ret) {
@@ -886,19 +896,20 @@ static int s5p_mfc_release(struct file *file)
886 ctx->state = MFCINST_RETURN_INST; 896 ctx->state = MFCINST_RETURN_INST;
887 set_work_bit_irqsave(ctx); 897 set_work_bit_irqsave(ctx);
888 s5p_mfc_clean_ctx_int_flags(ctx); 898 s5p_mfc_clean_ctx_int_flags(ctx);
889 s5p_mfc_try_run(dev); 899 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
890 /* Wait until instance is returned or timeout occured */ 900 /* Wait until instance is returned or timeout occured */
891 if (s5p_mfc_wait_for_done_ctx 901 if (s5p_mfc_wait_for_done_ctx
892 (ctx, S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET, 0)) { 902 (ctx, S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)) {
893 s5p_mfc_clock_off(); 903 s5p_mfc_clock_off();
894 mfc_err("Err returning instance\n"); 904 mfc_err("Err returning instance\n");
895 } 905 }
896 mfc_debug(2, "After free instance\n"); 906 mfc_debug(2, "After free instance\n");
897 /* Free resources */ 907 /* Free resources */
898 s5p_mfc_release_codec_buffers(ctx); 908 s5p_mfc_hw_call(dev->mfc_ops, release_codec_buffers, ctx);
899 s5p_mfc_release_instance_buffer(ctx); 909 s5p_mfc_hw_call(dev->mfc_ops, release_instance_buffer, ctx);
900 if (ctx->type == MFCINST_DECODER) 910 if (ctx->type == MFCINST_DECODER)
901 s5p_mfc_release_dec_desc_buffer(ctx); 911 s5p_mfc_hw_call(dev->mfc_ops, release_dec_desc_buffer,
912 ctx);
902 913
903 ctx->inst_no = MFC_NO_INSTANCE_SET; 914 ctx->inst_no = MFC_NO_INSTANCE_SET;
904 } 915 }
@@ -910,6 +921,7 @@ static int s5p_mfc_release(struct file *file)
910 mfc_debug(2, "Last instance - release firmware\n"); 921 mfc_debug(2, "Last instance - release firmware\n");
911 /* reset <-> F/W release */ 922 /* reset <-> F/W release */
912 s5p_mfc_reset(dev); 923 s5p_mfc_reset(dev);
924 s5p_mfc_deinit_hw(dev);
913 s5p_mfc_release_firmware(dev); 925 s5p_mfc_release_firmware(dev);
914 del_timer_sync(&dev->watchdog_timer); 926 del_timer_sync(&dev->watchdog_timer);
915 if (s5p_mfc_power_off() < 0) 927 if (s5p_mfc_power_off() < 0)
@@ -1041,6 +1053,9 @@ static int s5p_mfc_probe(struct platform_device *pdev)
1041 return -ENODEV; 1053 return -ENODEV;
1042 } 1054 }
1043 1055
1056 dev->variant = (struct s5p_mfc_variant *)
1057 platform_get_device_id(pdev)->driver_data;
1058
1044 ret = s5p_mfc_init_pm(dev); 1059 ret = s5p_mfc_init_pm(dev);
1045 if (ret < 0) { 1060 if (ret < 0) {
1046 dev_err(&pdev->dev, "failed to get mfc clock source\n"); 1061 dev_err(&pdev->dev, "failed to get mfc clock source\n");
@@ -1076,6 +1091,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
1076 ret = -ENODEV; 1091 ret = -ENODEV;
1077 goto err_res; 1092 goto err_res;
1078 } 1093 }
1094
1079 dev->mem_dev_r = device_find_child(&dev->plat_dev->dev, "s5p-mfc-r", 1095 dev->mem_dev_r = device_find_child(&dev->plat_dev->dev, "s5p-mfc-r",
1080 match_child); 1096 match_child);
1081 if (!dev->mem_dev_r) { 1097 if (!dev->mem_dev_r) {
@@ -1139,6 +1155,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
1139 vfd->release = video_device_release, 1155 vfd->release = video_device_release,
1140 vfd->lock = &dev->mfc_mutex; 1156 vfd->lock = &dev->mfc_mutex;
1141 vfd->v4l2_dev = &dev->v4l2_dev; 1157 vfd->v4l2_dev = &dev->v4l2_dev;
1158 vfd->vfl_dir = VFL_DIR_M2M;
1142 snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_ENC_NAME); 1159 snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_ENC_NAME);
1143 dev->vfd_enc = vfd; 1160 dev->vfd_enc = vfd;
1144 ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); 1161 ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
@@ -1160,6 +1177,10 @@ static int s5p_mfc_probe(struct platform_device *pdev)
1160 dev->watchdog_timer.data = (unsigned long)dev; 1177 dev->watchdog_timer.data = (unsigned long)dev;
1161 dev->watchdog_timer.function = s5p_mfc_watchdog; 1178 dev->watchdog_timer.function = s5p_mfc_watchdog;
1162 1179
1180 /* Initialize HW ops and commands based on MFC version */
1181 s5p_mfc_init_hw_ops(dev);
1182 s5p_mfc_init_hw_cmds(dev);
1183
1163 pr_debug("%s--\n", __func__); 1184 pr_debug("%s--\n", __func__);
1164 return 0; 1185 return 0;
1165 1186
@@ -1280,9 +1301,78 @@ static const struct dev_pm_ops s5p_mfc_pm_ops = {
1280 NULL) 1301 NULL)
1281}; 1302};
1282 1303
1304struct s5p_mfc_buf_size_v5 mfc_buf_size_v5 = {
1305 .h264_ctx = MFC_H264_CTX_BUF_SIZE,
1306 .non_h264_ctx = MFC_CTX_BUF_SIZE,
1307 .dsc = DESC_BUF_SIZE,
1308 .shm = SHARED_BUF_SIZE,
1309};
1310
1311struct s5p_mfc_buf_size buf_size_v5 = {
1312 .fw = MAX_FW_SIZE,
1313 .cpb = MAX_CPB_SIZE,
1314 .priv = &mfc_buf_size_v5,
1315};
1316
1317struct s5p_mfc_buf_align mfc_buf_align_v5 = {
1318 .base = MFC_BASE_ALIGN_ORDER,
1319};
1320
1321static struct s5p_mfc_variant mfc_drvdata_v5 = {
1322 .version = MFC_VERSION,
1323 .port_num = MFC_NUM_PORTS,
1324 .buf_size = &buf_size_v5,
1325 .buf_align = &mfc_buf_align_v5,
1326 .mclk_name = "sclk_mfc",
1327 .fw_name = "s5p-mfc.fw",
1328};
1329
1330struct s5p_mfc_buf_size_v6 mfc_buf_size_v6 = {
1331 .dev_ctx = MFC_CTX_BUF_SIZE_V6,
1332 .h264_dec_ctx = MFC_H264_DEC_CTX_BUF_SIZE_V6,
1333 .other_dec_ctx = MFC_OTHER_DEC_CTX_BUF_SIZE_V6,
1334 .h264_enc_ctx = MFC_H264_ENC_CTX_BUF_SIZE_V6,
1335 .other_enc_ctx = MFC_OTHER_ENC_CTX_BUF_SIZE_V6,
1336};
1337
1338struct s5p_mfc_buf_size buf_size_v6 = {
1339 .fw = MAX_FW_SIZE_V6,
1340 .cpb = MAX_CPB_SIZE_V6,
1341 .priv = &mfc_buf_size_v6,
1342};
1343
1344struct s5p_mfc_buf_align mfc_buf_align_v6 = {
1345 .base = 0,
1346};
1347
1348static struct s5p_mfc_variant mfc_drvdata_v6 = {
1349 .version = MFC_VERSION_V6,
1350 .port_num = MFC_NUM_PORTS_V6,
1351 .buf_size = &buf_size_v6,
1352 .buf_align = &mfc_buf_align_v6,
1353 .mclk_name = "aclk_333",
1354 .fw_name = "s5p-mfc-v6.fw",
1355};
1356
1357static struct platform_device_id mfc_driver_ids[] = {
1358 {
1359 .name = "s5p-mfc",
1360 .driver_data = (unsigned long)&mfc_drvdata_v5,
1361 }, {
1362 .name = "s5p-mfc-v5",
1363 .driver_data = (unsigned long)&mfc_drvdata_v5,
1364 }, {
1365 .name = "s5p-mfc-v6",
1366 .driver_data = (unsigned long)&mfc_drvdata_v6,
1367 },
1368 {},
1369};
1370MODULE_DEVICE_TABLE(platform, mfc_driver_ids);
1371
1283static struct platform_driver s5p_mfc_driver = { 1372static struct platform_driver s5p_mfc_driver = {
1284 .probe = s5p_mfc_probe, 1373 .probe = s5p_mfc_probe,
1285 .remove = __devexit_p(s5p_mfc_remove), 1374 .remove = __devexit_p(s5p_mfc_remove),
1375 .id_table = mfc_driver_ids,
1286 .driver = { 1376 .driver = {
1287 .name = S5P_MFC_NAME, 1377 .name = S5P_MFC_NAME,
1288 .owner = THIS_MODULE, 1378 .owner = THIS_MODULE,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c
index 91a415573bd2..f0a41c95df84 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c 2 * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c
3 * 3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd. 4 * Copyright (C) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/ 5 * http://www.samsung.com/
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -10,111 +10,20 @@
10 * (at your option) any later version. 10 * (at your option) any later version.
11 */ 11 */
12 12
13#include "regs-mfc.h"
14#include "s5p_mfc_cmd.h" 13#include "s5p_mfc_cmd.h"
15#include "s5p_mfc_common.h" 14#include "s5p_mfc_common.h"
16#include "s5p_mfc_debug.h" 15#include "s5p_mfc_debug.h"
16#include "s5p_mfc_cmd_v5.h"
17#include "s5p_mfc_cmd_v6.h"
17 18
18/* This function is used to send a command to the MFC */ 19static struct s5p_mfc_hw_cmds *s5p_mfc_cmds;
19static int s5p_mfc_cmd_host2risc(struct s5p_mfc_dev *dev, int cmd,
20 struct s5p_mfc_cmd_args *args)
21{
22 int cur_cmd;
23 unsigned long timeout;
24
25 timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
26 /* wait until host to risc command register becomes 'H2R_CMD_EMPTY' */
27 do {
28 if (time_after(jiffies, timeout)) {
29 mfc_err("Timeout while waiting for hardware\n");
30 return -EIO;
31 }
32 cur_cmd = mfc_read(dev, S5P_FIMV_HOST2RISC_CMD);
33 } while (cur_cmd != S5P_FIMV_H2R_CMD_EMPTY);
34 mfc_write(dev, args->arg[0], S5P_FIMV_HOST2RISC_ARG1);
35 mfc_write(dev, args->arg[1], S5P_FIMV_HOST2RISC_ARG2);
36 mfc_write(dev, args->arg[2], S5P_FIMV_HOST2RISC_ARG3);
37 mfc_write(dev, args->arg[3], S5P_FIMV_HOST2RISC_ARG4);
38 /* Issue the command */
39 mfc_write(dev, cmd, S5P_FIMV_HOST2RISC_CMD);
40 return 0;
41}
42
43/* Initialize the MFC */
44int s5p_mfc_sys_init_cmd(struct s5p_mfc_dev *dev)
45{
46 struct s5p_mfc_cmd_args h2r_args;
47
48 memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
49 h2r_args.arg[0] = dev->fw_size;
50 return s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_SYS_INIT, &h2r_args);
51}
52
53/* Suspend the MFC hardware */
54int s5p_mfc_sleep_cmd(struct s5p_mfc_dev *dev)
55{
56 struct s5p_mfc_cmd_args h2r_args;
57
58 memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
59 return s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_SLEEP, &h2r_args);
60}
61 20
62/* Wake up the MFC hardware */ 21void s5p_mfc_init_hw_cmds(struct s5p_mfc_dev *dev)
63int s5p_mfc_wakeup_cmd(struct s5p_mfc_dev *dev)
64{ 22{
65 struct s5p_mfc_cmd_args h2r_args; 23 if (IS_MFCV6(dev))
24 s5p_mfc_cmds = s5p_mfc_init_hw_cmds_v6();
25 else
26 s5p_mfc_cmds = s5p_mfc_init_hw_cmds_v5();
66 27
67 memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args)); 28 dev->mfc_cmds = s5p_mfc_cmds;
68 return s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_WAKEUP, &h2r_args);
69} 29}
70
71
72int s5p_mfc_open_inst_cmd(struct s5p_mfc_ctx *ctx)
73{
74 struct s5p_mfc_dev *dev = ctx->dev;
75 struct s5p_mfc_cmd_args h2r_args;
76 int ret;
77
78 /* Preparing decoding - getting instance number */
79 mfc_debug(2, "Getting instance number (codec: %d)\n", ctx->codec_mode);
80 dev->curr_ctx = ctx->num;
81 memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
82 h2r_args.arg[0] = ctx->codec_mode;
83 h2r_args.arg[1] = 0; /* no crc & no pixelcache */
84 h2r_args.arg[2] = ctx->ctx_ofs;
85 h2r_args.arg[3] = ctx->ctx_size;
86 ret = s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_OPEN_INSTANCE,
87 &h2r_args);
88 if (ret) {
89 mfc_err("Failed to create a new instance\n");
90 ctx->state = MFCINST_ERROR;
91 }
92 return ret;
93}
94
95int s5p_mfc_close_inst_cmd(struct s5p_mfc_ctx *ctx)
96{
97 struct s5p_mfc_dev *dev = ctx->dev;
98 struct s5p_mfc_cmd_args h2r_args;
99 int ret;
100
101 if (ctx->state == MFCINST_FREE) {
102 mfc_err("Instance already returned\n");
103 ctx->state = MFCINST_ERROR;
104 return -EINVAL;
105 }
106 /* Closing decoding instance */
107 mfc_debug(2, "Returning instance number %d\n", ctx->inst_no);
108 dev->curr_ctx = ctx->num;
109 memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
110 h2r_args.arg[0] = ctx->inst_no;
111 ret = s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_CLOSE_INSTANCE,
112 &h2r_args);
113 if (ret) {
114 mfc_err("Failed to return an instance\n");
115 ctx->state = MFCINST_ERROR;
116 return -EINVAL;
117 }
118 return 0;
119}
120
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.h b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.h
index 8b090d3723e7..282e6c780702 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.h 2 * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.h
3 * 3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd. 4 * Copyright (C) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/ 5 * http://www.samsung.com/
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -21,10 +21,15 @@ struct s5p_mfc_cmd_args {
21 unsigned int arg[MAX_H2R_ARG]; 21 unsigned int arg[MAX_H2R_ARG];
22}; 22};
23 23
24int s5p_mfc_sys_init_cmd(struct s5p_mfc_dev *dev); 24struct s5p_mfc_hw_cmds {
25int s5p_mfc_sleep_cmd(struct s5p_mfc_dev *dev); 25 int (*cmd_host2risc)(struct s5p_mfc_dev *dev, int cmd,
26int s5p_mfc_wakeup_cmd(struct s5p_mfc_dev *dev); 26 struct s5p_mfc_cmd_args *args);
27int s5p_mfc_open_inst_cmd(struct s5p_mfc_ctx *ctx); 27 int (*sys_init_cmd)(struct s5p_mfc_dev *dev);
28int s5p_mfc_close_inst_cmd(struct s5p_mfc_ctx *ctx); 28 int (*sleep_cmd)(struct s5p_mfc_dev *dev);
29 int (*wakeup_cmd)(struct s5p_mfc_dev *dev);
30 int (*open_inst_cmd)(struct s5p_mfc_ctx *ctx);
31 int (*close_inst_cmd)(struct s5p_mfc_ctx *ctx);
32};
29 33
34void s5p_mfc_init_hw_cmds(struct s5p_mfc_dev *dev);
30#endif /* S5P_MFC_CMD_H_ */ 35#endif /* S5P_MFC_CMD_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
new file mode 100644
index 000000000000..138778083c63
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
@@ -0,0 +1,166 @@
1/*
2 * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#include "regs-mfc.h"
14#include "s5p_mfc_cmd.h"
15#include "s5p_mfc_common.h"
16#include "s5p_mfc_debug.h"
17
18/* This function is used to send a command to the MFC */
19int s5p_mfc_cmd_host2risc_v5(struct s5p_mfc_dev *dev, int cmd,
20 struct s5p_mfc_cmd_args *args)
21{
22 int cur_cmd;
23 unsigned long timeout;
24
25 timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
26 /* wait until host to risc command register becomes 'H2R_CMD_EMPTY' */
27 do {
28 if (time_after(jiffies, timeout)) {
29 mfc_err("Timeout while waiting for hardware\n");
30 return -EIO;
31 }
32 cur_cmd = mfc_read(dev, S5P_FIMV_HOST2RISC_CMD);
33 } while (cur_cmd != S5P_FIMV_H2R_CMD_EMPTY);
34 mfc_write(dev, args->arg[0], S5P_FIMV_HOST2RISC_ARG1);
35 mfc_write(dev, args->arg[1], S5P_FIMV_HOST2RISC_ARG2);
36 mfc_write(dev, args->arg[2], S5P_FIMV_HOST2RISC_ARG3);
37 mfc_write(dev, args->arg[3], S5P_FIMV_HOST2RISC_ARG4);
38 /* Issue the command */
39 mfc_write(dev, cmd, S5P_FIMV_HOST2RISC_CMD);
40 return 0;
41}
42
43/* Initialize the MFC */
44int s5p_mfc_sys_init_cmd_v5(struct s5p_mfc_dev *dev)
45{
46 struct s5p_mfc_cmd_args h2r_args;
47
48 memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
49 h2r_args.arg[0] = dev->fw_size;
50 return s5p_mfc_cmd_host2risc_v5(dev, S5P_FIMV_H2R_CMD_SYS_INIT,
51 &h2r_args);
52}
53
54/* Suspend the MFC hardware */
55int s5p_mfc_sleep_cmd_v5(struct s5p_mfc_dev *dev)
56{
57 struct s5p_mfc_cmd_args h2r_args;
58
59 memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
60 return s5p_mfc_cmd_host2risc_v5(dev, S5P_FIMV_H2R_CMD_SLEEP, &h2r_args);
61}
62
63/* Wake up the MFC hardware */
64int s5p_mfc_wakeup_cmd_v5(struct s5p_mfc_dev *dev)
65{
66 struct s5p_mfc_cmd_args h2r_args;
67
68 memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
69 return s5p_mfc_cmd_host2risc_v5(dev, S5P_FIMV_H2R_CMD_WAKEUP,
70 &h2r_args);
71}
72
73
74int s5p_mfc_open_inst_cmd_v5(struct s5p_mfc_ctx *ctx)
75{
76 struct s5p_mfc_dev *dev = ctx->dev;
77 struct s5p_mfc_cmd_args h2r_args;
78 int ret;
79
80 /* Preparing decoding - getting instance number */
81 mfc_debug(2, "Getting instance number (codec: %d)\n", ctx->codec_mode);
82 dev->curr_ctx = ctx->num;
83 memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
84 switch (ctx->codec_mode) {
85 case S5P_MFC_CODEC_H264_DEC:
86 h2r_args.arg[0] = S5P_FIMV_CODEC_H264_DEC;
87 break;
88 case S5P_MFC_CODEC_VC1_DEC:
89 h2r_args.arg[0] = S5P_FIMV_CODEC_VC1_DEC;
90 break;
91 case S5P_MFC_CODEC_MPEG4_DEC:
92 h2r_args.arg[0] = S5P_FIMV_CODEC_MPEG4_DEC;
93 break;
94 case S5P_MFC_CODEC_MPEG2_DEC:
95 h2r_args.arg[0] = S5P_FIMV_CODEC_MPEG2_DEC;
96 break;
97 case S5P_MFC_CODEC_H263_DEC:
98 h2r_args.arg[0] = S5P_FIMV_CODEC_H263_DEC;
99 break;
100 case S5P_MFC_CODEC_VC1RCV_DEC:
101 h2r_args.arg[0] = S5P_FIMV_CODEC_VC1RCV_DEC;
102 break;
103 case S5P_MFC_CODEC_H264_ENC:
104 h2r_args.arg[0] = S5P_FIMV_CODEC_H264_ENC;
105 break;
106 case S5P_MFC_CODEC_MPEG4_ENC:
107 h2r_args.arg[0] = S5P_FIMV_CODEC_MPEG4_ENC;
108 break;
109 case S5P_MFC_CODEC_H263_ENC:
110 h2r_args.arg[0] = S5P_FIMV_CODEC_H263_ENC;
111 break;
112 default:
113 h2r_args.arg[0] = S5P_FIMV_CODEC_NONE;
114 };
115 h2r_args.arg[1] = 0; /* no crc & no pixelcache */
116 h2r_args.arg[2] = ctx->ctx.ofs;
117 h2r_args.arg[3] = ctx->ctx.size;
118 ret = s5p_mfc_cmd_host2risc_v5(dev, S5P_FIMV_H2R_CMD_OPEN_INSTANCE,
119 &h2r_args);
120 if (ret) {
121 mfc_err("Failed to create a new instance\n");
122 ctx->state = MFCINST_ERROR;
123 }
124 return ret;
125}
126
127int s5p_mfc_close_inst_cmd_v5(struct s5p_mfc_ctx *ctx)
128{
129 struct s5p_mfc_dev *dev = ctx->dev;
130 struct s5p_mfc_cmd_args h2r_args;
131 int ret;
132
133 if (ctx->state == MFCINST_FREE) {
134 mfc_err("Instance already returned\n");
135 ctx->state = MFCINST_ERROR;
136 return -EINVAL;
137 }
138 /* Closing decoding instance */
139 mfc_debug(2, "Returning instance number %d\n", ctx->inst_no);
140 dev->curr_ctx = ctx->num;
141 memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
142 h2r_args.arg[0] = ctx->inst_no;
143 ret = s5p_mfc_cmd_host2risc_v5(dev, S5P_FIMV_H2R_CMD_CLOSE_INSTANCE,
144 &h2r_args);
145 if (ret) {
146 mfc_err("Failed to return an instance\n");
147 ctx->state = MFCINST_ERROR;
148 return -EINVAL;
149 }
150 return 0;
151}
152
153/* Initialize cmd function pointers for MFC v5 */
154static struct s5p_mfc_hw_cmds s5p_mfc_cmds_v5 = {
155 .cmd_host2risc = s5p_mfc_cmd_host2risc_v5,
156 .sys_init_cmd = s5p_mfc_sys_init_cmd_v5,
157 .sleep_cmd = s5p_mfc_sleep_cmd_v5,
158 .wakeup_cmd = s5p_mfc_wakeup_cmd_v5,
159 .open_inst_cmd = s5p_mfc_open_inst_cmd_v5,
160 .close_inst_cmd = s5p_mfc_close_inst_cmd_v5,
161};
162
163struct s5p_mfc_hw_cmds *s5p_mfc_init_hw_cmds_v5(void)
164{
165 return &s5p_mfc_cmds_v5;
166}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.h b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.h
new file mode 100644
index 000000000000..6928a5514c1b
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.h
@@ -0,0 +1,20 @@
1/*
2 * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.h
3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#ifndef S5P_MFC_CMD_V5_H_
14#define S5P_MFC_CMD_V5_H_
15
16#include "s5p_mfc_common.h"
17
18struct s5p_mfc_hw_cmds *s5p_mfc_init_hw_cmds_v5(void);
19
20#endif /* S5P_MFC_CMD_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
new file mode 100644
index 000000000000..754bfbcb1c43
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
@@ -0,0 +1,156 @@
1/*
2 * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
3 *
4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#include "s5p_mfc_common.h"
14
15#include "s5p_mfc_cmd.h"
16#include "s5p_mfc_debug.h"
17#include "s5p_mfc_intr.h"
18#include "s5p_mfc_opr.h"
19
20int s5p_mfc_cmd_host2risc_v6(struct s5p_mfc_dev *dev, int cmd,
21 struct s5p_mfc_cmd_args *args)
22{
23 mfc_debug(2, "Issue the command: %d\n", cmd);
24
25 /* Reset RISC2HOST command */
26 mfc_write(dev, 0x0, S5P_FIMV_RISC2HOST_CMD_V6);
27
28 /* Issue the command */
29 mfc_write(dev, cmd, S5P_FIMV_HOST2RISC_CMD_V6);
30 mfc_write(dev, 0x1, S5P_FIMV_HOST2RISC_INT_V6);
31
32 return 0;
33}
34
35int s5p_mfc_sys_init_cmd_v6(struct s5p_mfc_dev *dev)
36{
37 struct s5p_mfc_cmd_args h2r_args;
38 struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;
39
40 s5p_mfc_hw_call(dev->mfc_ops, alloc_dev_context_buffer, dev);
41 mfc_write(dev, dev->ctx_buf.dma, S5P_FIMV_CONTEXT_MEM_ADDR_V6);
42 mfc_write(dev, buf_size->dev_ctx, S5P_FIMV_CONTEXT_MEM_SIZE_V6);
43 return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SYS_INIT_V6,
44 &h2r_args);
45}
46
47int s5p_mfc_sleep_cmd_v6(struct s5p_mfc_dev *dev)
48{
49 struct s5p_mfc_cmd_args h2r_args;
50
51 memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
52 return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SLEEP_V6,
53 &h2r_args);
54}
55
56int s5p_mfc_wakeup_cmd_v6(struct s5p_mfc_dev *dev)
57{
58 struct s5p_mfc_cmd_args h2r_args;
59
60 memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
61 return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_WAKEUP_V6,
62 &h2r_args);
63}
64
65/* Open a new instance and get its number */
66int s5p_mfc_open_inst_cmd_v6(struct s5p_mfc_ctx *ctx)
67{
68 struct s5p_mfc_dev *dev = ctx->dev;
69 struct s5p_mfc_cmd_args h2r_args;
70 int codec_type;
71
72 mfc_debug(2, "Requested codec mode: %d\n", ctx->codec_mode);
73 dev->curr_ctx = ctx->num;
74 switch (ctx->codec_mode) {
75 case S5P_MFC_CODEC_H264_DEC:
76 codec_type = S5P_FIMV_CODEC_H264_DEC_V6;
77 break;
78 case S5P_MFC_CODEC_H264_MVC_DEC:
79 codec_type = S5P_FIMV_CODEC_H264_MVC_DEC_V6;
80 break;
81 case S5P_MFC_CODEC_VC1_DEC:
82 codec_type = S5P_FIMV_CODEC_VC1_DEC_V6;
83 break;
84 case S5P_MFC_CODEC_MPEG4_DEC:
85 codec_type = S5P_FIMV_CODEC_MPEG4_DEC_V6;
86 break;
87 case S5P_MFC_CODEC_MPEG2_DEC:
88 codec_type = S5P_FIMV_CODEC_MPEG2_DEC_V6;
89 break;
90 case S5P_MFC_CODEC_H263_DEC:
91 codec_type = S5P_FIMV_CODEC_H263_DEC_V6;
92 break;
93 case S5P_MFC_CODEC_VC1RCV_DEC:
94 codec_type = S5P_FIMV_CODEC_VC1RCV_DEC_V6;
95 break;
96 case S5P_MFC_CODEC_VP8_DEC:
97 codec_type = S5P_FIMV_CODEC_VP8_DEC_V6;
98 break;
99 case S5P_MFC_CODEC_H264_ENC:
100 codec_type = S5P_FIMV_CODEC_H264_ENC_V6;
101 break;
102 case S5P_MFC_CODEC_H264_MVC_ENC:
103 codec_type = S5P_FIMV_CODEC_H264_MVC_ENC_V6;
104 break;
105 case S5P_MFC_CODEC_MPEG4_ENC:
106 codec_type = S5P_FIMV_CODEC_MPEG4_ENC_V6;
107 break;
108 case S5P_MFC_CODEC_H263_ENC:
109 codec_type = S5P_FIMV_CODEC_H263_ENC_V6;
110 break;
111 default:
112 codec_type = S5P_FIMV_CODEC_NONE_V6;
113 };
114 mfc_write(dev, codec_type, S5P_FIMV_CODEC_TYPE_V6);
115 mfc_write(dev, ctx->ctx.dma, S5P_FIMV_CONTEXT_MEM_ADDR_V6);
116 mfc_write(dev, ctx->ctx.size, S5P_FIMV_CONTEXT_MEM_SIZE_V6);
117 mfc_write(dev, 0, S5P_FIMV_D_CRC_CTRL_V6); /* no crc */
118
119 return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_OPEN_INSTANCE_V6,
120 &h2r_args);
121}
122
123/* Close instance */
124int s5p_mfc_close_inst_cmd_v6(struct s5p_mfc_ctx *ctx)
125{
126 struct s5p_mfc_dev *dev = ctx->dev;
127 struct s5p_mfc_cmd_args h2r_args;
128 int ret = 0;
129
130 dev->curr_ctx = ctx->num;
131 if (ctx->state != MFCINST_FREE) {
132 mfc_write(dev, ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6);
133 ret = s5p_mfc_cmd_host2risc_v6(dev,
134 S5P_FIMV_H2R_CMD_CLOSE_INSTANCE_V6,
135 &h2r_args);
136 } else {
137 ret = -EINVAL;
138 }
139
140 return ret;
141}
142
143/* Initialize cmd function pointers for MFC v6 */
144static struct s5p_mfc_hw_cmds s5p_mfc_cmds_v6 = {
145 .cmd_host2risc = s5p_mfc_cmd_host2risc_v6,
146 .sys_init_cmd = s5p_mfc_sys_init_cmd_v6,
147 .sleep_cmd = s5p_mfc_sleep_cmd_v6,
148 .wakeup_cmd = s5p_mfc_wakeup_cmd_v6,
149 .open_inst_cmd = s5p_mfc_open_inst_cmd_v6,
150 .close_inst_cmd = s5p_mfc_close_inst_cmd_v6,
151};
152
153struct s5p_mfc_hw_cmds *s5p_mfc_init_hw_cmds_v6(void)
154{
155 return &s5p_mfc_cmds_v6;
156}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.h b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.h
new file mode 100644
index 000000000000..b7a8e57837b5
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.h
@@ -0,0 +1,20 @@
1/*
2 * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.h
3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#ifndef S5P_MFC_CMD_V6_H_
14#define S5P_MFC_CMD_V6_H_
15
16#include "s5p_mfc_common.h"
17
18struct s5p_mfc_hw_cmds *s5p_mfc_init_hw_cmds_v6(void);
19
20#endif /* S5P_MFC_CMD_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index 519b0d66d8d1..f02e0497ca98 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -16,13 +16,14 @@
16#ifndef S5P_MFC_COMMON_H_ 16#ifndef S5P_MFC_COMMON_H_
17#define S5P_MFC_COMMON_H_ 17#define S5P_MFC_COMMON_H_
18 18
19#include "regs-mfc.h"
20#include <linux/platform_device.h> 19#include <linux/platform_device.h>
21#include <linux/videodev2.h> 20#include <linux/videodev2.h>
22#include <media/v4l2-ctrls.h> 21#include <media/v4l2-ctrls.h>
23#include <media/v4l2-device.h> 22#include <media/v4l2-device.h>
24#include <media/v4l2-ioctl.h> 23#include <media/v4l2-ioctl.h>
25#include <media/videobuf2-core.h> 24#include <media/videobuf2-core.h>
25#include "regs-mfc.h"
26#include "regs-mfc-v6.h"
26 27
27/* Definitions related to MFC memory */ 28/* Definitions related to MFC memory */
28 29
@@ -30,17 +31,6 @@
30* while mmaping */ 31* while mmaping */
31#define DST_QUEUE_OFF_BASE (TASK_SIZE / 2) 32#define DST_QUEUE_OFF_BASE (TASK_SIZE / 2)
32 33
33/* Offset used by the hardware to store addresses */
34#define MFC_OFFSET_SHIFT 11
35
36#define FIRMWARE_ALIGN 0x20000 /* 128KB */
37#define MFC_H264_CTX_BUF_SIZE 0x96000 /* 600KB per H264 instance */
38#define MFC_CTX_BUF_SIZE 0x2800 /* 10KB per instance */
39#define DESC_BUF_SIZE 0x20000 /* 128KB for DESC buffer */
40#define SHARED_BUF_SIZE 0x2000 /* 8KB for shared buffer */
41
42#define DEF_CPB_SIZE 0x40000 /* 512KB */
43
44#define MFC_BANK1_ALLOC_CTX 0 34#define MFC_BANK1_ALLOC_CTX 0
45#define MFC_BANK2_ALLOC_CTX 1 35#define MFC_BANK2_ALLOC_CTX 1
46 36
@@ -74,7 +64,40 @@ static inline dma_addr_t s5p_mfc_mem_cookie(void *a, void *b)
74#define MFC_ENC_CAP_PLANE_COUNT 1 64#define MFC_ENC_CAP_PLANE_COUNT 1
75#define MFC_ENC_OUT_PLANE_COUNT 2 65#define MFC_ENC_OUT_PLANE_COUNT 2
76#define STUFF_BYTE 4 66#define STUFF_BYTE 4
77#define MFC_MAX_CTRLS 64 67#define MFC_MAX_CTRLS 70
68
69#define S5P_MFC_CODEC_NONE -1
70#define S5P_MFC_CODEC_H264_DEC 0
71#define S5P_MFC_CODEC_H264_MVC_DEC 1
72#define S5P_MFC_CODEC_VC1_DEC 2
73#define S5P_MFC_CODEC_MPEG4_DEC 3
74#define S5P_MFC_CODEC_MPEG2_DEC 4
75#define S5P_MFC_CODEC_H263_DEC 5
76#define S5P_MFC_CODEC_VC1RCV_DEC 6
77#define S5P_MFC_CODEC_VP8_DEC 7
78
79#define S5P_MFC_CODEC_H264_ENC 20
80#define S5P_MFC_CODEC_H264_MVC_ENC 21
81#define S5P_MFC_CODEC_MPEG4_ENC 22
82#define S5P_MFC_CODEC_H263_ENC 23
83
84#define S5P_MFC_R2H_CMD_EMPTY 0
85#define S5P_MFC_R2H_CMD_SYS_INIT_RET 1
86#define S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET 2
87#define S5P_MFC_R2H_CMD_SEQ_DONE_RET 3
88#define S5P_MFC_R2H_CMD_INIT_BUFFERS_RET 4
89#define S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET 6
90#define S5P_MFC_R2H_CMD_SLEEP_RET 7
91#define S5P_MFC_R2H_CMD_WAKEUP_RET 8
92#define S5P_MFC_R2H_CMD_COMPLETE_SEQ_RET 9
93#define S5P_MFC_R2H_CMD_DPB_FLUSH_RET 10
94#define S5P_MFC_R2H_CMD_NAL_ABORT_RET 11
95#define S5P_MFC_R2H_CMD_FW_STATUS_RET 12
96#define S5P_MFC_R2H_CMD_FRAME_DONE_RET 13
97#define S5P_MFC_R2H_CMD_FIELD_DONE_RET 14
98#define S5P_MFC_R2H_CMD_SLICE_DONE_RET 15
99#define S5P_MFC_R2H_CMD_ENC_BUFFER_FUL_RET 16
100#define S5P_MFC_R2H_CMD_ERR_RET 32
78 101
79#define mfc_read(dev, offset) readl(dev->regs_base + (offset)) 102#define mfc_read(dev, offset) readl(dev->regs_base + (offset))
80#define mfc_write(dev, data, offset) writel((data), dev->regs_base + \ 103#define mfc_write(dev, data, offset) writel((data), dev->regs_base + \
@@ -177,6 +200,58 @@ struct s5p_mfc_pm {
177 struct device *device; 200 struct device *device;
178}; 201};
179 202
203struct s5p_mfc_buf_size_v5 {
204 unsigned int h264_ctx;
205 unsigned int non_h264_ctx;
206 unsigned int dsc;
207 unsigned int shm;
208};
209
210struct s5p_mfc_buf_size_v6 {
211 unsigned int dev_ctx;
212 unsigned int h264_dec_ctx;
213 unsigned int other_dec_ctx;
214 unsigned int h264_enc_ctx;
215 unsigned int other_enc_ctx;
216};
217
218struct s5p_mfc_buf_size {
219 unsigned int fw;
220 unsigned int cpb;
221 void *priv;
222};
223
224struct s5p_mfc_buf_align {
225 unsigned int base;
226};
227
228struct s5p_mfc_variant {
229 unsigned int version;
230 unsigned int port_num;
231 struct s5p_mfc_buf_size *buf_size;
232 struct s5p_mfc_buf_align *buf_align;
233 char *mclk_name;
234 char *fw_name;
235};
236
237/**
238 * struct s5p_mfc_priv_buf - represents internal used buffer
239 * @alloc: allocation-specific context for each buffer
240 * (videobuf2 allocator)
241 * @ofs: offset of each buffer, will be used for MFC
242 * @virt: kernel virtual address, only valid when the
243 * buffer accessed by driver
244 * @dma: DMA address, only valid when kernel DMA API used
245 * @size: size of the buffer
246 */
247struct s5p_mfc_priv_buf {
248 void *alloc;
249 unsigned long ofs;
250 void *virt;
251 dma_addr_t dma;
252 size_t size;
253};
254
180/** 255/**
181 * struct s5p_mfc_dev - The struct containing driver internal parameters. 256 * struct s5p_mfc_dev - The struct containing driver internal parameters.
182 * 257 *
@@ -191,6 +266,7 @@ struct s5p_mfc_pm {
191 * @dec_ctrl_handler: control framework handler for decoding 266 * @dec_ctrl_handler: control framework handler for decoding
192 * @enc_ctrl_handler: control framework handler for encoding 267 * @enc_ctrl_handler: control framework handler for encoding
193 * @pm: power management control 268 * @pm: power management control
269 * @variant: MFC hardware variant information
194 * @num_inst: couter of active MFC instances 270 * @num_inst: couter of active MFC instances
195 * @irqlock: lock for operations on videobuf2 queues 271 * @irqlock: lock for operations on videobuf2 queues
196 * @condlock: lock for changing/checking if a context is ready to be 272 * @condlock: lock for changing/checking if a context is ready to be
@@ -212,6 +288,10 @@ struct s5p_mfc_pm {
212 * @watchdog_work: worker for the watchdog 288 * @watchdog_work: worker for the watchdog
213 * @alloc_ctx: videobuf2 allocator contexts for two memory banks 289 * @alloc_ctx: videobuf2 allocator contexts for two memory banks
214 * @enter_suspend: flag set when entering suspend 290 * @enter_suspend: flag set when entering suspend
291 * @ctx_buf: common context memory (MFCv6)
292 * @warn_start: hardware error code from which warnings start
293 * @mfc_ops: ops structure holding HW operation function pointers
294 * @mfc_cmds: cmd structure holding HW commands function pointers
215 * 295 *
216 */ 296 */
217struct s5p_mfc_dev { 297struct s5p_mfc_dev {
@@ -226,6 +306,7 @@ struct s5p_mfc_dev {
226 struct v4l2_ctrl_handler dec_ctrl_handler; 306 struct v4l2_ctrl_handler dec_ctrl_handler;
227 struct v4l2_ctrl_handler enc_ctrl_handler; 307 struct v4l2_ctrl_handler enc_ctrl_handler;
228 struct s5p_mfc_pm pm; 308 struct s5p_mfc_pm pm;
309 struct s5p_mfc_variant *variant;
229 int num_inst; 310 int num_inst;
230 spinlock_t irqlock; /* lock when operating on videobuf2 queues */ 311 spinlock_t irqlock; /* lock when operating on videobuf2 queues */
231 spinlock_t condlock; /* lock when changing/checking if a context is 312 spinlock_t condlock; /* lock when changing/checking if a context is
@@ -248,6 +329,11 @@ struct s5p_mfc_dev {
248 struct work_struct watchdog_work; 329 struct work_struct watchdog_work;
249 void *alloc_ctx[2]; 330 void *alloc_ctx[2];
250 unsigned long enter_suspend; 331 unsigned long enter_suspend;
332
333 struct s5p_mfc_priv_buf ctx_buf;
334 int warn_start;
335 struct s5p_mfc_hw_ops *mfc_ops;
336 struct s5p_mfc_hw_cmds *mfc_cmds;
251}; 337};
252 338
253/** 339/**
@@ -262,7 +348,6 @@ struct s5p_mfc_h264_enc_params {
262 u8 max_ref_pic; 348 u8 max_ref_pic;
263 u8 num_ref_pic_4p; 349 u8 num_ref_pic_4p;
264 int _8x8_transform; 350 int _8x8_transform;
265 int rc_mb;
266 int rc_mb_dark; 351 int rc_mb_dark;
267 int rc_mb_smooth; 352 int rc_mb_smooth;
268 int rc_mb_static; 353 int rc_mb_static;
@@ -281,6 +366,23 @@ struct s5p_mfc_h264_enc_params {
281 enum v4l2_mpeg_video_h264_level level_v4l2; 366 enum v4l2_mpeg_video_h264_level level_v4l2;
282 int level; 367 int level;
283 u16 cpb_size; 368 u16 cpb_size;
369 int interlace;
370 u8 hier_qp;
371 u8 hier_qp_type;
372 u8 hier_qp_layer;
373 u8 hier_qp_layer_qp[7];
374 u8 sei_frame_packing;
375 u8 sei_fp_curr_frame_0;
376 u8 sei_fp_arrangement_type;
377
378 u8 fmo;
379 u8 fmo_map_type;
380 u8 fmo_slice_grp;
381 u8 fmo_chg_dir;
382 u32 fmo_chg_rate;
383 u32 fmo_run_len[4];
384 u8 aso;
385 u32 aso_slice_order[8];
284}; 386};
285 387
286/** 388/**
@@ -319,9 +421,11 @@ struct s5p_mfc_enc_params {
319 u8 pad_cb; 421 u8 pad_cb;
320 u8 pad_cr; 422 u8 pad_cr;
321 int rc_frame; 423 int rc_frame;
424 int rc_mb;
322 u32 rc_bitrate; 425 u32 rc_bitrate;
323 u16 rc_reaction_coeff; 426 u16 rc_reaction_coeff;
324 u16 vbv_size; 427 u16 vbv_size;
428 u32 vbv_delay;
325 429
326 enum v4l2_mpeg_video_header_mode seq_hdr_mode; 430 enum v4l2_mpeg_video_header_mode seq_hdr_mode;
327 enum v4l2_mpeg_mfc51_video_frame_skip_mode frame_skip_mode; 431 enum v4l2_mpeg_mfc51_video_frame_skip_mode frame_skip_mode;
@@ -330,7 +434,6 @@ struct s5p_mfc_enc_params {
330 u8 num_b_frame; 434 u8 num_b_frame;
331 u32 rc_framerate_num; 435 u32 rc_framerate_num;
332 u32 rc_framerate_denom; 436 u32 rc_framerate_denom;
333 int interlace;
334 437
335 union { 438 union {
336 struct s5p_mfc_h264_enc_params h264; 439 struct s5p_mfc_h264_enc_params h264;
@@ -388,6 +491,8 @@ struct s5p_mfc_codec_ops {
388 * decoding buffer 491 * decoding buffer
389 * @dpb_flush_flag: flag used to indicate that a DPB buffers are being 492 * @dpb_flush_flag: flag used to indicate that a DPB buffers are being
390 * flushed 493 * flushed
494 * @head_processed: flag mentioning whether the header data is processed
495 * completely or not
391 * @bank1_buf: handle to memory allocated for temporary buffers from 496 * @bank1_buf: handle to memory allocated for temporary buffers from
392 * memory bank 1 497 * memory bank 1
393 * @bank1_phys: address of the temporary buffers from memory bank 1 498 * @bank1_phys: address of the temporary buffers from memory bank 1
@@ -412,19 +517,20 @@ struct s5p_mfc_codec_ops {
412 * @display_delay_enable: display delay for H264 enable flag 517 * @display_delay_enable: display delay for H264 enable flag
413 * @after_packed_pb: flag used to track buffer when stream is in 518 * @after_packed_pb: flag used to track buffer when stream is in
414 * Packed PB format 519 * Packed PB format
520 * @sei_fp_parse: enable/disable parsing of frame packing SEI information
415 * @dpb_count: count of the DPB buffers required by MFC hw 521 * @dpb_count: count of the DPB buffers required by MFC hw
416 * @total_dpb_count: count of DPB buffers with additional buffers 522 * @total_dpb_count: count of DPB buffers with additional buffers
417 * requested by the application 523 * requested by the application
418 * @ctx_buf: handle to the memory associated with this context 524 * @ctx: context buffer information
419 * @ctx_phys: address of the memory associated with this context 525 * @dsc: descriptor buffer information
420 * @ctx_size: size of the memory associated with this context 526 * @shm: shared memory buffer information
421 * @desc_buf: description buffer for decoding handle 527 * @mv_count: number of MV buffers allocated for decoding
422 * @desc_phys: description buffer for decoding address
423 * @shm_alloc: handle for the shared memory buffer
424 * @shm: virtual address for the shared memory buffer
425 * @shm_ofs: address offset for shared memory
426 * @enc_params: encoding parameters for MFC 528 * @enc_params: encoding parameters for MFC
427 * @enc_dst_buf_size: size of the buffers for encoder output 529 * @enc_dst_buf_size: size of the buffers for encoder output
530 * @luma_dpb_size: dpb buffer size for luma
531 * @chroma_dpb_size: dpb buffer size for chroma
532 * @me_buffer_size: size of the motion estimation buffer
533 * @tmv_buffer_size: size of temporal predictor motion vector buffer
428 * @frame_type: used to force the type of the next encoded frame 534 * @frame_type: used to force the type of the next encoded frame
429 * @ref_queue: list of the reference buffers for encoding 535 * @ref_queue: list of the reference buffers for encoding
430 * @ref_queue_cnt: number of the buffers in the reference list 536 * @ref_queue_cnt: number of the buffers in the reference list
@@ -473,6 +579,7 @@ struct s5p_mfc_ctx {
473 unsigned long consumed_stream; 579 unsigned long consumed_stream;
474 580
475 unsigned int dpb_flush_flag; 581 unsigned int dpb_flush_flag;
582 unsigned int head_processed;
476 583
477 /* Buffers */ 584 /* Buffers */
478 void *bank1_buf; 585 void *bank1_buf;
@@ -502,37 +609,41 @@ struct s5p_mfc_ctx {
502 int display_delay; 609 int display_delay;
503 int display_delay_enable; 610 int display_delay_enable;
504 int after_packed_pb; 611 int after_packed_pb;
612 int sei_fp_parse;
505 613
506 int dpb_count; 614 int dpb_count;
507 int total_dpb_count; 615 int total_dpb_count;
508 616 int mv_count;
509 /* Buffers */ 617 /* Buffers */
510 void *ctx_buf; 618 struct s5p_mfc_priv_buf ctx;
511 size_t ctx_phys; 619 struct s5p_mfc_priv_buf dsc;
512 size_t ctx_ofs; 620 struct s5p_mfc_priv_buf shm;
513 size_t ctx_size;
514
515 void *desc_buf;
516 size_t desc_phys;
517
518
519 void *shm_alloc;
520 void *shm;
521 size_t shm_ofs;
522 621
523 struct s5p_mfc_enc_params enc_params; 622 struct s5p_mfc_enc_params enc_params;
524 623
525 size_t enc_dst_buf_size; 624 size_t enc_dst_buf_size;
625 size_t luma_dpb_size;
626 size_t chroma_dpb_size;
627 size_t me_buffer_size;
628 size_t tmv_buffer_size;
526 629
527 enum v4l2_mpeg_mfc51_video_force_frame_type force_frame_type; 630 enum v4l2_mpeg_mfc51_video_force_frame_type force_frame_type;
528 631
529 struct list_head ref_queue; 632 struct list_head ref_queue;
530 unsigned int ref_queue_cnt; 633 unsigned int ref_queue_cnt;
531 634
635 enum v4l2_mpeg_video_multi_slice_mode slice_mode;
636 union {
637 unsigned int mb;
638 unsigned int bits;
639 } slice_size;
640
532 struct s5p_mfc_codec_ops *c_ops; 641 struct s5p_mfc_codec_ops *c_ops;
533 642
534 struct v4l2_ctrl *ctrls[MFC_MAX_CTRLS]; 643 struct v4l2_ctrl *ctrls[MFC_MAX_CTRLS];
535 struct v4l2_ctrl_handler ctrl_handler; 644 struct v4l2_ctrl_handler ctrl_handler;
645 unsigned int frame_tag;
646 size_t scratch_buf_size;
536}; 647};
537 648
538/* 649/*
@@ -565,6 +676,9 @@ struct mfc_control {
565 __u8 is_volatile; 676 __u8 is_volatile;
566}; 677};
567 678
679/* Macro for making hardware specific calls */
680#define s5p_mfc_hw_call(f, op, args...) \
681 ((f && f->op) ? f->op(args) : -ENODEV)
568 682
569#define fh_to_ctx(__fh) container_of(__fh, struct s5p_mfc_ctx, fh) 683#define fh_to_ctx(__fh) container_of(__fh, struct s5p_mfc_ctx, fh)
570#define ctrl_to_ctx(__ctrl) \ 684#define ctrl_to_ctx(__ctrl) \
@@ -575,4 +689,9 @@ void set_work_bit(struct s5p_mfc_ctx *ctx);
575void clear_work_bit_irqsave(struct s5p_mfc_ctx *ctx); 689void clear_work_bit_irqsave(struct s5p_mfc_ctx *ctx);
576void set_work_bit_irqsave(struct s5p_mfc_ctx *ctx); 690void set_work_bit_irqsave(struct s5p_mfc_ctx *ctx);
577 691
692#define HAS_PORTNUM(dev) (dev ? (dev->variant ? \
693 (dev->variant->port_num ? 1 : 0) : 0) : 0)
694#define IS_TWOPORT(dev) (dev->variant->port_num == 2 ? 1 : 0)
695#define IS_MFCV6(dev) (dev->variant->version >= 0x60 ? 1 : 0)
696
578#endif /* S5P_MFC_COMMON_H_ */ 697#endif /* S5P_MFC_COMMON_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
index 0deba6bc687c..585b7b0ed8ec 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -15,11 +15,11 @@
15#include <linux/firmware.h> 15#include <linux/firmware.h>
16#include <linux/jiffies.h> 16#include <linux/jiffies.h>
17#include <linux/sched.h> 17#include <linux/sched.h>
18#include "regs-mfc.h"
19#include "s5p_mfc_cmd.h" 18#include "s5p_mfc_cmd.h"
20#include "s5p_mfc_common.h" 19#include "s5p_mfc_common.h"
21#include "s5p_mfc_debug.h" 20#include "s5p_mfc_debug.h"
22#include "s5p_mfc_intr.h" 21#include "s5p_mfc_intr.h"
22#include "s5p_mfc_opr.h"
23#include "s5p_mfc_pm.h" 23#include "s5p_mfc_pm.h"
24 24
25static void *s5p_mfc_bitproc_buf; 25static void *s5p_mfc_bitproc_buf;
@@ -37,13 +37,19 @@ int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
37 /* Firmare has to be present as a separate file or compiled 37 /* Firmare has to be present as a separate file or compiled
38 * into kernel. */ 38 * into kernel. */
39 mfc_debug_enter(); 39 mfc_debug_enter();
40
40 err = request_firmware((const struct firmware **)&fw_blob, 41 err = request_firmware((const struct firmware **)&fw_blob,
41 "s5p-mfc.fw", dev->v4l2_dev.dev); 42 dev->variant->fw_name, dev->v4l2_dev.dev);
42 if (err != 0) { 43 if (err != 0) {
43 mfc_err("Firmware is not present in the /lib/firmware directory nor compiled in kernel\n"); 44 mfc_err("Firmware is not present in the /lib/firmware directory nor compiled in kernel\n");
44 return -EINVAL; 45 return -EINVAL;
45 } 46 }
46 dev->fw_size = ALIGN(fw_blob->size, FIRMWARE_ALIGN); 47 dev->fw_size = dev->variant->buf_size->fw;
48 if (fw_blob->size > dev->fw_size) {
49 mfc_err("MFC firmware is too big to be loaded\n");
50 release_firmware(fw_blob);
51 return -ENOMEM;
52 }
47 if (s5p_mfc_bitproc_buf) { 53 if (s5p_mfc_bitproc_buf) {
48 mfc_err("Attempting to allocate firmware when it seems that it is already loaded\n"); 54 mfc_err("Attempting to allocate firmware when it seems that it is already loaded\n");
49 release_firmware(fw_blob); 55 release_firmware(fw_blob);
@@ -77,32 +83,37 @@ int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
77 return -EIO; 83 return -EIO;
78 } 84 }
79 dev->bank1 = s5p_mfc_bitproc_phys; 85 dev->bank1 = s5p_mfc_bitproc_phys;
80 b_base = vb2_dma_contig_memops.alloc( 86 if (HAS_PORTNUM(dev) && IS_TWOPORT(dev)) {
81 dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], 1 << MFC_BASE_ALIGN_ORDER); 87 b_base = vb2_dma_contig_memops.alloc(
82 if (IS_ERR(b_base)) { 88 dev->alloc_ctx[MFC_BANK2_ALLOC_CTX],
83 vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf); 89 1 << MFC_BASE_ALIGN_ORDER);
84 s5p_mfc_bitproc_phys = 0; 90 if (IS_ERR(b_base)) {
85 s5p_mfc_bitproc_buf = NULL; 91 vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
86 mfc_err("Allocating bank2 base failed\n"); 92 s5p_mfc_bitproc_phys = 0;
87 release_firmware(fw_blob); 93 s5p_mfc_bitproc_buf = NULL;
88 return -ENOMEM; 94 mfc_err("Allocating bank2 base failed\n");
89 } 95 release_firmware(fw_blob);
90 bank2_base_phys = s5p_mfc_mem_cookie( 96 return -ENOMEM;
91 dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], b_base); 97 }
92 vb2_dma_contig_memops.put(b_base); 98 bank2_base_phys = s5p_mfc_mem_cookie(
93 if (bank2_base_phys & ((1 << MFC_BASE_ALIGN_ORDER) - 1)) { 99 dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], b_base);
94 mfc_err("The base memory for bank 2 is not aligned to 128KB\n"); 100 vb2_dma_contig_memops.put(b_base);
95 vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf); 101 if (bank2_base_phys & ((1 << MFC_BASE_ALIGN_ORDER) - 1)) {
96 s5p_mfc_bitproc_phys = 0; 102 mfc_err("The base memory for bank 2 is not aligned to 128KB\n");
97 s5p_mfc_bitproc_buf = NULL; 103 vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
98 release_firmware(fw_blob); 104 s5p_mfc_bitproc_phys = 0;
99 return -EIO; 105 s5p_mfc_bitproc_buf = NULL;
106 release_firmware(fw_blob);
107 return -EIO;
108 }
109 /* Valid buffers passed to MFC encoder with LAST_FRAME command
110 * should not have address of bank2 - MFC will treat it as a null frame.
111 * To avoid such situation we set bank2 address below the pool address.
112 */
113 dev->bank2 = bank2_base_phys - (1 << MFC_BASE_ALIGN_ORDER);
114 } else {
115 dev->bank2 = dev->bank1;
100 } 116 }
101 /* Valid buffers passed to MFC encoder with LAST_FRAME command
102 * should not have address of bank2 - MFC will treat it as a null frame.
103 * To avoid such situation we set bank2 address below the pool address.
104 */
105 dev->bank2 = bank2_base_phys - (1 << MFC_BASE_ALIGN_ORDER);
106 memcpy(s5p_mfc_bitproc_virt, fw_blob->data, fw_blob->size); 117 memcpy(s5p_mfc_bitproc_virt, fw_blob->data, fw_blob->size);
107 wmb(); 118 wmb();
108 release_firmware(fw_blob); 119 release_firmware(fw_blob);
@@ -119,8 +130,9 @@ int s5p_mfc_reload_firmware(struct s5p_mfc_dev *dev)
119 /* Firmare has to be present as a separate file or compiled 130 /* Firmare has to be present as a separate file or compiled
120 * into kernel. */ 131 * into kernel. */
121 mfc_debug_enter(); 132 mfc_debug_enter();
133
122 err = request_firmware((const struct firmware **)&fw_blob, 134 err = request_firmware((const struct firmware **)&fw_blob,
123 "s5p-mfc.fw", dev->v4l2_dev.dev); 135 dev->variant->fw_name, dev->v4l2_dev.dev);
124 if (err != 0) { 136 if (err != 0) {
125 mfc_err("Firmware is not present in the /lib/firmware directory nor compiled in kernel\n"); 137 mfc_err("Firmware is not present in the /lib/firmware directory nor compiled in kernel\n");
126 return -EINVAL; 138 return -EINVAL;
@@ -161,46 +173,81 @@ int s5p_mfc_reset(struct s5p_mfc_dev *dev)
161{ 173{
162 unsigned int mc_status; 174 unsigned int mc_status;
163 unsigned long timeout; 175 unsigned long timeout;
176 int i;
164 177
165 mfc_debug_enter(); 178 mfc_debug_enter();
166 /* Stop procedure */
167 /* reset RISC */
168 mfc_write(dev, 0x3f6, S5P_FIMV_SW_RESET);
169 /* All reset except for MC */
170 mfc_write(dev, 0x3e2, S5P_FIMV_SW_RESET);
171 mdelay(10);
172
173 timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
174 /* Check MC status */
175 do {
176 if (time_after(jiffies, timeout)) {
177 mfc_err("Timeout while resetting MFC\n");
178 return -EIO;
179 }
180 179
181 mc_status = mfc_read(dev, S5P_FIMV_MC_STATUS); 180 if (IS_MFCV6(dev)) {
181 /* Reset IP */
182 /* except RISC, reset */
183 mfc_write(dev, 0xFEE, S5P_FIMV_MFC_RESET_V6);
184 /* reset release */
185 mfc_write(dev, 0x0, S5P_FIMV_MFC_RESET_V6);
186
187 /* Zero Initialization of MFC registers */
188 mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD_V6);
189 mfc_write(dev, 0, S5P_FIMV_HOST2RISC_CMD_V6);
190 mfc_write(dev, 0, S5P_FIMV_FW_VERSION_V6);
191
192 for (i = 0; i < S5P_FIMV_REG_CLEAR_COUNT_V6; i++)
193 mfc_write(dev, 0, S5P_FIMV_REG_CLEAR_BEGIN_V6 + (i*4));
182 194
183 } while (mc_status & 0x3); 195 /* Reset */
196 mfc_write(dev, 0, S5P_FIMV_RISC_ON_V6);
197 mfc_write(dev, 0x1FFF, S5P_FIMV_MFC_RESET_V6);
198 mfc_write(dev, 0, S5P_FIMV_MFC_RESET_V6);
199 } else {
200 /* Stop procedure */
201 /* reset RISC */
202 mfc_write(dev, 0x3f6, S5P_FIMV_SW_RESET);
203 /* All reset except for MC */
204 mfc_write(dev, 0x3e2, S5P_FIMV_SW_RESET);
205 mdelay(10);
206
207 timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
208 /* Check MC status */
209 do {
210 if (time_after(jiffies, timeout)) {
211 mfc_err("Timeout while resetting MFC\n");
212 return -EIO;
213 }
214
215 mc_status = mfc_read(dev, S5P_FIMV_MC_STATUS);
216
217 } while (mc_status & 0x3);
218
219 mfc_write(dev, 0x0, S5P_FIMV_SW_RESET);
220 mfc_write(dev, 0x3fe, S5P_FIMV_SW_RESET);
221 }
184 222
185 mfc_write(dev, 0x0, S5P_FIMV_SW_RESET);
186 mfc_write(dev, 0x3fe, S5P_FIMV_SW_RESET);
187 mfc_debug_leave(); 223 mfc_debug_leave();
188 return 0; 224 return 0;
189} 225}
190 226
191static inline void s5p_mfc_init_memctrl(struct s5p_mfc_dev *dev) 227static inline void s5p_mfc_init_memctrl(struct s5p_mfc_dev *dev)
192{ 228{
193 mfc_write(dev, dev->bank1, S5P_FIMV_MC_DRAMBASE_ADR_A); 229 if (IS_MFCV6(dev)) {
194 mfc_write(dev, dev->bank2, S5P_FIMV_MC_DRAMBASE_ADR_B); 230 mfc_write(dev, dev->bank1, S5P_FIMV_RISC_BASE_ADDRESS_V6);
195 mfc_debug(2, "Bank1: %08x, Bank2: %08x\n", dev->bank1, dev->bank2); 231 mfc_debug(2, "Base Address : %08x\n", dev->bank1);
232 } else {
233 mfc_write(dev, dev->bank1, S5P_FIMV_MC_DRAMBASE_ADR_A);
234 mfc_write(dev, dev->bank2, S5P_FIMV_MC_DRAMBASE_ADR_B);
235 mfc_debug(2, "Bank1: %08x, Bank2: %08x\n",
236 dev->bank1, dev->bank2);
237 }
196} 238}
197 239
198static inline void s5p_mfc_clear_cmds(struct s5p_mfc_dev *dev) 240static inline void s5p_mfc_clear_cmds(struct s5p_mfc_dev *dev)
199{ 241{
200 mfc_write(dev, 0xffffffff, S5P_FIMV_SI_CH0_INST_ID); 242 if (IS_MFCV6(dev)) {
201 mfc_write(dev, 0xffffffff, S5P_FIMV_SI_CH1_INST_ID); 243 /* Zero initialization should be done before RESET.
202 mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD); 244 * Nothing to do here. */
203 mfc_write(dev, 0, S5P_FIMV_HOST2RISC_CMD); 245 } else {
246 mfc_write(dev, 0xffffffff, S5P_FIMV_SI_CH0_INST_ID);
247 mfc_write(dev, 0xffffffff, S5P_FIMV_SI_CH1_INST_ID);
248 mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD);
249 mfc_write(dev, 0, S5P_FIMV_HOST2RISC_CMD);
250 }
204} 251}
205 252
206/* Initialize hardware */ 253/* Initialize hardware */
@@ -228,9 +275,12 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
228 s5p_mfc_clear_cmds(dev); 275 s5p_mfc_clear_cmds(dev);
229 /* 3. Release reset signal to the RISC */ 276 /* 3. Release reset signal to the RISC */
230 s5p_mfc_clean_dev_int_flags(dev); 277 s5p_mfc_clean_dev_int_flags(dev);
231 mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET); 278 if (IS_MFCV6(dev))
279 mfc_write(dev, 0x1, S5P_FIMV_RISC_ON_V6);
280 else
281 mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
232 mfc_debug(2, "Will now wait for completion of firmware transfer\n"); 282 mfc_debug(2, "Will now wait for completion of firmware transfer\n");
233 if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_FW_STATUS_RET)) { 283 if (s5p_mfc_wait_for_done_dev(dev, S5P_MFC_R2H_CMD_FW_STATUS_RET)) {
234 mfc_err("Failed to load firmware\n"); 284 mfc_err("Failed to load firmware\n");
235 s5p_mfc_reset(dev); 285 s5p_mfc_reset(dev);
236 s5p_mfc_clock_off(); 286 s5p_mfc_clock_off();
@@ -238,7 +288,7 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
238 } 288 }
239 s5p_mfc_clean_dev_int_flags(dev); 289 s5p_mfc_clean_dev_int_flags(dev);
240 /* 4. Initialize firmware */ 290 /* 4. Initialize firmware */
241 ret = s5p_mfc_sys_init_cmd(dev); 291 ret = s5p_mfc_hw_call(dev->mfc_cmds, sys_init_cmd, dev);
242 if (ret) { 292 if (ret) {
243 mfc_err("Failed to send command to MFC - timeout\n"); 293 mfc_err("Failed to send command to MFC - timeout\n");
244 s5p_mfc_reset(dev); 294 s5p_mfc_reset(dev);
@@ -246,7 +296,7 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
246 return ret; 296 return ret;
247 } 297 }
248 mfc_debug(2, "Ok, now will write a command to init the system\n"); 298 mfc_debug(2, "Ok, now will write a command to init the system\n");
249 if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_SYS_INIT_RET)) { 299 if (s5p_mfc_wait_for_done_dev(dev, S5P_MFC_R2H_CMD_SYS_INIT_RET)) {
250 mfc_err("Failed to load firmware\n"); 300 mfc_err("Failed to load firmware\n");
251 s5p_mfc_reset(dev); 301 s5p_mfc_reset(dev);
252 s5p_mfc_clock_off(); 302 s5p_mfc_clock_off();
@@ -254,7 +304,7 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
254 } 304 }
255 dev->int_cond = 0; 305 dev->int_cond = 0;
256 if (dev->int_err != 0 || dev->int_type != 306 if (dev->int_err != 0 || dev->int_type !=
257 S5P_FIMV_R2H_CMD_SYS_INIT_RET) { 307 S5P_MFC_R2H_CMD_SYS_INIT_RET) {
258 /* Failure. */ 308 /* Failure. */
259 mfc_err("Failed to init firmware - error: %d int: %d\n", 309 mfc_err("Failed to init firmware - error: %d int: %d\n",
260 dev->int_err, dev->int_type); 310 dev->int_err, dev->int_type);
@@ -262,7 +312,11 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
262 s5p_mfc_clock_off(); 312 s5p_mfc_clock_off();
263 return -EIO; 313 return -EIO;
264 } 314 }
265 ver = mfc_read(dev, S5P_FIMV_FW_VERSION); 315 if (IS_MFCV6(dev))
316 ver = mfc_read(dev, S5P_FIMV_FW_VERSION_V6);
317 else
318 ver = mfc_read(dev, S5P_FIMV_FW_VERSION);
319
266 mfc_debug(2, "MFC F/W version : %02xyy, %02xmm, %02xdd\n", 320 mfc_debug(2, "MFC F/W version : %02xyy, %02xmm, %02xdd\n",
267 (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF); 321 (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
268 s5p_mfc_clock_off(); 322 s5p_mfc_clock_off();
@@ -271,6 +325,17 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
271} 325}
272 326
273 327
328/* Deinitialize hardware */
329void s5p_mfc_deinit_hw(struct s5p_mfc_dev *dev)
330{
331 s5p_mfc_clock_on();
332
333 s5p_mfc_reset(dev);
334 s5p_mfc_hw_call(dev->mfc_ops, release_dev_context_buffer, dev);
335
336 s5p_mfc_clock_off();
337}
338
274int s5p_mfc_sleep(struct s5p_mfc_dev *dev) 339int s5p_mfc_sleep(struct s5p_mfc_dev *dev)
275{ 340{
276 int ret; 341 int ret;
@@ -278,19 +343,19 @@ int s5p_mfc_sleep(struct s5p_mfc_dev *dev)
278 mfc_debug_enter(); 343 mfc_debug_enter();
279 s5p_mfc_clock_on(); 344 s5p_mfc_clock_on();
280 s5p_mfc_clean_dev_int_flags(dev); 345 s5p_mfc_clean_dev_int_flags(dev);
281 ret = s5p_mfc_sleep_cmd(dev); 346 ret = s5p_mfc_hw_call(dev->mfc_cmds, sleep_cmd, dev);
282 if (ret) { 347 if (ret) {
283 mfc_err("Failed to send command to MFC - timeout\n"); 348 mfc_err("Failed to send command to MFC - timeout\n");
284 return ret; 349 return ret;
285 } 350 }
286 if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_SLEEP_RET)) { 351 if (s5p_mfc_wait_for_done_dev(dev, S5P_MFC_R2H_CMD_SLEEP_RET)) {
287 mfc_err("Failed to sleep\n"); 352 mfc_err("Failed to sleep\n");
288 return -EIO; 353 return -EIO;
289 } 354 }
290 s5p_mfc_clock_off(); 355 s5p_mfc_clock_off();
291 dev->int_cond = 0; 356 dev->int_cond = 0;
292 if (dev->int_err != 0 || dev->int_type != 357 if (dev->int_err != 0 || dev->int_type !=
293 S5P_FIMV_R2H_CMD_SLEEP_RET) { 358 S5P_MFC_R2H_CMD_SLEEP_RET) {
294 /* Failure. */ 359 /* Failure. */
295 mfc_err("Failed to sleep - error: %d int: %d\n", dev->int_err, 360 mfc_err("Failed to sleep - error: %d int: %d\n", dev->int_err,
296 dev->int_type); 361 dev->int_type);
@@ -320,22 +385,25 @@ int s5p_mfc_wakeup(struct s5p_mfc_dev *dev)
320 s5p_mfc_clear_cmds(dev); 385 s5p_mfc_clear_cmds(dev);
321 s5p_mfc_clean_dev_int_flags(dev); 386 s5p_mfc_clean_dev_int_flags(dev);
322 /* 3. Initialize firmware */ 387 /* 3. Initialize firmware */
323 ret = s5p_mfc_wakeup_cmd(dev); 388 ret = s5p_mfc_hw_call(dev->mfc_cmds, wakeup_cmd, dev);
324 if (ret) { 389 if (ret) {
325 mfc_err("Failed to send command to MFC - timeout\n"); 390 mfc_err("Failed to send command to MFC - timeout\n");
326 return ret; 391 return ret;
327 } 392 }
328 /* 4. Release reset signal to the RISC */ 393 /* 4. Release reset signal to the RISC */
329 mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET); 394 if (IS_MFCV6(dev))
395 mfc_write(dev, 0x1, S5P_FIMV_RISC_ON_V6);
396 else
397 mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
330 mfc_debug(2, "Ok, now will write a command to wakeup the system\n"); 398 mfc_debug(2, "Ok, now will write a command to wakeup the system\n");
331 if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_WAKEUP_RET)) { 399 if (s5p_mfc_wait_for_done_dev(dev, S5P_MFC_R2H_CMD_WAKEUP_RET)) {
332 mfc_err("Failed to load firmware\n"); 400 mfc_err("Failed to load firmware\n");
333 return -EIO; 401 return -EIO;
334 } 402 }
335 s5p_mfc_clock_off(); 403 s5p_mfc_clock_off();
336 dev->int_cond = 0; 404 dev->int_cond = 0;
337 if (dev->int_err != 0 || dev->int_type != 405 if (dev->int_err != 0 || dev->int_type !=
338 S5P_FIMV_R2H_CMD_WAKEUP_RET) { 406 S5P_MFC_R2H_CMD_WAKEUP_RET) {
339 /* Failure. */ 407 /* Failure. */
340 mfc_err("Failed to wakeup - error: %d int: %d\n", dev->int_err, 408 mfc_err("Failed to wakeup - error: %d int: %d\n", dev->int_err,
341 dev->int_type); 409 dev->int_type);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h
index e1e0c544b6a2..90aa9b9886d5 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h
@@ -20,6 +20,7 @@ int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev);
20int s5p_mfc_reload_firmware(struct s5p_mfc_dev *dev); 20int s5p_mfc_reload_firmware(struct s5p_mfc_dev *dev);
21 21
22int s5p_mfc_init_hw(struct s5p_mfc_dev *dev); 22int s5p_mfc_init_hw(struct s5p_mfc_dev *dev);
23void s5p_mfc_deinit_hw(struct s5p_mfc_dev *dev);
23 24
24int s5p_mfc_sleep(struct s5p_mfc_dev *dev); 25int s5p_mfc_sleep(struct s5p_mfc_dev *dev);
25int s5p_mfc_wakeup(struct s5p_mfc_dev *dev); 26int s5p_mfc_wakeup(struct s5p_mfc_dev *dev);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 6ee21bb71398..eb6a70b0f821 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -23,85 +23,114 @@
23#include <linux/workqueue.h> 23#include <linux/workqueue.h>
24#include <media/v4l2-ctrls.h> 24#include <media/v4l2-ctrls.h>
25#include <media/videobuf2-core.h> 25#include <media/videobuf2-core.h>
26#include "regs-mfc.h"
27#include "s5p_mfc_common.h" 26#include "s5p_mfc_common.h"
28#include "s5p_mfc_debug.h" 27#include "s5p_mfc_debug.h"
29#include "s5p_mfc_dec.h" 28#include "s5p_mfc_dec.h"
30#include "s5p_mfc_intr.h" 29#include "s5p_mfc_intr.h"
31#include "s5p_mfc_opr.h" 30#include "s5p_mfc_opr.h"
32#include "s5p_mfc_pm.h" 31#include "s5p_mfc_pm.h"
33#include "s5p_mfc_shm.h" 32
33#define DEF_SRC_FMT_DEC V4L2_PIX_FMT_H264
34#define DEF_DST_FMT_DEC V4L2_PIX_FMT_NV12MT_16X16
34 35
35static struct s5p_mfc_fmt formats[] = { 36static struct s5p_mfc_fmt formats[] = {
36 { 37 {
38 .name = "4:2:0 2 Planes 16x16 Tiles",
39 .fourcc = V4L2_PIX_FMT_NV12MT_16X16,
40 .codec_mode = S5P_MFC_CODEC_NONE,
41 .type = MFC_FMT_RAW,
42 .num_planes = 2,
43 },
44 {
37 .name = "4:2:0 2 Planes 64x32 Tiles", 45 .name = "4:2:0 2 Planes 64x32 Tiles",
38 .fourcc = V4L2_PIX_FMT_NV12MT, 46 .fourcc = V4L2_PIX_FMT_NV12MT,
39 .codec_mode = S5P_FIMV_CODEC_NONE, 47 .codec_mode = S5P_MFC_CODEC_NONE,
40 .type = MFC_FMT_RAW, 48 .type = MFC_FMT_RAW,
41 .num_planes = 2, 49 .num_planes = 2,
42 }, 50 },
43 { 51 {
44 .name = "4:2:0 2 Planes", 52 .name = "4:2:0 2 Planes Y/CbCr",
45 .fourcc = V4L2_PIX_FMT_NV12M, 53 .fourcc = V4L2_PIX_FMT_NV12M,
46 .codec_mode = S5P_FIMV_CODEC_NONE, 54 .codec_mode = S5P_MFC_CODEC_NONE,
47 .type = MFC_FMT_RAW, 55 .type = MFC_FMT_RAW,
48 .num_planes = 2, 56 .num_planes = 2,
57 },
58 {
59 .name = "4:2:0 2 Planes Y/CrCb",
60 .fourcc = V4L2_PIX_FMT_NV21M,
61 .codec_mode = S5P_MFC_CODEC_NONE,
62 .type = MFC_FMT_RAW,
63 .num_planes = 2,
49 }, 64 },
50 { 65 {
51 .name = "H264 Encoded Stream", 66 .name = "H264 Encoded Stream",
52 .fourcc = V4L2_PIX_FMT_H264, 67 .fourcc = V4L2_PIX_FMT_H264,
53 .codec_mode = S5P_FIMV_CODEC_H264_DEC, 68 .codec_mode = S5P_MFC_CODEC_H264_DEC,
54 .type = MFC_FMT_DEC, 69 .type = MFC_FMT_DEC,
55 .num_planes = 1, 70 .num_planes = 1,
56 }, 71 },
57 { 72 {
58 .name = "H263 Encoded Stream", 73 .name = "H264/MVC Encoded Stream",
59 .fourcc = V4L2_PIX_FMT_H263, 74 .fourcc = V4L2_PIX_FMT_H264_MVC,
60 .codec_mode = S5P_FIMV_CODEC_H263_DEC, 75 .codec_mode = S5P_MFC_CODEC_H264_MVC_DEC,
61 .type = MFC_FMT_DEC, 76 .type = MFC_FMT_DEC,
62 .num_planes = 1, 77 .num_planes = 1,
63 }, 78 },
64 { 79 {
65 .name = "MPEG1 Encoded Stream", 80 .name = "H263 Encoded Stream",
66 .fourcc = V4L2_PIX_FMT_MPEG1, 81 .fourcc = V4L2_PIX_FMT_H263,
67 .codec_mode = S5P_FIMV_CODEC_MPEG2_DEC, 82 .codec_mode = S5P_MFC_CODEC_H263_DEC,
68 .type = MFC_FMT_DEC, 83 .type = MFC_FMT_DEC,
69 .num_planes = 1, 84 .num_planes = 1,
70 }, 85 },
71 { 86 {
72 .name = "MPEG2 Encoded Stream", 87 .name = "MPEG1 Encoded Stream",
73 .fourcc = V4L2_PIX_FMT_MPEG2, 88 .fourcc = V4L2_PIX_FMT_MPEG1,
74 .codec_mode = S5P_FIMV_CODEC_MPEG2_DEC, 89 .codec_mode = S5P_MFC_CODEC_MPEG2_DEC,
75 .type = MFC_FMT_DEC, 90 .type = MFC_FMT_DEC,
76 .num_planes = 1, 91 .num_planes = 1,
77 }, 92 },
78 { 93 {
79 .name = "MPEG4 Encoded Stream", 94 .name = "MPEG2 Encoded Stream",
80 .fourcc = V4L2_PIX_FMT_MPEG4, 95 .fourcc = V4L2_PIX_FMT_MPEG2,
81 .codec_mode = S5P_FIMV_CODEC_MPEG4_DEC, 96 .codec_mode = S5P_MFC_CODEC_MPEG2_DEC,
82 .type = MFC_FMT_DEC, 97 .type = MFC_FMT_DEC,
83 .num_planes = 1, 98 .num_planes = 1,
84 }, 99 },
85 { 100 {
86 .name = "XviD Encoded Stream", 101 .name = "MPEG4 Encoded Stream",
87 .fourcc = V4L2_PIX_FMT_XVID, 102 .fourcc = V4L2_PIX_FMT_MPEG4,
88 .codec_mode = S5P_FIMV_CODEC_MPEG4_DEC, 103 .codec_mode = S5P_MFC_CODEC_MPEG4_DEC,
89 .type = MFC_FMT_DEC, 104 .type = MFC_FMT_DEC,
90 .num_planes = 1, 105 .num_planes = 1,
91 }, 106 },
92 { 107 {
93 .name = "VC1 Encoded Stream", 108 .name = "XviD Encoded Stream",
94 .fourcc = V4L2_PIX_FMT_VC1_ANNEX_G, 109 .fourcc = V4L2_PIX_FMT_XVID,
95 .codec_mode = S5P_FIMV_CODEC_VC1_DEC, 110 .codec_mode = S5P_MFC_CODEC_MPEG4_DEC,
96 .type = MFC_FMT_DEC, 111 .type = MFC_FMT_DEC,
97 .num_planes = 1, 112 .num_planes = 1,
98 }, 113 },
99 { 114 {
100 .name = "VC1 RCV Encoded Stream", 115 .name = "VC1 Encoded Stream",
101 .fourcc = V4L2_PIX_FMT_VC1_ANNEX_L, 116 .fourcc = V4L2_PIX_FMT_VC1_ANNEX_G,
102 .codec_mode = S5P_FIMV_CODEC_VC1RCV_DEC, 117 .codec_mode = S5P_MFC_CODEC_VC1_DEC,
103 .type = MFC_FMT_DEC, 118 .type = MFC_FMT_DEC,
104 .num_planes = 1, 119 .num_planes = 1,
120 },
121 {
122 .name = "VC1 RCV Encoded Stream",
123 .fourcc = V4L2_PIX_FMT_VC1_ANNEX_L,
124 .codec_mode = S5P_MFC_CODEC_VC1RCV_DEC,
125 .type = MFC_FMT_DEC,
126 .num_planes = 1,
127 },
128 {
129 .name = "VP8 Encoded Stream",
130 .fourcc = V4L2_PIX_FMT_VP8,
131 .codec_mode = S5P_MFC_CODEC_VP8_DEC,
132 .type = MFC_FMT_DEC,
133 .num_planes = 1,
105 }, 134 },
106}; 135};
107 136
@@ -297,7 +326,7 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
297 /* If the MFC is parsing the header, 326 /* If the MFC is parsing the header,
298 * so wait until it is finished */ 327 * so wait until it is finished */
299 s5p_mfc_clean_ctx_int_flags(ctx); 328 s5p_mfc_clean_ctx_int_flags(ctx);
300 s5p_mfc_wait_for_done_ctx(ctx, S5P_FIMV_R2H_CMD_SEQ_DONE_RET, 329 s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_SEQ_DONE_RET,
301 0); 330 0);
302 } 331 }
303 if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && 332 if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
@@ -342,21 +371,36 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
342/* Try format */ 371/* Try format */
343static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) 372static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
344{ 373{
374 struct s5p_mfc_dev *dev = video_drvdata(file);
345 struct s5p_mfc_fmt *fmt; 375 struct s5p_mfc_fmt *fmt;
346 376
347 if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { 377 mfc_debug(2, "Type is %d\n", f->type);
348 mfc_err("This node supports decoding only\n"); 378 if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
349 return -EINVAL; 379 fmt = find_format(f, MFC_FMT_DEC);
350 } 380 if (!fmt) {
351 fmt = find_format(f, MFC_FMT_DEC); 381 mfc_err("Unsupported format for source.\n");
352 if (!fmt) { 382 return -EINVAL;
353 mfc_err("Unsupported format\n"); 383 }
354 return -EINVAL; 384 if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) {
355 } 385 mfc_err("Not supported format.\n");
356 if (fmt->type != MFC_FMT_DEC) { 386 return -EINVAL;
357 mfc_err("\n"); 387 }
358 return -EINVAL; 388 } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
389 fmt = find_format(f, MFC_FMT_RAW);
390 if (!fmt) {
391 mfc_err("Unsupported format for destination.\n");
392 return -EINVAL;
393 }
394 if (IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) {
395 mfc_err("Not supported format.\n");
396 return -EINVAL;
397 } else if (!IS_MFCV6(dev) &&
398 (fmt->fourcc != V4L2_PIX_FMT_NV12MT)) {
399 mfc_err("Not supported format.\n");
400 return -EINVAL;
401 }
359 } 402 }
403
360 return 0; 404 return 0;
361} 405}
362 406
@@ -379,8 +423,29 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
379 ret = -EBUSY; 423 ret = -EBUSY;
380 goto out; 424 goto out;
381 } 425 }
426 if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
427 fmt = find_format(f, MFC_FMT_RAW);
428 if (!fmt) {
429 mfc_err("Unsupported format for source.\n");
430 return -EINVAL;
431 }
432 if (!IS_MFCV6(dev) && (fmt->fourcc != V4L2_PIX_FMT_NV12MT)) {
433 mfc_err("Not supported format.\n");
434 return -EINVAL;
435 } else if (IS_MFCV6(dev) &&
436 (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) {
437 mfc_err("Not supported format.\n");
438 return -EINVAL;
439 }
440 ctx->dst_fmt = fmt;
441 mfc_debug_leave();
442 return ret;
443 } else if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
444 mfc_err("Wrong type error for S_FMT : %d", f->type);
445 return -EINVAL;
446 }
382 fmt = find_format(f, MFC_FMT_DEC); 447 fmt = find_format(f, MFC_FMT_DEC);
383 if (!fmt || fmt->codec_mode == S5P_FIMV_CODEC_NONE) { 448 if (!fmt || fmt->codec_mode == S5P_MFC_CODEC_NONE) {
384 mfc_err("Unknown codec\n"); 449 mfc_err("Unknown codec\n");
385 ret = -EINVAL; 450 ret = -EINVAL;
386 goto out; 451 goto out;
@@ -391,6 +456,10 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
391 ret = -EINVAL; 456 ret = -EINVAL;
392 goto out; 457 goto out;
393 } 458 }
459 if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) {
460 mfc_err("Not supported format.\n");
461 return -EINVAL;
462 }
394 ctx->src_fmt = fmt; 463 ctx->src_fmt = fmt;
395 ctx->codec_mode = fmt->codec_mode; 464 ctx->codec_mode = fmt->codec_mode;
396 mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode); 465 mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode);
@@ -476,7 +545,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
476 return -ENOMEM; 545 return -ENOMEM;
477 } 546 }
478 ctx->total_dpb_count = reqbufs->count; 547 ctx->total_dpb_count = reqbufs->count;
479 ret = s5p_mfc_alloc_codec_buffers(ctx); 548 ret = s5p_mfc_hw_call(dev->mfc_ops, alloc_codec_buffers, ctx);
480 if (ret) { 549 if (ret) {
481 mfc_err("Failed to allocate decoding buffers\n"); 550 mfc_err("Failed to allocate decoding buffers\n");
482 reqbufs->count = 0; 551 reqbufs->count = 0;
@@ -492,15 +561,16 @@ static int vidioc_reqbufs(struct file *file, void *priv,
492 reqbufs->count = 0; 561 reqbufs->count = 0;
493 s5p_mfc_clock_on(); 562 s5p_mfc_clock_on();
494 ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); 563 ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
495 s5p_mfc_release_codec_buffers(ctx); 564 s5p_mfc_hw_call(dev->mfc_ops, release_codec_buffers,
565 ctx);
496 s5p_mfc_clock_off(); 566 s5p_mfc_clock_off();
497 return -ENOMEM; 567 return -ENOMEM;
498 } 568 }
499 if (s5p_mfc_ctx_ready(ctx)) 569 if (s5p_mfc_ctx_ready(ctx))
500 set_work_bit_irqsave(ctx); 570 set_work_bit_irqsave(ctx);
501 s5p_mfc_try_run(dev); 571 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
502 s5p_mfc_wait_for_done_ctx(ctx, 572 s5p_mfc_wait_for_done_ctx(ctx,
503 S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET, 0); 573 S5P_MFC_R2H_CMD_INIT_BUFFERS_RET, 0);
504 } 574 }
505 return ret; 575 return ret;
506} 576}
@@ -582,18 +652,22 @@ static int vidioc_streamon(struct file *file, void *priv,
582 ctx->src_bufs_cnt = 0; 652 ctx->src_bufs_cnt = 0;
583 ctx->capture_state = QUEUE_FREE; 653 ctx->capture_state = QUEUE_FREE;
584 ctx->output_state = QUEUE_FREE; 654 ctx->output_state = QUEUE_FREE;
585 s5p_mfc_alloc_instance_buffer(ctx); 655 s5p_mfc_hw_call(dev->mfc_ops, alloc_instance_buffer,
586 s5p_mfc_alloc_dec_temp_buffers(ctx); 656 ctx);
657 s5p_mfc_hw_call(dev->mfc_ops, alloc_dec_temp_buffers,
658 ctx);
587 set_work_bit_irqsave(ctx); 659 set_work_bit_irqsave(ctx);
588 s5p_mfc_clean_ctx_int_flags(ctx); 660 s5p_mfc_clean_ctx_int_flags(ctx);
589 s5p_mfc_try_run(dev); 661 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
590 662
591 if (s5p_mfc_wait_for_done_ctx(ctx, 663 if (s5p_mfc_wait_for_done_ctx(ctx,
592 S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET, 0)) { 664 S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET, 0)) {
593 /* Error or timeout */ 665 /* Error or timeout */
594 mfc_err("Error getting instance from hardware\n"); 666 mfc_err("Error getting instance from hardware\n");
595 s5p_mfc_release_instance_buffer(ctx); 667 s5p_mfc_hw_call(dev->mfc_ops,
596 s5p_mfc_release_dec_desc_buffer(ctx); 668 release_instance_buffer, ctx);
669 s5p_mfc_hw_call(dev->mfc_ops,
670 release_dec_desc_buffer, ctx);
597 return -EIO; 671 return -EIO;
598 } 672 }
599 mfc_debug(2, "Got instance number: %d\n", ctx->inst_no); 673 mfc_debug(2, "Got instance number: %d\n", ctx->inst_no);
@@ -662,7 +736,7 @@ static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl)
662 /* Should wait for the header to be parsed */ 736 /* Should wait for the header to be parsed */
663 s5p_mfc_clean_ctx_int_flags(ctx); 737 s5p_mfc_clean_ctx_int_flags(ctx);
664 s5p_mfc_wait_for_done_ctx(ctx, 738 s5p_mfc_wait_for_done_ctx(ctx,
665 S5P_FIMV_R2H_CMD_SEQ_DONE_RET, 0); 739 S5P_MFC_R2H_CMD_SEQ_DONE_RET, 0);
666 if (ctx->state >= MFCINST_HEAD_PARSED && 740 if (ctx->state >= MFCINST_HEAD_PARSED &&
667 ctx->state < MFCINST_ABORT) { 741 ctx->state < MFCINST_ABORT) {
668 ctrl->val = ctx->dpb_count; 742 ctrl->val = ctx->dpb_count;
@@ -686,6 +760,7 @@ static int vidioc_g_crop(struct file *file, void *priv,
686 struct v4l2_crop *cr) 760 struct v4l2_crop *cr)
687{ 761{
688 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); 762 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
763 struct s5p_mfc_dev *dev = ctx->dev;
689 u32 left, right, top, bottom; 764 u32 left, right, top, bottom;
690 765
691 if (ctx->state != MFCINST_HEAD_PARSED && 766 if (ctx->state != MFCINST_HEAD_PARSED &&
@@ -695,10 +770,10 @@ static int vidioc_g_crop(struct file *file, void *priv,
695 return -EINVAL; 770 return -EINVAL;
696 } 771 }
697 if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_H264) { 772 if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_H264) {
698 left = s5p_mfc_read_shm(ctx, CROP_INFO_H); 773 left = s5p_mfc_hw_call(dev->mfc_ops, get_crop_info_h, ctx);
699 right = left >> S5P_FIMV_SHARED_CROP_RIGHT_SHIFT; 774 right = left >> S5P_FIMV_SHARED_CROP_RIGHT_SHIFT;
700 left = left & S5P_FIMV_SHARED_CROP_LEFT_MASK; 775 left = left & S5P_FIMV_SHARED_CROP_LEFT_MASK;
701 top = s5p_mfc_read_shm(ctx, CROP_INFO_V); 776 top = s5p_mfc_hw_call(dev->mfc_ops, get_crop_info_v, ctx);
702 bottom = top >> S5P_FIMV_SHARED_CROP_BOTTOM_SHIFT; 777 bottom = top >> S5P_FIMV_SHARED_CROP_BOTTOM_SHIFT;
703 top = top & S5P_FIMV_SHARED_CROP_TOP_MASK; 778 top = top & S5P_FIMV_SHARED_CROP_TOP_MASK;
704 cr->c.left = left; 779 cr->c.left = left;
@@ -749,6 +824,7 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
749 void *allocators[]) 824 void *allocators[])
750{ 825{
751 struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv); 826 struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
827 struct s5p_mfc_dev *dev = ctx->dev;
752 828
753 /* Video output for decoding (source) 829 /* Video output for decoding (source)
754 * this can be set after getting an instance */ 830 * this can be set after getting an instance */
@@ -784,7 +860,13 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
784 vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { 860 vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
785 psize[0] = ctx->luma_size; 861 psize[0] = ctx->luma_size;
786 psize[1] = ctx->chroma_size; 862 psize[1] = ctx->chroma_size;
787 allocators[0] = ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX]; 863
864 if (IS_MFCV6(dev))
865 allocators[0] =
866 ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
867 else
868 allocators[0] =
869 ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX];
788 allocators[1] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX]; 870 allocators[1] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
789 } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && 871 } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
790 ctx->state == MFCINST_INIT) { 872 ctx->state == MFCINST_INIT) {
@@ -876,7 +958,7 @@ static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
876 /* If context is ready then dev = work->data;schedule it to run */ 958 /* If context is ready then dev = work->data;schedule it to run */
877 if (s5p_mfc_ctx_ready(ctx)) 959 if (s5p_mfc_ctx_ready(ctx))
878 set_work_bit_irqsave(ctx); 960 set_work_bit_irqsave(ctx);
879 s5p_mfc_try_run(dev); 961 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
880 return 0; 962 return 0;
881} 963}
882 964
@@ -892,19 +974,21 @@ static int s5p_mfc_stop_streaming(struct vb2_queue *q)
892 dev->curr_ctx == ctx->num && dev->hw_lock) { 974 dev->curr_ctx == ctx->num && dev->hw_lock) {
893 ctx->state = MFCINST_ABORT; 975 ctx->state = MFCINST_ABORT;
894 s5p_mfc_wait_for_done_ctx(ctx, 976 s5p_mfc_wait_for_done_ctx(ctx,
895 S5P_FIMV_R2H_CMD_FRAME_DONE_RET, 0); 977 S5P_MFC_R2H_CMD_FRAME_DONE_RET, 0);
896 aborted = 1; 978 aborted = 1;
897 } 979 }
898 spin_lock_irqsave(&dev->irqlock, flags); 980 spin_lock_irqsave(&dev->irqlock, flags);
899 if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { 981 if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
900 s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst); 982 s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->dst_queue,
983 &ctx->vq_dst);
901 INIT_LIST_HEAD(&ctx->dst_queue); 984 INIT_LIST_HEAD(&ctx->dst_queue);
902 ctx->dst_queue_cnt = 0; 985 ctx->dst_queue_cnt = 0;
903 ctx->dpb_flush_flag = 1; 986 ctx->dpb_flush_flag = 1;
904 ctx->dec_dst_flag = 0; 987 ctx->dec_dst_flag = 0;
905 } 988 }
906 if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { 989 if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
907 s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src); 990 s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->src_queue,
991 &ctx->vq_src);
908 INIT_LIST_HEAD(&ctx->src_queue); 992 INIT_LIST_HEAD(&ctx->src_queue);
909 ctx->src_queue_cnt = 0; 993 ctx->src_queue_cnt = 0;
910 } 994 }
@@ -944,7 +1028,7 @@ static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
944 } 1028 }
945 if (s5p_mfc_ctx_ready(ctx)) 1029 if (s5p_mfc_ctx_ready(ctx))
946 set_work_bit_irqsave(ctx); 1030 set_work_bit_irqsave(ctx);
947 s5p_mfc_try_run(dev); 1031 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
948} 1032}
949 1033
950static struct vb2_ops s5p_mfc_dec_qops = { 1034static struct vb2_ops s5p_mfc_dec_qops = {
@@ -1028,3 +1112,13 @@ void s5p_mfc_dec_ctrls_delete(struct s5p_mfc_ctx *ctx)
1028 ctx->ctrls[i] = NULL; 1112 ctx->ctrls[i] = NULL;
1029} 1113}
1030 1114
1115void s5p_mfc_dec_init(struct s5p_mfc_ctx *ctx)
1116{
1117 struct v4l2_format f;
1118 f.fmt.pix_mp.pixelformat = DEF_SRC_FMT_DEC;
1119 ctx->src_fmt = find_format(&f, MFC_FMT_DEC);
1120 f.fmt.pix_mp.pixelformat = DEF_DST_FMT_DEC;
1121 ctx->dst_fmt = find_format(&f, MFC_FMT_RAW);
1122 mfc_debug(2, "Default src_fmt is %x, dest_fmt is %x\n",
1123 (unsigned int)ctx->src_fmt, (unsigned int)ctx->dst_fmt);
1124}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.h b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.h
index fdf1d99a9d15..d06a7cab5eb1 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.h
@@ -19,5 +19,6 @@ const struct v4l2_ioctl_ops *get_dec_v4l2_ioctl_ops(void);
19struct s5p_mfc_fmt *get_dec_def_fmt(bool src); 19struct s5p_mfc_fmt *get_dec_def_fmt(bool src);
20int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx); 20int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx);
21void s5p_mfc_dec_ctrls_delete(struct s5p_mfc_ctx *ctx); 21void s5p_mfc_dec_ctrls_delete(struct s5p_mfc_ctx *ctx);
22void s5p_mfc_dec_init(struct s5p_mfc_ctx *ctx);
22 23
23#endif /* S5P_MFC_DEC_H_ */ 24#endif /* S5P_MFC_DEC_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 179e4db60b15..2af6d522f4ac 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -25,48 +25,64 @@
25#include <linux/workqueue.h> 25#include <linux/workqueue.h>
26#include <media/v4l2-ctrls.h> 26#include <media/v4l2-ctrls.h>
27#include <media/videobuf2-core.h> 27#include <media/videobuf2-core.h>
28#include "regs-mfc.h"
29#include "s5p_mfc_common.h" 28#include "s5p_mfc_common.h"
30#include "s5p_mfc_debug.h" 29#include "s5p_mfc_debug.h"
31#include "s5p_mfc_enc.h" 30#include "s5p_mfc_enc.h"
32#include "s5p_mfc_intr.h" 31#include "s5p_mfc_intr.h"
33#include "s5p_mfc_opr.h" 32#include "s5p_mfc_opr.h"
34 33
34#define DEF_SRC_FMT_ENC V4L2_PIX_FMT_NV12MT
35#define DEF_DST_FMT_ENC V4L2_PIX_FMT_H264
36
35static struct s5p_mfc_fmt formats[] = { 37static struct s5p_mfc_fmt formats[] = {
36 { 38 {
37 .name = "4:2:0 2 Planes 64x32 Tiles", 39 .name = "4:2:0 2 Planes 16x16 Tiles",
38 .fourcc = V4L2_PIX_FMT_NV12MT, 40 .fourcc = V4L2_PIX_FMT_NV12MT_16X16,
39 .codec_mode = S5P_FIMV_CODEC_NONE, 41 .codec_mode = S5P_MFC_CODEC_NONE,
40 .type = MFC_FMT_RAW, 42 .type = MFC_FMT_RAW,
41 .num_planes = 2, 43 .num_planes = 2,
44 },
45 {
46 .name = "4:2:0 2 Planes 64x32 Tiles",
47 .fourcc = V4L2_PIX_FMT_NV12MT,
48 .codec_mode = S5P_MFC_CODEC_NONE,
49 .type = MFC_FMT_RAW,
50 .num_planes = 2,
51 },
52 {
53 .name = "4:2:0 2 Planes Y/CbCr",
54 .fourcc = V4L2_PIX_FMT_NV12M,
55 .codec_mode = S5P_MFC_CODEC_NONE,
56 .type = MFC_FMT_RAW,
57 .num_planes = 2,
42 }, 58 },
43 { 59 {
44 .name = "4:2:0 2 Planes", 60 .name = "4:2:0 2 Planes Y/CrCb",
45 .fourcc = V4L2_PIX_FMT_NV12M, 61 .fourcc = V4L2_PIX_FMT_NV21M,
46 .codec_mode = S5P_FIMV_CODEC_NONE, 62 .codec_mode = S5P_MFC_CODEC_NONE,
47 .type = MFC_FMT_RAW, 63 .type = MFC_FMT_RAW,
48 .num_planes = 2, 64 .num_planes = 2,
49 }, 65 },
50 { 66 {
51 .name = "H264 Encoded Stream", 67 .name = "H264 Encoded Stream",
52 .fourcc = V4L2_PIX_FMT_H264, 68 .fourcc = V4L2_PIX_FMT_H264,
53 .codec_mode = S5P_FIMV_CODEC_H264_ENC, 69 .codec_mode = S5P_MFC_CODEC_H264_ENC,
54 .type = MFC_FMT_ENC, 70 .type = MFC_FMT_ENC,
55 .num_planes = 1, 71 .num_planes = 1,
56 }, 72 },
57 { 73 {
58 .name = "MPEG4 Encoded Stream", 74 .name = "MPEG4 Encoded Stream",
59 .fourcc = V4L2_PIX_FMT_MPEG4, 75 .fourcc = V4L2_PIX_FMT_MPEG4,
60 .codec_mode = S5P_FIMV_CODEC_MPEG4_ENC, 76 .codec_mode = S5P_MFC_CODEC_MPEG4_ENC,
61 .type = MFC_FMT_ENC, 77 .type = MFC_FMT_ENC,
62 .num_planes = 1, 78 .num_planes = 1,
63 }, 79 },
64 { 80 {
65 .name = "H263 Encoded Stream", 81 .name = "H263 Encoded Stream",
66 .fourcc = V4L2_PIX_FMT_H263, 82 .fourcc = V4L2_PIX_FMT_H263,
67 .codec_mode = S5P_FIMV_CODEC_H263_ENC, 83 .codec_mode = S5P_MFC_CODEC_H263_ENC,
68 .type = MFC_FMT_ENC, 84 .type = MFC_FMT_ENC,
69 .num_planes = 1, 85 .num_planes = 1,
70 }, 86 },
71}; 87};
72 88
@@ -574,7 +590,8 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
574 if (ctx->state == MFCINST_GOT_INST && ctx->dst_queue_cnt >= 1) 590 if (ctx->state == MFCINST_GOT_INST && ctx->dst_queue_cnt >= 1)
575 return 1; 591 return 1;
576 /* context is ready to encode a frame */ 592 /* context is ready to encode a frame */
577 if (ctx->state == MFCINST_RUNNING && 593 if ((ctx->state == MFCINST_RUNNING ||
594 ctx->state == MFCINST_HEAD_PARSED) &&
578 ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1) 595 ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1)
579 return 1; 596 return 1;
580 /* context is ready to encode remaining frames */ 597 /* context is ready to encode remaining frames */
@@ -619,7 +636,8 @@ static int enc_pre_seq_start(struct s5p_mfc_ctx *ctx)
619 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); 636 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
620 dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0); 637 dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
621 dst_size = vb2_plane_size(dst_mb->b, 0); 638 dst_size = vb2_plane_size(dst_mb->b, 0);
622 s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size); 639 s5p_mfc_hw_call(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr,
640 dst_size);
623 spin_unlock_irqrestore(&dev->irqlock, flags); 641 spin_unlock_irqrestore(&dev->irqlock, flags);
624 return 0; 642 return 0;
625} 643}
@@ -638,14 +656,23 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx)
638 list_del(&dst_mb->list); 656 list_del(&dst_mb->list);
639 ctx->dst_queue_cnt--; 657 ctx->dst_queue_cnt--;
640 vb2_set_plane_payload(dst_mb->b, 0, 658 vb2_set_plane_payload(dst_mb->b, 0,
641 s5p_mfc_get_enc_strm_size()); 659 s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev));
642 vb2_buffer_done(dst_mb->b, VB2_BUF_STATE_DONE); 660 vb2_buffer_done(dst_mb->b, VB2_BUF_STATE_DONE);
643 spin_unlock_irqrestore(&dev->irqlock, flags); 661 spin_unlock_irqrestore(&dev->irqlock, flags);
644 } 662 }
645 ctx->state = MFCINST_RUNNING; 663 if (IS_MFCV6(dev)) {
646 if (s5p_mfc_ctx_ready(ctx)) 664 ctx->state = MFCINST_HEAD_PARSED; /* for INIT_BUFFER cmd */
647 set_work_bit_irqsave(ctx); 665 } else {
648 s5p_mfc_try_run(dev); 666 ctx->state = MFCINST_RUNNING;
667 if (s5p_mfc_ctx_ready(ctx))
668 set_work_bit_irqsave(ctx);
669 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
670 }
671
672 if (IS_MFCV6(dev))
673 ctx->dpb_count = s5p_mfc_hw_call(dev->mfc_ops,
674 get_enc_dpb_count, dev);
675
649 return 0; 676 return 0;
650} 677}
651 678
@@ -662,14 +689,16 @@ static int enc_pre_frame_start(struct s5p_mfc_ctx *ctx)
662 src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); 689 src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
663 src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0); 690 src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0);
664 src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1); 691 src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1);
665 s5p_mfc_set_enc_frame_buffer(ctx, src_y_addr, src_c_addr); 692 s5p_mfc_hw_call(dev->mfc_ops, set_enc_frame_buffer, ctx, src_y_addr,
693 src_c_addr);
666 spin_unlock_irqrestore(&dev->irqlock, flags); 694 spin_unlock_irqrestore(&dev->irqlock, flags);
667 695
668 spin_lock_irqsave(&dev->irqlock, flags); 696 spin_lock_irqsave(&dev->irqlock, flags);
669 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); 697 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
670 dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0); 698 dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
671 dst_size = vb2_plane_size(dst_mb->b, 0); 699 dst_size = vb2_plane_size(dst_mb->b, 0);
672 s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size); 700 s5p_mfc_hw_call(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr,
701 dst_size);
673 spin_unlock_irqrestore(&dev->irqlock, flags); 702 spin_unlock_irqrestore(&dev->irqlock, flags);
674 703
675 return 0; 704 return 0;
@@ -685,15 +714,16 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
685 unsigned int strm_size; 714 unsigned int strm_size;
686 unsigned long flags; 715 unsigned long flags;
687 716
688 slice_type = s5p_mfc_get_enc_slice_type(); 717 slice_type = s5p_mfc_hw_call(dev->mfc_ops, get_enc_slice_type, dev);
689 strm_size = s5p_mfc_get_enc_strm_size(); 718 strm_size = s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev);
690 mfc_debug(2, "Encoded slice type: %d", slice_type); 719 mfc_debug(2, "Encoded slice type: %d", slice_type);
691 mfc_debug(2, "Encoded stream size: %d", strm_size); 720 mfc_debug(2, "Encoded stream size: %d", strm_size);
692 mfc_debug(2, "Display order: %d", 721 mfc_debug(2, "Display order: %d",
693 mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT)); 722 mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT));
694 spin_lock_irqsave(&dev->irqlock, flags); 723 spin_lock_irqsave(&dev->irqlock, flags);
695 if (slice_type >= 0) { 724 if (slice_type >= 0) {
696 s5p_mfc_get_enc_frame_buffer(ctx, &enc_y_addr, &enc_c_addr); 725 s5p_mfc_hw_call(dev->mfc_ops, get_enc_frame_buffer, ctx,
726 &enc_y_addr, &enc_c_addr);
697 list_for_each_entry(mb_entry, &ctx->src_queue, list) { 727 list_for_each_entry(mb_entry, &ctx->src_queue, list) {
698 mb_y_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 0); 728 mb_y_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 0);
699 mb_c_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 1); 729 mb_c_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 1);
@@ -939,15 +969,16 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
939 pix_fmt_mp->plane_fmt[0].bytesperline = 0; 969 pix_fmt_mp->plane_fmt[0].bytesperline = 0;
940 ctx->dst_bufs_cnt = 0; 970 ctx->dst_bufs_cnt = 0;
941 ctx->capture_state = QUEUE_FREE; 971 ctx->capture_state = QUEUE_FREE;
942 s5p_mfc_alloc_instance_buffer(ctx); 972 s5p_mfc_hw_call(dev->mfc_ops, alloc_instance_buffer, ctx);
943 set_work_bit_irqsave(ctx); 973 set_work_bit_irqsave(ctx);
944 s5p_mfc_clean_ctx_int_flags(ctx); 974 s5p_mfc_clean_ctx_int_flags(ctx);
945 s5p_mfc_try_run(dev); 975 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
946 if (s5p_mfc_wait_for_done_ctx(ctx, \ 976 if (s5p_mfc_wait_for_done_ctx(ctx, \
947 S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET, 1)) { 977 S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET, 1)) {
948 /* Error or timeout */ 978 /* Error or timeout */
949 mfc_err("Error getting instance from hardware\n"); 979 mfc_err("Error getting instance from hardware\n");
950 s5p_mfc_release_instance_buffer(ctx); 980 s5p_mfc_hw_call(dev->mfc_ops, release_instance_buffer,
981 ctx);
951 ret = -EIO; 982 ret = -EIO;
952 goto out; 983 goto out;
953 } 984 }
@@ -958,6 +989,17 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
958 mfc_err("failed to set output format\n"); 989 mfc_err("failed to set output format\n");
959 return -EINVAL; 990 return -EINVAL;
960 } 991 }
992
993 if (!IS_MFCV6(dev) &&
994 (fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16)) {
995 mfc_err("Not supported format.\n");
996 return -EINVAL;
997 } else if (IS_MFCV6(dev) &&
998 (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) {
999 mfc_err("Not supported format.\n");
1000 return -EINVAL;
1001 }
1002
961 if (fmt->num_planes != pix_fmt_mp->num_planes) { 1003 if (fmt->num_planes != pix_fmt_mp->num_planes) {
962 mfc_err("failed to set output format\n"); 1004 mfc_err("failed to set output format\n");
963 ret = -EINVAL; 1005 ret = -EINVAL;
@@ -970,45 +1012,13 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
970 mfc_debug(2, "fmt - w: %d, h: %d, ctx - w: %d, h: %d\n", 1012 mfc_debug(2, "fmt - w: %d, h: %d, ctx - w: %d, h: %d\n",
971 pix_fmt_mp->width, pix_fmt_mp->height, 1013 pix_fmt_mp->width, pix_fmt_mp->height,
972 ctx->img_width, ctx->img_height); 1014 ctx->img_width, ctx->img_height);
973 if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M) { 1015
974 ctx->buf_width = ALIGN(ctx->img_width, 1016 s5p_mfc_hw_call(dev->mfc_ops, enc_calc_src_size, ctx);
975 S5P_FIMV_NV12M_HALIGN); 1017 pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size;
976 ctx->luma_size = ALIGN(ctx->img_width, 1018 pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width;
977 S5P_FIMV_NV12M_HALIGN) * ALIGN(ctx->img_height, 1019 pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
978 S5P_FIMV_NV12M_LVALIGN); 1020 pix_fmt_mp->plane_fmt[1].bytesperline = ctx->buf_width;
979 ctx->chroma_size = ALIGN(ctx->img_width, 1021
980 S5P_FIMV_NV12M_HALIGN) * ALIGN((ctx->img_height
981 >> 1), S5P_FIMV_NV12M_CVALIGN);
982
983 ctx->luma_size = ALIGN(ctx->luma_size,
984 S5P_FIMV_NV12M_SALIGN);
985 ctx->chroma_size = ALIGN(ctx->chroma_size,
986 S5P_FIMV_NV12M_SALIGN);
987
988 pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size;
989 pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width;
990 pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
991 pix_fmt_mp->plane_fmt[1].bytesperline = ctx->buf_width;
992
993 } else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT) {
994 ctx->buf_width = ALIGN(ctx->img_width,
995 S5P_FIMV_NV12MT_HALIGN);
996 ctx->luma_size = ALIGN(ctx->img_width,
997 S5P_FIMV_NV12MT_HALIGN) * ALIGN(ctx->img_height,
998 S5P_FIMV_NV12MT_VALIGN);
999 ctx->chroma_size = ALIGN(ctx->img_width,
1000 S5P_FIMV_NV12MT_HALIGN) * ALIGN((ctx->img_height
1001 >> 1), S5P_FIMV_NV12MT_VALIGN);
1002 ctx->luma_size = ALIGN(ctx->luma_size,
1003 S5P_FIMV_NV12MT_SALIGN);
1004 ctx->chroma_size = ALIGN(ctx->chroma_size,
1005 S5P_FIMV_NV12MT_SALIGN);
1006
1007 pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size;
1008 pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width;
1009 pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
1010 pix_fmt_mp->plane_fmt[1].bytesperline = ctx->buf_width;
1011 }
1012 ctx->src_bufs_cnt = 0; 1022 ctx->src_bufs_cnt = 0;
1013 ctx->output_state = QUEUE_FREE; 1023 ctx->output_state = QUEUE_FREE;
1014 } else { 1024 } else {
@@ -1023,6 +1033,7 @@ out:
1023static int vidioc_reqbufs(struct file *file, void *priv, 1033static int vidioc_reqbufs(struct file *file, void *priv,
1024 struct v4l2_requestbuffers *reqbufs) 1034 struct v4l2_requestbuffers *reqbufs)
1025{ 1035{
1036 struct s5p_mfc_dev *dev = video_drvdata(file);
1026 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); 1037 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
1027 int ret = 0; 1038 int ret = 0;
1028 1039
@@ -1042,12 +1053,16 @@ static int vidioc_reqbufs(struct file *file, void *priv,
1042 return ret; 1053 return ret;
1043 } 1054 }
1044 ctx->capture_state = QUEUE_BUFS_REQUESTED; 1055 ctx->capture_state = QUEUE_BUFS_REQUESTED;
1045 ret = s5p_mfc_alloc_codec_buffers(ctx); 1056
1046 if (ret) { 1057 if (!IS_MFCV6(dev)) {
1047 mfc_err("Failed to allocate encoding buffers\n"); 1058 ret = s5p_mfc_hw_call(ctx->dev->mfc_ops,
1048 reqbufs->count = 0; 1059 alloc_codec_buffers, ctx);
1049 ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); 1060 if (ret) {
1050 return -ENOMEM; 1061 mfc_err("Failed to allocate encoding buffers\n");
1062 reqbufs->count = 0;
1063 ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
1064 return -ENOMEM;
1065 }
1051 } 1066 }
1052 } else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { 1067 } else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
1053 if (ctx->output_state != QUEUE_FREE) { 1068 if (ctx->output_state != QUEUE_FREE) {
@@ -1310,6 +1325,13 @@ static int s5p_mfc_enc_s_ctrl(struct v4l2_ctrl *ctrl)
1310 p->codec.h264.profile = 1325 p->codec.h264.profile =
1311 S5P_FIMV_ENC_PROFILE_H264_BASELINE; 1326 S5P_FIMV_ENC_PROFILE_H264_BASELINE;
1312 break; 1327 break;
1328 case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
1329 if (IS_MFCV6(dev))
1330 p->codec.h264.profile =
1331 S5P_FIMV_ENC_PROFILE_H264_CONSTRAINED_BASELINE;
1332 else
1333 ret = -EINVAL;
1334 break;
1313 default: 1335 default:
1314 ret = -EINVAL; 1336 ret = -EINVAL;
1315 } 1337 }
@@ -1349,7 +1371,7 @@ static int s5p_mfc_enc_s_ctrl(struct v4l2_ctrl *ctrl)
1349 p->codec.h264._8x8_transform = ctrl->val; 1371 p->codec.h264._8x8_transform = ctrl->val;
1350 break; 1372 break;
1351 case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: 1373 case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
1352 p->codec.h264.rc_mb = ctrl->val; 1374 p->rc_mb = ctrl->val;
1353 break; 1375 break;
1354 case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: 1376 case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
1355 p->codec.h264.rc_frame_qp = ctrl->val; 1377 p->codec.h264.rc_frame_qp = ctrl->val;
@@ -1500,7 +1522,7 @@ int vidioc_encoder_cmd(struct file *file, void *priv,
1500 mfc_debug(2, "EOS: empty src queue, entering finishing state"); 1522 mfc_debug(2, "EOS: empty src queue, entering finishing state");
1501 ctx->state = MFCINST_FINISHING; 1523 ctx->state = MFCINST_FINISHING;
1502 spin_unlock_irqrestore(&dev->irqlock, flags); 1524 spin_unlock_irqrestore(&dev->irqlock, flags);
1503 s5p_mfc_try_run(dev); 1525 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
1504 } else { 1526 } else {
1505 mfc_debug(2, "EOS: marking last buffer of stream"); 1527 mfc_debug(2, "EOS: marking last buffer of stream");
1506 buf = list_entry(ctx->src_queue.prev, 1528 buf = list_entry(ctx->src_queue.prev,
@@ -1583,6 +1605,7 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
1583 unsigned int psize[], void *allocators[]) 1605 unsigned int psize[], void *allocators[])
1584{ 1606{
1585 struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv); 1607 struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
1608 struct s5p_mfc_dev *dev = ctx->dev;
1586 1609
1587 if (ctx->state != MFCINST_GOT_INST) { 1610 if (ctx->state != MFCINST_GOT_INST) {
1588 mfc_err("inavlid state: %d\n", ctx->state); 1611 mfc_err("inavlid state: %d\n", ctx->state);
@@ -1611,8 +1634,17 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
1611 *buf_count = MFC_MAX_BUFFERS; 1634 *buf_count = MFC_MAX_BUFFERS;
1612 psize[0] = ctx->luma_size; 1635 psize[0] = ctx->luma_size;
1613 psize[1] = ctx->chroma_size; 1636 psize[1] = ctx->chroma_size;
1614 allocators[0] = ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX]; 1637 if (IS_MFCV6(dev)) {
1615 allocators[1] = ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX]; 1638 allocators[0] =
1639 ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
1640 allocators[1] =
1641 ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
1642 } else {
1643 allocators[0] =
1644 ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX];
1645 allocators[1] =
1646 ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX];
1647 }
1616 } else { 1648 } else {
1617 mfc_err("inavlid queue type: %d\n", vq->type); 1649 mfc_err("inavlid queue type: %d\n", vq->type);
1618 return -EINVAL; 1650 return -EINVAL;
@@ -1715,7 +1747,7 @@ static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
1715 /* If context is ready then dev = work->data;schedule it to run */ 1747 /* If context is ready then dev = work->data;schedule it to run */
1716 if (s5p_mfc_ctx_ready(ctx)) 1748 if (s5p_mfc_ctx_ready(ctx))
1717 set_work_bit_irqsave(ctx); 1749 set_work_bit_irqsave(ctx);
1718 s5p_mfc_try_run(dev); 1750 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
1719 return 0; 1751 return 0;
1720} 1752}
1721 1753
@@ -1729,19 +1761,21 @@ static int s5p_mfc_stop_streaming(struct vb2_queue *q)
1729 ctx->state == MFCINST_RUNNING) && 1761 ctx->state == MFCINST_RUNNING) &&
1730 dev->curr_ctx == ctx->num && dev->hw_lock) { 1762 dev->curr_ctx == ctx->num && dev->hw_lock) {
1731 ctx->state = MFCINST_ABORT; 1763 ctx->state = MFCINST_ABORT;
1732 s5p_mfc_wait_for_done_ctx(ctx, S5P_FIMV_R2H_CMD_FRAME_DONE_RET, 1764 s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_FRAME_DONE_RET,
1733 0); 1765 0);
1734 } 1766 }
1735 ctx->state = MFCINST_FINISHED; 1767 ctx->state = MFCINST_FINISHED;
1736 spin_lock_irqsave(&dev->irqlock, flags); 1768 spin_lock_irqsave(&dev->irqlock, flags);
1737 if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { 1769 if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
1738 s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst); 1770 s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->dst_queue,
1771 &ctx->vq_dst);
1739 INIT_LIST_HEAD(&ctx->dst_queue); 1772 INIT_LIST_HEAD(&ctx->dst_queue);
1740 ctx->dst_queue_cnt = 0; 1773 ctx->dst_queue_cnt = 0;
1741 } 1774 }
1742 if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { 1775 if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
1743 cleanup_ref_queue(ctx); 1776 cleanup_ref_queue(ctx);
1744 s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src); 1777 s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->src_queue,
1778 &ctx->vq_src);
1745 INIT_LIST_HEAD(&ctx->src_queue); 1779 INIT_LIST_HEAD(&ctx->src_queue);
1746 ctx->src_queue_cnt = 0; 1780 ctx->src_queue_cnt = 0;
1747 } 1781 }
@@ -1782,7 +1816,7 @@ static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
1782 } 1816 }
1783 if (s5p_mfc_ctx_ready(ctx)) 1817 if (s5p_mfc_ctx_ready(ctx))
1784 set_work_bit_irqsave(ctx); 1818 set_work_bit_irqsave(ctx);
1785 s5p_mfc_try_run(dev); 1819 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
1786} 1820}
1787 1821
1788static struct vb2_ops s5p_mfc_enc_qops = { 1822static struct vb2_ops s5p_mfc_enc_qops = {
@@ -1880,3 +1914,13 @@ void s5p_mfc_enc_ctrls_delete(struct s5p_mfc_ctx *ctx)
1880 for (i = 0; i < NUM_CTRLS; i++) 1914 for (i = 0; i < NUM_CTRLS; i++)
1881 ctx->ctrls[i] = NULL; 1915 ctx->ctrls[i] = NULL;
1882} 1916}
1917
1918void s5p_mfc_enc_init(struct s5p_mfc_ctx *ctx)
1919{
1920 struct v4l2_format f;
1921 f.fmt.pix_mp.pixelformat = DEF_SRC_FMT_ENC;
1922 ctx->src_fmt = find_format(&f, MFC_FMT_RAW);
1923 f.fmt.pix_mp.pixelformat = DEF_DST_FMT_ENC;
1924 ctx->dst_fmt = find_format(&f, MFC_FMT_ENC);
1925}
1926
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.h b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.h
index ca9fd66bd310..5118d46b3a9e 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.h
@@ -19,5 +19,6 @@ const struct v4l2_ioctl_ops *get_enc_v4l2_ioctl_ops(void);
19struct s5p_mfc_fmt *get_enc_def_fmt(bool src); 19struct s5p_mfc_fmt *get_enc_def_fmt(bool src);
20int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx); 20int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx);
21void s5p_mfc_enc_ctrls_delete(struct s5p_mfc_ctx *ctx); 21void s5p_mfc_enc_ctrls_delete(struct s5p_mfc_ctx *ctx);
22void s5p_mfc_enc_init(struct s5p_mfc_ctx *ctx);
22 23
23#endif /* S5P_MFC_ENC_H_ */ 24#endif /* S5P_MFC_ENC_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_intr.c b/drivers/media/platform/s5p-mfc/s5p_mfc_intr.c
index 37860e299021..5b8f0e085e6d 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_intr.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_intr.c
@@ -17,7 +17,6 @@
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/wait.h> 19#include <linux/wait.h>
20#include "regs-mfc.h"
21#include "s5p_mfc_common.h" 20#include "s5p_mfc_common.h"
22#include "s5p_mfc_debug.h" 21#include "s5p_mfc_debug.h"
23#include "s5p_mfc_intr.h" 22#include "s5p_mfc_intr.h"
@@ -28,7 +27,7 @@ int s5p_mfc_wait_for_done_dev(struct s5p_mfc_dev *dev, int command)
28 27
29 ret = wait_event_interruptible_timeout(dev->queue, 28 ret = wait_event_interruptible_timeout(dev->queue,
30 (dev->int_cond && (dev->int_type == command 29 (dev->int_cond && (dev->int_type == command
31 || dev->int_type == S5P_FIMV_R2H_CMD_ERR_RET)), 30 || dev->int_type == S5P_MFC_R2H_CMD_ERR_RET)),
32 msecs_to_jiffies(MFC_INT_TIMEOUT)); 31 msecs_to_jiffies(MFC_INT_TIMEOUT));
33 if (ret == 0) { 32 if (ret == 0) {
34 mfc_err("Interrupt (dev->int_type:%d, command:%d) timed out\n", 33 mfc_err("Interrupt (dev->int_type:%d, command:%d) timed out\n",
@@ -40,7 +39,7 @@ int s5p_mfc_wait_for_done_dev(struct s5p_mfc_dev *dev, int command)
40 } 39 }
41 mfc_debug(1, "Finished waiting (dev->int_type:%d, command: %d)\n", 40 mfc_debug(1, "Finished waiting (dev->int_type:%d, command: %d)\n",
42 dev->int_type, command); 41 dev->int_type, command);
43 if (dev->int_type == S5P_FIMV_R2H_CMD_ERR_RET) 42 if (dev->int_type == S5P_MFC_R2H_CMD_ERR_RET)
44 return 1; 43 return 1;
45 return 0; 44 return 0;
46} 45}
@@ -60,12 +59,12 @@ int s5p_mfc_wait_for_done_ctx(struct s5p_mfc_ctx *ctx,
60 if (interrupt) { 59 if (interrupt) {
61 ret = wait_event_interruptible_timeout(ctx->queue, 60 ret = wait_event_interruptible_timeout(ctx->queue,
62 (ctx->int_cond && (ctx->int_type == command 61 (ctx->int_cond && (ctx->int_type == command
63 || ctx->int_type == S5P_FIMV_R2H_CMD_ERR_RET)), 62 || ctx->int_type == S5P_MFC_R2H_CMD_ERR_RET)),
64 msecs_to_jiffies(MFC_INT_TIMEOUT)); 63 msecs_to_jiffies(MFC_INT_TIMEOUT));
65 } else { 64 } else {
66 ret = wait_event_timeout(ctx->queue, 65 ret = wait_event_timeout(ctx->queue,
67 (ctx->int_cond && (ctx->int_type == command 66 (ctx->int_cond && (ctx->int_type == command
68 || ctx->int_type == S5P_FIMV_R2H_CMD_ERR_RET)), 67 || ctx->int_type == S5P_MFC_R2H_CMD_ERR_RET)),
69 msecs_to_jiffies(MFC_INT_TIMEOUT)); 68 msecs_to_jiffies(MFC_INT_TIMEOUT));
70 } 69 }
71 if (ret == 0) { 70 if (ret == 0) {
@@ -78,7 +77,7 @@ int s5p_mfc_wait_for_done_ctx(struct s5p_mfc_ctx *ctx,
78 } 77 }
79 mfc_debug(1, "Finished waiting (ctx->int_type:%d, command: %d)\n", 78 mfc_debug(1, "Finished waiting (ctx->int_type:%d, command: %d)\n",
80 ctx->int_type, command); 79 ctx->int_type, command);
81 if (ctx->int_type == S5P_FIMV_R2H_CMD_ERR_RET) 80 if (ctx->int_type == S5P_MFC_R2H_CMD_ERR_RET)
82 return 1; 81 return 1;
83 return 0; 82 return 0;
84} 83}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
index 767a51271dc2..6932e90d4065 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
@@ -1,10 +1,10 @@
1/* 1/*
2 * drivers/media/platform/samsung/mfc5/s5p_mfc_opr.c 2 * drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
3 * 3 *
4 * Samsung MFC (Multi Function Codec - FIMV) driver 4 * Samsung MFC (Multi Function Codec - FIMV) driver
5 * This file contains hw related functions. 5 * This file contains hw related functions.
6 * 6 *
7 * Kamil Debski, Copyright (c) 2011 Samsung Electronics 7 * Kamil Debski, Copyright (c) 2012 Samsung Electronics Co., Ltd.
8 * http://www.samsung.com/ 8 * http://www.samsung.com/
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
@@ -12,1414 +12,20 @@
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13 */ 13 */
14 14
15#include "regs-mfc.h"
16#include "s5p_mfc_cmd.h"
17#include "s5p_mfc_common.h"
18#include "s5p_mfc_ctrl.h"
19#include "s5p_mfc_debug.h"
20#include "s5p_mfc_intr.h"
21#include "s5p_mfc_opr.h" 15#include "s5p_mfc_opr.h"
22#include "s5p_mfc_pm.h" 16#include "s5p_mfc_opr_v5.h"
23#include "s5p_mfc_shm.h" 17#include "s5p_mfc_opr_v6.h"
24#include <asm/cacheflush.h>
25#include <linux/delay.h>
26#include <linux/dma-mapping.h>
27#include <linux/err.h>
28#include <linux/firmware.h>
29#include <linux/io.h>
30#include <linux/jiffies.h>
31#include <linux/mm.h>
32#include <linux/sched.h>
33 18
34#define OFFSETA(x) (((x) - dev->bank1) >> MFC_OFFSET_SHIFT) 19static struct s5p_mfc_hw_ops *s5p_mfc_ops;
35#define OFFSETB(x) (((x) - dev->bank2) >> MFC_OFFSET_SHIFT)
36 20
37/* Allocate temporary buffers for decoding */ 21void s5p_mfc_init_hw_ops(struct s5p_mfc_dev *dev)
38int s5p_mfc_alloc_dec_temp_buffers(struct s5p_mfc_ctx *ctx)
39{ 22{
40 void *desc_virt; 23 if (IS_MFCV6(dev)) {
41 struct s5p_mfc_dev *dev = ctx->dev; 24 s5p_mfc_ops = s5p_mfc_init_hw_ops_v6();
42 25 dev->warn_start = S5P_FIMV_ERR_WARNINGS_START_V6;
43 ctx->desc_buf = vb2_dma_contig_memops.alloc(
44 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], DESC_BUF_SIZE);
45 if (IS_ERR_VALUE((int)ctx->desc_buf)) {
46 ctx->desc_buf = NULL;
47 mfc_err("Allocating DESC buffer failed\n");
48 return -ENOMEM;
49 }
50 ctx->desc_phys = s5p_mfc_mem_cookie(
51 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->desc_buf);
52 BUG_ON(ctx->desc_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
53 desc_virt = vb2_dma_contig_memops.vaddr(ctx->desc_buf);
54 if (desc_virt == NULL) {
55 vb2_dma_contig_memops.put(ctx->desc_buf);
56 ctx->desc_phys = 0;
57 ctx->desc_buf = NULL;
58 mfc_err("Remapping DESC buffer failed\n");
59 return -ENOMEM;
60 }
61 memset(desc_virt, 0, DESC_BUF_SIZE);
62 wmb();
63 return 0;
64}
65
66/* Release temporary buffers for decoding */
67void s5p_mfc_release_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
68{
69 if (ctx->desc_phys) {
70 vb2_dma_contig_memops.put(ctx->desc_buf);
71 ctx->desc_phys = 0;
72 ctx->desc_buf = NULL;
73 }
74}
75
76/* Allocate codec buffers */
77int s5p_mfc_alloc_codec_buffers(struct s5p_mfc_ctx *ctx)
78{
79 struct s5p_mfc_dev *dev = ctx->dev;
80 unsigned int enc_ref_y_size = 0;
81 unsigned int enc_ref_c_size = 0;
82 unsigned int guard_width, guard_height;
83
84 if (ctx->type == MFCINST_DECODER) {
85 mfc_debug(2, "Luma size:%d Chroma size:%d MV size:%d\n",
86 ctx->luma_size, ctx->chroma_size, ctx->mv_size);
87 mfc_debug(2, "Totals bufs: %d\n", ctx->total_dpb_count);
88 } else if (ctx->type == MFCINST_ENCODER) {
89 enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
90 * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
91 enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
92
93 if (ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC) {
94 enc_ref_c_size = ALIGN(ctx->img_width,
95 S5P_FIMV_NV12MT_HALIGN)
96 * ALIGN(ctx->img_height >> 1,
97 S5P_FIMV_NV12MT_VALIGN);
98 enc_ref_c_size = ALIGN(enc_ref_c_size,
99 S5P_FIMV_NV12MT_SALIGN);
100 } else {
101 guard_width = ALIGN(ctx->img_width + 16,
102 S5P_FIMV_NV12MT_HALIGN);
103 guard_height = ALIGN((ctx->img_height >> 1) + 4,
104 S5P_FIMV_NV12MT_VALIGN);
105 enc_ref_c_size = ALIGN(guard_width * guard_height,
106 S5P_FIMV_NV12MT_SALIGN);
107 }
108 mfc_debug(2, "recon luma size: %d chroma size: %d\n",
109 enc_ref_y_size, enc_ref_c_size);
110 } else { 26 } else {
111 return -EINVAL; 27 s5p_mfc_ops = s5p_mfc_init_hw_ops_v5();
112 } 28 dev->warn_start = S5P_FIMV_ERR_WARNINGS_START;
113 /* Codecs have different memory requirements */
114 switch (ctx->codec_mode) {
115 case S5P_FIMV_CODEC_H264_DEC:
116 ctx->bank1_size =
117 ALIGN(S5P_FIMV_DEC_NB_IP_SIZE +
118 S5P_FIMV_DEC_VERT_NB_MV_SIZE,
119 S5P_FIMV_DEC_BUF_ALIGN);
120 ctx->bank2_size = ctx->total_dpb_count * ctx->mv_size;
121 break;
122 case S5P_FIMV_CODEC_MPEG4_DEC:
123 ctx->bank1_size =
124 ALIGN(S5P_FIMV_DEC_NB_DCAC_SIZE +
125 S5P_FIMV_DEC_UPNB_MV_SIZE +
126 S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
127 S5P_FIMV_DEC_STX_PARSER_SIZE +
128 S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE,
129 S5P_FIMV_DEC_BUF_ALIGN);
130 ctx->bank2_size = 0;
131 break;
132 case S5P_FIMV_CODEC_VC1RCV_DEC:
133 case S5P_FIMV_CODEC_VC1_DEC:
134 ctx->bank1_size =
135 ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
136 S5P_FIMV_DEC_UPNB_MV_SIZE +
137 S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
138 S5P_FIMV_DEC_NB_DCAC_SIZE +
139 3 * S5P_FIMV_DEC_VC1_BITPLANE_SIZE,
140 S5P_FIMV_DEC_BUF_ALIGN);
141 ctx->bank2_size = 0;
142 break;
143 case S5P_FIMV_CODEC_MPEG2_DEC:
144 ctx->bank1_size = 0;
145 ctx->bank2_size = 0;
146 break;
147 case S5P_FIMV_CODEC_H263_DEC:
148 ctx->bank1_size =
149 ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
150 S5P_FIMV_DEC_UPNB_MV_SIZE +
151 S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
152 S5P_FIMV_DEC_NB_DCAC_SIZE,
153 S5P_FIMV_DEC_BUF_ALIGN);
154 ctx->bank2_size = 0;
155 break;
156 case S5P_FIMV_CODEC_H264_ENC:
157 ctx->bank1_size = (enc_ref_y_size * 2) +
158 S5P_FIMV_ENC_UPMV_SIZE +
159 S5P_FIMV_ENC_COLFLG_SIZE +
160 S5P_FIMV_ENC_INTRAMD_SIZE +
161 S5P_FIMV_ENC_NBORINFO_SIZE;
162 ctx->bank2_size = (enc_ref_y_size * 2) +
163 (enc_ref_c_size * 4) +
164 S5P_FIMV_ENC_INTRAPRED_SIZE;
165 break;
166 case S5P_FIMV_CODEC_MPEG4_ENC:
167 ctx->bank1_size = (enc_ref_y_size * 2) +
168 S5P_FIMV_ENC_UPMV_SIZE +
169 S5P_FIMV_ENC_COLFLG_SIZE +
170 S5P_FIMV_ENC_ACDCCOEF_SIZE;
171 ctx->bank2_size = (enc_ref_y_size * 2) +
172 (enc_ref_c_size * 4);
173 break;
174 case S5P_FIMV_CODEC_H263_ENC:
175 ctx->bank1_size = (enc_ref_y_size * 2) +
176 S5P_FIMV_ENC_UPMV_SIZE +
177 S5P_FIMV_ENC_ACDCCOEF_SIZE;
178 ctx->bank2_size = (enc_ref_y_size * 2) +
179 (enc_ref_c_size * 4);
180 break;
181 default:
182 break;
183 }
184 /* Allocate only if memory from bank 1 is necessary */
185 if (ctx->bank1_size > 0) {
186 ctx->bank1_buf = vb2_dma_contig_memops.alloc(
187 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_size);
188 if (IS_ERR(ctx->bank1_buf)) {
189 ctx->bank1_buf = NULL;
190 printk(KERN_ERR
191 "Buf alloc for decoding failed (port A)\n");
192 return -ENOMEM;
193 }
194 ctx->bank1_phys = s5p_mfc_mem_cookie(
195 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_buf);
196 BUG_ON(ctx->bank1_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
197 }
198 /* Allocate only if memory from bank 2 is necessary */
199 if (ctx->bank2_size > 0) {
200 ctx->bank2_buf = vb2_dma_contig_memops.alloc(
201 dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_size);
202 if (IS_ERR(ctx->bank2_buf)) {
203 ctx->bank2_buf = NULL;
204 mfc_err("Buf alloc for decoding failed (port B)\n");
205 return -ENOMEM;
206 }
207 ctx->bank2_phys = s5p_mfc_mem_cookie(
208 dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_buf);
209 BUG_ON(ctx->bank2_phys & ((1 << MFC_BANK2_ALIGN_ORDER) - 1));
210 }
211 return 0;
212}
213
214/* Release buffers allocated for codec */
215void s5p_mfc_release_codec_buffers(struct s5p_mfc_ctx *ctx)
216{
217 if (ctx->bank1_buf) {
218 vb2_dma_contig_memops.put(ctx->bank1_buf);
219 ctx->bank1_buf = NULL;
220 ctx->bank1_phys = 0;
221 ctx->bank1_size = 0;
222 }
223 if (ctx->bank2_buf) {
224 vb2_dma_contig_memops.put(ctx->bank2_buf);
225 ctx->bank2_buf = NULL;
226 ctx->bank2_phys = 0;
227 ctx->bank2_size = 0;
228 } 29 }
30 dev->mfc_ops = s5p_mfc_ops;
229} 31}
230
231/* Allocate memory for instance data buffer */
232int s5p_mfc_alloc_instance_buffer(struct s5p_mfc_ctx *ctx)
233{
234 void *context_virt;
235 struct s5p_mfc_dev *dev = ctx->dev;
236
237 if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC ||
238 ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC)
239 ctx->ctx_size = MFC_H264_CTX_BUF_SIZE;
240 else
241 ctx->ctx_size = MFC_CTX_BUF_SIZE;
242 ctx->ctx_buf = vb2_dma_contig_memops.alloc(
243 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx_size);
244 if (IS_ERR(ctx->ctx_buf)) {
245 mfc_err("Allocating context buffer failed\n");
246 ctx->ctx_phys = 0;
247 ctx->ctx_buf = NULL;
248 return -ENOMEM;
249 }
250 ctx->ctx_phys = s5p_mfc_mem_cookie(
251 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx_buf);
252 BUG_ON(ctx->ctx_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
253 ctx->ctx_ofs = OFFSETA(ctx->ctx_phys);
254 context_virt = vb2_dma_contig_memops.vaddr(ctx->ctx_buf);
255 if (context_virt == NULL) {
256 mfc_err("Remapping instance buffer failed\n");
257 vb2_dma_contig_memops.put(ctx->ctx_buf);
258 ctx->ctx_phys = 0;
259 ctx->ctx_buf = NULL;
260 return -ENOMEM;
261 }
262 /* Zero content of the allocated memory */
263 memset(context_virt, 0, ctx->ctx_size);
264 wmb();
265 if (s5p_mfc_init_shm(ctx) < 0) {
266 vb2_dma_contig_memops.put(ctx->ctx_buf);
267 ctx->ctx_phys = 0;
268 ctx->ctx_buf = NULL;
269 return -ENOMEM;
270 }
271 return 0;
272}
273
274/* Release instance buffer */
275void s5p_mfc_release_instance_buffer(struct s5p_mfc_ctx *ctx)
276{
277 if (ctx->ctx_buf) {
278 vb2_dma_contig_memops.put(ctx->ctx_buf);
279 ctx->ctx_phys = 0;
280 ctx->ctx_buf = NULL;
281 }
282 if (ctx->shm_alloc) {
283 vb2_dma_contig_memops.put(ctx->shm_alloc);
284 ctx->shm_alloc = NULL;
285 ctx->shm = NULL;
286 }
287}
288
289/* Set registers for decoding temporary buffers */
290void s5p_mfc_set_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
291{
292 struct s5p_mfc_dev *dev = ctx->dev;
293
294 mfc_write(dev, OFFSETA(ctx->desc_phys), S5P_FIMV_SI_CH0_DESC_ADR);
295 mfc_write(dev, DESC_BUF_SIZE, S5P_FIMV_SI_CH0_DESC_SIZE);
296}
297
298/* Set registers for shared buffer */
299static void s5p_mfc_set_shared_buffer(struct s5p_mfc_ctx *ctx)
300{
301 struct s5p_mfc_dev *dev = ctx->dev;
302 mfc_write(dev, ctx->shm_ofs, S5P_FIMV_SI_CH0_HOST_WR_ADR);
303}
304
305/* Set registers for decoding stream buffer */
306int s5p_mfc_set_dec_stream_buffer(struct s5p_mfc_ctx *ctx, int buf_addr,
307 unsigned int start_num_byte, unsigned int buf_size)
308{
309 struct s5p_mfc_dev *dev = ctx->dev;
310
311 mfc_write(dev, OFFSETA(buf_addr), S5P_FIMV_SI_CH0_SB_ST_ADR);
312 mfc_write(dev, ctx->dec_src_buf_size, S5P_FIMV_SI_CH0_CPB_SIZE);
313 mfc_write(dev, buf_size, S5P_FIMV_SI_CH0_SB_FRM_SIZE);
314 s5p_mfc_write_shm(ctx, start_num_byte, START_BYTE_NUM);
315 return 0;
316}
317
318/* Set decoding frame buffer */
319int s5p_mfc_set_dec_frame_buffer(struct s5p_mfc_ctx *ctx)
320{
321 unsigned int frame_size, i;
322 unsigned int frame_size_ch, frame_size_mv;
323 struct s5p_mfc_dev *dev = ctx->dev;
324 unsigned int dpb;
325 size_t buf_addr1, buf_addr2;
326 int buf_size1, buf_size2;
327
328 buf_addr1 = ctx->bank1_phys;
329 buf_size1 = ctx->bank1_size;
330 buf_addr2 = ctx->bank2_phys;
331 buf_size2 = ctx->bank2_size;
332 dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
333 ~S5P_FIMV_DPB_COUNT_MASK;
334 mfc_write(dev, ctx->total_dpb_count | dpb,
335 S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
336 s5p_mfc_set_shared_buffer(ctx);
337 switch (ctx->codec_mode) {
338 case S5P_FIMV_CODEC_H264_DEC:
339 mfc_write(dev, OFFSETA(buf_addr1),
340 S5P_FIMV_H264_VERT_NB_MV_ADR);
341 buf_addr1 += S5P_FIMV_DEC_VERT_NB_MV_SIZE;
342 buf_size1 -= S5P_FIMV_DEC_VERT_NB_MV_SIZE;
343 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_NB_IP_ADR);
344 buf_addr1 += S5P_FIMV_DEC_NB_IP_SIZE;
345 buf_size1 -= S5P_FIMV_DEC_NB_IP_SIZE;
346 break;
347 case S5P_FIMV_CODEC_MPEG4_DEC:
348 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_NB_DCAC_ADR);
349 buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
350 buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
351 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_UP_NB_MV_ADR);
352 buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
353 buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
354 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SA_MV_ADR);
355 buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
356 buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
357 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SP_ADR);
358 buf_addr1 += S5P_FIMV_DEC_STX_PARSER_SIZE;
359 buf_size1 -= S5P_FIMV_DEC_STX_PARSER_SIZE;
360 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_OT_LINE_ADR);
361 buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
362 buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
363 break;
364 case S5P_FIMV_CODEC_H263_DEC:
365 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_OT_LINE_ADR);
366 buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
367 buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
368 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_UP_NB_MV_ADR);
369 buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
370 buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
371 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_SA_MV_ADR);
372 buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
373 buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
374 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_NB_DCAC_ADR);
375 buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
376 buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
377 break;
378 case S5P_FIMV_CODEC_VC1_DEC:
379 case S5P_FIMV_CODEC_VC1RCV_DEC:
380 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_NB_DCAC_ADR);
381 buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
382 buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
383 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_OT_LINE_ADR);
384 buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
385 buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
386 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_UP_NB_MV_ADR);
387 buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
388 buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
389 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_SA_MV_ADR);
390 buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
391 buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
392 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE3_ADR);
393 buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
394 buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
395 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE2_ADR);
396 buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
397 buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
398 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE1_ADR);
399 buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
400 buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
401 break;
402 case S5P_FIMV_CODEC_MPEG2_DEC:
403 break;
404 default:
405 mfc_err("Unknown codec for decoding (%x)\n",
406 ctx->codec_mode);
407 return -EINVAL;
408 break;
409 }
410 frame_size = ctx->luma_size;
411 frame_size_ch = ctx->chroma_size;
412 frame_size_mv = ctx->mv_size;
413 mfc_debug(2, "Frm size: %d ch: %d mv: %d\n", frame_size, frame_size_ch,
414 frame_size_mv);
415 for (i = 0; i < ctx->total_dpb_count; i++) {
416 /* Bank2 */
417 mfc_debug(2, "Luma %d: %x\n", i,
418 ctx->dst_bufs[i].cookie.raw.luma);
419 mfc_write(dev, OFFSETB(ctx->dst_bufs[i].cookie.raw.luma),
420 S5P_FIMV_DEC_LUMA_ADR + i * 4);
421 mfc_debug(2, "\tChroma %d: %x\n", i,
422 ctx->dst_bufs[i].cookie.raw.chroma);
423 mfc_write(dev, OFFSETA(ctx->dst_bufs[i].cookie.raw.chroma),
424 S5P_FIMV_DEC_CHROMA_ADR + i * 4);
425 if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC) {
426 mfc_debug(2, "\tBuf2: %x, size: %d\n",
427 buf_addr2, buf_size2);
428 mfc_write(dev, OFFSETB(buf_addr2),
429 S5P_FIMV_H264_MV_ADR + i * 4);
430 buf_addr2 += frame_size_mv;
431 buf_size2 -= frame_size_mv;
432 }
433 }
434 mfc_debug(2, "Buf1: %u, buf_size1: %d\n", buf_addr1, buf_size1);
435 mfc_debug(2, "Buf 1/2 size after: %d/%d (frames %d)\n",
436 buf_size1, buf_size2, ctx->total_dpb_count);
437 if (buf_size1 < 0 || buf_size2 < 0) {
438 mfc_debug(2, "Not enough memory has been allocated\n");
439 return -ENOMEM;
440 }
441 s5p_mfc_write_shm(ctx, frame_size, ALLOC_LUMA_DPB_SIZE);
442 s5p_mfc_write_shm(ctx, frame_size_ch, ALLOC_CHROMA_DPB_SIZE);
443 if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC)
444 s5p_mfc_write_shm(ctx, frame_size_mv, ALLOC_MV_SIZE);
445 mfc_write(dev, ((S5P_FIMV_CH_INIT_BUFS & S5P_FIMV_CH_MASK)
446 << S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
447 S5P_FIMV_SI_CH0_INST_ID);
448 return 0;
449}
450
451/* Set registers for encoding stream buffer */
452int s5p_mfc_set_enc_stream_buffer(struct s5p_mfc_ctx *ctx,
453 unsigned long addr, unsigned int size)
454{
455 struct s5p_mfc_dev *dev = ctx->dev;
456
457 mfc_write(dev, OFFSETA(addr), S5P_FIMV_ENC_SI_CH0_SB_ADR);
458 mfc_write(dev, size, S5P_FIMV_ENC_SI_CH0_SB_SIZE);
459 return 0;
460}
461
462void s5p_mfc_set_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
463 unsigned long y_addr, unsigned long c_addr)
464{
465 struct s5p_mfc_dev *dev = ctx->dev;
466
467 mfc_write(dev, OFFSETB(y_addr), S5P_FIMV_ENC_SI_CH0_CUR_Y_ADR);
468 mfc_write(dev, OFFSETB(c_addr), S5P_FIMV_ENC_SI_CH0_CUR_C_ADR);
469}
470
471void s5p_mfc_get_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
472 unsigned long *y_addr, unsigned long *c_addr)
473{
474 struct s5p_mfc_dev *dev = ctx->dev;
475
476 *y_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_Y_ADDR)
477 << MFC_OFFSET_SHIFT);
478 *c_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_C_ADDR)
479 << MFC_OFFSET_SHIFT);
480}
481
482/* Set encoding ref & codec buffer */
483int s5p_mfc_set_enc_ref_buffer(struct s5p_mfc_ctx *ctx)
484{
485 struct s5p_mfc_dev *dev = ctx->dev;
486 size_t buf_addr1, buf_addr2;
487 size_t buf_size1, buf_size2;
488 unsigned int enc_ref_y_size, enc_ref_c_size;
489 unsigned int guard_width, guard_height;
490 int i;
491
492 buf_addr1 = ctx->bank1_phys;
493 buf_size1 = ctx->bank1_size;
494 buf_addr2 = ctx->bank2_phys;
495 buf_size2 = ctx->bank2_size;
496 enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
497 * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
498 enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
499 if (ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC) {
500 enc_ref_c_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
501 * ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12MT_VALIGN);
502 enc_ref_c_size = ALIGN(enc_ref_c_size, S5P_FIMV_NV12MT_SALIGN);
503 } else {
504 guard_width = ALIGN(ctx->img_width + 16,
505 S5P_FIMV_NV12MT_HALIGN);
506 guard_height = ALIGN((ctx->img_height >> 1) + 4,
507 S5P_FIMV_NV12MT_VALIGN);
508 enc_ref_c_size = ALIGN(guard_width * guard_height,
509 S5P_FIMV_NV12MT_SALIGN);
510 }
511 mfc_debug(2, "buf_size1: %d, buf_size2: %d\n", buf_size1, buf_size2);
512 switch (ctx->codec_mode) {
513 case S5P_FIMV_CODEC_H264_ENC:
514 for (i = 0; i < 2; i++) {
515 mfc_write(dev, OFFSETA(buf_addr1),
516 S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
517 buf_addr1 += enc_ref_y_size;
518 buf_size1 -= enc_ref_y_size;
519
520 mfc_write(dev, OFFSETB(buf_addr2),
521 S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
522 buf_addr2 += enc_ref_y_size;
523 buf_size2 -= enc_ref_y_size;
524 }
525 for (i = 0; i < 4; i++) {
526 mfc_write(dev, OFFSETB(buf_addr2),
527 S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
528 buf_addr2 += enc_ref_c_size;
529 buf_size2 -= enc_ref_c_size;
530 }
531 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_UP_MV_ADR);
532 buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
533 buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
534 mfc_write(dev, OFFSETA(buf_addr1),
535 S5P_FIMV_H264_COZERO_FLAG_ADR);
536 buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
537 buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
538 mfc_write(dev, OFFSETA(buf_addr1),
539 S5P_FIMV_H264_UP_INTRA_MD_ADR);
540 buf_addr1 += S5P_FIMV_ENC_INTRAMD_SIZE;
541 buf_size1 -= S5P_FIMV_ENC_INTRAMD_SIZE;
542 mfc_write(dev, OFFSETB(buf_addr2),
543 S5P_FIMV_H264_UP_INTRA_PRED_ADR);
544 buf_addr2 += S5P_FIMV_ENC_INTRAPRED_SIZE;
545 buf_size2 -= S5P_FIMV_ENC_INTRAPRED_SIZE;
546 mfc_write(dev, OFFSETA(buf_addr1),
547 S5P_FIMV_H264_NBOR_INFO_ADR);
548 buf_addr1 += S5P_FIMV_ENC_NBORINFO_SIZE;
549 buf_size1 -= S5P_FIMV_ENC_NBORINFO_SIZE;
550 mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
551 buf_size1, buf_size2);
552 break;
553 case S5P_FIMV_CODEC_MPEG4_ENC:
554 for (i = 0; i < 2; i++) {
555 mfc_write(dev, OFFSETA(buf_addr1),
556 S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
557 buf_addr1 += enc_ref_y_size;
558 buf_size1 -= enc_ref_y_size;
559 mfc_write(dev, OFFSETB(buf_addr2),
560 S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
561 buf_addr2 += enc_ref_y_size;
562 buf_size2 -= enc_ref_y_size;
563 }
564 for (i = 0; i < 4; i++) {
565 mfc_write(dev, OFFSETB(buf_addr2),
566 S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
567 buf_addr2 += enc_ref_c_size;
568 buf_size2 -= enc_ref_c_size;
569 }
570 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_UP_MV_ADR);
571 buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
572 buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
573 mfc_write(dev, OFFSETA(buf_addr1),
574 S5P_FIMV_MPEG4_COZERO_FLAG_ADR);
575 buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
576 buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
577 mfc_write(dev, OFFSETA(buf_addr1),
578 S5P_FIMV_MPEG4_ACDC_COEF_ADR);
579 buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
580 buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
581 mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
582 buf_size1, buf_size2);
583 break;
584 case S5P_FIMV_CODEC_H263_ENC:
585 for (i = 0; i < 2; i++) {
586 mfc_write(dev, OFFSETA(buf_addr1),
587 S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
588 buf_addr1 += enc_ref_y_size;
589 buf_size1 -= enc_ref_y_size;
590 mfc_write(dev, OFFSETB(buf_addr2),
591 S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
592 buf_addr2 += enc_ref_y_size;
593 buf_size2 -= enc_ref_y_size;
594 }
595 for (i = 0; i < 4; i++) {
596 mfc_write(dev, OFFSETB(buf_addr2),
597 S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
598 buf_addr2 += enc_ref_c_size;
599 buf_size2 -= enc_ref_c_size;
600 }
601 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_UP_MV_ADR);
602 buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
603 buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
604 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_ACDC_COEF_ADR);
605 buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
606 buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
607 mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
608 buf_size1, buf_size2);
609 break;
610 default:
611 mfc_err("Unknown codec set for encoding: %d\n",
612 ctx->codec_mode);
613 return -EINVAL;
614 }
615 return 0;
616}
617
618static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
619{
620 struct s5p_mfc_dev *dev = ctx->dev;
621 struct s5p_mfc_enc_params *p = &ctx->enc_params;
622 unsigned int reg;
623 unsigned int shm;
624
625 /* width */
626 mfc_write(dev, ctx->img_width, S5P_FIMV_ENC_HSIZE_PX);
627 /* height */
628 mfc_write(dev, ctx->img_height, S5P_FIMV_ENC_VSIZE_PX);
629 /* pictype : enable, IDR period */
630 reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
631 reg |= (1 << 18);
632 reg &= ~(0xFFFF);
633 reg |= p->gop_size;
634 mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
635 mfc_write(dev, 0, S5P_FIMV_ENC_B_RECON_WRITE_ON);
636 /* multi-slice control */
637 /* multi-slice MB number or bit size */
638 mfc_write(dev, p->slice_mode, S5P_FIMV_ENC_MSLICE_CTRL);
639 if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) {
640 mfc_write(dev, p->slice_mb, S5P_FIMV_ENC_MSLICE_MB);
641 } else if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) {
642 mfc_write(dev, p->slice_bit, S5P_FIMV_ENC_MSLICE_BIT);
643 } else {
644 mfc_write(dev, 0, S5P_FIMV_ENC_MSLICE_MB);
645 mfc_write(dev, 0, S5P_FIMV_ENC_MSLICE_BIT);
646 }
647 /* cyclic intra refresh */
648 mfc_write(dev, p->intra_refresh_mb, S5P_FIMV_ENC_CIR_CTRL);
649 /* memory structure cur. frame */
650 if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
651 mfc_write(dev, 0, S5P_FIMV_ENC_MAP_FOR_CUR);
652 else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
653 mfc_write(dev, 3, S5P_FIMV_ENC_MAP_FOR_CUR);
654 /* padding control & value */
655 reg = mfc_read(dev, S5P_FIMV_ENC_PADDING_CTRL);
656 if (p->pad) {
657 /** enable */
658 reg |= (1 << 31);
659 /** cr value */
660 reg &= ~(0xFF << 16);
661 reg |= (p->pad_cr << 16);
662 /** cb value */
663 reg &= ~(0xFF << 8);
664 reg |= (p->pad_cb << 8);
665 /** y value */
666 reg &= ~(0xFF);
667 reg |= (p->pad_luma);
668 } else {
669 /** disable & all value clear */
670 reg = 0;
671 }
672 mfc_write(dev, reg, S5P_FIMV_ENC_PADDING_CTRL);
673 /* rate control config. */
674 reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
675 /** frame-level rate control */
676 reg &= ~(0x1 << 9);
677 reg |= (p->rc_frame << 9);
678 mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
679 /* bit rate */
680 if (p->rc_frame)
681 mfc_write(dev, p->rc_bitrate,
682 S5P_FIMV_ENC_RC_BIT_RATE);
683 else
684 mfc_write(dev, 0, S5P_FIMV_ENC_RC_BIT_RATE);
685 /* reaction coefficient */
686 if (p->rc_frame)
687 mfc_write(dev, p->rc_reaction_coeff, S5P_FIMV_ENC_RC_RPARA);
688 shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
689 /* seq header ctrl */
690 shm &= ~(0x1 << 3);
691 shm |= (p->seq_hdr_mode << 3);
692 /* frame skip mode */
693 shm &= ~(0x3 << 1);
694 shm |= (p->frame_skip_mode << 1);
695 s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
696 /* fixed target bit */
697 s5p_mfc_write_shm(ctx, p->fixed_target_bit, RC_CONTROL_CONFIG);
698 return 0;
699}
700
701static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
702{
703 struct s5p_mfc_dev *dev = ctx->dev;
704 struct s5p_mfc_enc_params *p = &ctx->enc_params;
705 struct s5p_mfc_h264_enc_params *p_264 = &p->codec.h264;
706 unsigned int reg;
707 unsigned int shm;
708
709 s5p_mfc_set_enc_params(ctx);
710 /* pictype : number of B */
711 reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
712 /* num_b_frame - 0 ~ 2 */
713 reg &= ~(0x3 << 16);
714 reg |= (p->num_b_frame << 16);
715 mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
716 /* profile & level */
717 reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
718 /* level */
719 reg &= ~(0xFF << 8);
720 reg |= (p_264->level << 8);
721 /* profile - 0 ~ 2 */
722 reg &= ~(0x3F);
723 reg |= p_264->profile;
724 mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
725 /* interlace */
726 mfc_write(dev, p->interlace, S5P_FIMV_ENC_PIC_STRUCT);
727 /* height */
728 if (p->interlace)
729 mfc_write(dev, ctx->img_height >> 1, S5P_FIMV_ENC_VSIZE_PX);
730 /* loopfilter ctrl */
731 mfc_write(dev, p_264->loop_filter_mode, S5P_FIMV_ENC_LF_CTRL);
732 /* loopfilter alpha offset */
733 if (p_264->loop_filter_alpha < 0) {
734 reg = 0x10;
735 reg |= (0xFF - p_264->loop_filter_alpha) + 1;
736 } else {
737 reg = 0x00;
738 reg |= (p_264->loop_filter_alpha & 0xF);
739 }
740 mfc_write(dev, reg, S5P_FIMV_ENC_ALPHA_OFF);
741 /* loopfilter beta offset */
742 if (p_264->loop_filter_beta < 0) {
743 reg = 0x10;
744 reg |= (0xFF - p_264->loop_filter_beta) + 1;
745 } else {
746 reg = 0x00;
747 reg |= (p_264->loop_filter_beta & 0xF);
748 }
749 mfc_write(dev, reg, S5P_FIMV_ENC_BETA_OFF);
750 /* entropy coding mode */
751 if (p_264->entropy_mode == V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC)
752 mfc_write(dev, 1, S5P_FIMV_ENC_H264_ENTROPY_MODE);
753 else
754 mfc_write(dev, 0, S5P_FIMV_ENC_H264_ENTROPY_MODE);
755 /* number of ref. picture */
756 reg = mfc_read(dev, S5P_FIMV_ENC_H264_NUM_OF_REF);
757 /* num of ref. pictures of P */
758 reg &= ~(0x3 << 5);
759 reg |= (p_264->num_ref_pic_4p << 5);
760 /* max number of ref. pictures */
761 reg &= ~(0x1F);
762 reg |= p_264->max_ref_pic;
763 mfc_write(dev, reg, S5P_FIMV_ENC_H264_NUM_OF_REF);
764 /* 8x8 transform enable */
765 mfc_write(dev, p_264->_8x8_transform, S5P_FIMV_ENC_H264_TRANS_FLAG);
766 /* rate control config. */
767 reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
768 /* macroblock level rate control */
769 reg &= ~(0x1 << 8);
770 reg |= (p_264->rc_mb << 8);
771 /* frame QP */
772 reg &= ~(0x3F);
773 reg |= p_264->rc_frame_qp;
774 mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
775 /* frame rate */
776 if (p->rc_frame && p->rc_framerate_denom)
777 mfc_write(dev, p->rc_framerate_num * 1000
778 / p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
779 else
780 mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
781 /* max & min value of QP */
782 reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
783 /* max QP */
784 reg &= ~(0x3F << 8);
785 reg |= (p_264->rc_max_qp << 8);
786 /* min QP */
787 reg &= ~(0x3F);
788 reg |= p_264->rc_min_qp;
789 mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
790 /* macroblock adaptive scaling features */
791 if (p_264->rc_mb) {
792 reg = mfc_read(dev, S5P_FIMV_ENC_RC_MB_CTRL);
793 /* dark region */
794 reg &= ~(0x1 << 3);
795 reg |= (p_264->rc_mb_dark << 3);
796 /* smooth region */
797 reg &= ~(0x1 << 2);
798 reg |= (p_264->rc_mb_smooth << 2);
799 /* static region */
800 reg &= ~(0x1 << 1);
801 reg |= (p_264->rc_mb_static << 1);
802 /* high activity region */
803 reg &= ~(0x1);
804 reg |= p_264->rc_mb_activity;
805 mfc_write(dev, reg, S5P_FIMV_ENC_RC_MB_CTRL);
806 }
807 if (!p->rc_frame &&
808 !p_264->rc_mb) {
809 shm = s5p_mfc_read_shm(ctx, P_B_FRAME_QP);
810 shm &= ~(0xFFF);
811 shm |= ((p_264->rc_b_frame_qp & 0x3F) << 6);
812 shm |= (p_264->rc_p_frame_qp & 0x3F);
813 s5p_mfc_write_shm(ctx, shm, P_B_FRAME_QP);
814 }
815 /* extended encoder ctrl */
816 shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
817 /* AR VUI control */
818 shm &= ~(0x1 << 15);
819 shm |= (p_264->vui_sar << 1);
820 s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
821 if (p_264->vui_sar) {
822 /* aspect ration IDC */
823 shm = s5p_mfc_read_shm(ctx, SAMPLE_ASPECT_RATIO_IDC);
824 shm &= ~(0xFF);
825 shm |= p_264->vui_sar_idc;
826 s5p_mfc_write_shm(ctx, shm, SAMPLE_ASPECT_RATIO_IDC);
827 if (p_264->vui_sar_idc == 0xFF) {
828 /* sample AR info */
829 shm = s5p_mfc_read_shm(ctx, EXTENDED_SAR);
830 shm &= ~(0xFFFFFFFF);
831 shm |= p_264->vui_ext_sar_width << 16;
832 shm |= p_264->vui_ext_sar_height;
833 s5p_mfc_write_shm(ctx, shm, EXTENDED_SAR);
834 }
835 }
836 /* intra picture period for H.264 */
837 shm = s5p_mfc_read_shm(ctx, H264_I_PERIOD);
838 /* control */
839 shm &= ~(0x1 << 16);
840 shm |= (p_264->open_gop << 16);
841 /* value */
842 if (p_264->open_gop) {
843 shm &= ~(0xFFFF);
844 shm |= p_264->open_gop_size;
845 }
846 s5p_mfc_write_shm(ctx, shm, H264_I_PERIOD);
847 /* extended encoder ctrl */
848 shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
849 /* vbv buffer size */
850 if (p->frame_skip_mode ==
851 V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
852 shm &= ~(0xFFFF << 16);
853 shm |= (p_264->cpb_size << 16);
854 }
855 s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
856 return 0;
857}
858
859static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx)
860{
861 struct s5p_mfc_dev *dev = ctx->dev;
862 struct s5p_mfc_enc_params *p = &ctx->enc_params;
863 struct s5p_mfc_mpeg4_enc_params *p_mpeg4 = &p->codec.mpeg4;
864 unsigned int reg;
865 unsigned int shm;
866 unsigned int framerate;
867
868 s5p_mfc_set_enc_params(ctx);
869 /* pictype : number of B */
870 reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
871 /* num_b_frame - 0 ~ 2 */
872 reg &= ~(0x3 << 16);
873 reg |= (p->num_b_frame << 16);
874 mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
875 /* profile & level */
876 reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
877 /* level */
878 reg &= ~(0xFF << 8);
879 reg |= (p_mpeg4->level << 8);
880 /* profile - 0 ~ 2 */
881 reg &= ~(0x3F);
882 reg |= p_mpeg4->profile;
883 mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
884 /* quarter_pixel */
885 mfc_write(dev, p_mpeg4->quarter_pixel, S5P_FIMV_ENC_MPEG4_QUART_PXL);
886 /* qp */
887 if (!p->rc_frame) {
888 shm = s5p_mfc_read_shm(ctx, P_B_FRAME_QP);
889 shm &= ~(0xFFF);
890 shm |= ((p_mpeg4->rc_b_frame_qp & 0x3F) << 6);
891 shm |= (p_mpeg4->rc_p_frame_qp & 0x3F);
892 s5p_mfc_write_shm(ctx, shm, P_B_FRAME_QP);
893 }
894 /* frame rate */
895 if (p->rc_frame) {
896 if (p->rc_framerate_denom > 0) {
897 framerate = p->rc_framerate_num * 1000 /
898 p->rc_framerate_denom;
899 mfc_write(dev, framerate,
900 S5P_FIMV_ENC_RC_FRAME_RATE);
901 shm = s5p_mfc_read_shm(ctx, RC_VOP_TIMING);
902 shm &= ~(0xFFFFFFFF);
903 shm |= (1 << 31);
904 shm |= ((p->rc_framerate_num & 0x7FFF) << 16);
905 shm |= (p->rc_framerate_denom & 0xFFFF);
906 s5p_mfc_write_shm(ctx, shm, RC_VOP_TIMING);
907 }
908 } else {
909 mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
910 }
911 /* rate control config. */
912 reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
913 /* frame QP */
914 reg &= ~(0x3F);
915 reg |= p_mpeg4->rc_frame_qp;
916 mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
917 /* max & min value of QP */
918 reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
919 /* max QP */
920 reg &= ~(0x3F << 8);
921 reg |= (p_mpeg4->rc_max_qp << 8);
922 /* min QP */
923 reg &= ~(0x3F);
924 reg |= p_mpeg4->rc_min_qp;
925 mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
926 /* extended encoder ctrl */
927 shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
928 /* vbv buffer size */
929 if (p->frame_skip_mode ==
930 V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
931 shm &= ~(0xFFFF << 16);
932 shm |= (p->vbv_size << 16);
933 }
934 s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
935 return 0;
936}
937
938static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx)
939{
940 struct s5p_mfc_dev *dev = ctx->dev;
941 struct s5p_mfc_enc_params *p = &ctx->enc_params;
942 struct s5p_mfc_mpeg4_enc_params *p_h263 = &p->codec.mpeg4;
943 unsigned int reg;
944 unsigned int shm;
945
946 s5p_mfc_set_enc_params(ctx);
947 /* qp */
948 if (!p->rc_frame) {
949 shm = s5p_mfc_read_shm(ctx, P_B_FRAME_QP);
950 shm &= ~(0xFFF);
951 shm |= (p_h263->rc_p_frame_qp & 0x3F);
952 s5p_mfc_write_shm(ctx, shm, P_B_FRAME_QP);
953 }
954 /* frame rate */
955 if (p->rc_frame && p->rc_framerate_denom)
956 mfc_write(dev, p->rc_framerate_num * 1000
957 / p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
958 else
959 mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
960 /* rate control config. */
961 reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
962 /* frame QP */
963 reg &= ~(0x3F);
964 reg |= p_h263->rc_frame_qp;
965 mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
966 /* max & min value of QP */
967 reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
968 /* max QP */
969 reg &= ~(0x3F << 8);
970 reg |= (p_h263->rc_max_qp << 8);
971 /* min QP */
972 reg &= ~(0x3F);
973 reg |= p_h263->rc_min_qp;
974 mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
975 /* extended encoder ctrl */
976 shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
977 /* vbv buffer size */
978 if (p->frame_skip_mode ==
979 V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
980 shm &= ~(0xFFFF << 16);
981 shm |= (p->vbv_size << 16);
982 }
983 s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
984 return 0;
985}
986
987/* Initialize decoding */
988int s5p_mfc_init_decode(struct s5p_mfc_ctx *ctx)
989{
990 struct s5p_mfc_dev *dev = ctx->dev;
991
992 s5p_mfc_set_shared_buffer(ctx);
993 /* Setup loop filter, for decoding this is only valid for MPEG4 */
994 if (ctx->codec_mode == S5P_FIMV_CODEC_MPEG4_DEC)
995 mfc_write(dev, ctx->loop_filter_mpeg4, S5P_FIMV_ENC_LF_CTRL);
996 else
997 mfc_write(dev, 0, S5P_FIMV_ENC_LF_CTRL);
998 mfc_write(dev, ((ctx->slice_interface & S5P_FIMV_SLICE_INT_MASK) <<
999 S5P_FIMV_SLICE_INT_SHIFT) | (ctx->display_delay_enable <<
1000 S5P_FIMV_DDELAY_ENA_SHIFT) | ((ctx->display_delay &
1001 S5P_FIMV_DDELAY_VAL_MASK) << S5P_FIMV_DDELAY_VAL_SHIFT),
1002 S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
1003 mfc_write(dev,
1004 ((S5P_FIMV_CH_SEQ_HEADER & S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT)
1005 | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
1006 return 0;
1007}
1008
1009static void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush)
1010{
1011 struct s5p_mfc_dev *dev = ctx->dev;
1012 unsigned int dpb;
1013
1014 if (flush)
1015 dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) | (
1016 S5P_FIMV_DPB_FLUSH_MASK << S5P_FIMV_DPB_FLUSH_SHIFT);
1017 else
1018 dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
1019 ~(S5P_FIMV_DPB_FLUSH_MASK << S5P_FIMV_DPB_FLUSH_SHIFT);
1020 mfc_write(dev, dpb, S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
1021}
1022
1023/* Decode a single frame */
1024int s5p_mfc_decode_one_frame(struct s5p_mfc_ctx *ctx,
1025 enum s5p_mfc_decode_arg last_frame)
1026{
1027 struct s5p_mfc_dev *dev = ctx->dev;
1028
1029 mfc_write(dev, ctx->dec_dst_flag, S5P_FIMV_SI_CH0_RELEASE_BUF);
1030 s5p_mfc_set_shared_buffer(ctx);
1031 s5p_mfc_set_flush(ctx, ctx->dpb_flush_flag);
1032 /* Issue different commands to instance basing on whether it
1033 * is the last frame or not. */
1034 switch (last_frame) {
1035 case MFC_DEC_FRAME:
1036 mfc_write(dev, ((S5P_FIMV_CH_FRAME_START & S5P_FIMV_CH_MASK) <<
1037 S5P_FIMV_CH_SHIFT) | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
1038 break;
1039 case MFC_DEC_LAST_FRAME:
1040 mfc_write(dev, ((S5P_FIMV_CH_LAST_FRAME & S5P_FIMV_CH_MASK) <<
1041 S5P_FIMV_CH_SHIFT) | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
1042 break;
1043 case MFC_DEC_RES_CHANGE:
1044 mfc_write(dev, ((S5P_FIMV_CH_FRAME_START_REALLOC &
1045 S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
1046 S5P_FIMV_SI_CH0_INST_ID);
1047 break;
1048 }
1049 mfc_debug(2, "Decoding a usual frame\n");
1050 return 0;
1051}
1052
1053int s5p_mfc_init_encode(struct s5p_mfc_ctx *ctx)
1054{
1055 struct s5p_mfc_dev *dev = ctx->dev;
1056
1057 if (ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC)
1058 s5p_mfc_set_enc_params_h264(ctx);
1059 else if (ctx->codec_mode == S5P_FIMV_CODEC_MPEG4_ENC)
1060 s5p_mfc_set_enc_params_mpeg4(ctx);
1061 else if (ctx->codec_mode == S5P_FIMV_CODEC_H263_ENC)
1062 s5p_mfc_set_enc_params_h263(ctx);
1063 else {
1064 mfc_err("Unknown codec for encoding (%x)\n",
1065 ctx->codec_mode);
1066 return -EINVAL;
1067 }
1068 s5p_mfc_set_shared_buffer(ctx);
1069 mfc_write(dev, ((S5P_FIMV_CH_SEQ_HEADER << 16) & 0x70000) |
1070 (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
1071 return 0;
1072}
1073
1074/* Encode a single frame */
1075int s5p_mfc_encode_one_frame(struct s5p_mfc_ctx *ctx)
1076{
1077 struct s5p_mfc_dev *dev = ctx->dev;
1078 int cmd;
1079 /* memory structure cur. frame */
1080 if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
1081 mfc_write(dev, 0, S5P_FIMV_ENC_MAP_FOR_CUR);
1082 else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
1083 mfc_write(dev, 3, S5P_FIMV_ENC_MAP_FOR_CUR);
1084 s5p_mfc_set_shared_buffer(ctx);
1085
1086 if (ctx->state == MFCINST_FINISHING)
1087 cmd = S5P_FIMV_CH_LAST_FRAME;
1088 else
1089 cmd = S5P_FIMV_CH_FRAME_START;
1090 mfc_write(dev, ((cmd & S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT)
1091 | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
1092
1093 return 0;
1094}
1095
1096static int s5p_mfc_get_new_ctx(struct s5p_mfc_dev *dev)
1097{
1098 unsigned long flags;
1099 int new_ctx;
1100 int cnt;
1101
1102 spin_lock_irqsave(&dev->condlock, flags);
1103 new_ctx = (dev->curr_ctx + 1) % MFC_NUM_CONTEXTS;
1104 cnt = 0;
1105 while (!test_bit(new_ctx, &dev->ctx_work_bits)) {
1106 new_ctx = (new_ctx + 1) % MFC_NUM_CONTEXTS;
1107 if (++cnt > MFC_NUM_CONTEXTS) {
1108 /* No contexts to run */
1109 spin_unlock_irqrestore(&dev->condlock, flags);
1110 return -EAGAIN;
1111 }
1112 }
1113 spin_unlock_irqrestore(&dev->condlock, flags);
1114 return new_ctx;
1115}
1116
/* Start the decoder reallocation run after a resolution change */
static void s5p_mfc_run_res_change(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	/* Program an empty stream buffer: no new bitstream data is
	 * consumed during the reallocation run. */
	s5p_mfc_set_dec_stream_buffer(ctx, 0, 0, 0);
	/* Select this context as the current one before issuing the cmd */
	dev->curr_ctx = ctx->num;
	s5p_mfc_clean_ctx_int_flags(ctx);
	s5p_mfc_decode_one_frame(ctx, MFC_DEC_RES_CHANGE);
}
1126
1127static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame)
1128{
1129 struct s5p_mfc_dev *dev = ctx->dev;
1130 struct s5p_mfc_buf *temp_vb;
1131 unsigned long flags;
1132 unsigned int index;
1133
1134 spin_lock_irqsave(&dev->irqlock, flags);
1135 /* Frames are being decoded */
1136 if (list_empty(&ctx->src_queue)) {
1137 mfc_debug(2, "No src buffers\n");
1138 spin_unlock_irqrestore(&dev->irqlock, flags);
1139 return -EAGAIN;
1140 }
1141 /* Get the next source buffer */
1142 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1143 temp_vb->flags |= MFC_BUF_FLAG_USED;
1144 s5p_mfc_set_dec_stream_buffer(ctx,
1145 vb2_dma_contig_plane_dma_addr(temp_vb->b, 0), ctx->consumed_stream,
1146 temp_vb->b->v4l2_planes[0].bytesused);
1147 spin_unlock_irqrestore(&dev->irqlock, flags);
1148 index = temp_vb->b->v4l2_buf.index;
1149 dev->curr_ctx = ctx->num;
1150 s5p_mfc_clean_ctx_int_flags(ctx);
1151 if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
1152 last_frame = MFC_DEC_LAST_FRAME;
1153 mfc_debug(2, "Setting ctx->state to FINISHING\n");
1154 ctx->state = MFCINST_FINISHING;
1155 }
1156 s5p_mfc_decode_one_frame(ctx, last_frame);
1157 return 0;
1158}
1159
/*
 * Queue up a single encode run: pick the next source frame (or a null
 * frame while finishing), pick the next destination stream buffer,
 * program both into the hardware and kick off encoding.
 * Returns 0 when a run was issued, -EAGAIN when buffers are missing.
 */
static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long flags;
	struct s5p_mfc_buf *dst_mb;
	struct s5p_mfc_buf *src_mb;
	unsigned long src_y_addr, src_c_addr, dst_addr;
	unsigned int dst_size;

	spin_lock_irqsave(&dev->irqlock, flags);
	/* An empty source queue is only acceptable while finishing:
	 * the hardware is then fed null frames (see below). */
	if (list_empty(&ctx->src_queue) && ctx->state != MFCINST_FINISHING) {
		mfc_debug(2, "no src buffers\n");
		spin_unlock_irqrestore(&dev->irqlock, flags);
		return -EAGAIN;
	}
	if (list_empty(&ctx->dst_queue)) {
		mfc_debug(2, "no dst buffers\n");
		spin_unlock_irqrestore(&dev->irqlock, flags);
		return -EAGAIN;
	}
	if (list_empty(&ctx->src_queue)) {
		/* send null frame (bank2 base used for both planes) */
		s5p_mfc_set_enc_frame_buffer(ctx, dev->bank2, dev->bank2);
		src_mb = NULL;
	} else {
		src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
									list);
		src_mb->flags |= MFC_BUF_FLAG_USED;
		if (src_mb->b->v4l2_planes[0].bytesused == 0) {
			/* Empty payload: send null frame and start the
			 * finishing sequence. */
			s5p_mfc_set_enc_frame_buffer(ctx, dev->bank2,
								dev->bank2);
			ctx->state = MFCINST_FINISHING;
		} else {
			/* Plane 0 is luma (Y), plane 1 is chroma (C) */
			src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b,
									0);
			src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b,
									1);
			s5p_mfc_set_enc_frame_buffer(ctx, src_y_addr,
								src_c_addr);
			if (src_mb->flags & MFC_BUF_FLAG_EOS)
				ctx->state = MFCINST_FINISHING;
		}
	}
	/* Destination stream buffer is always taken from the queue head */
	dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
	dst_mb->flags |= MFC_BUF_FLAG_USED;
	dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
	dst_size = vb2_plane_size(dst_mb->b, 0);
	s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
	spin_unlock_irqrestore(&dev->irqlock, flags);
	dev->curr_ctx = ctx->num;
	s5p_mfc_clean_ctx_int_flags(ctx);
	mfc_debug(2, "encoding buffer with index=%d state=%d",
		src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state);
	s5p_mfc_encode_one_frame(ctx);
	return 0;
}
1217
1218static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx)
1219{
1220 struct s5p_mfc_dev *dev = ctx->dev;
1221 unsigned long flags;
1222 struct s5p_mfc_buf *temp_vb;
1223
1224 /* Initializing decoding - parsing header */
1225 spin_lock_irqsave(&dev->irqlock, flags);
1226 mfc_debug(2, "Preparing to init decoding\n");
1227 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1228 s5p_mfc_set_dec_desc_buffer(ctx);
1229 mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
1230 s5p_mfc_set_dec_stream_buffer(ctx,
1231 vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
1232 0, temp_vb->b->v4l2_planes[0].bytesused);
1233 spin_unlock_irqrestore(&dev->irqlock, flags);
1234 dev->curr_ctx = ctx->num;
1235 s5p_mfc_clean_ctx_int_flags(ctx);
1236 s5p_mfc_init_decode(ctx);
1237}
1238
1239static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx)
1240{
1241 struct s5p_mfc_dev *dev = ctx->dev;
1242 unsigned long flags;
1243 struct s5p_mfc_buf *dst_mb;
1244 unsigned long dst_addr;
1245 unsigned int dst_size;
1246
1247 s5p_mfc_set_enc_ref_buffer(ctx);
1248 spin_lock_irqsave(&dev->irqlock, flags);
1249 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
1250 dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
1251 dst_size = vb2_plane_size(dst_mb->b, 0);
1252 s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
1253 spin_unlock_irqrestore(&dev->irqlock, flags);
1254 dev->curr_ctx = ctx->num;
1255 s5p_mfc_clean_ctx_int_flags(ctx);
1256 s5p_mfc_init_encode(ctx);
1257}
1258
1259static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
1260{
1261 struct s5p_mfc_dev *dev = ctx->dev;
1262 unsigned long flags;
1263 struct s5p_mfc_buf *temp_vb;
1264 int ret;
1265
1266 /*
1267 * Header was parsed now starting processing
1268 * First set the output frame buffers
1269 */
1270 if (ctx->capture_state != QUEUE_BUFS_MMAPED) {
1271 mfc_err("It seems that not all destionation buffers were "
1272 "mmaped\nMFC requires that all destination are mmaped "
1273 "before starting processing\n");
1274 return -EAGAIN;
1275 }
1276 spin_lock_irqsave(&dev->irqlock, flags);
1277 if (list_empty(&ctx->src_queue)) {
1278 mfc_err("Header has been deallocated in the middle of"
1279 " initialization\n");
1280 spin_unlock_irqrestore(&dev->irqlock, flags);
1281 return -EIO;
1282 }
1283 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1284 mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
1285 s5p_mfc_set_dec_stream_buffer(ctx,
1286 vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
1287 0, temp_vb->b->v4l2_planes[0].bytesused);
1288 spin_unlock_irqrestore(&dev->irqlock, flags);
1289 dev->curr_ctx = ctx->num;
1290 s5p_mfc_clean_ctx_int_flags(ctx);
1291 ret = s5p_mfc_set_dec_frame_buffer(ctx);
1292 if (ret) {
1293 mfc_err("Failed to alloc frame mem\n");
1294 ctx->state = MFCINST_ERROR;
1295 }
1296 return ret;
1297}
1298
/*
 * Try running an operation on hardware.  Takes the hw_lock bit, picks
 * the next ready context and dispatches on its type and state.  If no
 * job could be issued the lock is dropped and the clock released,
 * since no completion interrupt will arrive for this attempt.
 */
void s5p_mfc_try_run(struct s5p_mfc_dev *dev)
{
	struct s5p_mfc_ctx *ctx;
	int new_ctx;
	/* NOTE(review): ret is unsigned but holds negative errnos below;
	 * only its truthiness is ever tested, so this works. */
	unsigned int ret = 0;

	if (test_bit(0, &dev->enter_suspend)) {
		mfc_debug(1, "Entering suspend so do not schedule any jobs\n");
		return;
	}
	/* Check whether hardware is not running */
	if (test_and_set_bit(0, &dev->hw_lock) != 0) {
		/* This is perfectly ok, the scheduled ctx should wait */
		mfc_debug(1, "Couldn't lock HW\n");
		return;
	}
	/* Choose the context to run */
	new_ctx = s5p_mfc_get_new_ctx(dev);
	if (new_ctx < 0) {
		/* No contexts to run: release the lock we just took */
		if (test_and_clear_bit(0, &dev->hw_lock) == 0) {
			mfc_err("Failed to unlock hardware\n");
			return;
		}
		mfc_debug(1, "No ctx is scheduled to be run\n");
		return;
	}
	ctx = dev->ctx[new_ctx];
	/* Got context to run in ctx */
	/*
	 * Last frame has already been sent to MFC.
	 * Now obtaining frames from MFC buffer
	 */
	s5p_mfc_clock_on();
	if (ctx->type == MFCINST_DECODER) {
		s5p_mfc_set_dec_desc_buffer(ctx);
		switch (ctx->state) {
		case MFCINST_FINISHING:
			s5p_mfc_run_dec_frame(ctx, MFC_DEC_LAST_FRAME);
			break;
		case MFCINST_RUNNING:
			ret = s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME);
			break;
		case MFCINST_INIT:
			s5p_mfc_clean_ctx_int_flags(ctx);
			ret = s5p_mfc_open_inst_cmd(ctx);
			break;
		case MFCINST_RETURN_INST:
			s5p_mfc_clean_ctx_int_flags(ctx);
			ret = s5p_mfc_close_inst_cmd(ctx);
			break;
		case MFCINST_GOT_INST:
			s5p_mfc_run_init_dec(ctx);
			break;
		case MFCINST_HEAD_PARSED:
			ret = s5p_mfc_run_init_dec_buffers(ctx);
			mfc_debug(1, "head parsed\n");
			break;
		case MFCINST_RES_CHANGE_INIT:
			s5p_mfc_run_res_change(ctx);
			break;
		case MFCINST_RES_CHANGE_FLUSH:
			s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME);
			break;
		case MFCINST_RES_CHANGE_END:
			mfc_debug(2, "Finished remaining frames after resolution change\n");
			ctx->capture_state = QUEUE_FREE;
			mfc_debug(2, "Will re-init the codec\n");
			s5p_mfc_run_init_dec(ctx);
			break;
		default:
			ret = -EAGAIN;
		}
	} else if (ctx->type == MFCINST_ENCODER) {
		switch (ctx->state) {
		case MFCINST_FINISHING:
		case MFCINST_RUNNING:
			ret = s5p_mfc_run_enc_frame(ctx);
			break;
		case MFCINST_INIT:
			s5p_mfc_clean_ctx_int_flags(ctx);
			ret = s5p_mfc_open_inst_cmd(ctx);
			break;
		case MFCINST_RETURN_INST:
			s5p_mfc_clean_ctx_int_flags(ctx);
			ret = s5p_mfc_close_inst_cmd(ctx);
			break;
		case MFCINST_GOT_INST:
			s5p_mfc_run_init_enc(ctx);
			break;
		default:
			ret = -EAGAIN;
		}
	} else {
		mfc_err("Invalid context type: %d\n", ctx->type);
		ret = -EAGAIN;
	}

	if (ret) {
		/* Free hardware lock */
		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
			mfc_err("Failed to unlock hardware\n");

		/* This is indeed important, as no operation has been
		 * scheduled, reduce the clock count as no one will
		 * ever do this, because no interrupt related to this try_run
		 * will ever come from hardware. */
		s5p_mfc_clock_off();
	}
}
1410
1411
1412void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq)
1413{
1414 struct s5p_mfc_buf *b;
1415 int i;
1416
1417 while (!list_empty(lh)) {
1418 b = list_entry(lh->next, struct s5p_mfc_buf, list);
1419 for (i = 0; i < b->b->num_planes; i++)
1420 vb2_set_plane_payload(b->b, i, 0);
1421 vb2_buffer_done(b->b, VB2_BUF_STATE_ERROR);
1422 list_del(&b->list);
1423 }
1424}
1425
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
index 2ad3def052f8..420abecafec0 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
@@ -1,10 +1,10 @@
1/* 1/*
2 * drivers/media/platform/samsung/mfc5/s5p_mfc_opr.h 2 * drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
3 * 3 *
4 * Header file for Samsung MFC (Multi Function Codec - FIMV) driver 4 * Header file for Samsung MFC (Multi Function Codec - FIMV) driver
5 * Contains declarations of hw related functions. 5 * Contains declarations of hw related functions.
6 * 6 *
7 * Kamil Debski, Copyright (C) 2011 Samsung Electronics 7 * Kamil Debski, Copyright (C) 2012 Samsung Electronics Co., Ltd.
8 * http://www.samsung.com/ 8 * http://www.samsung.com/
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
@@ -17,77 +17,68 @@
17 17
18#include "s5p_mfc_common.h" 18#include "s5p_mfc_common.h"
19 19
20int s5p_mfc_init_decode(struct s5p_mfc_ctx *ctx); 20struct s5p_mfc_hw_ops {
21int s5p_mfc_init_encode(struct s5p_mfc_ctx *mfc_ctx); 21 int (*alloc_dec_temp_buffers)(struct s5p_mfc_ctx *ctx);
22 void (*release_dec_desc_buffer)(struct s5p_mfc_ctx *ctx);
23 int (*alloc_codec_buffers)(struct s5p_mfc_ctx *ctx);
24 void (*release_codec_buffers)(struct s5p_mfc_ctx *ctx);
25 int (*alloc_instance_buffer)(struct s5p_mfc_ctx *ctx);
26 void (*release_instance_buffer)(struct s5p_mfc_ctx *ctx);
27 int (*alloc_dev_context_buffer)(struct s5p_mfc_dev *dev);
28 void (*release_dev_context_buffer)(struct s5p_mfc_dev *dev);
29 void (*dec_calc_dpb_size)(struct s5p_mfc_ctx *ctx);
30 void (*enc_calc_src_size)(struct s5p_mfc_ctx *ctx);
31 int (*set_dec_stream_buffer)(struct s5p_mfc_ctx *ctx,
32 int buf_addr, unsigned int start_num_byte,
33 unsigned int buf_size);
34 int (*set_dec_frame_buffer)(struct s5p_mfc_ctx *ctx);
35 int (*set_enc_stream_buffer)(struct s5p_mfc_ctx *ctx,
36 unsigned long addr, unsigned int size);
37 void (*set_enc_frame_buffer)(struct s5p_mfc_ctx *ctx,
38 unsigned long y_addr, unsigned long c_addr);
39 void (*get_enc_frame_buffer)(struct s5p_mfc_ctx *ctx,
40 unsigned long *y_addr, unsigned long *c_addr);
41 int (*set_enc_ref_buffer)(struct s5p_mfc_ctx *ctx);
42 int (*init_decode)(struct s5p_mfc_ctx *ctx);
43 int (*init_encode)(struct s5p_mfc_ctx *ctx);
44 int (*encode_one_frame)(struct s5p_mfc_ctx *ctx);
45 void (*try_run)(struct s5p_mfc_dev *dev);
46 void (*cleanup_queue)(struct list_head *lh,
47 struct vb2_queue *vq);
48 void (*clear_int_flags)(struct s5p_mfc_dev *dev);
49 void (*write_info)(struct s5p_mfc_ctx *ctx, unsigned int data,
50 unsigned int ofs);
51 unsigned int (*read_info)(struct s5p_mfc_ctx *ctx,
52 unsigned int ofs);
53 int (*get_dspl_y_adr)(struct s5p_mfc_dev *dev);
54 int (*get_dec_y_adr)(struct s5p_mfc_dev *dev);
55 int (*get_dspl_status)(struct s5p_mfc_dev *dev);
56 int (*get_dec_status)(struct s5p_mfc_dev *dev);
57 int (*get_dec_frame_type)(struct s5p_mfc_dev *dev);
58 int (*get_disp_frame_type)(struct s5p_mfc_ctx *ctx);
59 int (*get_consumed_stream)(struct s5p_mfc_dev *dev);
60 int (*get_int_reason)(struct s5p_mfc_dev *dev);
61 int (*get_int_err)(struct s5p_mfc_dev *dev);
62 int (*err_dec)(unsigned int err);
63 int (*err_dspl)(unsigned int err);
64 int (*get_img_width)(struct s5p_mfc_dev *dev);
65 int (*get_img_height)(struct s5p_mfc_dev *dev);
66 int (*get_dpb_count)(struct s5p_mfc_dev *dev);
67 int (*get_mv_count)(struct s5p_mfc_dev *dev);
68 int (*get_inst_no)(struct s5p_mfc_dev *dev);
69 int (*get_enc_strm_size)(struct s5p_mfc_dev *dev);
70 int (*get_enc_slice_type)(struct s5p_mfc_dev *dev);
71 int (*get_enc_dpb_count)(struct s5p_mfc_dev *dev);
72 int (*get_enc_pic_count)(struct s5p_mfc_dev *dev);
73 int (*get_sei_avail_status)(struct s5p_mfc_ctx *ctx);
74 int (*get_mvc_num_views)(struct s5p_mfc_dev *dev);
75 int (*get_mvc_view_id)(struct s5p_mfc_dev *dev);
76 unsigned int (*get_pic_type_top)(struct s5p_mfc_ctx *ctx);
77 unsigned int (*get_pic_type_bot)(struct s5p_mfc_ctx *ctx);
78 unsigned int (*get_crop_info_h)(struct s5p_mfc_ctx *ctx);
79 unsigned int (*get_crop_info_v)(struct s5p_mfc_ctx *ctx);
80};
22 81
23/* Decoding functions */ 82void s5p_mfc_init_hw_ops(struct s5p_mfc_dev *dev);
24int s5p_mfc_set_dec_frame_buffer(struct s5p_mfc_ctx *ctx);
25int s5p_mfc_set_dec_stream_buffer(struct s5p_mfc_ctx *ctx, int buf_addr,
26 unsigned int start_num_byte,
27 unsigned int buf_size);
28
29/* Encoding functions */
30void s5p_mfc_set_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
31 unsigned long y_addr, unsigned long c_addr);
32int s5p_mfc_set_enc_stream_buffer(struct s5p_mfc_ctx *ctx,
33 unsigned long addr, unsigned int size);
34void s5p_mfc_get_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
35 unsigned long *y_addr, unsigned long *c_addr);
36int s5p_mfc_set_enc_ref_buffer(struct s5p_mfc_ctx *mfc_ctx);
37
38int s5p_mfc_decode_one_frame(struct s5p_mfc_ctx *ctx,
39 enum s5p_mfc_decode_arg last_frame);
40int s5p_mfc_encode_one_frame(struct s5p_mfc_ctx *mfc_ctx);
41
42/* Memory allocation */
43int s5p_mfc_alloc_dec_temp_buffers(struct s5p_mfc_ctx *ctx);
44void s5p_mfc_set_dec_desc_buffer(struct s5p_mfc_ctx *ctx);
45void s5p_mfc_release_dec_desc_buffer(struct s5p_mfc_ctx *ctx);
46
47int s5p_mfc_alloc_codec_buffers(struct s5p_mfc_ctx *ctx);
48void s5p_mfc_release_codec_buffers(struct s5p_mfc_ctx *ctx);
49
50int s5p_mfc_alloc_instance_buffer(struct s5p_mfc_ctx *ctx);
51void s5p_mfc_release_instance_buffer(struct s5p_mfc_ctx *ctx);
52
53void s5p_mfc_try_run(struct s5p_mfc_dev *dev);
54void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq);
55
56#define s5p_mfc_get_dspl_y_adr() (readl(dev->regs_base + \
57 S5P_FIMV_SI_DISPLAY_Y_ADR) << \
58 MFC_OFFSET_SHIFT)
59#define s5p_mfc_get_dec_y_adr() (readl(dev->regs_base + \
60 S5P_FIMV_SI_DECODE_Y_ADR) << \
61 MFC_OFFSET_SHIFT)
62#define s5p_mfc_get_dspl_status() readl(dev->regs_base + \
63 S5P_FIMV_SI_DISPLAY_STATUS)
64#define s5p_mfc_get_dec_status() readl(dev->regs_base + \
65 S5P_FIMV_SI_DECODE_STATUS)
66#define s5p_mfc_get_frame_type() (readl(dev->regs_base + \
67 S5P_FIMV_DECODE_FRAME_TYPE) \
68 & S5P_FIMV_DECODE_FRAME_MASK)
69#define s5p_mfc_get_consumed_stream() readl(dev->regs_base + \
70 S5P_FIMV_SI_CONSUMED_BYTES)
71#define s5p_mfc_get_int_reason() (readl(dev->regs_base + \
72 S5P_FIMV_RISC2HOST_CMD) & \
73 S5P_FIMV_RISC2HOST_CMD_MASK)
74#define s5p_mfc_get_int_err() readl(dev->regs_base + \
75 S5P_FIMV_RISC2HOST_ARG2)
76#define s5p_mfc_err_dec(x) (((x) & S5P_FIMV_ERR_DEC_MASK) >> \
77 S5P_FIMV_ERR_DEC_SHIFT)
78#define s5p_mfc_err_dspl(x) (((x) & S5P_FIMV_ERR_DSPL_MASK) >> \
79 S5P_FIMV_ERR_DSPL_SHIFT)
80#define s5p_mfc_get_img_width() readl(dev->regs_base + \
81 S5P_FIMV_SI_HRESOL)
82#define s5p_mfc_get_img_height() readl(dev->regs_base + \
83 S5P_FIMV_SI_VRESOL)
84#define s5p_mfc_get_dpb_count() readl(dev->regs_base + \
85 S5P_FIMV_SI_BUF_NUMBER)
86#define s5p_mfc_get_inst_no() readl(dev->regs_base + \
87 S5P_FIMV_RISC2HOST_ARG1)
88#define s5p_mfc_get_enc_strm_size() readl(dev->regs_base + \
89 S5P_FIMV_ENC_SI_STRM_SIZE)
90#define s5p_mfc_get_enc_slice_type() readl(dev->regs_base + \
91 S5P_FIMV_ENC_SI_SLICE_TYPE)
92 83
93#endif /* S5P_MFC_OPR_H_ */ 84#endif /* S5P_MFC_OPR_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
new file mode 100644
index 000000000000..bf7d010a4107
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -0,0 +1,1794 @@
1/*
2 * drivers/media/platform/samsung/mfc5/s5p_mfc_opr_v5.c
3 *
4 * Samsung MFC (Multi Function Codec - FIMV) driver
5 * This file contains hw related functions.
6 *
7 * Kamil Debski, Copyright (c) 2011 Samsung Electronics
8 * http://www.samsung.com/
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include "s5p_mfc_common.h"
16#include "s5p_mfc_cmd.h"
17#include "s5p_mfc_ctrl.h"
18#include "s5p_mfc_debug.h"
19#include "s5p_mfc_intr.h"
20#include "s5p_mfc_pm.h"
21#include "s5p_mfc_opr.h"
22#include "s5p_mfc_opr_v5.h"
23#include <asm/cacheflush.h>
24#include <linux/delay.h>
25#include <linux/dma-mapping.h>
26#include <linux/err.h>
27#include <linux/firmware.h>
28#include <linux/io.h>
29#include <linux/jiffies.h>
30#include <linux/mm.h>
31#include <linux/sched.h>
32
33#define OFFSETA(x) (((x) - dev->bank1) >> MFC_OFFSET_SHIFT)
34#define OFFSETB(x) (((x) - dev->bank2) >> MFC_OFFSET_SHIFT)
35
36/* Allocate temporary buffers for decoding */
37int s5p_mfc_alloc_dec_temp_buffers_v5(struct s5p_mfc_ctx *ctx)
38{
39 struct s5p_mfc_dev *dev = ctx->dev;
40 struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv;
41
42 ctx->dsc.alloc = vb2_dma_contig_memops.alloc(
43 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX],
44 buf_size->dsc);
45 if (IS_ERR_VALUE((int)ctx->dsc.alloc)) {
46 ctx->dsc.alloc = NULL;
47 mfc_err("Allocating DESC buffer failed\n");
48 return -ENOMEM;
49 }
50 ctx->dsc.dma = s5p_mfc_mem_cookie(
51 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->dsc.alloc);
52 BUG_ON(ctx->dsc.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
53 ctx->dsc.virt = vb2_dma_contig_memops.vaddr(ctx->dsc.alloc);
54 if (ctx->dsc.virt == NULL) {
55 vb2_dma_contig_memops.put(ctx->dsc.alloc);
56 ctx->dsc.dma = 0;
57 ctx->dsc.alloc = NULL;
58 mfc_err("Remapping DESC buffer failed\n");
59 return -ENOMEM;
60 }
61 memset(ctx->dsc.virt, 0, buf_size->dsc);
62 wmb();
63 return 0;
64}
65
66/* Release temporary buffers for decoding */
67void s5p_mfc_release_dec_desc_buffer_v5(struct s5p_mfc_ctx *ctx)
68{
69 if (ctx->dsc.dma) {
70 vb2_dma_contig_memops.put(ctx->dsc.alloc);
71 ctx->dsc.alloc = NULL;
72 ctx->dsc.dma = 0;
73 }
74}
75
76/* Allocate codec buffers */
77int s5p_mfc_alloc_codec_buffers_v5(struct s5p_mfc_ctx *ctx)
78{
79 struct s5p_mfc_dev *dev = ctx->dev;
80 unsigned int enc_ref_y_size = 0;
81 unsigned int enc_ref_c_size = 0;
82 unsigned int guard_width, guard_height;
83
84 if (ctx->type == MFCINST_DECODER) {
85 mfc_debug(2, "Luma size:%d Chroma size:%d MV size:%d\n",
86 ctx->luma_size, ctx->chroma_size, ctx->mv_size);
87 mfc_debug(2, "Totals bufs: %d\n", ctx->total_dpb_count);
88 } else if (ctx->type == MFCINST_ENCODER) {
89 enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
90 * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
91 enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
92
93 if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC) {
94 enc_ref_c_size = ALIGN(ctx->img_width,
95 S5P_FIMV_NV12MT_HALIGN)
96 * ALIGN(ctx->img_height >> 1,
97 S5P_FIMV_NV12MT_VALIGN);
98 enc_ref_c_size = ALIGN(enc_ref_c_size,
99 S5P_FIMV_NV12MT_SALIGN);
100 } else {
101 guard_width = ALIGN(ctx->img_width + 16,
102 S5P_FIMV_NV12MT_HALIGN);
103 guard_height = ALIGN((ctx->img_height >> 1) + 4,
104 S5P_FIMV_NV12MT_VALIGN);
105 enc_ref_c_size = ALIGN(guard_width * guard_height,
106 S5P_FIMV_NV12MT_SALIGN);
107 }
108 mfc_debug(2, "recon luma size: %d chroma size: %d\n",
109 enc_ref_y_size, enc_ref_c_size);
110 } else {
111 return -EINVAL;
112 }
113 /* Codecs have different memory requirements */
114 switch (ctx->codec_mode) {
115 case S5P_MFC_CODEC_H264_DEC:
116 ctx->bank1_size =
117 ALIGN(S5P_FIMV_DEC_NB_IP_SIZE +
118 S5P_FIMV_DEC_VERT_NB_MV_SIZE,
119 S5P_FIMV_DEC_BUF_ALIGN);
120 ctx->bank2_size = ctx->total_dpb_count * ctx->mv_size;
121 break;
122 case S5P_MFC_CODEC_MPEG4_DEC:
123 ctx->bank1_size =
124 ALIGN(S5P_FIMV_DEC_NB_DCAC_SIZE +
125 S5P_FIMV_DEC_UPNB_MV_SIZE +
126 S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
127 S5P_FIMV_DEC_STX_PARSER_SIZE +
128 S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE,
129 S5P_FIMV_DEC_BUF_ALIGN);
130 ctx->bank2_size = 0;
131 break;
132 case S5P_MFC_CODEC_VC1RCV_DEC:
133 case S5P_MFC_CODEC_VC1_DEC:
134 ctx->bank1_size =
135 ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
136 S5P_FIMV_DEC_UPNB_MV_SIZE +
137 S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
138 S5P_FIMV_DEC_NB_DCAC_SIZE +
139 3 * S5P_FIMV_DEC_VC1_BITPLANE_SIZE,
140 S5P_FIMV_DEC_BUF_ALIGN);
141 ctx->bank2_size = 0;
142 break;
143 case S5P_MFC_CODEC_MPEG2_DEC:
144 ctx->bank1_size = 0;
145 ctx->bank2_size = 0;
146 break;
147 case S5P_MFC_CODEC_H263_DEC:
148 ctx->bank1_size =
149 ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
150 S5P_FIMV_DEC_UPNB_MV_SIZE +
151 S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
152 S5P_FIMV_DEC_NB_DCAC_SIZE,
153 S5P_FIMV_DEC_BUF_ALIGN);
154 ctx->bank2_size = 0;
155 break;
156 case S5P_MFC_CODEC_H264_ENC:
157 ctx->bank1_size = (enc_ref_y_size * 2) +
158 S5P_FIMV_ENC_UPMV_SIZE +
159 S5P_FIMV_ENC_COLFLG_SIZE +
160 S5P_FIMV_ENC_INTRAMD_SIZE +
161 S5P_FIMV_ENC_NBORINFO_SIZE;
162 ctx->bank2_size = (enc_ref_y_size * 2) +
163 (enc_ref_c_size * 4) +
164 S5P_FIMV_ENC_INTRAPRED_SIZE;
165 break;
166 case S5P_MFC_CODEC_MPEG4_ENC:
167 ctx->bank1_size = (enc_ref_y_size * 2) +
168 S5P_FIMV_ENC_UPMV_SIZE +
169 S5P_FIMV_ENC_COLFLG_SIZE +
170 S5P_FIMV_ENC_ACDCCOEF_SIZE;
171 ctx->bank2_size = (enc_ref_y_size * 2) +
172 (enc_ref_c_size * 4);
173 break;
174 case S5P_MFC_CODEC_H263_ENC:
175 ctx->bank1_size = (enc_ref_y_size * 2) +
176 S5P_FIMV_ENC_UPMV_SIZE +
177 S5P_FIMV_ENC_ACDCCOEF_SIZE;
178 ctx->bank2_size = (enc_ref_y_size * 2) +
179 (enc_ref_c_size * 4);
180 break;
181 default:
182 break;
183 }
184 /* Allocate only if memory from bank 1 is necessary */
185 if (ctx->bank1_size > 0) {
186 ctx->bank1_buf = vb2_dma_contig_memops.alloc(
187 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_size);
188 if (IS_ERR(ctx->bank1_buf)) {
189 ctx->bank1_buf = NULL;
190 printk(KERN_ERR
191 "Buf alloc for decoding failed (port A)\n");
192 return -ENOMEM;
193 }
194 ctx->bank1_phys = s5p_mfc_mem_cookie(
195 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_buf);
196 BUG_ON(ctx->bank1_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
197 }
198 /* Allocate only if memory from bank 2 is necessary */
199 if (ctx->bank2_size > 0) {
200 ctx->bank2_buf = vb2_dma_contig_memops.alloc(
201 dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_size);
202 if (IS_ERR(ctx->bank2_buf)) {
203 ctx->bank2_buf = NULL;
204 mfc_err("Buf alloc for decoding failed (port B)\n");
205 return -ENOMEM;
206 }
207 ctx->bank2_phys = s5p_mfc_mem_cookie(
208 dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_buf);
209 BUG_ON(ctx->bank2_phys & ((1 << MFC_BANK2_ALIGN_ORDER) - 1));
210 }
211 return 0;
212}
213
/* Release buffers allocated for codec (banks 1 and 2), clearing the
 * per-context bookkeeping so a release is idempotent. */
void s5p_mfc_release_codec_buffers_v5(struct s5p_mfc_ctx *ctx)
{
	/* Bank 1 codec working memory */
	if (ctx->bank1_buf) {
		vb2_dma_contig_memops.put(ctx->bank1_buf);
		ctx->bank1_buf = NULL;
		ctx->bank1_phys = 0;
		ctx->bank1_size = 0;
	}
	/* Bank 2 codec working memory */
	if (ctx->bank2_buf) {
		vb2_dma_contig_memops.put(ctx->bank2_buf);
		ctx->bank2_buf = NULL;
		ctx->bank2_phys = 0;
		ctx->bank2_size = 0;
	}
}
230
231/* Allocate memory for instance data buffer */
232int s5p_mfc_alloc_instance_buffer_v5(struct s5p_mfc_ctx *ctx)
233{
234 struct s5p_mfc_dev *dev = ctx->dev;
235 struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv;
236
237 if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC ||
238 ctx->codec_mode == S5P_MFC_CODEC_H264_ENC)
239 ctx->ctx.size = buf_size->h264_ctx;
240 else
241 ctx->ctx.size = buf_size->non_h264_ctx;
242 ctx->ctx.alloc = vb2_dma_contig_memops.alloc(
243 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx.size);
244 if (IS_ERR(ctx->ctx.alloc)) {
245 mfc_err("Allocating context buffer failed\n");
246 ctx->ctx.alloc = NULL;
247 return -ENOMEM;
248 }
249 ctx->ctx.dma = s5p_mfc_mem_cookie(
250 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx.alloc);
251 BUG_ON(ctx->ctx.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
252 ctx->ctx.ofs = OFFSETA(ctx->ctx.dma);
253 ctx->ctx.virt = vb2_dma_contig_memops.vaddr(ctx->ctx.alloc);
254 if (!ctx->ctx.virt) {
255 mfc_err("Remapping instance buffer failed\n");
256 vb2_dma_contig_memops.put(ctx->ctx.alloc);
257 ctx->ctx.alloc = NULL;
258 ctx->ctx.ofs = 0;
259 ctx->ctx.dma = 0;
260 return -ENOMEM;
261 }
262 /* Zero content of the allocated memory */
263 memset(ctx->ctx.virt, 0, ctx->ctx.size);
264 wmb();
265
266 /* Initialize shared memory */
267 ctx->shm.alloc = vb2_dma_contig_memops.alloc(
268 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], buf_size->shm);
269 if (IS_ERR(ctx->shm.alloc)) {
270 mfc_err("failed to allocate shared memory\n");
271 return PTR_ERR(ctx->shm.alloc);
272 }
273 /* shared memory offset only keeps the offset from base (port a) */
274 ctx->shm.ofs = s5p_mfc_mem_cookie(
275 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->shm.alloc)
276 - dev->bank1;
277 BUG_ON(ctx->shm.ofs & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
278
279 ctx->shm.virt = vb2_dma_contig_memops.vaddr(ctx->shm.alloc);
280 if (!ctx->shm.virt) {
281 vb2_dma_contig_memops.put(ctx->shm.alloc);
282 ctx->shm.alloc = NULL;
283 ctx->shm.ofs = 0;
284 mfc_err("failed to virt addr of shared memory\n");
285 return -ENOMEM;
286 }
287 memset((void *)ctx->shm.virt, 0, buf_size->shm);
288 wmb();
289 return 0;
290}
291
292/* Release instance buffer */
293void s5p_mfc_release_instance_buffer_v5(struct s5p_mfc_ctx *ctx)
294{
295 if (ctx->ctx.alloc) {
296 vb2_dma_contig_memops.put(ctx->ctx.alloc);
297 ctx->ctx.alloc = NULL;
298 ctx->ctx.ofs = 0;
299 ctx->ctx.virt = NULL;
300 ctx->ctx.dma = 0;
301 }
302 if (ctx->shm.alloc) {
303 vb2_dma_contig_memops.put(ctx->shm.alloc);
304 ctx->shm.alloc = NULL;
305 ctx->shm.ofs = 0;
306 ctx->shm.virt = NULL;
307 }
308}
309
/*
 * Allocate the device-global context buffer.
 * Nothing to allocate on MFC v5; kept as a no-op so callers can invoke
 * the hook unconditionally (presumably shared with other MFC variants
 * that do need it -- confirm against the ops table).
 */
int s5p_mfc_alloc_dev_context_buffer_v5(struct s5p_mfc_dev *dev)
{
	/* NOP */

	return 0;
}
316
/*
 * Release the device-global context buffer.
 * Counterpart of s5p_mfc_alloc_dev_context_buffer_v5(); nothing was
 * allocated, so nothing to free on MFC v5.
 */
void s5p_mfc_release_dev_context_buffer_v5(struct s5p_mfc_dev *dev)
{
	/* NOP */
}
321
/*
 * Store a 32-bit value into the context's shared memory area at byte
 * offset @ofs. The wmb() keeps this store ordered before any later
 * write (e.g. a register write that kicks the hardware).
 */
static void s5p_mfc_write_info_v5(struct s5p_mfc_ctx *ctx, unsigned int data,
			unsigned int ofs)
{
	writel(data, (ctx->shm.virt + ofs));
	wmb();
}
328
/*
 * Read a 32-bit value from the context's shared memory area at byte
 * offset @ofs. The rmb() orders this load after any earlier reads
 * (mirror of the wmb() in s5p_mfc_write_info_v5()).
 */
static unsigned int s5p_mfc_read_info_v5(struct s5p_mfc_ctx *ctx,
				unsigned int ofs)
{
	rmb();
	return readl(ctx->shm.virt + ofs);
}
335
336void s5p_mfc_dec_calc_dpb_size_v5(struct s5p_mfc_ctx *ctx)
337{
338 unsigned int guard_width, guard_height;
339
340 ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN);
341 ctx->buf_height = ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
342 mfc_debug(2,
343 "SEQ Done: Movie dimensions %dx%d, buffer dimensions: %dx%d\n",
344 ctx->img_width, ctx->img_height, ctx->buf_width,
345 ctx->buf_height);
346
347 if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC) {
348 ctx->luma_size = ALIGN(ctx->buf_width * ctx->buf_height,
349 S5P_FIMV_DEC_BUF_ALIGN);
350 ctx->chroma_size = ALIGN(ctx->buf_width *
351 ALIGN((ctx->img_height >> 1),
352 S5P_FIMV_NV12MT_VALIGN),
353 S5P_FIMV_DEC_BUF_ALIGN);
354 ctx->mv_size = ALIGN(ctx->buf_width *
355 ALIGN((ctx->buf_height >> 2),
356 S5P_FIMV_NV12MT_VALIGN),
357 S5P_FIMV_DEC_BUF_ALIGN);
358 } else {
359 guard_width =
360 ALIGN(ctx->img_width + 24, S5P_FIMV_NV12MT_HALIGN);
361 guard_height =
362 ALIGN(ctx->img_height + 16, S5P_FIMV_NV12MT_VALIGN);
363 ctx->luma_size = ALIGN(guard_width * guard_height,
364 S5P_FIMV_DEC_BUF_ALIGN);
365
366 guard_width =
367 ALIGN(ctx->img_width + 16, S5P_FIMV_NV12MT_HALIGN);
368 guard_height =
369 ALIGN((ctx->img_height >> 1) + 4,
370 S5P_FIMV_NV12MT_VALIGN);
371 ctx->chroma_size = ALIGN(guard_width * guard_height,
372 S5P_FIMV_DEC_BUF_ALIGN);
373
374 ctx->mv_size = 0;
375 }
376}
377
378void s5p_mfc_enc_calc_src_size_v5(struct s5p_mfc_ctx *ctx)
379{
380 if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M) {
381 ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN);
382
383 ctx->luma_size = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN)
384 * ALIGN(ctx->img_height, S5P_FIMV_NV12M_LVALIGN);
385 ctx->chroma_size = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN)
386 * ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12M_CVALIGN);
387
388 ctx->luma_size = ALIGN(ctx->luma_size, S5P_FIMV_NV12M_SALIGN);
389 ctx->chroma_size =
390 ALIGN(ctx->chroma_size, S5P_FIMV_NV12M_SALIGN);
391 } else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT) {
392 ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN);
393
394 ctx->luma_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
395 * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
396 ctx->chroma_size =
397 ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
398 * ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12MT_VALIGN);
399
400 ctx->luma_size = ALIGN(ctx->luma_size, S5P_FIMV_NV12MT_SALIGN);
401 ctx->chroma_size =
402 ALIGN(ctx->chroma_size, S5P_FIMV_NV12MT_SALIGN);
403 }
404}
405
/*
 * Set registers for decoding temporary buffers: program the descriptor
 * buffer address (as an offset from the bank 1 base, see OFFSETA usage
 * throughout this file) and its size.
 */
static void s5p_mfc_set_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv;

	mfc_write(dev, OFFSETA(ctx->dsc.dma), S5P_FIMV_SI_CH0_DESC_ADR);
	mfc_write(dev, buf_size->dsc, S5P_FIMV_SI_CH0_DESC_SIZE);
}
415
/*
 * Set registers for shared buffer: tell the firmware where the shared
 * memory area lives. ctx->shm.ofs is the offset from the bank 1 base
 * (computed in s5p_mfc_alloc_instance_buffer_v5()).
 */
static void s5p_mfc_set_shared_buffer(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	mfc_write(dev, ctx->shm.ofs, S5P_FIMV_SI_CH0_HOST_WR_ADR);
}
422
/*
 * Set registers for decoding stream buffer: program the stream base
 * address (as a bank 1 offset), the total coded-picture-buffer size and
 * the amount of data in the buffer, then publish the start offset via
 * shared memory.
 */
int s5p_mfc_set_dec_stream_buffer_v5(struct s5p_mfc_ctx *ctx, int buf_addr,
		unsigned int start_num_byte, unsigned int buf_size)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	mfc_write(dev, OFFSETA(buf_addr), S5P_FIMV_SI_CH0_SB_ST_ADR);
	mfc_write(dev, ctx->dec_src_buf_size, S5P_FIMV_SI_CH0_CPB_SIZE);
	mfc_write(dev, buf_size, S5P_FIMV_SI_CH0_SB_FRM_SIZE);
	/* Byte offset at which the firmware should start decoding */
	s5p_mfc_write_info_v5(ctx, start_num_byte, START_BYTE_NUM);
	return 0;
}
435
436/* Set decoding frame buffer */
437int s5p_mfc_set_dec_frame_buffer_v5(struct s5p_mfc_ctx *ctx)
438{
439 unsigned int frame_size, i;
440 unsigned int frame_size_ch, frame_size_mv;
441 struct s5p_mfc_dev *dev = ctx->dev;
442 unsigned int dpb;
443 size_t buf_addr1, buf_addr2;
444 int buf_size1, buf_size2;
445
446 buf_addr1 = ctx->bank1_phys;
447 buf_size1 = ctx->bank1_size;
448 buf_addr2 = ctx->bank2_phys;
449 buf_size2 = ctx->bank2_size;
450 dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
451 ~S5P_FIMV_DPB_COUNT_MASK;
452 mfc_write(dev, ctx->total_dpb_count | dpb,
453 S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
454 s5p_mfc_set_shared_buffer(ctx);
455 switch (ctx->codec_mode) {
456 case S5P_MFC_CODEC_H264_DEC:
457 mfc_write(dev, OFFSETA(buf_addr1),
458 S5P_FIMV_H264_VERT_NB_MV_ADR);
459 buf_addr1 += S5P_FIMV_DEC_VERT_NB_MV_SIZE;
460 buf_size1 -= S5P_FIMV_DEC_VERT_NB_MV_SIZE;
461 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_NB_IP_ADR);
462 buf_addr1 += S5P_FIMV_DEC_NB_IP_SIZE;
463 buf_size1 -= S5P_FIMV_DEC_NB_IP_SIZE;
464 break;
465 case S5P_MFC_CODEC_MPEG4_DEC:
466 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_NB_DCAC_ADR);
467 buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
468 buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
469 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_UP_NB_MV_ADR);
470 buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
471 buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
472 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SA_MV_ADR);
473 buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
474 buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
475 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SP_ADR);
476 buf_addr1 += S5P_FIMV_DEC_STX_PARSER_SIZE;
477 buf_size1 -= S5P_FIMV_DEC_STX_PARSER_SIZE;
478 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_OT_LINE_ADR);
479 buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
480 buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
481 break;
482 case S5P_MFC_CODEC_H263_DEC:
483 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_OT_LINE_ADR);
484 buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
485 buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
486 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_UP_NB_MV_ADR);
487 buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
488 buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
489 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_SA_MV_ADR);
490 buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
491 buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
492 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_NB_DCAC_ADR);
493 buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
494 buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
495 break;
496 case S5P_MFC_CODEC_VC1_DEC:
497 case S5P_MFC_CODEC_VC1RCV_DEC:
498 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_NB_DCAC_ADR);
499 buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
500 buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
501 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_OT_LINE_ADR);
502 buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
503 buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
504 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_UP_NB_MV_ADR);
505 buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
506 buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
507 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_SA_MV_ADR);
508 buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
509 buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
510 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE3_ADR);
511 buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
512 buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
513 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE2_ADR);
514 buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
515 buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
516 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE1_ADR);
517 buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
518 buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
519 break;
520 case S5P_MFC_CODEC_MPEG2_DEC:
521 break;
522 default:
523 mfc_err("Unknown codec for decoding (%x)\n",
524 ctx->codec_mode);
525 return -EINVAL;
526 break;
527 }
528 frame_size = ctx->luma_size;
529 frame_size_ch = ctx->chroma_size;
530 frame_size_mv = ctx->mv_size;
531 mfc_debug(2, "Frm size: %d ch: %d mv: %d\n", frame_size, frame_size_ch,
532 frame_size_mv);
533 for (i = 0; i < ctx->total_dpb_count; i++) {
534 /* Bank2 */
535 mfc_debug(2, "Luma %d: %x\n", i,
536 ctx->dst_bufs[i].cookie.raw.luma);
537 mfc_write(dev, OFFSETB(ctx->dst_bufs[i].cookie.raw.luma),
538 S5P_FIMV_DEC_LUMA_ADR + i * 4);
539 mfc_debug(2, "\tChroma %d: %x\n", i,
540 ctx->dst_bufs[i].cookie.raw.chroma);
541 mfc_write(dev, OFFSETA(ctx->dst_bufs[i].cookie.raw.chroma),
542 S5P_FIMV_DEC_CHROMA_ADR + i * 4);
543 if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC) {
544 mfc_debug(2, "\tBuf2: %x, size: %d\n",
545 buf_addr2, buf_size2);
546 mfc_write(dev, OFFSETB(buf_addr2),
547 S5P_FIMV_H264_MV_ADR + i * 4);
548 buf_addr2 += frame_size_mv;
549 buf_size2 -= frame_size_mv;
550 }
551 }
552 mfc_debug(2, "Buf1: %u, buf_size1: %d\n", buf_addr1, buf_size1);
553 mfc_debug(2, "Buf 1/2 size after: %d/%d (frames %d)\n",
554 buf_size1, buf_size2, ctx->total_dpb_count);
555 if (buf_size1 < 0 || buf_size2 < 0) {
556 mfc_debug(2, "Not enough memory has been allocated\n");
557 return -ENOMEM;
558 }
559 s5p_mfc_write_info_v5(ctx, frame_size, ALLOC_LUMA_DPB_SIZE);
560 s5p_mfc_write_info_v5(ctx, frame_size_ch, ALLOC_CHROMA_DPB_SIZE);
561 if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC)
562 s5p_mfc_write_info_v5(ctx, frame_size_mv, ALLOC_MV_SIZE);
563 mfc_write(dev, ((S5P_FIMV_CH_INIT_BUFS & S5P_FIMV_CH_MASK)
564 << S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
565 S5P_FIMV_SI_CH0_INST_ID);
566 return 0;
567}
568
/*
 * Set registers for encoding stream buffer: program the output stream
 * buffer address (as a bank 1 offset) and its size.
 */
int s5p_mfc_set_enc_stream_buffer_v5(struct s5p_mfc_ctx *ctx,
		unsigned long addr, unsigned int size)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	mfc_write(dev, OFFSETA(addr), S5P_FIMV_ENC_SI_CH0_SB_ADR);
	mfc_write(dev, size, S5P_FIMV_ENC_SI_CH0_SB_SIZE);
	return 0;
}
579
/*
 * Program the addresses of the current source frame's luma and chroma
 * planes, expressed as offsets from the bank 2 base (OFFSETB).
 */
void s5p_mfc_set_enc_frame_buffer_v5(struct s5p_mfc_ctx *ctx,
		unsigned long y_addr, unsigned long c_addr)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	mfc_write(dev, OFFSETB(y_addr), S5P_FIMV_ENC_SI_CH0_CUR_Y_ADR);
	mfc_write(dev, OFFSETB(c_addr), S5P_FIMV_ENC_SI_CH0_CUR_C_ADR);
}
588
/*
 * Read back the luma/chroma addresses of the frame just encoded.
 * The hardware reports bank 2 relative offsets in MFC_OFFSET_SHIFT
 * units; convert them back to absolute addresses.
 */
void s5p_mfc_get_enc_frame_buffer_v5(struct s5p_mfc_ctx *ctx,
		unsigned long *y_addr, unsigned long *c_addr)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	*y_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_Y_ADDR)
							<< MFC_OFFSET_SHIFT);
	*c_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_C_ADDR)
							<< MFC_OFFSET_SHIFT);
}
599
600/* Set encoding ref & codec buffer */
601int s5p_mfc_set_enc_ref_buffer_v5(struct s5p_mfc_ctx *ctx)
602{
603 struct s5p_mfc_dev *dev = ctx->dev;
604 size_t buf_addr1, buf_addr2;
605 size_t buf_size1, buf_size2;
606 unsigned int enc_ref_y_size, enc_ref_c_size;
607 unsigned int guard_width, guard_height;
608 int i;
609
610 buf_addr1 = ctx->bank1_phys;
611 buf_size1 = ctx->bank1_size;
612 buf_addr2 = ctx->bank2_phys;
613 buf_size2 = ctx->bank2_size;
614 enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
615 * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
616 enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
617 if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC) {
618 enc_ref_c_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
619 * ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12MT_VALIGN);
620 enc_ref_c_size = ALIGN(enc_ref_c_size, S5P_FIMV_NV12MT_SALIGN);
621 } else {
622 guard_width = ALIGN(ctx->img_width + 16,
623 S5P_FIMV_NV12MT_HALIGN);
624 guard_height = ALIGN((ctx->img_height >> 1) + 4,
625 S5P_FIMV_NV12MT_VALIGN);
626 enc_ref_c_size = ALIGN(guard_width * guard_height,
627 S5P_FIMV_NV12MT_SALIGN);
628 }
629 mfc_debug(2, "buf_size1: %d, buf_size2: %d\n", buf_size1, buf_size2);
630 switch (ctx->codec_mode) {
631 case S5P_MFC_CODEC_H264_ENC:
632 for (i = 0; i < 2; i++) {
633 mfc_write(dev, OFFSETA(buf_addr1),
634 S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
635 buf_addr1 += enc_ref_y_size;
636 buf_size1 -= enc_ref_y_size;
637
638 mfc_write(dev, OFFSETB(buf_addr2),
639 S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
640 buf_addr2 += enc_ref_y_size;
641 buf_size2 -= enc_ref_y_size;
642 }
643 for (i = 0; i < 4; i++) {
644 mfc_write(dev, OFFSETB(buf_addr2),
645 S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
646 buf_addr2 += enc_ref_c_size;
647 buf_size2 -= enc_ref_c_size;
648 }
649 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_UP_MV_ADR);
650 buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
651 buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
652 mfc_write(dev, OFFSETA(buf_addr1),
653 S5P_FIMV_H264_COZERO_FLAG_ADR);
654 buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
655 buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
656 mfc_write(dev, OFFSETA(buf_addr1),
657 S5P_FIMV_H264_UP_INTRA_MD_ADR);
658 buf_addr1 += S5P_FIMV_ENC_INTRAMD_SIZE;
659 buf_size1 -= S5P_FIMV_ENC_INTRAMD_SIZE;
660 mfc_write(dev, OFFSETB(buf_addr2),
661 S5P_FIMV_H264_UP_INTRA_PRED_ADR);
662 buf_addr2 += S5P_FIMV_ENC_INTRAPRED_SIZE;
663 buf_size2 -= S5P_FIMV_ENC_INTRAPRED_SIZE;
664 mfc_write(dev, OFFSETA(buf_addr1),
665 S5P_FIMV_H264_NBOR_INFO_ADR);
666 buf_addr1 += S5P_FIMV_ENC_NBORINFO_SIZE;
667 buf_size1 -= S5P_FIMV_ENC_NBORINFO_SIZE;
668 mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
669 buf_size1, buf_size2);
670 break;
671 case S5P_MFC_CODEC_MPEG4_ENC:
672 for (i = 0; i < 2; i++) {
673 mfc_write(dev, OFFSETA(buf_addr1),
674 S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
675 buf_addr1 += enc_ref_y_size;
676 buf_size1 -= enc_ref_y_size;
677 mfc_write(dev, OFFSETB(buf_addr2),
678 S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
679 buf_addr2 += enc_ref_y_size;
680 buf_size2 -= enc_ref_y_size;
681 }
682 for (i = 0; i < 4; i++) {
683 mfc_write(dev, OFFSETB(buf_addr2),
684 S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
685 buf_addr2 += enc_ref_c_size;
686 buf_size2 -= enc_ref_c_size;
687 }
688 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_UP_MV_ADR);
689 buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
690 buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
691 mfc_write(dev, OFFSETA(buf_addr1),
692 S5P_FIMV_MPEG4_COZERO_FLAG_ADR);
693 buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
694 buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
695 mfc_write(dev, OFFSETA(buf_addr1),
696 S5P_FIMV_MPEG4_ACDC_COEF_ADR);
697 buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
698 buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
699 mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
700 buf_size1, buf_size2);
701 break;
702 case S5P_MFC_CODEC_H263_ENC:
703 for (i = 0; i < 2; i++) {
704 mfc_write(dev, OFFSETA(buf_addr1),
705 S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
706 buf_addr1 += enc_ref_y_size;
707 buf_size1 -= enc_ref_y_size;
708 mfc_write(dev, OFFSETB(buf_addr2),
709 S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
710 buf_addr2 += enc_ref_y_size;
711 buf_size2 -= enc_ref_y_size;
712 }
713 for (i = 0; i < 4; i++) {
714 mfc_write(dev, OFFSETB(buf_addr2),
715 S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
716 buf_addr2 += enc_ref_c_size;
717 buf_size2 -= enc_ref_c_size;
718 }
719 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_UP_MV_ADR);
720 buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
721 buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
722 mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_ACDC_COEF_ADR);
723 buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
724 buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
725 mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
726 buf_size1, buf_size2);
727 break;
728 default:
729 mfc_err("Unknown codec set for encoding: %d\n",
730 ctx->codec_mode);
731 return -EINVAL;
732 }
733 return 0;
734}
735
736static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
737{
738 struct s5p_mfc_dev *dev = ctx->dev;
739 struct s5p_mfc_enc_params *p = &ctx->enc_params;
740 unsigned int reg;
741 unsigned int shm;
742
743 /* width */
744 mfc_write(dev, ctx->img_width, S5P_FIMV_ENC_HSIZE_PX);
745 /* height */
746 mfc_write(dev, ctx->img_height, S5P_FIMV_ENC_VSIZE_PX);
747 /* pictype : enable, IDR period */
748 reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
749 reg |= (1 << 18);
750 reg &= ~(0xFFFF);
751 reg |= p->gop_size;
752 mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
753 mfc_write(dev, 0, S5P_FIMV_ENC_B_RECON_WRITE_ON);
754 /* multi-slice control */
755 /* multi-slice MB number or bit size */
756 mfc_write(dev, p->slice_mode, S5P_FIMV_ENC_MSLICE_CTRL);
757 if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) {
758 mfc_write(dev, p->slice_mb, S5P_FIMV_ENC_MSLICE_MB);
759 } else if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) {
760 mfc_write(dev, p->slice_bit, S5P_FIMV_ENC_MSLICE_BIT);
761 } else {
762 mfc_write(dev, 0, S5P_FIMV_ENC_MSLICE_MB);
763 mfc_write(dev, 0, S5P_FIMV_ENC_MSLICE_BIT);
764 }
765 /* cyclic intra refresh */
766 mfc_write(dev, p->intra_refresh_mb, S5P_FIMV_ENC_CIR_CTRL);
767 /* memory structure cur. frame */
768 if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
769 mfc_write(dev, 0, S5P_FIMV_ENC_MAP_FOR_CUR);
770 else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
771 mfc_write(dev, 3, S5P_FIMV_ENC_MAP_FOR_CUR);
772 /* padding control & value */
773 reg = mfc_read(dev, S5P_FIMV_ENC_PADDING_CTRL);
774 if (p->pad) {
775 /** enable */
776 reg |= (1 << 31);
777 /** cr value */
778 reg &= ~(0xFF << 16);
779 reg |= (p->pad_cr << 16);
780 /** cb value */
781 reg &= ~(0xFF << 8);
782 reg |= (p->pad_cb << 8);
783 /** y value */
784 reg &= ~(0xFF);
785 reg |= (p->pad_luma);
786 } else {
787 /** disable & all value clear */
788 reg = 0;
789 }
790 mfc_write(dev, reg, S5P_FIMV_ENC_PADDING_CTRL);
791 /* rate control config. */
792 reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
793 /** frame-level rate control */
794 reg &= ~(0x1 << 9);
795 reg |= (p->rc_frame << 9);
796 mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
797 /* bit rate */
798 if (p->rc_frame)
799 mfc_write(dev, p->rc_bitrate,
800 S5P_FIMV_ENC_RC_BIT_RATE);
801 else
802 mfc_write(dev, 0, S5P_FIMV_ENC_RC_BIT_RATE);
803 /* reaction coefficient */
804 if (p->rc_frame)
805 mfc_write(dev, p->rc_reaction_coeff, S5P_FIMV_ENC_RC_RPARA);
806 shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
807 /* seq header ctrl */
808 shm &= ~(0x1 << 3);
809 shm |= (p->seq_hdr_mode << 3);
810 /* frame skip mode */
811 shm &= ~(0x3 << 1);
812 shm |= (p->frame_skip_mode << 1);
813 s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
814 /* fixed target bit */
815 s5p_mfc_write_info_v5(ctx, p->fixed_target_bit, RC_CONTROL_CONFIG);
816 return 0;
817}
818
819static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
820{
821 struct s5p_mfc_dev *dev = ctx->dev;
822 struct s5p_mfc_enc_params *p = &ctx->enc_params;
823 struct s5p_mfc_h264_enc_params *p_264 = &p->codec.h264;
824 unsigned int reg;
825 unsigned int shm;
826
827 s5p_mfc_set_enc_params(ctx);
828 /* pictype : number of B */
829 reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
830 /* num_b_frame - 0 ~ 2 */
831 reg &= ~(0x3 << 16);
832 reg |= (p->num_b_frame << 16);
833 mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
834 /* profile & level */
835 reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
836 /* level */
837 reg &= ~(0xFF << 8);
838 reg |= (p_264->level << 8);
839 /* profile - 0 ~ 2 */
840 reg &= ~(0x3F);
841 reg |= p_264->profile;
842 mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
843 /* interlace */
844 mfc_write(dev, p_264->interlace, S5P_FIMV_ENC_PIC_STRUCT);
845 /* height */
846 if (p_264->interlace)
847 mfc_write(dev, ctx->img_height >> 1, S5P_FIMV_ENC_VSIZE_PX);
848 /* loopfilter ctrl */
849 mfc_write(dev, p_264->loop_filter_mode, S5P_FIMV_ENC_LF_CTRL);
850 /* loopfilter alpha offset */
851 if (p_264->loop_filter_alpha < 0) {
852 reg = 0x10;
853 reg |= (0xFF - p_264->loop_filter_alpha) + 1;
854 } else {
855 reg = 0x00;
856 reg |= (p_264->loop_filter_alpha & 0xF);
857 }
858 mfc_write(dev, reg, S5P_FIMV_ENC_ALPHA_OFF);
859 /* loopfilter beta offset */
860 if (p_264->loop_filter_beta < 0) {
861 reg = 0x10;
862 reg |= (0xFF - p_264->loop_filter_beta) + 1;
863 } else {
864 reg = 0x00;
865 reg |= (p_264->loop_filter_beta & 0xF);
866 }
867 mfc_write(dev, reg, S5P_FIMV_ENC_BETA_OFF);
868 /* entropy coding mode */
869 if (p_264->entropy_mode == V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC)
870 mfc_write(dev, 1, S5P_FIMV_ENC_H264_ENTROPY_MODE);
871 else
872 mfc_write(dev, 0, S5P_FIMV_ENC_H264_ENTROPY_MODE);
873 /* number of ref. picture */
874 reg = mfc_read(dev, S5P_FIMV_ENC_H264_NUM_OF_REF);
875 /* num of ref. pictures of P */
876 reg &= ~(0x3 << 5);
877 reg |= (p_264->num_ref_pic_4p << 5);
878 /* max number of ref. pictures */
879 reg &= ~(0x1F);
880 reg |= p_264->max_ref_pic;
881 mfc_write(dev, reg, S5P_FIMV_ENC_H264_NUM_OF_REF);
882 /* 8x8 transform enable */
883 mfc_write(dev, p_264->_8x8_transform, S5P_FIMV_ENC_H264_TRANS_FLAG);
884 /* rate control config. */
885 reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
886 /* macroblock level rate control */
887 reg &= ~(0x1 << 8);
888 reg |= (p->rc_mb << 8);
889 /* frame QP */
890 reg &= ~(0x3F);
891 reg |= p_264->rc_frame_qp;
892 mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
893 /* frame rate */
894 if (p->rc_frame && p->rc_framerate_denom)
895 mfc_write(dev, p->rc_framerate_num * 1000
896 / p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
897 else
898 mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
899 /* max & min value of QP */
900 reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
901 /* max QP */
902 reg &= ~(0x3F << 8);
903 reg |= (p_264->rc_max_qp << 8);
904 /* min QP */
905 reg &= ~(0x3F);
906 reg |= p_264->rc_min_qp;
907 mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
908 /* macroblock adaptive scaling features */
909 if (p->rc_mb) {
910 reg = mfc_read(dev, S5P_FIMV_ENC_RC_MB_CTRL);
911 /* dark region */
912 reg &= ~(0x1 << 3);
913 reg |= (p_264->rc_mb_dark << 3);
914 /* smooth region */
915 reg &= ~(0x1 << 2);
916 reg |= (p_264->rc_mb_smooth << 2);
917 /* static region */
918 reg &= ~(0x1 << 1);
919 reg |= (p_264->rc_mb_static << 1);
920 /* high activity region */
921 reg &= ~(0x1);
922 reg |= p_264->rc_mb_activity;
923 mfc_write(dev, reg, S5P_FIMV_ENC_RC_MB_CTRL);
924 }
925 if (!p->rc_frame && !p->rc_mb) {
926 shm = s5p_mfc_read_info_v5(ctx, P_B_FRAME_QP);
927 shm &= ~(0xFFF);
928 shm |= ((p_264->rc_b_frame_qp & 0x3F) << 6);
929 shm |= (p_264->rc_p_frame_qp & 0x3F);
930 s5p_mfc_write_info_v5(ctx, shm, P_B_FRAME_QP);
931 }
932 /* extended encoder ctrl */
933 shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
934 /* AR VUI control */
935 shm &= ~(0x1 << 15);
936 shm |= (p_264->vui_sar << 1);
937 s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
938 if (p_264->vui_sar) {
939 /* aspect ration IDC */
940 shm = s5p_mfc_read_info_v5(ctx, SAMPLE_ASPECT_RATIO_IDC);
941 shm &= ~(0xFF);
942 shm |= p_264->vui_sar_idc;
943 s5p_mfc_write_info_v5(ctx, shm, SAMPLE_ASPECT_RATIO_IDC);
944 if (p_264->vui_sar_idc == 0xFF) {
945 /* sample AR info */
946 shm = s5p_mfc_read_info_v5(ctx, EXTENDED_SAR);
947 shm &= ~(0xFFFFFFFF);
948 shm |= p_264->vui_ext_sar_width << 16;
949 shm |= p_264->vui_ext_sar_height;
950 s5p_mfc_write_info_v5(ctx, shm, EXTENDED_SAR);
951 }
952 }
953 /* intra picture period for H.264 */
954 shm = s5p_mfc_read_info_v5(ctx, H264_I_PERIOD);
955 /* control */
956 shm &= ~(0x1 << 16);
957 shm |= (p_264->open_gop << 16);
958 /* value */
959 if (p_264->open_gop) {
960 shm &= ~(0xFFFF);
961 shm |= p_264->open_gop_size;
962 }
963 s5p_mfc_write_info_v5(ctx, shm, H264_I_PERIOD);
964 /* extended encoder ctrl */
965 shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
966 /* vbv buffer size */
967 if (p->frame_skip_mode ==
968 V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
969 shm &= ~(0xFFFF << 16);
970 shm |= (p_264->cpb_size << 16);
971 }
972 s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
973 return 0;
974}
975
976static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx)
977{
978 struct s5p_mfc_dev *dev = ctx->dev;
979 struct s5p_mfc_enc_params *p = &ctx->enc_params;
980 struct s5p_mfc_mpeg4_enc_params *p_mpeg4 = &p->codec.mpeg4;
981 unsigned int reg;
982 unsigned int shm;
983 unsigned int framerate;
984
985 s5p_mfc_set_enc_params(ctx);
986 /* pictype : number of B */
987 reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
988 /* num_b_frame - 0 ~ 2 */
989 reg &= ~(0x3 << 16);
990 reg |= (p->num_b_frame << 16);
991 mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
992 /* profile & level */
993 reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
994 /* level */
995 reg &= ~(0xFF << 8);
996 reg |= (p_mpeg4->level << 8);
997 /* profile - 0 ~ 2 */
998 reg &= ~(0x3F);
999 reg |= p_mpeg4->profile;
1000 mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
1001 /* quarter_pixel */
1002 mfc_write(dev, p_mpeg4->quarter_pixel, S5P_FIMV_ENC_MPEG4_QUART_PXL);
1003 /* qp */
1004 if (!p->rc_frame) {
1005 shm = s5p_mfc_read_info_v5(ctx, P_B_FRAME_QP);
1006 shm &= ~(0xFFF);
1007 shm |= ((p_mpeg4->rc_b_frame_qp & 0x3F) << 6);
1008 shm |= (p_mpeg4->rc_p_frame_qp & 0x3F);
1009 s5p_mfc_write_info_v5(ctx, shm, P_B_FRAME_QP);
1010 }
1011 /* frame rate */
1012 if (p->rc_frame) {
1013 if (p->rc_framerate_denom > 0) {
1014 framerate = p->rc_framerate_num * 1000 /
1015 p->rc_framerate_denom;
1016 mfc_write(dev, framerate,
1017 S5P_FIMV_ENC_RC_FRAME_RATE);
1018 shm = s5p_mfc_read_info_v5(ctx, RC_VOP_TIMING);
1019 shm &= ~(0xFFFFFFFF);
1020 shm |= (1 << 31);
1021 shm |= ((p->rc_framerate_num & 0x7FFF) << 16);
1022 shm |= (p->rc_framerate_denom & 0xFFFF);
1023 s5p_mfc_write_info_v5(ctx, shm, RC_VOP_TIMING);
1024 }
1025 } else {
1026 mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
1027 }
1028 /* rate control config. */
1029 reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
1030 /* frame QP */
1031 reg &= ~(0x3F);
1032 reg |= p_mpeg4->rc_frame_qp;
1033 mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
1034 /* max & min value of QP */
1035 reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
1036 /* max QP */
1037 reg &= ~(0x3F << 8);
1038 reg |= (p_mpeg4->rc_max_qp << 8);
1039 /* min QP */
1040 reg &= ~(0x3F);
1041 reg |= p_mpeg4->rc_min_qp;
1042 mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
1043 /* extended encoder ctrl */
1044 shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
1045 /* vbv buffer size */
1046 if (p->frame_skip_mode ==
1047 V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
1048 shm &= ~(0xFFFF << 16);
1049 shm |= (p->vbv_size << 16);
1050 }
1051 s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
1052 return 0;
1053}
1054
1055static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx)
1056{
1057 struct s5p_mfc_dev *dev = ctx->dev;
1058 struct s5p_mfc_enc_params *p = &ctx->enc_params;
1059 struct s5p_mfc_mpeg4_enc_params *p_h263 = &p->codec.mpeg4;
1060 unsigned int reg;
1061 unsigned int shm;
1062
1063 s5p_mfc_set_enc_params(ctx);
1064 /* qp */
1065 if (!p->rc_frame) {
1066 shm = s5p_mfc_read_info_v5(ctx, P_B_FRAME_QP);
1067 shm &= ~(0xFFF);
1068 shm |= (p_h263->rc_p_frame_qp & 0x3F);
1069 s5p_mfc_write_info_v5(ctx, shm, P_B_FRAME_QP);
1070 }
1071 /* frame rate */
1072 if (p->rc_frame && p->rc_framerate_denom)
1073 mfc_write(dev, p->rc_framerate_num * 1000
1074 / p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
1075 else
1076 mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
1077 /* rate control config. */
1078 reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
1079 /* frame QP */
1080 reg &= ~(0x3F);
1081 reg |= p_h263->rc_frame_qp;
1082 mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
1083 /* max & min value of QP */
1084 reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
1085 /* max QP */
1086 reg &= ~(0x3F << 8);
1087 reg |= (p_h263->rc_max_qp << 8);
1088 /* min QP */
1089 reg &= ~(0x3F);
1090 reg |= p_h263->rc_min_qp;
1091 mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
1092 /* extended encoder ctrl */
1093 shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
1094 /* vbv buffer size */
1095 if (p->frame_skip_mode ==
1096 V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
1097 shm &= ~(0xFFFF << 16);
1098 shm |= (p->vbv_size << 16);
1099 }
1100 s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
1101 return 0;
1102}
1103
/*
 * Initialize decoding: publish the shared buffer, configure the loop
 * filter and display-delay options, then issue the SEQ_HEADER command
 * so the firmware parses the stream header.
 */
int s5p_mfc_init_decode_v5(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	s5p_mfc_set_shared_buffer(ctx);
	/* Setup loop filter, for decoding this is only valid for MPEG4 */
	if (ctx->codec_mode == S5P_MFC_CODEC_MPEG4_DEC)
		mfc_write(dev, ctx->loop_filter_mpeg4, S5P_FIMV_ENC_LF_CTRL);
	else
		mfc_write(dev, 0, S5P_FIMV_ENC_LF_CTRL);
	/* Pack slice-interface enable, display-delay enable and the
	 * display-delay value into the DPB configuration register */
	mfc_write(dev, ((ctx->slice_interface & S5P_FIMV_SLICE_INT_MASK) <<
		S5P_FIMV_SLICE_INT_SHIFT) | (ctx->display_delay_enable <<
		S5P_FIMV_DDELAY_ENA_SHIFT) | ((ctx->display_delay &
		S5P_FIMV_DDELAY_VAL_MASK) << S5P_FIMV_DDELAY_VAL_SHIFT),
		S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
	/* Kick the SEQ_HEADER command on this instance's channel */
	mfc_write(dev,
	((S5P_FIMV_CH_SEQ_HEADER & S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT)
	| (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
	return 0;
}
1125
1126static void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush)
1127{
1128 struct s5p_mfc_dev *dev = ctx->dev;
1129 unsigned int dpb;
1130
1131 if (flush)
1132 dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) | (
1133 S5P_FIMV_DPB_FLUSH_MASK << S5P_FIMV_DPB_FLUSH_SHIFT);
1134 else
1135 dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
1136 ~(S5P_FIMV_DPB_FLUSH_MASK << S5P_FIMV_DPB_FLUSH_SHIFT);
1137 mfc_write(dev, dpb, S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
1138}
1139
/* Decode a single frame */
int s5p_mfc_decode_one_frame_v5(struct s5p_mfc_ctx *ctx,
					enum s5p_mfc_decode_arg last_frame)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	/* dec_dst_flag is handed to the RELEASE_BUF register - presumably
	 * a bitmap of destination buffers returned to the hardware */
	mfc_write(dev, ctx->dec_dst_flag, S5P_FIMV_SI_CH0_RELEASE_BUF);
	s5p_mfc_set_shared_buffer(ctx);
	s5p_mfc_set_flush(ctx, ctx->dpb_flush_flag);
	/* Issue different commands to instance basing on whether it
	 * is the last frame or not. */
	switch (last_frame) {
	case MFC_DEC_FRAME:
		mfc_write(dev, ((S5P_FIMV_CH_FRAME_START & S5P_FIMV_CH_MASK) <<
		S5P_FIMV_CH_SHIFT) | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
		break;
	case MFC_DEC_LAST_FRAME:
		mfc_write(dev, ((S5P_FIMV_CH_LAST_FRAME & S5P_FIMV_CH_MASK) <<
		S5P_FIMV_CH_SHIFT) | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
		break;
	case MFC_DEC_RES_CHANGE:
		mfc_write(dev, ((S5P_FIMV_CH_FRAME_START_REALLOC &
		S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
		S5P_FIMV_SI_CH0_INST_ID);
		break;
	}
	/* no default case: all s5p_mfc_decode_arg values are handled above */
	mfc_debug(2, "Decoding a usual frame\n");
	return 0;
}
1169
1170int s5p_mfc_init_encode_v5(struct s5p_mfc_ctx *ctx)
1171{
1172 struct s5p_mfc_dev *dev = ctx->dev;
1173
1174 if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC)
1175 s5p_mfc_set_enc_params_h264(ctx);
1176 else if (ctx->codec_mode == S5P_MFC_CODEC_MPEG4_ENC)
1177 s5p_mfc_set_enc_params_mpeg4(ctx);
1178 else if (ctx->codec_mode == S5P_MFC_CODEC_H263_ENC)
1179 s5p_mfc_set_enc_params_h263(ctx);
1180 else {
1181 mfc_err("Unknown codec for encoding (%x)\n",
1182 ctx->codec_mode);
1183 return -EINVAL;
1184 }
1185 s5p_mfc_set_shared_buffer(ctx);
1186 mfc_write(dev, ((S5P_FIMV_CH_SEQ_HEADER << 16) & 0x70000) |
1187 (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
1188 return 0;
1189}
1190
1191/* Encode a single frame */
1192int s5p_mfc_encode_one_frame_v5(struct s5p_mfc_ctx *ctx)
1193{
1194 struct s5p_mfc_dev *dev = ctx->dev;
1195 int cmd;
1196 /* memory structure cur. frame */
1197 if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
1198 mfc_write(dev, 0, S5P_FIMV_ENC_MAP_FOR_CUR);
1199 else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
1200 mfc_write(dev, 3, S5P_FIMV_ENC_MAP_FOR_CUR);
1201 s5p_mfc_set_shared_buffer(ctx);
1202
1203 if (ctx->state == MFCINST_FINISHING)
1204 cmd = S5P_FIMV_CH_LAST_FRAME;
1205 else
1206 cmd = S5P_FIMV_CH_FRAME_START;
1207 mfc_write(dev, ((cmd & S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT)
1208 | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
1209
1210 return 0;
1211}
1212
1213static int s5p_mfc_get_new_ctx(struct s5p_mfc_dev *dev)
1214{
1215 unsigned long flags;
1216 int new_ctx;
1217 int cnt;
1218
1219 spin_lock_irqsave(&dev->condlock, flags);
1220 new_ctx = (dev->curr_ctx + 1) % MFC_NUM_CONTEXTS;
1221 cnt = 0;
1222 while (!test_bit(new_ctx, &dev->ctx_work_bits)) {
1223 new_ctx = (new_ctx + 1) % MFC_NUM_CONTEXTS;
1224 if (++cnt > MFC_NUM_CONTEXTS) {
1225 /* No contexts to run */
1226 spin_unlock_irqrestore(&dev->condlock, flags);
1227 return -EAGAIN;
1228 }
1229 }
1230 spin_unlock_irqrestore(&dev->condlock, flags);
1231 return new_ctx;
1232}
1233
/*
 * Start a decode run that handles a resolution change: clear the stream
 * buffer (address/offset/size all 0) and issue the frame-start-realloc
 * command so the hardware flushes out the frames of the old resolution.
 */
static void s5p_mfc_run_res_change(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	s5p_mfc_set_dec_stream_buffer_v5(ctx, 0, 0, 0);
	/* Record which context issued the command (used when the
	 * completion interrupt is handled - see other run_* helpers) */
	dev->curr_ctx = ctx->num;
	s5p_mfc_clean_ctx_int_flags(ctx);
	s5p_mfc_decode_one_frame_v5(ctx, MFC_DEC_RES_CHANGE);
}
1243
1244static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame)
1245{
1246 struct s5p_mfc_dev *dev = ctx->dev;
1247 struct s5p_mfc_buf *temp_vb;
1248 unsigned long flags;
1249 unsigned int index;
1250
1251 spin_lock_irqsave(&dev->irqlock, flags);
1252 /* Frames are being decoded */
1253 if (list_empty(&ctx->src_queue)) {
1254 mfc_debug(2, "No src buffers\n");
1255 spin_unlock_irqrestore(&dev->irqlock, flags);
1256 return -EAGAIN;
1257 }
1258 /* Get the next source buffer */
1259 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1260 temp_vb->flags |= MFC_BUF_FLAG_USED;
1261 s5p_mfc_set_dec_stream_buffer_v5(ctx,
1262 vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
1263 ctx->consumed_stream, temp_vb->b->v4l2_planes[0].bytesused);
1264 spin_unlock_irqrestore(&dev->irqlock, flags);
1265 index = temp_vb->b->v4l2_buf.index;
1266 dev->curr_ctx = ctx->num;
1267 s5p_mfc_clean_ctx_int_flags(ctx);
1268 if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
1269 last_frame = MFC_DEC_LAST_FRAME;
1270 mfc_debug(2, "Setting ctx->state to FINISHING\n");
1271 ctx->state = MFCINST_FINISHING;
1272 }
1273 s5p_mfc_decode_one_frame_v5(ctx, last_frame);
1274 return 0;
1275}
1276
/*
 * Feed one source frame and one destination (stream) buffer to the
 * hardware and issue an encode command. Returns -EAGAIN when either
 * queue cannot supply a buffer, 0 once the command has been issued.
 *
 * When the context is finishing and the source queue is empty, or the
 * queued source buffer has zero payload, a "null frame" (both planes
 * pointing at bank2) is programmed to drain the encoder.
 */
static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long flags;
	struct s5p_mfc_buf *dst_mb;
	struct s5p_mfc_buf *src_mb;
	unsigned long src_y_addr, src_c_addr, dst_addr;
	unsigned int dst_size;

	/* irqlock protects the src/dst buffer queues */
	spin_lock_irqsave(&dev->irqlock, flags);
	if (list_empty(&ctx->src_queue) && ctx->state != MFCINST_FINISHING) {
		mfc_debug(2, "no src buffers\n");
		spin_unlock_irqrestore(&dev->irqlock, flags);
		return -EAGAIN;
	}
	if (list_empty(&ctx->dst_queue)) {
		mfc_debug(2, "no dst buffers\n");
		spin_unlock_irqrestore(&dev->irqlock, flags);
		return -EAGAIN;
	}
	if (list_empty(&ctx->src_queue)) {
		/* send null frame */
		s5p_mfc_set_enc_frame_buffer_v5(ctx, dev->bank2, dev->bank2);
		src_mb = NULL;
	} else {
		src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
									list);
		src_mb->flags |= MFC_BUF_FLAG_USED;
		if (src_mb->b->v4l2_planes[0].bytesused == 0) {
			/* send null frame */
			s5p_mfc_set_enc_frame_buffer_v5(ctx, dev->bank2,
								dev->bank2);
			ctx->state = MFCINST_FINISHING;
		} else {
			/* Plane 0 is luma, plane 1 is chroma */
			src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b,
									0);
			src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b,
									1);
			s5p_mfc_set_enc_frame_buffer_v5(ctx, src_y_addr,
								src_c_addr);
			if (src_mb->flags & MFC_BUF_FLAG_EOS)
				ctx->state = MFCINST_FINISHING;
		}
	}
	dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
	dst_mb->flags |= MFC_BUF_FLAG_USED;
	dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
	dst_size = vb2_plane_size(dst_mb->b, 0);
	s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size);
	spin_unlock_irqrestore(&dev->irqlock, flags);
	/* Record which context issued the command before kicking hardware */
	dev->curr_ctx = ctx->num;
	s5p_mfc_clean_ctx_int_flags(ctx);
	mfc_debug(2, "encoding buffer with index=%d state=%d",
		src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state);
	s5p_mfc_encode_one_frame_v5(ctx);
	return 0;
}
1334
/*
 * First stage of decoder initialisation: point the hardware at the
 * header source buffer and issue the sequence-init (header parse)
 * command via s5p_mfc_init_decode_v5().
 * NOTE(review): assumes src_queue is non-empty - list_entry() is used
 * without an emptiness check; confirm callers guarantee this.
 */
static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long flags;
	struct s5p_mfc_buf *temp_vb;

	/* Initializing decoding - parsing header */
	spin_lock_irqsave(&dev->irqlock, flags);
	mfc_debug(2, "Preparing to init decoding\n");
	temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
	s5p_mfc_set_dec_desc_buffer(ctx);
	mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
	s5p_mfc_set_dec_stream_buffer_v5(ctx,
		vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
		0, temp_vb->b->v4l2_planes[0].bytesused);
	spin_unlock_irqrestore(&dev->irqlock, flags);
	/* Record which context issued the command, then clear stale flags */
	dev->curr_ctx = ctx->num;
	s5p_mfc_clean_ctx_int_flags(ctx);
	s5p_mfc_init_decode_v5(ctx);
}
1355
/*
 * First stage of encoder initialisation: program the reference buffers,
 * hand the first destination buffer to the hardware for the stream
 * header and issue the sequence-header command.
 * NOTE(review): assumes dst_queue is non-empty - list_entry() is used
 * without an emptiness check; confirm callers guarantee this.
 */
static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long flags;
	struct s5p_mfc_buf *dst_mb;
	unsigned long dst_addr;
	unsigned int dst_size;

	s5p_mfc_set_enc_ref_buffer_v5(ctx);
	/* irqlock protects the dst buffer queue */
	spin_lock_irqsave(&dev->irqlock, flags);
	dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
	dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
	dst_size = vb2_plane_size(dst_mb->b, 0);
	s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size);
	spin_unlock_irqrestore(&dev->irqlock, flags);
	dev->curr_ctx = ctx->num;
	s5p_mfc_clean_ctx_int_flags(ctx);
	s5p_mfc_init_encode_v5(ctx);
}
1375
1376static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
1377{
1378 struct s5p_mfc_dev *dev = ctx->dev;
1379 unsigned long flags;
1380 struct s5p_mfc_buf *temp_vb;
1381 int ret;
1382
1383 /*
1384 * Header was parsed now starting processing
1385 * First set the output frame buffers
1386 */
1387 if (ctx->capture_state != QUEUE_BUFS_MMAPED) {
1388 mfc_err("It seems that not all destionation buffers were "
1389 "mmaped\nMFC requires that all destination are mmaped "
1390 "before starting processing\n");
1391 return -EAGAIN;
1392 }
1393 spin_lock_irqsave(&dev->irqlock, flags);
1394 if (list_empty(&ctx->src_queue)) {
1395 mfc_err("Header has been deallocated in the middle of"
1396 " initialization\n");
1397 spin_unlock_irqrestore(&dev->irqlock, flags);
1398 return -EIO;
1399 }
1400 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1401 mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
1402 s5p_mfc_set_dec_stream_buffer_v5(ctx,
1403 vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
1404 0, temp_vb->b->v4l2_planes[0].bytesused);
1405 spin_unlock_irqrestore(&dev->irqlock, flags);
1406 dev->curr_ctx = ctx->num;
1407 s5p_mfc_clean_ctx_int_flags(ctx);
1408 ret = s5p_mfc_set_dec_frame_buffer_v5(ctx);
1409 if (ret) {
1410 mfc_err("Failed to alloc frame mem\n");
1411 ctx->state = MFCINST_ERROR;
1412 }
1413 return ret;
1414}
1415
1416/* Try running an operation on hardware */
1417void s5p_mfc_try_run_v5(struct s5p_mfc_dev *dev)
1418{
1419 struct s5p_mfc_ctx *ctx;
1420 int new_ctx;
1421 unsigned int ret = 0;
1422
1423 if (test_bit(0, &dev->enter_suspend)) {
1424 mfc_debug(1, "Entering suspend so do not schedule any jobs\n");
1425 return;
1426 }
1427 /* Check whether hardware is not running */
1428 if (test_and_set_bit(0, &dev->hw_lock) != 0) {
1429 /* This is perfectly ok, the scheduled ctx should wait */
1430 mfc_debug(1, "Couldn't lock HW\n");
1431 return;
1432 }
1433 /* Choose the context to run */
1434 new_ctx = s5p_mfc_get_new_ctx(dev);
1435 if (new_ctx < 0) {
1436 /* No contexts to run */
1437 if (test_and_clear_bit(0, &dev->hw_lock) == 0) {
1438 mfc_err("Failed to unlock hardware\n");
1439 return;
1440 }
1441 mfc_debug(1, "No ctx is scheduled to be run\n");
1442 return;
1443 }
1444 ctx = dev->ctx[new_ctx];
1445 /* Got context to run in ctx */
1446 /*
1447 * Last frame has already been sent to MFC.
1448 * Now obtaining frames from MFC buffer
1449 */
1450 s5p_mfc_clock_on();
1451 if (ctx->type == MFCINST_DECODER) {
1452 s5p_mfc_set_dec_desc_buffer(ctx);
1453 switch (ctx->state) {
1454 case MFCINST_FINISHING:
1455 s5p_mfc_run_dec_frame(ctx, MFC_DEC_LAST_FRAME);
1456 break;
1457 case MFCINST_RUNNING:
1458 ret = s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME);
1459 break;
1460 case MFCINST_INIT:
1461 s5p_mfc_clean_ctx_int_flags(ctx);
1462 ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd,
1463 ctx);
1464 break;
1465 case MFCINST_RETURN_INST:
1466 s5p_mfc_clean_ctx_int_flags(ctx);
1467 ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd,
1468 ctx);
1469 break;
1470 case MFCINST_GOT_INST:
1471 s5p_mfc_run_init_dec(ctx);
1472 break;
1473 case MFCINST_HEAD_PARSED:
1474 ret = s5p_mfc_run_init_dec_buffers(ctx);
1475 mfc_debug(1, "head parsed\n");
1476 break;
1477 case MFCINST_RES_CHANGE_INIT:
1478 s5p_mfc_run_res_change(ctx);
1479 break;
1480 case MFCINST_RES_CHANGE_FLUSH:
1481 s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME);
1482 break;
1483 case MFCINST_RES_CHANGE_END:
1484 mfc_debug(2, "Finished remaining frames after resolution change\n");
1485 ctx->capture_state = QUEUE_FREE;
1486 mfc_debug(2, "Will re-init the codec\n");
1487 s5p_mfc_run_init_dec(ctx);
1488 break;
1489 default:
1490 ret = -EAGAIN;
1491 }
1492 } else if (ctx->type == MFCINST_ENCODER) {
1493 switch (ctx->state) {
1494 case MFCINST_FINISHING:
1495 case MFCINST_RUNNING:
1496 ret = s5p_mfc_run_enc_frame(ctx);
1497 break;
1498 case MFCINST_INIT:
1499 s5p_mfc_clean_ctx_int_flags(ctx);
1500 ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd,
1501 ctx);
1502 break;
1503 case MFCINST_RETURN_INST:
1504 s5p_mfc_clean_ctx_int_flags(ctx);
1505 ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd,
1506 ctx);
1507 break;
1508 case MFCINST_GOT_INST:
1509 s5p_mfc_run_init_enc(ctx);
1510 break;
1511 default:
1512 ret = -EAGAIN;
1513 }
1514 } else {
1515 mfc_err("Invalid context type: %d\n", ctx->type);
1516 ret = -EAGAIN;
1517 }
1518
1519 if (ret) {
1520 /* Free hardware lock */
1521 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
1522 mfc_err("Failed to unlock hardware\n");
1523
1524 /* This is in deed imporant, as no operation has been
1525 * scheduled, reduce the clock count as no one will
1526 * ever do this, because no interrupt related to this try_run
1527 * will ever come from hardware. */
1528 s5p_mfc_clock_off();
1529 }
1530}
1531
1532
1533void s5p_mfc_cleanup_queue_v5(struct list_head *lh, struct vb2_queue *vq)
1534{
1535 struct s5p_mfc_buf *b;
1536 int i;
1537
1538 while (!list_empty(lh)) {
1539 b = list_entry(lh->next, struct s5p_mfc_buf, list);
1540 for (i = 0; i < b->b->num_planes; i++)
1541 vb2_set_plane_payload(b->b, i, 0);
1542 vb2_buffer_done(b->b, VB2_BUF_STATE_ERROR);
1543 list_del(&b->list);
1544 }
1545}
1546
/* Acknowledge a hardware interrupt: clear the host interrupt register,
 * the RISC-to-host command and reset the returned channel id to 0xffff. */
void s5p_mfc_clear_int_flags_v5(struct s5p_mfc_dev *dev)
{
	mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
	mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD);
	mfc_write(dev, 0xffff, S5P_FIMV_SI_RTN_CHID);
}
1553
/* Luma address of the frame to display; the register holds the address
 * pre-shifted right, so shift it back by MFC_OFFSET_SHIFT. */
int s5p_mfc_get_dspl_y_adr_v5(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_SI_DISPLAY_Y_ADR) << MFC_OFFSET_SHIFT;
}

/* Luma address of the frame just decoded (same shifted encoding) */
int s5p_mfc_get_dec_y_adr_v5(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_SI_DECODE_Y_ADR) << MFC_OFFSET_SHIFT;
}

/* Raw display status register */
int s5p_mfc_get_dspl_status_v5(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_SI_DISPLAY_STATUS);
}

/* Raw decode status register */
int s5p_mfc_get_dec_status_v5(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_SI_DECODE_STATUS);
}
1573
/* Type (masked) of the frame that was just decoded */
int s5p_mfc_get_dec_frame_type_v5(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_DECODE_FRAME_TYPE) &
		S5P_FIMV_DECODE_FRAME_MASK;
}

/* Type of the frame to be displayed, read from the shared memory area */
int s5p_mfc_get_disp_frame_type_v5(struct s5p_mfc_ctx *ctx)
{
	return (s5p_mfc_read_info_v5(ctx, DISP_PIC_FRAME_TYPE) >>
			S5P_FIMV_SHARED_DISP_FRAME_TYPE_SHIFT) &
			S5P_FIMV_DECODE_FRAME_MASK;
}

/* Number of stream bytes the hardware consumed in the last run */
int s5p_mfc_get_consumed_stream_v5(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_SI_CONSUMED_BYTES);
}
1591
1592int s5p_mfc_get_int_reason_v5(struct s5p_mfc_dev *dev)
1593{
1594 int reason;
1595 reason = mfc_read(dev, S5P_FIMV_RISC2HOST_CMD) &
1596 S5P_FIMV_RISC2HOST_CMD_MASK;
1597 switch (reason) {
1598 case S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET:
1599 reason = S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET;
1600 break;
1601 case S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET:
1602 reason = S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET;
1603 break;
1604 case S5P_FIMV_R2H_CMD_SEQ_DONE_RET:
1605 reason = S5P_MFC_R2H_CMD_SEQ_DONE_RET;
1606 break;
1607 case S5P_FIMV_R2H_CMD_FRAME_DONE_RET:
1608 reason = S5P_MFC_R2H_CMD_FRAME_DONE_RET;
1609 break;
1610 case S5P_FIMV_R2H_CMD_SLICE_DONE_RET:
1611 reason = S5P_MFC_R2H_CMD_SLICE_DONE_RET;
1612 break;
1613 case S5P_FIMV_R2H_CMD_SYS_INIT_RET:
1614 reason = S5P_MFC_R2H_CMD_SYS_INIT_RET;
1615 break;
1616 case S5P_FIMV_R2H_CMD_FW_STATUS_RET:
1617 reason = S5P_MFC_R2H_CMD_FW_STATUS_RET;
1618 break;
1619 case S5P_FIMV_R2H_CMD_SLEEP_RET:
1620 reason = S5P_MFC_R2H_CMD_SLEEP_RET;
1621 break;
1622 case S5P_FIMV_R2H_CMD_WAKEUP_RET:
1623 reason = S5P_MFC_R2H_CMD_WAKEUP_RET;
1624 break;
1625 case S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET:
1626 reason = S5P_MFC_R2H_CMD_INIT_BUFFERS_RET;
1627 break;
1628 case S5P_FIMV_R2H_CMD_ENC_COMPLETE_RET:
1629 reason = S5P_MFC_R2H_CMD_COMPLETE_SEQ_RET;
1630 break;
1631 case S5P_FIMV_R2H_CMD_ERR_RET:
1632 reason = S5P_MFC_R2H_CMD_ERR_RET;
1633 break;
1634 default:
1635 reason = S5P_MFC_R2H_CMD_EMPTY;
1636 };
1637 return reason;
1638}
1639
/* Raw error argument accompanying an interrupt (RISC2HOST_ARG2) */
int s5p_mfc_get_int_err_v5(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_RISC2HOST_ARG2);
}

/* Extract the decode-error field from a combined error word */
int s5p_mfc_err_dec_v5(unsigned int err)
{
	return (err & S5P_FIMV_ERR_DEC_MASK) >> S5P_FIMV_ERR_DEC_SHIFT;
}

/* Extract the display-error field from a combined error word */
int s5p_mfc_err_dspl_v5(unsigned int err)
{
	return (err & S5P_FIMV_ERR_DSPL_MASK) >> S5P_FIMV_ERR_DSPL_SHIFT;
}
1654
/* Horizontal resolution reported by the parsed stream header */
int s5p_mfc_get_img_width_v5(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_SI_HRESOL);
}

/* Vertical resolution reported by the parsed stream header */
int s5p_mfc_get_img_height_v5(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_SI_VRESOL);
}

/* Number of decoded-picture buffers the stream requires */
int s5p_mfc_get_dpb_count_v5(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_SI_BUF_NUMBER);
}

/* Not supported on v5 hardware; kept so the shared ops table is complete */
int s5p_mfc_get_mv_count_v5(struct s5p_mfc_dev *dev)
{
	/* NOP */
	return -1;
}

/* Instance number returned by the firmware after open_inst */
int s5p_mfc_get_inst_no_v5(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_RISC2HOST_ARG1);
}

/* Size in bytes of the encoded stream produced by the last run */
int s5p_mfc_get_enc_strm_size_v5(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_ENC_SI_STRM_SIZE);
}

/* Slice type of the frame just encoded */
int s5p_mfc_get_enc_slice_type_v5(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_ENC_SI_SLICE_TYPE);
}

/* Not supported on v5 hardware; kept so the shared ops table is complete */
int s5p_mfc_get_enc_dpb_count_v5(struct s5p_mfc_dev *dev)
{
	return -1;
}

/* Encoded picture counter */
int s5p_mfc_get_enc_pic_count_v5(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT);
}
1700
/* Frame-packing SEI availability flag, from the shared memory area */
int s5p_mfc_get_sei_avail_status_v5(struct s5p_mfc_ctx *ctx)
{
	return s5p_mfc_read_info_v5(ctx, FRAME_PACK_SEI_AVAIL);
}

/* MVC is not supported on v5 hardware; kept for the shared ops table */
int s5p_mfc_get_mvc_num_views_v5(struct s5p_mfc_dev *dev)
{
	return -1;
}

/* MVC is not supported on v5 hardware; kept for the shared ops table */
int s5p_mfc_get_mvc_view_id_v5(struct s5p_mfc_dev *dev)
{
	return -1;
}

/* Timestamp info of the top field, from the shared memory area */
unsigned int s5p_mfc_get_pic_type_top_v5(struct s5p_mfc_ctx *ctx)
{
	return s5p_mfc_read_info_v5(ctx, PIC_TIME_TOP);
}

/* Timestamp info of the bottom field, from the shared memory area */
unsigned int s5p_mfc_get_pic_type_bot_v5(struct s5p_mfc_ctx *ctx)
{
	return s5p_mfc_read_info_v5(ctx, PIC_TIME_BOT);
}

/* Horizontal crop information, from the shared memory area */
unsigned int s5p_mfc_get_crop_info_h_v5(struct s5p_mfc_ctx *ctx)
{
	return s5p_mfc_read_info_v5(ctx, CROP_INFO_H);
}

/* Vertical crop information, from the shared memory area */
unsigned int s5p_mfc_get_crop_info_v_v5(struct s5p_mfc_ctx *ctx)
{
	return s5p_mfc_read_info_v5(ctx, CROP_INFO_V);
}
1735
/* Table of v5 hardware operations; entries that v5 cannot provide point
 * at stubs returning -1 (see the getters above). */
static struct s5p_mfc_hw_ops s5p_mfc_ops_v5 = {
	.alloc_dec_temp_buffers = s5p_mfc_alloc_dec_temp_buffers_v5,
	.release_dec_desc_buffer = s5p_mfc_release_dec_desc_buffer_v5,
	.alloc_codec_buffers = s5p_mfc_alloc_codec_buffers_v5,
	.release_codec_buffers = s5p_mfc_release_codec_buffers_v5,
	.alloc_instance_buffer = s5p_mfc_alloc_instance_buffer_v5,
	.release_instance_buffer = s5p_mfc_release_instance_buffer_v5,
	.alloc_dev_context_buffer = s5p_mfc_alloc_dev_context_buffer_v5,
	.release_dev_context_buffer = s5p_mfc_release_dev_context_buffer_v5,
	.dec_calc_dpb_size = s5p_mfc_dec_calc_dpb_size_v5,
	.enc_calc_src_size = s5p_mfc_enc_calc_src_size_v5,
	.set_dec_stream_buffer = s5p_mfc_set_dec_stream_buffer_v5,
	.set_dec_frame_buffer = s5p_mfc_set_dec_frame_buffer_v5,
	.set_enc_stream_buffer = s5p_mfc_set_enc_stream_buffer_v5,
	.set_enc_frame_buffer = s5p_mfc_set_enc_frame_buffer_v5,
	.get_enc_frame_buffer = s5p_mfc_get_enc_frame_buffer_v5,
	.set_enc_ref_buffer = s5p_mfc_set_enc_ref_buffer_v5,
	.init_decode = s5p_mfc_init_decode_v5,
	.init_encode = s5p_mfc_init_encode_v5,
	.encode_one_frame = s5p_mfc_encode_one_frame_v5,
	.try_run = s5p_mfc_try_run_v5,
	.cleanup_queue = s5p_mfc_cleanup_queue_v5,
	.clear_int_flags = s5p_mfc_clear_int_flags_v5,
	.write_info = s5p_mfc_write_info_v5,
	.read_info = s5p_mfc_read_info_v5,
	.get_dspl_y_adr = s5p_mfc_get_dspl_y_adr_v5,
	.get_dec_y_adr = s5p_mfc_get_dec_y_adr_v5,
	.get_dspl_status = s5p_mfc_get_dspl_status_v5,
	.get_dec_status = s5p_mfc_get_dec_status_v5,
	.get_dec_frame_type = s5p_mfc_get_dec_frame_type_v5,
	.get_disp_frame_type = s5p_mfc_get_disp_frame_type_v5,
	.get_consumed_stream = s5p_mfc_get_consumed_stream_v5,
	.get_int_reason = s5p_mfc_get_int_reason_v5,
	.get_int_err = s5p_mfc_get_int_err_v5,
	.err_dec = s5p_mfc_err_dec_v5,
	.err_dspl = s5p_mfc_err_dspl_v5,
	.get_img_width = s5p_mfc_get_img_width_v5,
	.get_img_height = s5p_mfc_get_img_height_v5,
	.get_dpb_count = s5p_mfc_get_dpb_count_v5,
	.get_mv_count = s5p_mfc_get_mv_count_v5,
	.get_inst_no = s5p_mfc_get_inst_no_v5,
	.get_enc_strm_size = s5p_mfc_get_enc_strm_size_v5,
	.get_enc_slice_type = s5p_mfc_get_enc_slice_type_v5,
	.get_enc_dpb_count = s5p_mfc_get_enc_dpb_count_v5,
	.get_enc_pic_count = s5p_mfc_get_enc_pic_count_v5,
	.get_sei_avail_status = s5p_mfc_get_sei_avail_status_v5,
	.get_mvc_num_views = s5p_mfc_get_mvc_num_views_v5,
	.get_mvc_view_id = s5p_mfc_get_mvc_view_id_v5,
	.get_pic_type_top = s5p_mfc_get_pic_type_top_v5,
	.get_pic_type_bot = s5p_mfc_get_pic_type_bot_v5,
	.get_crop_info_h = s5p_mfc_get_crop_info_h_v5,
	.get_crop_info_v = s5p_mfc_get_crop_info_v_v5,
};

/* Return the v5 ops table for the hardware-abstraction layer */
struct s5p_mfc_hw_ops *s5p_mfc_init_hw_ops_v5(void)
{
	return &s5p_mfc_ops_v5;
}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_shm.h b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.h
index 416ebd7ba35a..ffee39a127d5 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_shm.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.h
@@ -1,17 +1,22 @@
1/* 1/*
2 * linux/drivers/media/platform/s5p-mfc/s5p_mfc_shm.h 2 * drivers/media/platform/samsung/mfc5/s5p_mfc_opr_v5.h
3 * 3 *
4 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 4 * Header file for Samsung MFC (Multi Function Codec - FIMV) driver
5 * http://www.samsung.com/ 5 * Contains declarations of hw related functions.
6 *
7 * Kamil Debski, Copyright (C) 2011 Samsung Electronics
8 * http://www.samsung.com/
6 * 9 *
7 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License version 2 as
9 * the Free Software Foundation; either version 2 of the License, or 12 * published by the Free Software Foundation.
10 * (at your option) any later version.
11 */ 13 */
12 14
13#ifndef S5P_MFC_SHM_H_ 15#ifndef S5P_MFC_OPR_V5_H_
14#define S5P_MFC_SHM_H_ 16#define S5P_MFC_OPR_V5_H_
17
18#include "s5p_mfc_common.h"
19#include "s5p_mfc_opr.h"
15 20
16enum MFC_SHM_OFS { 21enum MFC_SHM_OFS {
17 EXTENEDED_DECODE_STATUS = 0x00, /* D */ 22 EXTENEDED_DECODE_STATUS = 0x00, /* D */
@@ -71,20 +76,10 @@ enum MFC_SHM_OFS {
71 DBG_HISTORY_INPUT1 = 0xD4, /* C */ 76 DBG_HISTORY_INPUT1 = 0xD4, /* C */
72 DBG_HISTORY_OUTPUT = 0xD8, /* C */ 77 DBG_HISTORY_OUTPUT = 0xD8, /* C */
73 HIERARCHICAL_P_QP = 0xE0, /* E, H.264 */ 78 HIERARCHICAL_P_QP = 0xE0, /* E, H.264 */
79 FRAME_PACK_SEI_ENABLE = 0x168, /* C */
80 FRAME_PACK_SEI_AVAIL = 0x16c, /* D */
81 FRAME_PACK_SEI_INFO = 0x17c, /* E */
74}; 82};
75 83
76int s5p_mfc_init_shm(struct s5p_mfc_ctx *ctx); 84struct s5p_mfc_hw_ops *s5p_mfc_init_hw_ops_v5(void);
77 85#endif /* S5P_MFC_OPR_H_ */
78#define s5p_mfc_write_shm(ctx, x, ofs) \
79 do { \
80 writel(x, (ctx->shm + ofs)); \
81 wmb(); \
82 } while (0)
83
84static inline u32 s5p_mfc_read_shm(struct s5p_mfc_ctx *ctx, unsigned int ofs)
85{
86 rmb();
87 return readl(ctx->shm + ofs);
88}
89
90#endif /* S5P_MFC_SHM_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
new file mode 100644
index 000000000000..50b5bee3c44e
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -0,0 +1,1956 @@
1/*
2 * drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
3 *
4 * Samsung MFC (Multi Function Codec - FIMV) driver
5 * This file contains hw related functions.
6 *
7 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
8 * http://www.samsung.com/
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#undef DEBUG
16
17#include <linux/delay.h>
18#include <linux/mm.h>
19#include <linux/io.h>
20#include <linux/jiffies.h>
21#include <linux/firmware.h>
22#include <linux/err.h>
23#include <linux/sched.h>
24#include <linux/dma-mapping.h>
25
26#include <asm/cacheflush.h>
27
28#include "s5p_mfc_common.h"
29#include "s5p_mfc_cmd.h"
30#include "s5p_mfc_intr.h"
31#include "s5p_mfc_pm.h"
32#include "s5p_mfc_debug.h"
33#include "s5p_mfc_opr.h"
34#include "s5p_mfc_opr_v6.h"
35
36/* #define S5P_MFC_DEBUG_REGWRITE */
37#ifdef S5P_MFC_DEBUG_REGWRITE
38#undef writel
39#define writel(v, r) \
40 do { \
41 pr_err("MFCWRITE(%p): %08x\n", r, (unsigned int)v); \
42 __raw_writel(v, r); \
43 } while (0)
44#endif /* S5P_MFC_DEBUG_REGWRITE */
45
46#define READL(offset) readl(dev->regs_base + (offset))
47#define WRITEL(data, offset) writel((data), dev->regs_base + (offset))
48#define OFFSETA(x) (((x) - dev->port_a) >> S5P_FIMV_MEM_OFFSET)
49#define OFFSETB(x) (((x) - dev->port_b) >> S5P_FIMV_MEM_OFFSET)
50
/* Allocate temporary buffers for decoding */
int s5p_mfc_alloc_dec_temp_buffers_v6(struct s5p_mfc_ctx *ctx)
{
	/* NOP - nothing to allocate on v6; stub kept for the shared
	 * ops interface */

	return 0;
}
58
/* Release temporary buffers for decoding */
void s5p_mfc_release_dec_desc_buffer_v6(struct s5p_mfc_ctx *ctx)
{
	/* NOP - nothing was allocated; stub kept for the shared
	 * ops interface */
}
64
/* Decode status is not exposed this way on v6; stub returns -1 */
int s5p_mfc_get_dec_status_v6(struct s5p_mfc_dev *dev)
{
	/* NOP */
	return -1;
}
70
71/* Allocate codec buffers */
72int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
73{
74 struct s5p_mfc_dev *dev = ctx->dev;
75 unsigned int mb_width, mb_height;
76
77 mb_width = MB_WIDTH(ctx->img_width);
78 mb_height = MB_HEIGHT(ctx->img_height);
79
80 if (ctx->type == MFCINST_DECODER) {
81 mfc_debug(2, "Luma size:%d Chroma size:%d MV size:%d\n",
82 ctx->luma_size, ctx->chroma_size, ctx->mv_size);
83 mfc_debug(2, "Totals bufs: %d\n", ctx->total_dpb_count);
84 } else if (ctx->type == MFCINST_ENCODER) {
85 ctx->tmv_buffer_size = S5P_FIMV_NUM_TMV_BUFFERS_V6 *
86 ALIGN(S5P_FIMV_TMV_BUFFER_SIZE_V6(mb_width, mb_height),
87 S5P_FIMV_TMV_BUFFER_ALIGN_V6);
88 ctx->luma_dpb_size = ALIGN((mb_width * mb_height) *
89 S5P_FIMV_LUMA_MB_TO_PIXEL_V6,
90 S5P_FIMV_LUMA_DPB_BUFFER_ALIGN_V6);
91 ctx->chroma_dpb_size = ALIGN((mb_width * mb_height) *
92 S5P_FIMV_CHROMA_MB_TO_PIXEL_V6,
93 S5P_FIMV_CHROMA_DPB_BUFFER_ALIGN_V6);
94 ctx->me_buffer_size = ALIGN(S5P_FIMV_ME_BUFFER_SIZE_V6(
95 ctx->img_width, ctx->img_height,
96 mb_width, mb_height),
97 S5P_FIMV_ME_BUFFER_ALIGN_V6);
98
99 mfc_debug(2, "recon luma size: %d chroma size: %d\n",
100 ctx->luma_dpb_size, ctx->chroma_dpb_size);
101 } else {
102 return -EINVAL;
103 }
104
105 /* Codecs have different memory requirements */
106 switch (ctx->codec_mode) {
107 case S5P_MFC_CODEC_H264_DEC:
108 case S5P_MFC_CODEC_H264_MVC_DEC:
109 ctx->scratch_buf_size =
110 S5P_FIMV_SCRATCH_BUF_SIZE_H264_DEC_V6(
111 mb_width,
112 mb_height);
113 ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
114 S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
115 ctx->bank1_size =
116 ctx->scratch_buf_size +
117 (ctx->mv_count * ctx->mv_size);
118 break;
119 case S5P_MFC_CODEC_MPEG4_DEC:
120 ctx->scratch_buf_size =
121 S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_DEC_V6(
122 mb_width,
123 mb_height);
124 ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
125 S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
126 ctx->bank1_size = ctx->scratch_buf_size;
127 break;
128 case S5P_MFC_CODEC_VC1RCV_DEC:
129 case S5P_MFC_CODEC_VC1_DEC:
130 ctx->scratch_buf_size =
131 S5P_FIMV_SCRATCH_BUF_SIZE_VC1_DEC_V6(
132 mb_width,
133 mb_height);
134 ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
135 S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
136 ctx->bank1_size = ctx->scratch_buf_size;
137 break;
138 case S5P_MFC_CODEC_MPEG2_DEC:
139 ctx->bank1_size = 0;
140 ctx->bank2_size = 0;
141 break;
142 case S5P_MFC_CODEC_H263_DEC:
143 ctx->scratch_buf_size =
144 S5P_FIMV_SCRATCH_BUF_SIZE_H263_DEC_V6(
145 mb_width,
146 mb_height);
147 ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
148 S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
149 ctx->bank1_size = ctx->scratch_buf_size;
150 break;
151 case S5P_MFC_CODEC_VP8_DEC:
152 ctx->scratch_buf_size =
153 S5P_FIMV_SCRATCH_BUF_SIZE_VP8_DEC_V6(
154 mb_width,
155 mb_height);
156 ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
157 S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
158 ctx->bank1_size = ctx->scratch_buf_size;
159 break;
160 case S5P_MFC_CODEC_H264_ENC:
161 ctx->scratch_buf_size =
162 S5P_FIMV_SCRATCH_BUF_SIZE_H264_ENC_V6(
163 mb_width,
164 mb_height);
165 ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
166 S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
167 ctx->bank1_size =
168 ctx->scratch_buf_size + ctx->tmv_buffer_size +
169 (ctx->dpb_count * (ctx->luma_dpb_size +
170 ctx->chroma_dpb_size + ctx->me_buffer_size));
171 ctx->bank2_size = 0;
172 break;
173 case S5P_MFC_CODEC_MPEG4_ENC:
174 case S5P_MFC_CODEC_H263_ENC:
175 ctx->scratch_buf_size =
176 S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_ENC_V6(
177 mb_width,
178 mb_height);
179 ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
180 S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
181 ctx->bank1_size =
182 ctx->scratch_buf_size + ctx->tmv_buffer_size +
183 (ctx->dpb_count * (ctx->luma_dpb_size +
184 ctx->chroma_dpb_size + ctx->me_buffer_size));
185 ctx->bank2_size = 0;
186 break;
187 default:
188 break;
189 }
190
191 /* Allocate only if memory from bank 1 is necessary */
192 if (ctx->bank1_size > 0) {
193 ctx->bank1_buf = vb2_dma_contig_memops.alloc(
194 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_size);
195 if (IS_ERR(ctx->bank1_buf)) {
196 ctx->bank1_buf = 0;
197 pr_err("Buf alloc for decoding failed (port A)\n");
198 return -ENOMEM;
199 }
200 ctx->bank1_phys = s5p_mfc_mem_cookie(
201 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_buf);
202 BUG_ON(ctx->bank1_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
203 }
204
205 return 0;
206}
207
208/* Release buffers allocated for codec */
209void s5p_mfc_release_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
210{
211 if (ctx->bank1_buf) {
212 vb2_dma_contig_memops.put(ctx->bank1_buf);
213 ctx->bank1_buf = 0;
214 ctx->bank1_phys = 0;
215 ctx->bank1_size = 0;
216 }
217}
218
/* Allocate memory for instance data buffer */
int s5p_mfc_alloc_instance_buffer_v6(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;

	mfc_debug_enter();

	/* Context buffer size depends on the codec family */
	switch (ctx->codec_mode) {
	case S5P_MFC_CODEC_H264_DEC:
	case S5P_MFC_CODEC_H264_MVC_DEC:
		ctx->ctx.size = buf_size->h264_dec_ctx;
		break;
	case S5P_MFC_CODEC_MPEG4_DEC:
	case S5P_MFC_CODEC_H263_DEC:
	case S5P_MFC_CODEC_VC1RCV_DEC:
	case S5P_MFC_CODEC_VC1_DEC:
	case S5P_MFC_CODEC_MPEG2_DEC:
	case S5P_MFC_CODEC_VP8_DEC:
		ctx->ctx.size = buf_size->other_dec_ctx;
		break;
	case S5P_MFC_CODEC_H264_ENC:
		ctx->ctx.size = buf_size->h264_enc_ctx;
		break;
	case S5P_MFC_CODEC_MPEG4_ENC:
	case S5P_MFC_CODEC_H263_ENC:
		ctx->ctx.size = buf_size->other_enc_ctx;
		break;
	default:
		ctx->ctx.size = 0;
		mfc_err("Codec type(%d) should be checked!\n", ctx->codec_mode);
		break;
	}

	ctx->ctx.alloc = vb2_dma_contig_memops.alloc(
		dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx.size);
	if (IS_ERR(ctx->ctx.alloc)) {
		mfc_err("Allocating context buffer failed.\n");
		return PTR_ERR(ctx->ctx.alloc);
	}

	ctx->ctx.dma = s5p_mfc_mem_cookie(
		dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx.alloc);

	/* Map the buffer; roll the allocation back if mapping fails */
	ctx->ctx.virt = vb2_dma_contig_memops.vaddr(ctx->ctx.alloc);
	if (!ctx->ctx.virt) {
		vb2_dma_contig_memops.put(ctx->ctx.alloc);
		ctx->ctx.alloc = NULL;
		ctx->ctx.dma = 0;
		ctx->ctx.virt = NULL;

		mfc_err("Remapping context buffer failed.\n");
		return -ENOMEM;
	}

	/* Firmware expects a zeroed context; ensure the writes are visible */
	memset(ctx->ctx.virt, 0, ctx->ctx.size);
	wmb();

	mfc_debug_leave();

	return 0;
}
281
/* Release instance buffer (no-op when nothing was allocated) */
void s5p_mfc_release_instance_buffer_v6(struct s5p_mfc_ctx *ctx)
{
	mfc_debug_enter();

	if (ctx->ctx.alloc) {
		vb2_dma_contig_memops.put(ctx->ctx.alloc);
		ctx->ctx.alloc = NULL;
		ctx->ctx.dma = 0;
		ctx->ctx.virt = NULL;
	}

	mfc_debug_leave();
}
296
/* Allocate context buffers for SYS_INIT */
int s5p_mfc_alloc_dev_context_buffer_v6(struct s5p_mfc_dev *dev)
{
	struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;

	mfc_debug_enter();

	/* NOTE(review): the error strings below say "DESC buffer" although
	 * this allocates the SYS_INIT device context - looks like a
	 * copy/paste; confirm before changing the messages. */
	dev->ctx_buf.alloc = vb2_dma_contig_memops.alloc(
			dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], buf_size->dev_ctx);
	if (IS_ERR(dev->ctx_buf.alloc)) {
		mfc_err("Allocating DESC buffer failed.\n");
		return PTR_ERR(dev->ctx_buf.alloc);
	}

	dev->ctx_buf.dma = s5p_mfc_mem_cookie(
			dev->alloc_ctx[MFC_BANK1_ALLOC_CTX],
			dev->ctx_buf.alloc);

	/* Map the buffer; roll the allocation back if mapping fails */
	dev->ctx_buf.virt = vb2_dma_contig_memops.vaddr(dev->ctx_buf.alloc);
	if (!dev->ctx_buf.virt) {
		vb2_dma_contig_memops.put(dev->ctx_buf.alloc);
		dev->ctx_buf.alloc = NULL;
		dev->ctx_buf.dma = 0;

		mfc_err("Remapping DESC buffer failed.\n");
		return -ENOMEM;
	}

	/* Firmware expects a zeroed buffer; ensure the writes are visible */
	memset(dev->ctx_buf.virt, 0, buf_size->dev_ctx);
	wmb();

	mfc_debug_leave();

	return 0;
}
332
333/* Release context buffers for SYS_INIT */
334void s5p_mfc_release_dev_context_buffer_v6(struct s5p_mfc_dev *dev)
335{
336 if (dev->ctx_buf.alloc) {
337 vb2_dma_contig_memops.put(dev->ctx_buf.alloc);
338 dev->ctx_buf.alloc = NULL;
339 dev->ctx_buf.dma = 0;
340 dev->ctx_buf.virt = NULL;
341 }
342}
343
344static int calc_plane(int width, int height)
345{
346 int mbX, mbY;
347
348 mbX = DIV_ROUND_UP(width, S5P_FIMV_NUM_PIXELS_IN_MB_ROW_V6);
349 mbY = DIV_ROUND_UP(height, S5P_FIMV_NUM_PIXELS_IN_MB_COL_V6);
350
351 if (width * height < S5P_FIMV_MAX_FRAME_SIZE_V6)
352 mbY = (mbY + 1) / 2 * 2;
353
354 return (mbX * S5P_FIMV_NUM_PIXELS_IN_MB_COL_V6) *
355 (mbY * S5P_FIMV_NUM_PIXELS_IN_MB_ROW_V6);
356}
357
358void s5p_mfc_dec_calc_dpb_size_v6(struct s5p_mfc_ctx *ctx)
359{
360 ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN_V6);
361 ctx->buf_height = ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN_V6);
362 mfc_debug(2, "SEQ Done: Movie dimensions %dx%d,\n"
363 "buffer dimensions: %dx%d\n", ctx->img_width,
364 ctx->img_height, ctx->buf_width, ctx->buf_height);
365
366 ctx->luma_size = calc_plane(ctx->img_width, ctx->img_height);
367 ctx->chroma_size = calc_plane(ctx->img_width, (ctx->img_height >> 1));
368 if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC ||
369 ctx->codec_mode == S5P_MFC_CODEC_H264_MVC_DEC) {
370 ctx->mv_size = S5P_MFC_DEC_MV_SIZE_V6(ctx->img_width,
371 ctx->img_height);
372 ctx->mv_size = ALIGN(ctx->mv_size, 16);
373 } else {
374 ctx->mv_size = 0;
375 }
376}
377
378void s5p_mfc_enc_calc_src_size_v6(struct s5p_mfc_ctx *ctx)
379{
380 unsigned int mb_width, mb_height;
381
382 mb_width = MB_WIDTH(ctx->img_width);
383 mb_height = MB_HEIGHT(ctx->img_height);
384
385 ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN_V6);
386 ctx->luma_size = ALIGN((mb_width * mb_height) * 256, 256);
387 ctx->chroma_size = ALIGN((mb_width * mb_height) * 128, 256);
388}
389
/* Set registers for decoding stream buffer */
/*
 * Program the compressed-bitstream (CPB) buffer for decoding: number
 * of valid stream bytes, DMA address, total CPB capacity (from the
 * variant data) and the offset of the first byte to parse.
 * Always returns 0.
 */
int s5p_mfc_set_dec_stream_buffer_v6(struct s5p_mfc_ctx *ctx, int buf_addr,
		unsigned int start_num_byte, unsigned int strm_size)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_buf_size *buf_size = dev->variant->buf_size;

	mfc_debug_enter();
	mfc_debug(2, "inst_no: %d, buf_addr: 0x%08x,\n"
		"buf_size: 0x%08x (%d)\n",
		ctx->inst_no, buf_addr, strm_size, strm_size);
	WRITEL(strm_size, S5P_FIMV_D_STREAM_DATA_SIZE_V6);	/* valid bytes */
	WRITEL(buf_addr, S5P_FIMV_D_CPB_BUFFER_ADDR_V6);	/* DMA address */
	WRITEL(buf_size->cpb, S5P_FIMV_D_CPB_BUFFER_SIZE_V6);	/* capacity */
	WRITEL(start_num_byte, S5P_FIMV_D_CPB_BUFFER_OFFSET_V6); /* parse offset */

	mfc_debug_leave();
	return 0;
}
409
410/* Set decoding frame buffer */
411int s5p_mfc_set_dec_frame_buffer_v6(struct s5p_mfc_ctx *ctx)
412{
413 unsigned int frame_size, i;
414 unsigned int frame_size_ch, frame_size_mv;
415 struct s5p_mfc_dev *dev = ctx->dev;
416 size_t buf_addr1;
417 int buf_size1;
418 int align_gap;
419
420 buf_addr1 = ctx->bank1_phys;
421 buf_size1 = ctx->bank1_size;
422
423 mfc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1);
424 mfc_debug(2, "Total DPB COUNT: %d\n", ctx->total_dpb_count);
425 mfc_debug(2, "Setting display delay to %d\n", ctx->display_delay);
426
427 WRITEL(ctx->total_dpb_count, S5P_FIMV_D_NUM_DPB_V6);
428 WRITEL(ctx->luma_size, S5P_FIMV_D_LUMA_DPB_SIZE_V6);
429 WRITEL(ctx->chroma_size, S5P_FIMV_D_CHROMA_DPB_SIZE_V6);
430
431 WRITEL(buf_addr1, S5P_FIMV_D_SCRATCH_BUFFER_ADDR_V6);
432 WRITEL(ctx->scratch_buf_size, S5P_FIMV_D_SCRATCH_BUFFER_SIZE_V6);
433 buf_addr1 += ctx->scratch_buf_size;
434 buf_size1 -= ctx->scratch_buf_size;
435
436 if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC ||
437 ctx->codec_mode == S5P_FIMV_CODEC_H264_MVC_DEC){
438 WRITEL(ctx->mv_size, S5P_FIMV_D_MV_BUFFER_SIZE_V6);
439 WRITEL(ctx->mv_count, S5P_FIMV_D_NUM_MV_V6);
440 }
441
442 frame_size = ctx->luma_size;
443 frame_size_ch = ctx->chroma_size;
444 frame_size_mv = ctx->mv_size;
445 mfc_debug(2, "Frame size: %d ch: %d mv: %d\n",
446 frame_size, frame_size_ch, frame_size_mv);
447
448 for (i = 0; i < ctx->total_dpb_count; i++) {
449 /* Bank2 */
450 mfc_debug(2, "Luma %d: %x\n", i,
451 ctx->dst_bufs[i].cookie.raw.luma);
452 WRITEL(ctx->dst_bufs[i].cookie.raw.luma,
453 S5P_FIMV_D_LUMA_DPB_V6 + i * 4);
454 mfc_debug(2, "\tChroma %d: %x\n", i,
455 ctx->dst_bufs[i].cookie.raw.chroma);
456 WRITEL(ctx->dst_bufs[i].cookie.raw.chroma,
457 S5P_FIMV_D_CHROMA_DPB_V6 + i * 4);
458 }
459 if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC ||
460 ctx->codec_mode == S5P_MFC_CODEC_H264_MVC_DEC) {
461 for (i = 0; i < ctx->mv_count; i++) {
462 /* To test alignment */
463 align_gap = buf_addr1;
464 buf_addr1 = ALIGN(buf_addr1, 16);
465 align_gap = buf_addr1 - align_gap;
466 buf_size1 -= align_gap;
467
468 mfc_debug(2, "\tBuf1: %x, size: %d\n",
469 buf_addr1, buf_size1);
470 WRITEL(buf_addr1, S5P_FIMV_D_MV_BUFFER_V6 + i * 4);
471 buf_addr1 += frame_size_mv;
472 buf_size1 -= frame_size_mv;
473 }
474 }
475
476 mfc_debug(2, "Buf1: %u, buf_size1: %d (frames %d)\n",
477 buf_addr1, buf_size1, ctx->total_dpb_count);
478 if (buf_size1 < 0) {
479 mfc_debug(2, "Not enough memory has been allocated.\n");
480 return -ENOMEM;
481 }
482
483 WRITEL(ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6);
484 s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
485 S5P_FIMV_CH_INIT_BUFS_V6, NULL);
486
487 mfc_debug(2, "After setting buffers.\n");
488 return 0;
489}
490
491/* Set registers for encoding stream buffer */
492int s5p_mfc_set_enc_stream_buffer_v6(struct s5p_mfc_ctx *ctx,
493 unsigned long addr, unsigned int size)
494{
495 struct s5p_mfc_dev *dev = ctx->dev;
496
497 WRITEL(addr, S5P_FIMV_E_STREAM_BUFFER_ADDR_V6); /* 16B align */
498 WRITEL(size, S5P_FIMV_E_STREAM_BUFFER_SIZE_V6);
499
500 mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%d",
501 addr, size);
502
503 return 0;
504}
505
/*
 * Program the DMA addresses of the current raw source frame (luma and
 * chroma planes) to be encoded next.
 */
void s5p_mfc_set_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
		unsigned long y_addr, unsigned long c_addr)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	WRITEL(y_addr, S5P_FIMV_E_SOURCE_LUMA_ADDR_V6); /* 256B align */
	WRITEL(c_addr, S5P_FIMV_E_SOURCE_CHROMA_ADDR_V6);

	mfc_debug(2, "enc src y buf addr: 0x%08lx", y_addr);
	mfc_debug(2, "enc src c buf addr: 0x%08lx", c_addr);
}
517
/*
 * Read back which source frame the hardware just consumed, returning
 * its luma/chroma DMA addresses through *y_addr / *c_addr.  The
 * reconstructed-frame addresses are read only for debug logging.
 */
void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
		unsigned long *y_addr, unsigned long *c_addr)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long enc_recon_y_addr, enc_recon_c_addr;

	*y_addr = READL(S5P_FIMV_E_ENCODED_SOURCE_LUMA_ADDR_V6);
	*c_addr = READL(S5P_FIMV_E_ENCODED_SOURCE_CHROMA_ADDR_V6);

	/* Debug only: where the reconstructed frame was stored. */
	enc_recon_y_addr = READL(S5P_FIMV_E_RECON_LUMA_DPB_ADDR_V6);
	enc_recon_c_addr = READL(S5P_FIMV_E_RECON_CHROMA_DPB_ADDR_V6);

	mfc_debug(2, "recon y addr: 0x%08lx", enc_recon_y_addr);
	mfc_debug(2, "recon c addr: 0x%08lx", enc_recon_c_addr);
}
533
534/* Set encoding ref & codec buffer */
535int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx)
536{
537 struct s5p_mfc_dev *dev = ctx->dev;
538 size_t buf_addr1, buf_size1;
539 int i;
540
541 mfc_debug_enter();
542
543 buf_addr1 = ctx->bank1_phys;
544 buf_size1 = ctx->bank1_size;
545
546 mfc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1);
547
548 for (i = 0; i < ctx->dpb_count; i++) {
549 WRITEL(buf_addr1, S5P_FIMV_E_LUMA_DPB_V6 + (4 * i));
550 buf_addr1 += ctx->luma_dpb_size;
551 WRITEL(buf_addr1, S5P_FIMV_E_CHROMA_DPB_V6 + (4 * i));
552 buf_addr1 += ctx->chroma_dpb_size;
553 WRITEL(buf_addr1, S5P_FIMV_E_ME_BUFFER_V6 + (4 * i));
554 buf_addr1 += ctx->me_buffer_size;
555 buf_size1 -= (ctx->luma_dpb_size + ctx->chroma_dpb_size +
556 ctx->me_buffer_size);
557 }
558
559 WRITEL(buf_addr1, S5P_FIMV_E_SCRATCH_BUFFER_ADDR_V6);
560 WRITEL(ctx->scratch_buf_size, S5P_FIMV_E_SCRATCH_BUFFER_SIZE_V6);
561 buf_addr1 += ctx->scratch_buf_size;
562 buf_size1 -= ctx->scratch_buf_size;
563
564 WRITEL(buf_addr1, S5P_FIMV_E_TMV_BUFFER0_V6);
565 buf_addr1 += ctx->tmv_buffer_size >> 1;
566 WRITEL(buf_addr1, S5P_FIMV_E_TMV_BUFFER1_V6);
567 buf_addr1 += ctx->tmv_buffer_size >> 1;
568 buf_size1 -= ctx->tmv_buffer_size;
569
570 mfc_debug(2, "Buf1: %u, buf_size1: %d (ref frames %d)\n",
571 buf_addr1, buf_size1, ctx->dpb_count);
572 if (buf_size1 < 0) {
573 mfc_debug(2, "Not enough memory has been allocated.\n");
574 return -ENOMEM;
575 }
576
577 WRITEL(ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6);
578 s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
579 S5P_FIMV_CH_INIT_BUFS_V6, NULL);
580
581 mfc_debug_leave();
582
583 return 0;
584}
585
/*
 * Program the multi-slice registers from the mode cached in the
 * context: the slice limit in macroblocks, in bits, or neither
 * (both size registers cleared).  Always returns 0.
 */
static int s5p_mfc_set_slice_mode(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	/* multi-slice control */
	/* multi-slice MB number or bit size */
	WRITEL(ctx->slice_mode, S5P_FIMV_E_MSLICE_MODE_V6);
	if (ctx->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) {
		WRITEL(ctx->slice_size.mb, S5P_FIMV_E_MSLICE_SIZE_MB_V6);
	} else if (ctx->slice_mode ==
			V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) {
		WRITEL(ctx->slice_size.bits, S5P_FIMV_E_MSLICE_SIZE_BITS_V6);
	} else {
		/* Single-slice: clear both size registers. */
		WRITEL(0x0, S5P_FIMV_E_MSLICE_SIZE_MB_V6);
		WRITEL(0x0, S5P_FIMV_E_MSLICE_SIZE_BITS_V6);
	}

	return 0;
}
605
606static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
607{
608 struct s5p_mfc_dev *dev = ctx->dev;
609 struct s5p_mfc_enc_params *p = &ctx->enc_params;
610 unsigned int reg = 0;
611
612 mfc_debug_enter();
613
614 /* width */
615 WRITEL(ctx->img_width, S5P_FIMV_E_FRAME_WIDTH_V6); /* 16 align */
616 /* height */
617 WRITEL(ctx->img_height, S5P_FIMV_E_FRAME_HEIGHT_V6); /* 16 align */
618
619 /* cropped width */
620 WRITEL(ctx->img_width, S5P_FIMV_E_CROPPED_FRAME_WIDTH_V6);
621 /* cropped height */
622 WRITEL(ctx->img_height, S5P_FIMV_E_CROPPED_FRAME_HEIGHT_V6);
623 /* cropped offset */
624 WRITEL(0x0, S5P_FIMV_E_FRAME_CROP_OFFSET_V6);
625
626 /* pictype : IDR period */
627 reg = 0;
628 reg |= p->gop_size & 0xFFFF;
629 WRITEL(reg, S5P_FIMV_E_GOP_CONFIG_V6);
630
631 /* multi-slice control */
632 /* multi-slice MB number or bit size */
633 ctx->slice_mode = p->slice_mode;
634 reg = 0;
635 if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) {
636 reg |= (0x1 << 3);
637 WRITEL(reg, S5P_FIMV_E_ENC_OPTIONS_V6);
638 ctx->slice_size.mb = p->slice_mb;
639 } else if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) {
640 reg |= (0x1 << 3);
641 WRITEL(reg, S5P_FIMV_E_ENC_OPTIONS_V6);
642 ctx->slice_size.bits = p->slice_bit;
643 } else {
644 reg &= ~(0x1 << 3);
645 WRITEL(reg, S5P_FIMV_E_ENC_OPTIONS_V6);
646 }
647
648 s5p_mfc_set_slice_mode(ctx);
649
650 /* cyclic intra refresh */
651 WRITEL(p->intra_refresh_mb, S5P_FIMV_E_IR_SIZE_V6);
652 reg = READL(S5P_FIMV_E_ENC_OPTIONS_V6);
653 if (p->intra_refresh_mb == 0)
654 reg &= ~(0x1 << 4);
655 else
656 reg |= (0x1 << 4);
657 WRITEL(reg, S5P_FIMV_E_ENC_OPTIONS_V6);
658
659 /* 'NON_REFERENCE_STORE_ENABLE' for debugging */
660 reg = READL(S5P_FIMV_E_ENC_OPTIONS_V6);
661 reg &= ~(0x1 << 9);
662 WRITEL(reg, S5P_FIMV_E_ENC_OPTIONS_V6);
663
664 /* memory structure cur. frame */
665 if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M) {
666 /* 0: Linear, 1: 2D tiled*/
667 reg = READL(S5P_FIMV_E_ENC_OPTIONS_V6);
668 reg &= ~(0x1 << 7);
669 WRITEL(reg, S5P_FIMV_E_ENC_OPTIONS_V6);
670 /* 0: NV12(CbCr), 1: NV21(CrCb) */
671 WRITEL(0x0, S5P_FIMV_PIXEL_FORMAT_V6);
672 } else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV21M) {
673 /* 0: Linear, 1: 2D tiled*/
674 reg = READL(S5P_FIMV_E_ENC_OPTIONS_V6);
675 reg &= ~(0x1 << 7);
676 WRITEL(reg, S5P_FIMV_E_ENC_OPTIONS_V6);
677 /* 0: NV12(CbCr), 1: NV21(CrCb) */
678 WRITEL(0x1, S5P_FIMV_PIXEL_FORMAT_V6);
679 } else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16) {
680 /* 0: Linear, 1: 2D tiled*/
681 reg = READL(S5P_FIMV_E_ENC_OPTIONS_V6);
682 reg |= (0x1 << 7);
683 WRITEL(reg, S5P_FIMV_E_ENC_OPTIONS_V6);
684 /* 0: NV12(CbCr), 1: NV21(CrCb) */
685 WRITEL(0x0, S5P_FIMV_PIXEL_FORMAT_V6);
686 }
687
688 /* memory structure recon. frame */
689 /* 0: Linear, 1: 2D tiled */
690 reg = READL(S5P_FIMV_E_ENC_OPTIONS_V6);
691 reg |= (0x1 << 8);
692 WRITEL(reg, S5P_FIMV_E_ENC_OPTIONS_V6);
693
694 /* padding control & value */
695 WRITEL(0x0, S5P_FIMV_E_PADDING_CTRL_V6);
696 if (p->pad) {
697 reg = 0;
698 /** enable */
699 reg |= (1 << 31);
700 /** cr value */
701 reg |= ((p->pad_cr & 0xFF) << 16);
702 /** cb value */
703 reg |= ((p->pad_cb & 0xFF) << 8);
704 /** y value */
705 reg |= p->pad_luma & 0xFF;
706 WRITEL(reg, S5P_FIMV_E_PADDING_CTRL_V6);
707 }
708
709 /* rate control config. */
710 reg = 0;
711 /* frame-level rate control */
712 reg |= ((p->rc_frame & 0x1) << 9);
713 WRITEL(reg, S5P_FIMV_E_RC_CONFIG_V6);
714
715 /* bit rate */
716 if (p->rc_frame)
717 WRITEL(p->rc_bitrate,
718 S5P_FIMV_E_RC_BIT_RATE_V6);
719 else
720 WRITEL(1, S5P_FIMV_E_RC_BIT_RATE_V6);
721
722 /* reaction coefficient */
723 if (p->rc_frame) {
724 if (p->rc_reaction_coeff < TIGHT_CBR_MAX) /* tight CBR */
725 WRITEL(1, S5P_FIMV_E_RC_RPARAM_V6);
726 else /* loose CBR */
727 WRITEL(2, S5P_FIMV_E_RC_RPARAM_V6);
728 }
729
730 /* seq header ctrl */
731 reg = READL(S5P_FIMV_E_ENC_OPTIONS_V6);
732 reg &= ~(0x1 << 2);
733 reg |= ((p->seq_hdr_mode & 0x1) << 2);
734
735 /* frame skip mode */
736 reg &= ~(0x3);
737 reg |= (p->frame_skip_mode & 0x3);
738 WRITEL(reg, S5P_FIMV_E_ENC_OPTIONS_V6);
739
740 /* 'DROP_CONTROL_ENABLE', disable */
741 reg = READL(S5P_FIMV_E_RC_CONFIG_V6);
742 reg &= ~(0x1 << 10);
743 WRITEL(reg, S5P_FIMV_E_RC_CONFIG_V6);
744
745 /* setting for MV range [16, 256] */
746 reg = 0;
747 reg &= ~(0x3FFF);
748 reg = 256;
749 WRITEL(reg, S5P_FIMV_E_MV_HOR_RANGE_V6);
750
751 reg = 0;
752 reg &= ~(0x3FFF);
753 reg = 256;
754 WRITEL(reg, S5P_FIMV_E_MV_VER_RANGE_V6);
755
756 WRITEL(0x0, S5P_FIMV_E_FRAME_INSERTION_V6);
757 WRITEL(0x0, S5P_FIMV_E_ROI_BUFFER_ADDR_V6);
758 WRITEL(0x0, S5P_FIMV_E_PARAM_CHANGE_V6);
759 WRITEL(0x0, S5P_FIMV_E_RC_ROI_CTRL_V6);
760 WRITEL(0x0, S5P_FIMV_E_PICTURE_TAG_V6);
761
762 WRITEL(0x0, S5P_FIMV_E_BIT_COUNT_ENABLE_V6);
763 WRITEL(0x0, S5P_FIMV_E_MAX_BIT_COUNT_V6);
764 WRITEL(0x0, S5P_FIMV_E_MIN_BIT_COUNT_V6);
765
766 WRITEL(0x0, S5P_FIMV_E_METADATA_BUFFER_ADDR_V6);
767 WRITEL(0x0, S5P_FIMV_E_METADATA_BUFFER_SIZE_V6);
768
769 mfc_debug_leave();
770
771 return 0;
772}
773
774static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
775{
776 struct s5p_mfc_dev *dev = ctx->dev;
777 struct s5p_mfc_enc_params *p = &ctx->enc_params;
778 struct s5p_mfc_h264_enc_params *p_h264 = &p->codec.h264;
779 unsigned int reg = 0;
780 int i;
781
782 mfc_debug_enter();
783
784 s5p_mfc_set_enc_params(ctx);
785
786 /* pictype : number of B */
787 reg = READL(S5P_FIMV_E_GOP_CONFIG_V6);
788 reg &= ~(0x3 << 16);
789 reg |= ((p->num_b_frame & 0x3) << 16);
790 WRITEL(reg, S5P_FIMV_E_GOP_CONFIG_V6);
791
792 /* profile & level */
793 reg = 0;
794 /** level */
795 reg |= ((p_h264->level & 0xFF) << 8);
796 /** profile - 0 ~ 3 */
797 reg |= p_h264->profile & 0x3F;
798 WRITEL(reg, S5P_FIMV_E_PICTURE_PROFILE_V6);
799
800 /* rate control config. */
801 reg = READL(S5P_FIMV_E_RC_CONFIG_V6);
802 /** macroblock level rate control */
803 reg &= ~(0x1 << 8);
804 reg |= ((p->rc_mb & 0x1) << 8);
805 WRITEL(reg, S5P_FIMV_E_RC_CONFIG_V6);
806 /** frame QP */
807 reg &= ~(0x3F);
808 reg |= p_h264->rc_frame_qp & 0x3F;
809 WRITEL(reg, S5P_FIMV_E_RC_CONFIG_V6);
810
811 /* max & min value of QP */
812 reg = 0;
813 /** max QP */
814 reg |= ((p_h264->rc_max_qp & 0x3F) << 8);
815 /** min QP */
816 reg |= p_h264->rc_min_qp & 0x3F;
817 WRITEL(reg, S5P_FIMV_E_RC_QP_BOUND_V6);
818
819 /* other QPs */
820 WRITEL(0x0, S5P_FIMV_E_FIXED_PICTURE_QP_V6);
821 if (!p->rc_frame && !p->rc_mb) {
822 reg = 0;
823 reg |= ((p_h264->rc_b_frame_qp & 0x3F) << 16);
824 reg |= ((p_h264->rc_p_frame_qp & 0x3F) << 8);
825 reg |= p_h264->rc_frame_qp & 0x3F;
826 WRITEL(reg, S5P_FIMV_E_FIXED_PICTURE_QP_V6);
827 }
828
829 /* frame rate */
830 if (p->rc_frame && p->rc_framerate_num && p->rc_framerate_denom) {
831 reg = 0;
832 reg |= ((p->rc_framerate_num & 0xFFFF) << 16);
833 reg |= p->rc_framerate_denom & 0xFFFF;
834 WRITEL(reg, S5P_FIMV_E_RC_FRAME_RATE_V6);
835 }
836
837 /* vbv buffer size */
838 if (p->frame_skip_mode ==
839 V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
840 WRITEL(p_h264->cpb_size & 0xFFFF,
841 S5P_FIMV_E_VBV_BUFFER_SIZE_V6);
842
843 if (p->rc_frame)
844 WRITEL(p->vbv_delay, S5P_FIMV_E_VBV_INIT_DELAY_V6);
845 }
846
847 /* interlace */
848 reg = 0;
849 reg |= ((p_h264->interlace & 0x1) << 3);
850 WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6);
851
852 /* height */
853 if (p_h264->interlace) {
854 WRITEL(ctx->img_height >> 1,
855 S5P_FIMV_E_FRAME_HEIGHT_V6); /* 32 align */
856 /* cropped height */
857 WRITEL(ctx->img_height >> 1,
858 S5P_FIMV_E_CROPPED_FRAME_HEIGHT_V6);
859 }
860
861 /* loop filter ctrl */
862 reg = READL(S5P_FIMV_E_H264_OPTIONS_V6);
863 reg &= ~(0x3 << 1);
864 reg |= ((p_h264->loop_filter_mode & 0x3) << 1);
865 WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6);
866
867 /* loopfilter alpha offset */
868 if (p_h264->loop_filter_alpha < 0) {
869 reg = 0x10;
870 reg |= (0xFF - p_h264->loop_filter_alpha) + 1;
871 } else {
872 reg = 0x00;
873 reg |= (p_h264->loop_filter_alpha & 0xF);
874 }
875 WRITEL(reg, S5P_FIMV_E_H264_LF_ALPHA_OFFSET_V6);
876
877 /* loopfilter beta offset */
878 if (p_h264->loop_filter_beta < 0) {
879 reg = 0x10;
880 reg |= (0xFF - p_h264->loop_filter_beta) + 1;
881 } else {
882 reg = 0x00;
883 reg |= (p_h264->loop_filter_beta & 0xF);
884 }
885 WRITEL(reg, S5P_FIMV_E_H264_LF_BETA_OFFSET_V6);
886
887 /* entropy coding mode */
888 reg = READL(S5P_FIMV_E_H264_OPTIONS_V6);
889 reg &= ~(0x1);
890 reg |= p_h264->entropy_mode & 0x1;
891 WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6);
892
893 /* number of ref. picture */
894 reg = READL(S5P_FIMV_E_H264_OPTIONS_V6);
895 reg &= ~(0x1 << 7);
896 reg |= (((p_h264->num_ref_pic_4p - 1) & 0x1) << 7);
897 WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6);
898
899 /* 8x8 transform enable */
900 reg = READL(S5P_FIMV_E_H264_OPTIONS_V6);
901 reg &= ~(0x3 << 12);
902 reg |= ((p_h264->_8x8_transform & 0x3) << 12);
903 WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6);
904
905 /* macroblock adaptive scaling features */
906 WRITEL(0x0, S5P_FIMV_E_MB_RC_CONFIG_V6);
907 if (p->rc_mb) {
908 reg = 0;
909 /** dark region */
910 reg |= ((p_h264->rc_mb_dark & 0x1) << 3);
911 /** smooth region */
912 reg |= ((p_h264->rc_mb_smooth & 0x1) << 2);
913 /** static region */
914 reg |= ((p_h264->rc_mb_static & 0x1) << 1);
915 /** high activity region */
916 reg |= p_h264->rc_mb_activity & 0x1;
917 WRITEL(reg, S5P_FIMV_E_MB_RC_CONFIG_V6);
918 }
919
920 /* aspect ratio VUI */
921 reg = READL(S5P_FIMV_E_H264_OPTIONS_V6);
922 reg &= ~(0x1 << 5);
923 reg |= ((p_h264->vui_sar & 0x1) << 5);
924 WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6);
925
926 WRITEL(0x0, S5P_FIMV_E_ASPECT_RATIO_V6);
927 WRITEL(0x0, S5P_FIMV_E_EXTENDED_SAR_V6);
928 if (p_h264->vui_sar) {
929 /* aspect ration IDC */
930 reg = 0;
931 reg |= p_h264->vui_sar_idc & 0xFF;
932 WRITEL(reg, S5P_FIMV_E_ASPECT_RATIO_V6);
933 if (p_h264->vui_sar_idc == 0xFF) {
934 /* extended SAR */
935 reg = 0;
936 reg |= (p_h264->vui_ext_sar_width & 0xFFFF) << 16;
937 reg |= p_h264->vui_ext_sar_height & 0xFFFF;
938 WRITEL(reg, S5P_FIMV_E_EXTENDED_SAR_V6);
939 }
940 }
941
942 /* intra picture period for H.264 open GOP */
943 /* control */
944 reg = READL(S5P_FIMV_E_H264_OPTIONS_V6);
945 reg &= ~(0x1 << 4);
946 reg |= ((p_h264->open_gop & 0x1) << 4);
947 WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6);
948 /* value */
949 WRITEL(0x0, S5P_FIMV_E_H264_I_PERIOD_V6);
950 if (p_h264->open_gop) {
951 reg = 0;
952 reg |= p_h264->open_gop_size & 0xFFFF;
953 WRITEL(reg, S5P_FIMV_E_H264_I_PERIOD_V6);
954 }
955
956 /* 'WEIGHTED_BI_PREDICTION' for B is disable */
957 reg = READL(S5P_FIMV_E_H264_OPTIONS_V6);
958 reg &= ~(0x3 << 9);
959 WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6);
960
961 /* 'CONSTRAINED_INTRA_PRED_ENABLE' is disable */
962 reg = READL(S5P_FIMV_E_H264_OPTIONS_V6);
963 reg &= ~(0x1 << 14);
964 WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6);
965
966 /* ASO */
967 reg = READL(S5P_FIMV_E_H264_OPTIONS_V6);
968 reg &= ~(0x1 << 6);
969 reg |= ((p_h264->aso & 0x1) << 6);
970 WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6);
971
972 /* hier qp enable */
973 reg = READL(S5P_FIMV_E_H264_OPTIONS_V6);
974 reg &= ~(0x1 << 8);
975 reg |= ((p_h264->open_gop & 0x1) << 8);
976 WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6);
977 reg = 0;
978 if (p_h264->hier_qp && p_h264->hier_qp_layer) {
979 reg |= (p_h264->hier_qp_type & 0x1) << 0x3;
980 reg |= p_h264->hier_qp_layer & 0x7;
981 WRITEL(reg, S5P_FIMV_E_H264_NUM_T_LAYER_V6);
982 /* QP value for each layer */
983 for (i = 0; i < (p_h264->hier_qp_layer & 0x7); i++)
984 WRITEL(p_h264->hier_qp_layer_qp[i],
985 S5P_FIMV_E_H264_HIERARCHICAL_QP_LAYER0_V6 +
986 i * 4);
987 }
988 /* number of coding layer should be zero when hierarchical is disable */
989 WRITEL(reg, S5P_FIMV_E_H264_NUM_T_LAYER_V6);
990
991 /* frame packing SEI generation */
992 reg = READL(S5P_FIMV_E_H264_OPTIONS_V6);
993 reg &= ~(0x1 << 25);
994 reg |= ((p_h264->sei_frame_packing & 0x1) << 25);
995 WRITEL(reg, S5P_FIMV_E_H264_OPTIONS_V6);
996 if (p_h264->sei_frame_packing) {
997 reg = 0;
998 /** current frame0 flag */
999 reg |= ((p_h264->sei_fp_curr_frame_0 & 0x1) << 2);
1000 /** arrangement type */
1001 reg |= p_h264->sei_fp_arrangement_type & 0x3;
1002 WRITEL(reg, S5P_FIMV_E_H264_FRAME_PACKING_SEI_INFO_V6);
1003 }
1004
1005 if (p_h264->fmo) {
1006 switch (p_h264->fmo_map_type) {
1007 case V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_INTERLEAVED_SLICES:
1008 if (p_h264->fmo_slice_grp > 4)
1009 p_h264->fmo_slice_grp = 4;
1010 for (i = 0; i < (p_h264->fmo_slice_grp & 0xF); i++)
1011 WRITEL(p_h264->fmo_run_len[i] - 1,
1012 S5P_FIMV_E_H264_FMO_RUN_LENGTH_MINUS1_0_V6 +
1013 i * 4);
1014 break;
1015 case V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_SCATTERED_SLICES:
1016 if (p_h264->fmo_slice_grp > 4)
1017 p_h264->fmo_slice_grp = 4;
1018 break;
1019 case V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_RASTER_SCAN:
1020 case V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_WIPE_SCAN:
1021 if (p_h264->fmo_slice_grp > 2)
1022 p_h264->fmo_slice_grp = 2;
1023 WRITEL(p_h264->fmo_chg_dir & 0x1,
1024 S5P_FIMV_E_H264_FMO_SLICE_GRP_CHANGE_DIR_V6);
1025 /* the valid range is 0 ~ number of macroblocks -1 */
1026 WRITEL(p_h264->fmo_chg_rate,
1027 S5P_FIMV_E_H264_FMO_SLICE_GRP_CHANGE_RATE_MINUS1_V6);
1028 break;
1029 default:
1030 mfc_err("Unsupported map type for FMO: %d\n",
1031 p_h264->fmo_map_type);
1032 p_h264->fmo_map_type = 0;
1033 p_h264->fmo_slice_grp = 1;
1034 break;
1035 }
1036
1037 WRITEL(p_h264->fmo_map_type,
1038 S5P_FIMV_E_H264_FMO_SLICE_GRP_MAP_TYPE_V6);
1039 WRITEL(p_h264->fmo_slice_grp - 1,
1040 S5P_FIMV_E_H264_FMO_NUM_SLICE_GRP_MINUS1_V6);
1041 } else {
1042 WRITEL(0, S5P_FIMV_E_H264_FMO_NUM_SLICE_GRP_MINUS1_V6);
1043 }
1044
1045 mfc_debug_leave();
1046
1047 return 0;
1048}
1049
/*
 * Program the MPEG-4-specific encoder parameters on top of the common
 * set-up done by s5p_mfc_set_enc_params(): B-frame count, profile and
 * level, rate control and QP bounds, frame rate and VBV size; HEC is
 * disabled.  Always returns 0.
 */
static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_enc_params *p = &ctx->enc_params;
	struct s5p_mfc_mpeg4_enc_params *p_mpeg4 = &p->codec.mpeg4;
	unsigned int reg = 0;

	mfc_debug_enter();

	s5p_mfc_set_enc_params(ctx);

	/* pictype : number of B */
	reg = READL(S5P_FIMV_E_GOP_CONFIG_V6);
	reg &= ~(0x3 << 16);
	reg |= ((p->num_b_frame & 0x3) << 16);
	WRITEL(reg, S5P_FIMV_E_GOP_CONFIG_V6);

	/* profile & level */
	reg = 0;
	/** level */
	reg |= ((p_mpeg4->level & 0xFF) << 8);
	/** profile - 0 ~ 1 */
	reg |= p_mpeg4->profile & 0x3F;
	WRITEL(reg, S5P_FIMV_E_PICTURE_PROFILE_V6);

	/* rate control config. */
	reg = READL(S5P_FIMV_E_RC_CONFIG_V6);
	/** macroblock level rate control */
	reg &= ~(0x1 << 8);
	reg |= ((p->rc_mb & 0x1) << 8);
	WRITEL(reg, S5P_FIMV_E_RC_CONFIG_V6);
	/** frame QP */
	reg &= ~(0x3F);
	reg |= p_mpeg4->rc_frame_qp & 0x3F;
	WRITEL(reg, S5P_FIMV_E_RC_CONFIG_V6);

	/* max & min value of QP */
	reg = 0;
	/** max QP */
	reg |= ((p_mpeg4->rc_max_qp & 0x3F) << 8);
	/** min QP */
	reg |= p_mpeg4->rc_min_qp & 0x3F;
	WRITEL(reg, S5P_FIMV_E_RC_QP_BOUND_V6);

	/* other QPs - only used when all rate control is off */
	WRITEL(0x0, S5P_FIMV_E_FIXED_PICTURE_QP_V6);
	if (!p->rc_frame && !p->rc_mb) {
		reg = 0;
		reg |= ((p_mpeg4->rc_b_frame_qp & 0x3F) << 16);
		reg |= ((p_mpeg4->rc_p_frame_qp & 0x3F) << 8);
		reg |= p_mpeg4->rc_frame_qp & 0x3F;
		WRITEL(reg, S5P_FIMV_E_FIXED_PICTURE_QP_V6);
	}

	/* frame rate */
	if (p->rc_frame && p->rc_framerate_num && p->rc_framerate_denom) {
		reg = 0;
		reg |= ((p->rc_framerate_num & 0xFFFF) << 16);
		reg |= p->rc_framerate_denom & 0xFFFF;
		WRITEL(reg, S5P_FIMV_E_RC_FRAME_RATE_V6);
	}

	/* vbv buffer size */
	if (p->frame_skip_mode ==
			V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
		WRITEL(p->vbv_size & 0xFFFF, S5P_FIMV_E_VBV_BUFFER_SIZE_V6);

		if (p->rc_frame)
			WRITEL(p->vbv_delay, S5P_FIMV_E_VBV_INIT_DELAY_V6);
	}

	/* Disable HEC */
	WRITEL(0x0, S5P_FIMV_E_MPEG4_OPTIONS_V6);
	WRITEL(0x0, S5P_FIMV_E_MPEG4_HEC_PERIOD_V6);

	mfc_debug_leave();

	return 0;
}
1129
/*
 * Program the H.263-specific encoder parameters on top of the common
 * set-up done by s5p_mfc_set_enc_params(): a fixed profile bit, rate
 * control and QP bounds, frame rate and VBV size.  H.263 shares the
 * MPEG-4 parameter struct.  Always returns 0.
 */
static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_enc_params *p = &ctx->enc_params;
	struct s5p_mfc_mpeg4_enc_params *p_h263 = &p->codec.mpeg4;
	unsigned int reg = 0;

	mfc_debug_enter();

	s5p_mfc_set_enc_params(ctx);

	/* profile & level */
	reg = 0;
	/** profile */
	reg |= (0x1 << 4);
	WRITEL(reg, S5P_FIMV_E_PICTURE_PROFILE_V6);

	/* rate control config. */
	reg = READL(S5P_FIMV_E_RC_CONFIG_V6);
	/** macroblock level rate control */
	reg &= ~(0x1 << 8);
	reg |= ((p->rc_mb & 0x1) << 8);
	WRITEL(reg, S5P_FIMV_E_RC_CONFIG_V6);
	/** frame QP */
	reg &= ~(0x3F);
	reg |= p_h263->rc_frame_qp & 0x3F;
	WRITEL(reg, S5P_FIMV_E_RC_CONFIG_V6);

	/* max & min value of QP */
	reg = 0;
	/** max QP */
	reg |= ((p_h263->rc_max_qp & 0x3F) << 8);
	/** min QP */
	reg |= p_h263->rc_min_qp & 0x3F;
	WRITEL(reg, S5P_FIMV_E_RC_QP_BOUND_V6);

	/* other QPs - only used when all rate control is off */
	WRITEL(0x0, S5P_FIMV_E_FIXED_PICTURE_QP_V6);
	if (!p->rc_frame && !p->rc_mb) {
		reg = 0;
		reg |= ((p_h263->rc_b_frame_qp & 0x3F) << 16);
		reg |= ((p_h263->rc_p_frame_qp & 0x3F) << 8);
		reg |= p_h263->rc_frame_qp & 0x3F;
		WRITEL(reg, S5P_FIMV_E_FIXED_PICTURE_QP_V6);
	}

	/* frame rate */
	if (p->rc_frame && p->rc_framerate_num && p->rc_framerate_denom) {
		reg = 0;
		reg |= ((p->rc_framerate_num & 0xFFFF) << 16);
		reg |= p->rc_framerate_denom & 0xFFFF;
		WRITEL(reg, S5P_FIMV_E_RC_FRAME_RATE_V6);
	}

	/* vbv buffer size */
	if (p->frame_skip_mode ==
			V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
		WRITEL(p->vbv_size & 0xFFFF, S5P_FIMV_E_VBV_BUFFER_SIZE_V6);

		if (p->rc_frame)
			WRITEL(p->vbv_delay, S5P_FIMV_E_VBV_INIT_DELAY_V6);
	}

	mfc_debug_leave();

	return 0;
}
1197
/* Initialize decoding */
/*
 * Program the decode options (FMO/ASO handling, display delay, MPEG-4
 * loop filter, tiled output, pixel format, SEI parsing) and issue the
 * SEQ_HEADER command to start header parsing.  Always returns 0.
 */
int s5p_mfc_init_decode_v6(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned int reg = 0;
	int fmo_aso_ctrl = 0;

	mfc_debug_enter();
	mfc_debug(2, "InstNo: %d/%d\n", ctx->inst_no,
			S5P_FIMV_CH_SEQ_HEADER_V6);
	/* NOTE(review): all three reads below log the same register
	 * (D_CPB_BUFFER_ADDR) - probably meant to also dump size and
	 * offset; harmless, debug only. */
	mfc_debug(2, "BUFs: %08x %08x %08x\n",
		  READL(S5P_FIMV_D_CPB_BUFFER_ADDR_V6),
		  READL(S5P_FIMV_D_CPB_BUFFER_ADDR_V6),
		  READL(S5P_FIMV_D_CPB_BUFFER_ADDR_V6));

	/* FMO_ASO_CTRL - 0: Enable, 1: Disable */
	reg |= (fmo_aso_ctrl << S5P_FIMV_D_OPT_FMO_ASO_CTRL_MASK_V6);

	/* When user sets desplay_delay to 0,
	 * It works as "display_delay enable" and delay set to 0.
	 * If user wants display_delay disable, It should be
	 * set to negative value. */
	if (ctx->display_delay >= 0) {
		reg |= (0x1 << S5P_FIMV_D_OPT_DDELAY_EN_SHIFT_V6);
		WRITEL(ctx->display_delay, S5P_FIMV_D_DISPLAY_DELAY_V6);
	}
	/* Setup loop filter, for decoding this is only valid for MPEG4 */
	if (ctx->codec_mode == S5P_MFC_CODEC_MPEG4_DEC) {
		mfc_debug(2, "Set loop filter to: %d\n",
				ctx->loop_filter_mpeg4);
		reg |= (ctx->loop_filter_mpeg4 <<
				S5P_FIMV_D_OPT_LF_CTRL_SHIFT_V6);
	}
	if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16)
		reg |= (0x1 << S5P_FIMV_D_OPT_TILE_MODE_SHIFT_V6);

	WRITEL(reg, S5P_FIMV_D_DEC_OPTIONS_V6);

	/* 0: NV12(CbCr), 1: NV21(CrCb) */
	if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_NV21M)
		WRITEL(0x1, S5P_FIMV_PIXEL_FORMAT_V6);
	else
		WRITEL(0x0, S5P_FIMV_PIXEL_FORMAT_V6);

	/* sei parse */
	WRITEL(ctx->sei_fp_parse & 0x1, S5P_FIMV_D_SEI_ENABLE_V6);

	/* Kick off sequence-header parsing for this instance. */
	WRITEL(ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6);
	s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
			S5P_FIMV_CH_SEQ_HEADER_V6, NULL);

	mfc_debug_leave();
	return 0;
}
1252
1253static inline void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush)
1254{
1255 struct s5p_mfc_dev *dev = ctx->dev;
1256 unsigned int dpb;
1257 if (flush)
1258 dpb = READL(S5P_FIMV_SI_CH0_DPB_CONF_CTRL) | (1 << 14);
1259 else
1260 dpb = READL(S5P_FIMV_SI_CH0_DPB_CONF_CTRL) & ~(1 << 14);
1261 WRITEL(dpb, S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
1262}
1263
1264/* Decode a single frame */
1265int s5p_mfc_decode_one_frame_v6(struct s5p_mfc_ctx *ctx,
1266 enum s5p_mfc_decode_arg last_frame)
1267{
1268 struct s5p_mfc_dev *dev = ctx->dev;
1269
1270 WRITEL(ctx->dec_dst_flag, S5P_FIMV_D_AVAILABLE_DPB_FLAG_LOWER_V6);
1271 WRITEL(ctx->slice_interface & 0x1, S5P_FIMV_D_SLICE_IF_ENABLE_V6);
1272
1273 WRITEL(ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6);
1274 /* Issue different commands to instance basing on whether it
1275 * is the last frame or not. */
1276 switch (last_frame) {
1277 case 0:
1278 s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
1279 S5P_FIMV_CH_FRAME_START_V6, NULL);
1280 break;
1281 case 1:
1282 s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
1283 S5P_FIMV_CH_LAST_FRAME_V6, NULL);
1284 break;
1285 default:
1286 mfc_err("Unsupported last frame arg.\n");
1287 return -EINVAL;
1288 }
1289
1290 mfc_debug(2, "Decoding a usual frame.\n");
1291 return 0;
1292}
1293
1294int s5p_mfc_init_encode_v6(struct s5p_mfc_ctx *ctx)
1295{
1296 struct s5p_mfc_dev *dev = ctx->dev;
1297
1298 if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC)
1299 s5p_mfc_set_enc_params_h264(ctx);
1300 else if (ctx->codec_mode == S5P_MFC_CODEC_MPEG4_ENC)
1301 s5p_mfc_set_enc_params_mpeg4(ctx);
1302 else if (ctx->codec_mode == S5P_MFC_CODEC_H263_ENC)
1303 s5p_mfc_set_enc_params_h263(ctx);
1304 else {
1305 mfc_err("Unknown codec for encoding (%x).\n",
1306 ctx->codec_mode);
1307 return -EINVAL;
1308 }
1309
1310 WRITEL(ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6);
1311 s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
1312 S5P_FIMV_CH_SEQ_HEADER_V6, NULL);
1313
1314 return 0;
1315}
1316
1317int s5p_mfc_h264_set_aso_slice_order_v6(struct s5p_mfc_ctx *ctx)
1318{
1319 struct s5p_mfc_dev *dev = ctx->dev;
1320 struct s5p_mfc_enc_params *p = &ctx->enc_params;
1321 struct s5p_mfc_h264_enc_params *p_h264 = &p->codec.h264;
1322 int i;
1323
1324 if (p_h264->aso) {
1325 for (i = 0; i < 8; i++)
1326 WRITEL(p_h264->aso_slice_order[i],
1327 S5P_FIMV_E_H264_ASO_SLICE_ORDER_0_V6 + i * 4);
1328 }
1329 return 0;
1330}
1331
/* Encode a single frame: program slice settings (and H.264 slice
 * order, if applicable) for the current context, then issue a
 * frame-start command to the firmware. Always returns 0. */
int s5p_mfc_encode_one_frame_v6(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	mfc_debug(2, "++\n");

	/* memory structure cur. frame */

	/* H.264 may use an arbitrary slice order; program it first. */
	if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC)
		s5p_mfc_h264_set_aso_slice_order_v6(ctx);

	s5p_mfc_set_slice_mode(ctx);

	/* Select this instance, then kick the firmware. */
	WRITEL(ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6);
	s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
			S5P_FIMV_CH_FRAME_START_V6, NULL);

	mfc_debug(2, "--\n");

	return 0;
}
1354
1355static inline int s5p_mfc_get_new_ctx(struct s5p_mfc_dev *dev)
1356{
1357 unsigned long flags;
1358 int new_ctx;
1359 int cnt;
1360
1361 spin_lock_irqsave(&dev->condlock, flags);
1362 mfc_debug(2, "Previos context: %d (bits %08lx)\n", dev->curr_ctx,
1363 dev->ctx_work_bits);
1364 new_ctx = (dev->curr_ctx + 1) % MFC_NUM_CONTEXTS;
1365 cnt = 0;
1366 while (!test_bit(new_ctx, &dev->ctx_work_bits)) {
1367 new_ctx = (new_ctx + 1) % MFC_NUM_CONTEXTS;
1368 cnt++;
1369 if (cnt > MFC_NUM_CONTEXTS) {
1370 /* No contexts to run */
1371 spin_unlock_irqrestore(&dev->condlock, flags);
1372 return -EAGAIN;
1373 }
1374 }
1375 spin_unlock_irqrestore(&dev->condlock, flags);
1376 return new_ctx;
1377}
1378
1379static inline void s5p_mfc_run_dec_last_frames(struct s5p_mfc_ctx *ctx)
1380{
1381 struct s5p_mfc_dev *dev = ctx->dev;
1382 struct s5p_mfc_buf *temp_vb;
1383 unsigned long flags;
1384
1385 spin_lock_irqsave(&dev->irqlock, flags);
1386
1387 /* Frames are being decoded */
1388 if (list_empty(&ctx->src_queue)) {
1389 mfc_debug(2, "No src buffers.\n");
1390 spin_unlock_irqrestore(&dev->irqlock, flags);
1391 return;
1392 }
1393 /* Get the next source buffer */
1394 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1395 temp_vb->flags |= MFC_BUF_FLAG_USED;
1396 s5p_mfc_set_dec_stream_buffer_v6(ctx,
1397 vb2_dma_contig_plane_dma_addr(temp_vb->b, 0), 0, 0);
1398 spin_unlock_irqrestore(&dev->irqlock, flags);
1399
1400 dev->curr_ctx = ctx->num;
1401 s5p_mfc_clean_ctx_int_flags(ctx);
1402 s5p_mfc_decode_one_frame_v6(ctx, 1);
1403}
1404
1405static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx)
1406{
1407 struct s5p_mfc_dev *dev = ctx->dev;
1408 struct s5p_mfc_buf *temp_vb;
1409 unsigned long flags;
1410 int last_frame = 0;
1411 unsigned int index;
1412
1413 spin_lock_irqsave(&dev->irqlock, flags);
1414
1415 /* Frames are being decoded */
1416 if (list_empty(&ctx->src_queue)) {
1417 mfc_debug(2, "No src buffers.\n");
1418 spin_unlock_irqrestore(&dev->irqlock, flags);
1419 return -EAGAIN;
1420 }
1421 /* Get the next source buffer */
1422 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1423 temp_vb->flags |= MFC_BUF_FLAG_USED;
1424 s5p_mfc_set_dec_stream_buffer_v6(ctx,
1425 vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
1426 ctx->consumed_stream,
1427 temp_vb->b->v4l2_planes[0].bytesused);
1428 spin_unlock_irqrestore(&dev->irqlock, flags);
1429
1430 index = temp_vb->b->v4l2_buf.index;
1431
1432 dev->curr_ctx = ctx->num;
1433 s5p_mfc_clean_ctx_int_flags(ctx);
1434 if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
1435 last_frame = 1;
1436 mfc_debug(2, "Setting ctx->state to FINISHING\n");
1437 ctx->state = MFCINST_FINISHING;
1438 }
1439 s5p_mfc_decode_one_frame_v6(ctx, last_frame);
1440
1441 return 0;
1442}
1443
1444static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
1445{
1446 struct s5p_mfc_dev *dev = ctx->dev;
1447 unsigned long flags;
1448 struct s5p_mfc_buf *dst_mb;
1449 struct s5p_mfc_buf *src_mb;
1450 unsigned long src_y_addr, src_c_addr, dst_addr;
1451 /*
1452 unsigned int src_y_size, src_c_size;
1453 */
1454 unsigned int dst_size;
1455 unsigned int index;
1456
1457 spin_lock_irqsave(&dev->irqlock, flags);
1458
1459 if (list_empty(&ctx->src_queue)) {
1460 mfc_debug(2, "no src buffers.\n");
1461 spin_unlock_irqrestore(&dev->irqlock, flags);
1462 return -EAGAIN;
1463 }
1464
1465 if (list_empty(&ctx->dst_queue)) {
1466 mfc_debug(2, "no dst buffers.\n");
1467 spin_unlock_irqrestore(&dev->irqlock, flags);
1468 return -EAGAIN;
1469 }
1470
1471 src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1472 src_mb->flags |= MFC_BUF_FLAG_USED;
1473 src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0);
1474 src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1);
1475
1476 mfc_debug(2, "enc src y addr: 0x%08lx", src_y_addr);
1477 mfc_debug(2, "enc src c addr: 0x%08lx", src_c_addr);
1478
1479 s5p_mfc_set_enc_frame_buffer_v6(ctx, src_y_addr, src_c_addr);
1480
1481 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
1482 dst_mb->flags |= MFC_BUF_FLAG_USED;
1483 dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
1484 dst_size = vb2_plane_size(dst_mb->b, 0);
1485
1486 s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size);
1487
1488 spin_unlock_irqrestore(&dev->irqlock, flags);
1489
1490 index = src_mb->b->v4l2_buf.index;
1491
1492 dev->curr_ctx = ctx->num;
1493 s5p_mfc_clean_ctx_int_flags(ctx);
1494 s5p_mfc_encode_one_frame_v6(ctx);
1495
1496 return 0;
1497}
1498
1499static inline void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx)
1500{
1501 struct s5p_mfc_dev *dev = ctx->dev;
1502 unsigned long flags;
1503 struct s5p_mfc_buf *temp_vb;
1504
1505 /* Initializing decoding - parsing header */
1506 spin_lock_irqsave(&dev->irqlock, flags);
1507 mfc_debug(2, "Preparing to init decoding.\n");
1508 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1509 mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
1510 s5p_mfc_set_dec_stream_buffer_v6(ctx,
1511 vb2_dma_contig_plane_dma_addr(temp_vb->b, 0), 0,
1512 temp_vb->b->v4l2_planes[0].bytesused);
1513 spin_unlock_irqrestore(&dev->irqlock, flags);
1514 dev->curr_ctx = ctx->num;
1515 s5p_mfc_clean_ctx_int_flags(ctx);
1516 s5p_mfc_init_decode_v6(ctx);
1517}
1518
/* Start encoder initialization: give the firmware a destination stream
 * buffer so it can emit the sequence header. */
static inline void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long flags;
	struct s5p_mfc_buf *dst_mb;
	unsigned long dst_addr;
	unsigned int dst_size;

	spin_lock_irqsave(&dev->irqlock, flags);

	/* NOTE(review): assumes dst_queue is non-empty — confirm callers
	 * only schedule this state with a capture buffer queued. */
	dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
	dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
	dst_size = vb2_plane_size(dst_mb->b, 0);
	s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size);
	spin_unlock_irqrestore(&dev->irqlock, flags);
	dev->curr_ctx = ctx->num;
	s5p_mfc_clean_ctx_int_flags(ctx);
	s5p_mfc_init_encode_v6(ctx);
}
1538
1539static inline int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
1540{
1541 struct s5p_mfc_dev *dev = ctx->dev;
1542 int ret;
1543 /* Header was parsed now start processing
1544 * First set the output frame buffers
1545 * s5p_mfc_alloc_dec_buffers(ctx); */
1546
1547 if (ctx->capture_state != QUEUE_BUFS_MMAPED) {
1548 mfc_err("It seems that not all destionation buffers were\n"
1549 "mmaped.MFC requires that all destination are mmaped\n"
1550 "before starting processing.\n");
1551 return -EAGAIN;
1552 }
1553
1554 dev->curr_ctx = ctx->num;
1555 s5p_mfc_clean_ctx_int_flags(ctx);
1556 ret = s5p_mfc_set_dec_frame_buffer_v6(ctx);
1557 if (ret) {
1558 mfc_err("Failed to alloc frame mem.\n");
1559 ctx->state = MFCINST_ERROR;
1560 }
1561 return ret;
1562}
1563
1564static inline int s5p_mfc_run_init_enc_buffers(struct s5p_mfc_ctx *ctx)
1565{
1566 struct s5p_mfc_dev *dev = ctx->dev;
1567 int ret;
1568
1569 ret = s5p_mfc_alloc_codec_buffers_v6(ctx);
1570 if (ret) {
1571 mfc_err("Failed to allocate encoding buffers.\n");
1572 return -ENOMEM;
1573 }
1574
1575 /* Header was generated now starting processing
1576 * First set the reference frame buffers
1577 */
1578 if (ctx->capture_state != QUEUE_BUFS_REQUESTED) {
1579 mfc_err("It seems that destionation buffers were not\n"
1580 "requested.MFC requires that header should be generated\n"
1581 "before allocating codec buffer.\n");
1582 return -EAGAIN;
1583 }
1584
1585 dev->curr_ctx = ctx->num;
1586 s5p_mfc_clean_ctx_int_flags(ctx);
1587 ret = s5p_mfc_set_enc_ref_buffer_v6(ctx);
1588 if (ret) {
1589 mfc_err("Failed to alloc frame mem.\n");
1590 ctx->state = MFCINST_ERROR;
1591 }
1592 return ret;
1593}
1594
/* Try running an operation on hardware.
 * Takes the hardware lock (bit 0 of dev->hw_lock), picks the next
 * runnable context, turns the clock on and dispatches one operation
 * based on the context type and state. On any scheduling failure the
 * lock is released and the clock turned off again, since no completion
 * interrupt will arrive to do it. */
void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev)
{
	struct s5p_mfc_ctx *ctx;
	int new_ctx;
	unsigned int ret = 0;

	mfc_debug(1, "Try run dev: %p\n", dev);

	/* Check whether hardware is not running */
	if (test_and_set_bit(0, &dev->hw_lock) != 0) {
		/* This is perfectly ok, the scheduled ctx should wait */
		mfc_debug(1, "Couldn't lock HW.\n");
		return;
	}

	/* Choose the context to run */
	new_ctx = s5p_mfc_get_new_ctx(dev);
	if (new_ctx < 0) {
		/* No contexts to run: drop the lock we just took. */
		if (test_and_clear_bit(0, &dev->hw_lock) == 0) {
			mfc_err("Failed to unlock hardware.\n");
			return;
		}

		mfc_debug(1, "No ctx is scheduled to be run.\n");
		return;
	}

	mfc_debug(1, "New context: %d\n", new_ctx);
	ctx = dev->ctx[new_ctx];
	mfc_debug(1, "Seting new context to %p\n", ctx);
	/* Got context to run in ctx */
	mfc_debug(1, "ctx->dst_queue_cnt=%d ctx->dpb_count=%d ctx->src_queue_cnt=%d\n",
		ctx->dst_queue_cnt, ctx->dpb_count, ctx->src_queue_cnt);
	mfc_debug(1, "ctx->state=%d\n", ctx->state);
	/* Last frame has already been sent to MFC
	 * Now obtaining frames from MFC buffer */

	/* Clock stays on until the completion interrupt (or the error
	 * path at the bottom of this function) turns it off. */
	s5p_mfc_clock_on();
	if (ctx->type == MFCINST_DECODER) {
		switch (ctx->state) {
		case MFCINST_FINISHING:
			s5p_mfc_run_dec_last_frames(ctx);
			break;
		case MFCINST_RUNNING:
			ret = s5p_mfc_run_dec_frame(ctx);
			break;
		case MFCINST_INIT:
			s5p_mfc_clean_ctx_int_flags(ctx);
			ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd,
					ctx);
			break;
		case MFCINST_RETURN_INST:
			s5p_mfc_clean_ctx_int_flags(ctx);
			ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd,
					ctx);
			break;
		case MFCINST_GOT_INST:
			s5p_mfc_run_init_dec(ctx);
			break;
		case MFCINST_HEAD_PARSED:
			ret = s5p_mfc_run_init_dec_buffers(ctx);
			break;
		case MFCINST_RES_CHANGE_INIT:
			s5p_mfc_run_dec_last_frames(ctx);
			break;
		case MFCINST_RES_CHANGE_FLUSH:
			s5p_mfc_run_dec_last_frames(ctx);
			break;
		case MFCINST_RES_CHANGE_END:
			mfc_debug(2, "Finished remaining frames after resolution change.\n");
			ctx->capture_state = QUEUE_FREE;
			mfc_debug(2, "Will re-init the codec`.\n");
			s5p_mfc_run_init_dec(ctx);
			break;
		default:
			ret = -EAGAIN;
		}
	} else if (ctx->type == MFCINST_ENCODER) {
		switch (ctx->state) {
		case MFCINST_FINISHING:
		case MFCINST_RUNNING:
			ret = s5p_mfc_run_enc_frame(ctx);
			break;
		case MFCINST_INIT:
			ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd,
					ctx);
			break;
		case MFCINST_RETURN_INST:
			ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd,
					ctx);
			break;
		case MFCINST_GOT_INST:
			s5p_mfc_run_init_enc(ctx);
			break;
		case MFCINST_HEAD_PARSED: /* Only for MFC6.x */
			ret = s5p_mfc_run_init_enc_buffers(ctx);
			break;
		default:
			ret = -EAGAIN;
		}
	} else {
		mfc_err("invalid context type: %d\n", ctx->type);
		ret = -EAGAIN;
	}

	if (ret) {
		/* Free hardware lock */
		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
			mfc_err("Failed to unlock hardware.\n");

		/* This is indeed important: as no operation has been
		 * scheduled, reduce the clock count as no one will
		 * ever do this, because no interrupt related to this try_run
		 * will ever come from hardware. */
		s5p_mfc_clock_off();
	}
}
1714
1715
1716void s5p_mfc_cleanup_queue_v6(struct list_head *lh, struct vb2_queue *vq)
1717{
1718 struct s5p_mfc_buf *b;
1719 int i;
1720
1721 while (!list_empty(lh)) {
1722 b = list_entry(lh->next, struct s5p_mfc_buf, list);
1723 for (i = 0; i < b->b->num_planes; i++)
1724 vb2_set_plane_payload(b->b, i, 0);
1725 vb2_buffer_done(b->b, VB2_BUF_STATE_ERROR);
1726 list_del(&b->list);
1727 }
1728}
1729
/* Acknowledge a firmware interrupt by clearing the RISC-to-host
 * command and interrupt registers. */
void s5p_mfc_clear_int_flags_v6(struct s5p_mfc_dev *dev)
{
	mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD_V6);
	mfc_write(dev, 0, S5P_FIMV_RISC2HOST_INT_V6);
}
1735
/* Write @data to the register at offset @ofs, keeping the MFC clock
 * gated on for the duration of the access. */
void s5p_mfc_write_info_v6(struct s5p_mfc_ctx *ctx, unsigned int data,
		unsigned int ofs)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	s5p_mfc_clock_on();
	WRITEL(data, ofs);
	s5p_mfc_clock_off();
}
1745
1746unsigned int s5p_mfc_read_info_v6(struct s5p_mfc_ctx *ctx, unsigned int ofs)
1747{
1748 struct s5p_mfc_dev *dev = ctx->dev;
1749 int ret;
1750
1751 s5p_mfc_clock_on();
1752 ret = READL(ofs);
1753 s5p_mfc_clock_off();
1754
1755 return ret;
1756}
1757
/* Luma (Y) address of the frame currently selected for display. */
int s5p_mfc_get_dspl_y_adr_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_DISPLAY_LUMA_ADDR_V6);
}

/* Luma (Y) address of the decoded frame.
 * NOTE(review): reads the same DISPLAY luma register as
 * s5p_mfc_get_dspl_y_adr_v6 above — confirm a dedicated DECODED luma
 * address register was not intended here. */
int s5p_mfc_get_dec_y_adr_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_DISPLAY_LUMA_ADDR_V6);
}
1767
/* Raw display status register value. */
int s5p_mfc_get_dspl_status_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_DISPLAY_STATUS_V6);
}

/* Raw decoded-frame status register value. */
int s5p_mfc_get_decoded_status_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_DECODED_STATUS_V6);
}

/* Frame type of the decoded frame, masked to the frame-type field. */
int s5p_mfc_get_dec_frame_type_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_DECODED_FRAME_TYPE_V6) &
		S5P_FIMV_DECODE_FRAME_MASK_V6;
}

/* Frame type of the display frame, masked to the frame-type field. */
int s5p_mfc_get_disp_frame_type_v6(struct s5p_mfc_ctx *ctx)
{
	return mfc_read(ctx->dev, S5P_FIMV_D_DISPLAY_FRAME_TYPE_V6) &
		S5P_FIMV_DECODE_FRAME_MASK_V6;
}

/* Size of the NAL unit consumed by the last decode. */
int s5p_mfc_get_consumed_stream_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_DECODED_NAL_SIZE_V6);
}

/* Reason for the last RISC-to-host interrupt, masked to the
 * command field. */
int s5p_mfc_get_int_reason_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_RISC2HOST_CMD_V6) &
		S5P_FIMV_RISC2HOST_CMD_MASK;
}

/* Raw error code reported by the firmware. */
int s5p_mfc_get_int_err_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_ERROR_CODE_V6);
}

/* Extract the decode-error field from a raw error code. */
int s5p_mfc_err_dec_v6(unsigned int err)
{
	return (err & S5P_FIMV_ERR_DEC_MASK_V6) >> S5P_FIMV_ERR_DEC_SHIFT_V6;
}

/* Extract the display-error field from a raw error code. */
int s5p_mfc_err_dspl_v6(unsigned int err)
{
	return (err & S5P_FIMV_ERR_DSPL_MASK_V6) >> S5P_FIMV_ERR_DSPL_SHIFT_V6;
}
1815
/* Width of the display frame reported by the hardware. */
int s5p_mfc_get_img_width_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_DISPLAY_FRAME_WIDTH_V6);
}

/* Height of the display frame reported by the hardware. */
int s5p_mfc_get_img_height_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_DISPLAY_FRAME_HEIGHT_V6);
}

/* Minimum number of decoded-picture buffers required by the stream. */
int s5p_mfc_get_dpb_count_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_MIN_NUM_DPB_V6);
}

/* Minimum number of motion-vector buffers required by the stream. */
int s5p_mfc_get_mv_count_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_MIN_NUM_MV_V6);
}

/* Instance ID returned by the firmware for the last open command. */
int s5p_mfc_get_inst_no_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_RET_INSTANCE_ID_V6);
}

/* Number of DPBs used by the encoder. */
int s5p_mfc_get_enc_dpb_count_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_E_NUM_DPB_V6);
}

/* Size of the encoded stream produced by the last encode. */
int s5p_mfc_get_enc_strm_size_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_E_STREAM_SIZE_V6);
}

/* Slice type of the last encoded frame. */
int s5p_mfc_get_enc_slice_type_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_E_SLICE_TYPE_V6);
}

/* Encoder picture count register. */
int s5p_mfc_get_enc_pic_count_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_E_PICTURE_COUNT_V6);
}

/* Frame-packing SEI availability flag. */
int s5p_mfc_get_sei_avail_status_v6(struct s5p_mfc_ctx *ctx)
{
	return mfc_read(ctx->dev, S5P_FIMV_D_FRAME_PACK_SEI_AVAIL_V6);
}

/* Number of views in an MVC stream. */
int s5p_mfc_get_mvc_num_views_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_MVC_NUM_VIEWS_V6);
}

/* View ID of the current MVC view. */
int s5p_mfc_get_mvc_view_id_v6(struct s5p_mfc_dev *dev)
{
	return mfc_read(dev, S5P_FIMV_D_MVC_VIEW_ID_V6);
}
1875
/* Picture tag for the top field (shared-memory-compatible register,
 * see PIC_TIME_TOP_V6). Reads with the clock gated on. */
unsigned int s5p_mfc_get_pic_type_top_v6(struct s5p_mfc_ctx *ctx)
{
	return s5p_mfc_read_info_v6(ctx, PIC_TIME_TOP_V6);
}

/* Picture tag for the bottom field. */
unsigned int s5p_mfc_get_pic_type_bot_v6(struct s5p_mfc_ctx *ctx)
{
	return s5p_mfc_read_info_v6(ctx, PIC_TIME_BOT_V6);
}

/* Horizontal crop information for the display frame. */
unsigned int s5p_mfc_get_crop_info_h_v6(struct s5p_mfc_ctx *ctx)
{
	return s5p_mfc_read_info_v6(ctx, CROP_INFO_H_V6);
}

/* Vertical crop information for the display frame. */
unsigned int s5p_mfc_get_crop_info_v_v6(struct s5p_mfc_ctx *ctx)
{
	return s5p_mfc_read_info_v6(ctx, CROP_INFO_V_V6);
}
1895
/* Initialize opr function pointers for MFC v6.
 * This table is handed to the core driver via s5p_mfc_init_hw_ops_v6()
 * and dispatched through s5p_mfc_hw_call().
 * NOTE(review): .get_dec_status points at s5p_mfc_get_dec_status_v6,
 * which is not visible in this part of the file — presumably defined
 * earlier; confirm it exists (this file defines
 * s5p_mfc_get_decoded_status_v6). */
static struct s5p_mfc_hw_ops s5p_mfc_ops_v6 = {
	.alloc_dec_temp_buffers = s5p_mfc_alloc_dec_temp_buffers_v6,
	.release_dec_desc_buffer = s5p_mfc_release_dec_desc_buffer_v6,
	.alloc_codec_buffers = s5p_mfc_alloc_codec_buffers_v6,
	.release_codec_buffers = s5p_mfc_release_codec_buffers_v6,
	.alloc_instance_buffer = s5p_mfc_alloc_instance_buffer_v6,
	.release_instance_buffer = s5p_mfc_release_instance_buffer_v6,
	.alloc_dev_context_buffer =
		s5p_mfc_alloc_dev_context_buffer_v6,
	.release_dev_context_buffer =
		s5p_mfc_release_dev_context_buffer_v6,
	.dec_calc_dpb_size = s5p_mfc_dec_calc_dpb_size_v6,
	.enc_calc_src_size = s5p_mfc_enc_calc_src_size_v6,
	.set_dec_stream_buffer = s5p_mfc_set_dec_stream_buffer_v6,
	.set_dec_frame_buffer = s5p_mfc_set_dec_frame_buffer_v6,
	.set_enc_stream_buffer = s5p_mfc_set_enc_stream_buffer_v6,
	.set_enc_frame_buffer = s5p_mfc_set_enc_frame_buffer_v6,
	.get_enc_frame_buffer = s5p_mfc_get_enc_frame_buffer_v6,
	.set_enc_ref_buffer = s5p_mfc_set_enc_ref_buffer_v6,
	.init_decode = s5p_mfc_init_decode_v6,
	.init_encode = s5p_mfc_init_encode_v6,
	.encode_one_frame = s5p_mfc_encode_one_frame_v6,
	.try_run = s5p_mfc_try_run_v6,
	.cleanup_queue = s5p_mfc_cleanup_queue_v6,
	.clear_int_flags = s5p_mfc_clear_int_flags_v6,
	.write_info = s5p_mfc_write_info_v6,
	.read_info = s5p_mfc_read_info_v6,
	.get_dspl_y_adr = s5p_mfc_get_dspl_y_adr_v6,
	.get_dec_y_adr = s5p_mfc_get_dec_y_adr_v6,
	.get_dspl_status = s5p_mfc_get_dspl_status_v6,
	.get_dec_status = s5p_mfc_get_dec_status_v6,
	.get_dec_frame_type = s5p_mfc_get_dec_frame_type_v6,
	.get_disp_frame_type = s5p_mfc_get_disp_frame_type_v6,
	.get_consumed_stream = s5p_mfc_get_consumed_stream_v6,
	.get_int_reason = s5p_mfc_get_int_reason_v6,
	.get_int_err = s5p_mfc_get_int_err_v6,
	.err_dec = s5p_mfc_err_dec_v6,
	.err_dspl = s5p_mfc_err_dspl_v6,
	.get_img_width = s5p_mfc_get_img_width_v6,
	.get_img_height = s5p_mfc_get_img_height_v6,
	.get_dpb_count = s5p_mfc_get_dpb_count_v6,
	.get_mv_count = s5p_mfc_get_mv_count_v6,
	.get_inst_no = s5p_mfc_get_inst_no_v6,
	.get_enc_strm_size = s5p_mfc_get_enc_strm_size_v6,
	.get_enc_slice_type = s5p_mfc_get_enc_slice_type_v6,
	.get_enc_dpb_count = s5p_mfc_get_enc_dpb_count_v6,
	.get_enc_pic_count = s5p_mfc_get_enc_pic_count_v6,
	.get_sei_avail_status = s5p_mfc_get_sei_avail_status_v6,
	.get_mvc_num_views = s5p_mfc_get_mvc_num_views_v6,
	.get_mvc_view_id = s5p_mfc_get_mvc_view_id_v6,
	.get_pic_type_top = s5p_mfc_get_pic_type_top_v6,
	.get_pic_type_bot = s5p_mfc_get_pic_type_bot_v6,
	.get_crop_info_h = s5p_mfc_get_crop_info_h_v6,
	.get_crop_info_v = s5p_mfc_get_crop_info_v_v6,
};
1952
/* Return the v6 hardware-ops table for use by the core driver. */
struct s5p_mfc_hw_ops *s5p_mfc_init_hw_ops_v6(void)
{
	return &s5p_mfc_ops_v6;
}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.h b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.h
new file mode 100644
index 000000000000..ab164efa127e
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.h
@@ -0,0 +1,50 @@
1/*
2 * drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.h
3 *
4 * Header file for Samsung MFC (Multi Function Codec - FIMV) driver
5 * Contains declarations of hw related functions.
6 *
7 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
8 * http://www.samsung.com/
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#ifndef S5P_MFC_OPR_V6_H_
16#define S5P_MFC_OPR_V6_H_
17
18#include "s5p_mfc_common.h"
19#include "s5p_mfc_opr.h"
20
21#define MFC_CTRL_MODE_CUSTOM MFC_CTRL_MODE_SFR
22
23#define MB_WIDTH(x_size) DIV_ROUND_UP(x_size, 16)
24#define MB_HEIGHT(y_size) DIV_ROUND_UP(y_size, 16)
25#define S5P_MFC_DEC_MV_SIZE_V6(x, y) (MB_WIDTH(x) * \
26 (((MB_HEIGHT(y)+1)/2)*2) * 64 + 128)
27
28/* Definition */
29#define ENC_MULTI_SLICE_MB_MAX ((1 << 30) - 1)
30#define ENC_MULTI_SLICE_BIT_MIN 2800
31#define ENC_INTRA_REFRESH_MB_MAX ((1 << 18) - 1)
32#define ENC_VBV_BUF_SIZE_MAX ((1 << 30) - 1)
33#define ENC_H264_LOOP_FILTER_AB_MIN -12
34#define ENC_H264_LOOP_FILTER_AB_MAX 12
35#define ENC_H264_RC_FRAME_RATE_MAX ((1 << 16) - 1)
36#define ENC_H263_RC_FRAME_RATE_MAX ((1 << 16) - 1)
37#define ENC_H264_PROFILE_MAX 3
38#define ENC_H264_LEVEL_MAX 42
39#define ENC_MPEG4_VOP_TIME_RES_MAX ((1 << 16) - 1)
40#define FRAME_DELTA_H264_H263 1
41#define TIGHT_CBR_MAX 10
42
43/* Definitions for shared memory compatibility */
44#define PIC_TIME_TOP_V6 S5P_FIMV_D_RET_PICTURE_TAG_TOP_V6
45#define PIC_TIME_BOT_V6 S5P_FIMV_D_RET_PICTURE_TAG_BOT_V6
46#define CROP_INFO_H_V6 S5P_FIMV_D_DISPLAY_CROP_INFO1_V6
47#define CROP_INFO_V_V6 S5P_FIMV_D_DISPLAY_CROP_INFO2_V6
48
49struct s5p_mfc_hw_ops *s5p_mfc_init_hw_ops_v6(void);
50#endif /* S5P_MFC_OPR_V6_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
index 0503d14ac94e..367db7552289 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -20,7 +20,6 @@
20#include "s5p_mfc_debug.h" 20#include "s5p_mfc_debug.h"
21#include "s5p_mfc_pm.h" 21#include "s5p_mfc_pm.h"
22 22
23#define MFC_CLKNAME "sclk_mfc"
24#define MFC_GATE_CLK_NAME "mfc" 23#define MFC_GATE_CLK_NAME "mfc"
25 24
26#define CLK_DEBUG 25#define CLK_DEBUG
@@ -51,7 +50,7 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
51 goto err_p_ip_clk; 50 goto err_p_ip_clk;
52 } 51 }
53 52
54 pm->clock = clk_get(&dev->plat_dev->dev, MFC_CLKNAME); 53 pm->clock = clk_get(&dev->plat_dev->dev, dev->variant->mclk_name);
55 if (IS_ERR(pm->clock)) { 54 if (IS_ERR(pm->clock)) {
56 mfc_err("Failed to get MFC clock\n"); 55 mfc_err("Failed to get MFC clock\n");
57 ret = PTR_ERR(pm->clock); 56 ret = PTR_ERR(pm->clock);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_shm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_shm.c
deleted file mode 100644
index b5933d233a4b..000000000000
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_shm.c
+++ /dev/null
@@ -1,47 +0,0 @@
1/*
2 * linux/drivers/media/platform/s5p-mfc/s5p_mfc_shm.c
3 *
4 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#ifdef CONFIG_ARCH_EXYNOS4
14#include <linux/dma-mapping.h>
15#endif
16#include <linux/io.h>
17#include "s5p_mfc_common.h"
18#include "s5p_mfc_debug.h"
19
20int s5p_mfc_init_shm(struct s5p_mfc_ctx *ctx)
21{
22 struct s5p_mfc_dev *dev = ctx->dev;
23 void *shm_alloc_ctx = dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
24
25 ctx->shm_alloc = vb2_dma_contig_memops.alloc(shm_alloc_ctx,
26 SHARED_BUF_SIZE);
27 if (IS_ERR(ctx->shm_alloc)) {
28 mfc_err("failed to allocate shared memory\n");
29 return PTR_ERR(ctx->shm_alloc);
30 }
31 /* shm_ofs only keeps the offset from base (port a) */
32 ctx->shm_ofs = s5p_mfc_mem_cookie(shm_alloc_ctx, ctx->shm_alloc)
33 - dev->bank1;
34 BUG_ON(ctx->shm_ofs & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
35 ctx->shm = vb2_dma_contig_memops.vaddr(ctx->shm_alloc);
36 if (!ctx->shm) {
37 vb2_dma_contig_memops.put(ctx->shm_alloc);
38 ctx->shm_ofs = 0;
39 ctx->shm_alloc = NULL;
40 mfc_err("failed to virt addr of shared memory\n");
41 return -ENOMEM;
42 }
43 memset((void *)ctx->shm, 0, SHARED_BUF_SIZE);
44 wmb();
45 return 0;
46}
47
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c
index 403d7f17bfab..9fd9d1c5b218 100644
--- a/drivers/media/platform/soc_camera/mx2_camera.c
+++ b/drivers/media/platform/soc_camera/mx2_camera.c
@@ -1376,6 +1376,7 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
1376 __u32 pixfmt = pix->pixelformat; 1376 __u32 pixfmt = pix->pixelformat;
1377 struct soc_camera_host *ici = to_soc_camera_host(icd->parent); 1377 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
1378 struct mx2_camera_dev *pcdev = ici->priv; 1378 struct mx2_camera_dev *pcdev = ici->priv;
1379 struct mx2_fmt_cfg *emma_prp;
1379 unsigned int width_limit; 1380 unsigned int width_limit;
1380 int ret; 1381 int ret;
1381 1382
@@ -1438,12 +1439,11 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
1438 __func__, pcdev->s_width, pcdev->s_height); 1439 __func__, pcdev->s_width, pcdev->s_height);
1439 1440
1440 /* If the sensor does not support image size try PrP resizing */ 1441 /* If the sensor does not support image size try PrP resizing */
1441 pcdev->emma_prp = mx27_emma_prp_get_format(xlate->code, 1442 emma_prp = mx27_emma_prp_get_format(xlate->code,
1442 xlate->host_fmt->fourcc); 1443 xlate->host_fmt->fourcc);
1443 1444
1444 memset(pcdev->resizing, 0, sizeof(pcdev->resizing));
1445 if ((mf.width != pix->width || mf.height != pix->height) && 1445 if ((mf.width != pix->width || mf.height != pix->height) &&
1446 pcdev->emma_prp->cfg.in_fmt == PRP_CNTL_DATA_IN_YUV422) { 1446 emma_prp->cfg.in_fmt == PRP_CNTL_DATA_IN_YUV422) {
1447 if (mx2_emmaprp_resize(pcdev, &mf, pix, false) < 0) 1447 if (mx2_emmaprp_resize(pcdev, &mf, pix, false) < 0)
1448 dev_dbg(icd->parent, "%s: can't resize\n", __func__); 1448 dev_dbg(icd->parent, "%s: can't resize\n", __func__);
1449 } 1449 }
@@ -1655,6 +1655,7 @@ static int __devinit mx27_camera_emma_init(struct platform_device *pdev)
1655 irq_emma = platform_get_irq(pdev, 1); 1655 irq_emma = platform_get_irq(pdev, 1);
1656 if (!res_emma || !irq_emma) { 1656 if (!res_emma || !irq_emma) {
1657 dev_err(pcdev->dev, "no EMMA resources\n"); 1657 dev_err(pcdev->dev, "no EMMA resources\n");
1658 err = -ENODEV;
1658 goto out; 1659 goto out;
1659 } 1660 }
1660 1661
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 3be92944f8e7..d3f0b84e2d70 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -950,11 +950,11 @@ static int soc_camera_s_selection(struct file *file, void *fh,
950 950
951 /* In all these cases cropping emulation will not help */ 951 /* In all these cases cropping emulation will not help */
952 if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || 952 if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
953 (s->target != V4L2_SEL_TGT_COMPOSE_ACTIVE && 953 (s->target != V4L2_SEL_TGT_COMPOSE &&
954 s->target != V4L2_SEL_TGT_CROP_ACTIVE)) 954 s->target != V4L2_SEL_TGT_CROP))
955 return -EINVAL; 955 return -EINVAL;
956 956
957 if (s->target == V4L2_SEL_TGT_COMPOSE_ACTIVE) { 957 if (s->target == V4L2_SEL_TGT_COMPOSE) {
958 /* No output size change during a running capture! */ 958 /* No output size change during a running capture! */
959 if (is_streaming(ici, icd) && 959 if (is_streaming(ici, icd) &&
960 (icd->user_width != s->r.width || 960 (icd->user_width != s->r.width ||
@@ -974,7 +974,7 @@ static int soc_camera_s_selection(struct file *file, void *fh,
974 974
975 ret = ici->ops->set_selection(icd, s); 975 ret = ici->ops->set_selection(icd, s);
976 if (!ret && 976 if (!ret &&
977 s->target == V4L2_SEL_TGT_COMPOSE_ACTIVE) { 977 s->target == V4L2_SEL_TGT_COMPOSE) {
978 icd->user_width = s->r.width; 978 icd->user_width = s->r.width;
979 icd->user_height = s->r.height; 979 icd->user_height = s->r.height;
980 if (!icd->streamer) 980 if (!icd->streamer)
@@ -1184,7 +1184,8 @@ static int soc_camera_probe(struct soc_camera_device *icd)
1184 sd->grp_id = soc_camera_grp_id(icd); 1184 sd->grp_id = soc_camera_grp_id(icd);
1185 v4l2_set_subdev_hostdata(sd, icd); 1185 v4l2_set_subdev_hostdata(sd, icd);
1186 1186
1187 if (v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler, NULL)) 1187 ret = v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler, NULL);
1188 if (ret < 0)
1188 goto ectrl; 1189 goto ectrl;
1189 1190
1190 /* At this point client .probe() should have run already */ 1191 /* At this point client .probe() should have run already */
@@ -1529,12 +1530,11 @@ static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev)
1529{ 1530{
1530 struct soc_camera_link *icl = pdev->dev.platform_data; 1531 struct soc_camera_link *icl = pdev->dev.platform_data;
1531 struct soc_camera_device *icd; 1532 struct soc_camera_device *icd;
1532 int ret;
1533 1533
1534 if (!icl) 1534 if (!icl)
1535 return -EINVAL; 1535 return -EINVAL;
1536 1536
1537 icd = kzalloc(sizeof(*icd), GFP_KERNEL); 1537 icd = devm_kzalloc(&pdev->dev, sizeof(*icd), GFP_KERNEL);
1538 if (!icd) 1538 if (!icd)
1539 return -ENOMEM; 1539 return -ENOMEM;
1540 1540
@@ -1543,19 +1543,10 @@ static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev)
1543 icd->pdev = &pdev->dev; 1543 icd->pdev = &pdev->dev;
1544 platform_set_drvdata(pdev, icd); 1544 platform_set_drvdata(pdev, icd);
1545 1545
1546 ret = soc_camera_device_register(icd);
1547 if (ret < 0)
1548 goto escdevreg;
1549
1550 icd->user_width = DEFAULT_WIDTH; 1546 icd->user_width = DEFAULT_WIDTH;
1551 icd->user_height = DEFAULT_HEIGHT; 1547 icd->user_height = DEFAULT_HEIGHT;
1552 1548
1553 return 0; 1549 return soc_camera_device_register(icd);
1554
1555escdevreg:
1556 kfree(icd);
1557
1558 return ret;
1559} 1550}
1560 1551
1561/* 1552/*
@@ -1572,8 +1563,6 @@ static int __devexit soc_camera_pdrv_remove(struct platform_device *pdev)
1572 1563
1573 list_del(&icd->list); 1564 list_del(&icd->list);
1574 1565
1575 kfree(icd);
1576
1577 return 0; 1566 return 0;
1578} 1567}
1579 1568
@@ -1586,18 +1575,7 @@ static struct platform_driver __refdata soc_camera_pdrv = {
1586 }, 1575 },
1587}; 1576};
1588 1577
1589static int __init soc_camera_init(void) 1578module_platform_driver(soc_camera_pdrv);
1590{
1591 return platform_driver_register(&soc_camera_pdrv);
1592}
1593
1594static void __exit soc_camera_exit(void)
1595{
1596 platform_driver_unregister(&soc_camera_pdrv);
1597}
1598
1599module_init(soc_camera_init);
1600module_exit(soc_camera_exit);
1601 1579
1602MODULE_DESCRIPTION("Image capture bus driver"); 1580MODULE_DESCRIPTION("Image capture bus driver");
1603MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>"); 1581MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index e5024cfd27a7..4ef55ec8045e 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -308,7 +308,7 @@ static irqreturn_t si470x_i2c_interrupt(int irq, void *dev_id)
308 READCHAN_BLERD) >> 10; 308 READCHAN_BLERD) >> 10;
309 rds = radio->registers[RDSD]; 309 rds = radio->registers[RDSD];
310 break; 310 break;
311 }; 311 }
312 312
313 /* Fill the V4L2 RDS buffer */ 313 /* Fill the V4L2 RDS buffer */
314 put_unaligned_le16(rds, &tmpbuf); 314 put_unaligned_le16(rds, &tmpbuf);
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index be076f7181e7..62f3edec39bc 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -446,7 +446,7 @@ static void si470x_int_in_callback(struct urb *urb)
446 READCHAN_BLERD) >> 10; 446 READCHAN_BLERD) >> 10;
447 rds = radio->registers[RDSD]; 447 rds = radio->registers[RDSD];
448 break; 448 break;
449 }; 449 }
450 450
451 /* Fill the V4L2 RDS buffer */ 451 /* Fill the V4L2 RDS buffer */
452 put_unaligned_le16(rds, &tmpbuf); 452 put_unaligned_le16(rds, &tmpbuf);
diff --git a/drivers/media/radio/si4713-i2c.c b/drivers/media/radio/si4713-i2c.c
index a9e6d17015ef..e3079c142c5f 100644
--- a/drivers/media/radio/si4713-i2c.c
+++ b/drivers/media/radio/si4713-i2c.c
@@ -1009,7 +1009,7 @@ static int si4713_choose_econtrol_action(struct si4713_device *sdev, u32 id,
1009 1009
1010 default: 1010 default:
1011 rval = -EINVAL; 1011 rval = -EINVAL;
1012 }; 1012 }
1013 1013
1014 return rval; 1014 return rval;
1015} 1015}
@@ -1081,7 +1081,7 @@ static int si4713_write_econtrol_string(struct si4713_device *sdev,
1081 default: 1081 default:
1082 rval = -EINVAL; 1082 rval = -EINVAL;
1083 break; 1083 break;
1084 }; 1084 }
1085 1085
1086exit: 1086exit:
1087 return rval; 1087 return rval;
@@ -1130,7 +1130,7 @@ static int si4713_write_econtrol_tune(struct si4713_device *sdev,
1130 default: 1130 default:
1131 rval = -EINVAL; 1131 rval = -EINVAL;
1132 goto unlock; 1132 goto unlock;
1133 }; 1133 }
1134 1134
1135 if (sdev->power_state) 1135 if (sdev->power_state)
1136 rval = si4713_tx_tune_power(sdev, power, antcap); 1136 rval = si4713_tx_tune_power(sdev, power, antcap);
@@ -1420,7 +1420,7 @@ static int si4713_read_econtrol_string(struct si4713_device *sdev,
1420 default: 1420 default:
1421 rval = -EINVAL; 1421 rval = -EINVAL;
1422 break; 1422 break;
1423 }; 1423 }
1424 1424
1425exit: 1425exit:
1426 return rval; 1426 return rval;
@@ -1473,7 +1473,7 @@ static int si4713_read_econtrol_tune(struct si4713_device *sdev,
1473 break; 1473 break;
1474 default: 1474 default:
1475 rval = -EINVAL; 1475 rval = -EINVAL;
1476 }; 1476 }
1477 1477
1478unlock: 1478unlock:
1479 mutex_unlock(&sdev->mutex); 1479 mutex_unlock(&sdev->mutex);
@@ -1698,7 +1698,7 @@ static int si4713_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
1698 default: 1698 default:
1699 rval = -EINVAL; 1699 rval = -EINVAL;
1700 break; 1700 break;
1701 }; 1701 }
1702 1702
1703 return rval; 1703 return rval;
1704} 1704}
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index 647dd951b0e8..d05ac15b5de4 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -881,10 +881,13 @@ static int ene_set_tx_mask(struct rc_dev *rdev, u32 tx_mask)
881static int ene_set_tx_carrier(struct rc_dev *rdev, u32 carrier) 881static int ene_set_tx_carrier(struct rc_dev *rdev, u32 carrier)
882{ 882{
883 struct ene_device *dev = rdev->priv; 883 struct ene_device *dev = rdev->priv;
884 u32 period = 2000000 / carrier; 884 u32 period;
885 885
886 dbg("TX: attempt to set tx carrier to %d kHz", carrier); 886 dbg("TX: attempt to set tx carrier to %d kHz", carrier);
887 if (carrier == 0)
888 return -EINVAL;
887 889
890 period = 2000000 / carrier;
888 if (period && (period > ENE_CIRMOD_PRD_MAX || 891 if (period && (period > ENE_CIRMOD_PRD_MAX ||
889 period < ENE_CIRMOD_PRD_MIN)) { 892 period < ENE_CIRMOD_PRD_MIN)) {
890 893
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index 1e4c68a5cecf..51d7057aca04 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -28,6 +28,7 @@
28#include <media/rc-core.h> 28#include <media/rc-core.h>
29 29
30#define DRIVER_NAME "iguanair" 30#define DRIVER_NAME "iguanair"
31#define BUF_SIZE 152
31 32
32struct iguanair { 33struct iguanair {
33 struct rc_dev *rc; 34 struct rc_dev *rc;
@@ -35,26 +36,23 @@ struct iguanair {
35 struct device *dev; 36 struct device *dev;
36 struct usb_device *udev; 37 struct usb_device *udev;
37 38
38 int pipe_out;
39 uint16_t version; 39 uint16_t version;
40 uint8_t bufsize; 40 uint8_t bufsize;
41 uint8_t cycle_overhead;
41 42
42 struct mutex lock; 43 struct mutex lock;
43 44
44 /* receiver support */ 45 /* receiver support */
45 bool receiver_on; 46 bool receiver_on;
46 dma_addr_t dma_in; 47 dma_addr_t dma_in, dma_out;
47 uint8_t *buf_in; 48 uint8_t *buf_in;
48 struct urb *urb_in; 49 struct urb *urb_in, *urb_out;
49 struct completion completion; 50 struct completion completion;
50 51
51 /* transmit support */ 52 /* transmit support */
52 bool tx_overflow; 53 bool tx_overflow;
53 uint32_t carrier; 54 uint32_t carrier;
54 uint8_t cycle_overhead; 55 struct send_packet *packet;
55 uint8_t channels;
56 uint8_t busy4;
57 uint8_t busy7;
58 56
59 char name[64]; 57 char name[64];
60 char phys[64]; 58 char phys[64];
@@ -73,7 +71,8 @@ struct iguanair {
73#define DIR_IN 0xdc 71#define DIR_IN 0xdc
74#define DIR_OUT 0xcd 72#define DIR_OUT 0xcd
75 73
76#define MAX_PACKET_SIZE 8u 74#define MAX_IN_PACKET 8u
75#define MAX_OUT_PACKET (sizeof(struct send_packet) + BUF_SIZE)
77#define TIMEOUT 1000 76#define TIMEOUT 1000
78#define RX_RESOLUTION 21333 77#define RX_RESOLUTION 21333
79 78
@@ -191,20 +190,25 @@ static void iguanair_rx(struct urb *urb)
191 dev_warn(ir->dev, "failed to resubmit urb: %d\n", rc); 190 dev_warn(ir->dev, "failed to resubmit urb: %d\n", rc);
192} 191}
193 192
194static int iguanair_send(struct iguanair *ir, void *data, unsigned size) 193static void iguanair_irq_out(struct urb *urb)
195{ 194{
196 int rc, transferred; 195 struct iguanair *ir = urb->context;
196
197 if (urb->status)
198 dev_dbg(ir->dev, "Error: out urb status = %d\n", urb->status);
199}
200
201static int iguanair_send(struct iguanair *ir, unsigned size)
202{
203 int rc;
197 204
198 INIT_COMPLETION(ir->completion); 205 INIT_COMPLETION(ir->completion);
199 206
200 rc = usb_interrupt_msg(ir->udev, ir->pipe_out, data, size, 207 ir->urb_out->transfer_buffer_length = size;
201 &transferred, TIMEOUT); 208 rc = usb_submit_urb(ir->urb_out, GFP_KERNEL);
202 if (rc) 209 if (rc)
203 return rc; 210 return rc;
204 211
205 if (transferred != size)
206 return -EIO;
207
208 if (wait_for_completion_timeout(&ir->completion, TIMEOUT) == 0) 212 if (wait_for_completion_timeout(&ir->completion, TIMEOUT) == 0)
209 return -ETIMEDOUT; 213 return -ETIMEDOUT;
210 214
@@ -213,14 +217,13 @@ static int iguanair_send(struct iguanair *ir, void *data, unsigned size)
213 217
214static int iguanair_get_features(struct iguanair *ir) 218static int iguanair_get_features(struct iguanair *ir)
215{ 219{
216 struct packet packet;
217 int rc; 220 int rc;
218 221
219 packet.start = 0; 222 ir->packet->header.start = 0;
220 packet.direction = DIR_OUT; 223 ir->packet->header.direction = DIR_OUT;
221 packet.cmd = CMD_GET_VERSION; 224 ir->packet->header.cmd = CMD_GET_VERSION;
222 225
223 rc = iguanair_send(ir, &packet, sizeof(packet)); 226 rc = iguanair_send(ir, sizeof(ir->packet->header));
224 if (rc) { 227 if (rc) {
225 dev_info(ir->dev, "failed to get version\n"); 228 dev_info(ir->dev, "failed to get version\n");
226 goto out; 229 goto out;
@@ -235,17 +238,23 @@ static int iguanair_get_features(struct iguanair *ir)
235 ir->bufsize = 150; 238 ir->bufsize = 150;
236 ir->cycle_overhead = 65; 239 ir->cycle_overhead = 65;
237 240
238 packet.cmd = CMD_GET_BUFSIZE; 241 ir->packet->header.cmd = CMD_GET_BUFSIZE;
239 242
240 rc = iguanair_send(ir, &packet, sizeof(packet)); 243 rc = iguanair_send(ir, sizeof(ir->packet->header));
241 if (rc) { 244 if (rc) {
242 dev_info(ir->dev, "failed to get buffer size\n"); 245 dev_info(ir->dev, "failed to get buffer size\n");
243 goto out; 246 goto out;
244 } 247 }
245 248
246 packet.cmd = CMD_GET_FEATURES; 249 if (ir->bufsize > BUF_SIZE) {
250 dev_info(ir->dev, "buffer size %u larger than expected\n",
251 ir->bufsize);
252 ir->bufsize = BUF_SIZE;
253 }
254
255 ir->packet->header.cmd = CMD_GET_FEATURES;
247 256
248 rc = iguanair_send(ir, &packet, sizeof(packet)); 257 rc = iguanair_send(ir, sizeof(ir->packet->header));
249 if (rc) { 258 if (rc) {
250 dev_info(ir->dev, "failed to get features\n"); 259 dev_info(ir->dev, "failed to get features\n");
251 goto out; 260 goto out;
@@ -257,13 +266,18 @@ out:
257 266
258static int iguanair_receiver(struct iguanair *ir, bool enable) 267static int iguanair_receiver(struct iguanair *ir, bool enable)
259{ 268{
260 struct packet packet = { 0, DIR_OUT, enable ? 269 int rc;
261 CMD_RECEIVER_ON : CMD_RECEIVER_OFF }; 270
271 ir->packet->header.start = 0;
272 ir->packet->header.direction = DIR_OUT;
273 ir->packet->header.cmd = enable ? CMD_RECEIVER_ON : CMD_RECEIVER_OFF;
262 274
263 if (enable) 275 if (enable)
264 ir_raw_event_reset(ir->rc); 276 ir_raw_event_reset(ir->rc);
265 277
266 return iguanair_send(ir, &packet, sizeof(packet)); 278 rc = iguanair_send(ir, sizeof(ir->packet->header));
279
280 return rc;
267} 281}
268 282
269/* 283/*
@@ -308,8 +322,8 @@ static int iguanair_set_tx_carrier(struct rc_dev *dev, uint32_t carrier)
308 fours = (cycles - sevens * 7) / 4; 322 fours = (cycles - sevens * 7) / 4;
309 323
310 /* magic happens here */ 324 /* magic happens here */
311 ir->busy7 = (4 - sevens) * 2; 325 ir->packet->busy7 = (4 - sevens) * 2;
312 ir->busy4 = 110 - fours; 326 ir->packet->busy4 = 110 - fours;
313 } 327 }
314 328
315 mutex_unlock(&ir->lock); 329 mutex_unlock(&ir->lock);
@@ -325,7 +339,7 @@ static int iguanair_set_tx_mask(struct rc_dev *dev, uint32_t mask)
325 return 4; 339 return 4;
326 340
327 mutex_lock(&ir->lock); 341 mutex_lock(&ir->lock);
328 ir->channels = mask; 342 ir->packet->channels = mask << 4;
329 mutex_unlock(&ir->lock); 343 mutex_unlock(&ir->lock);
330 344
331 return 0; 345 return 0;
@@ -337,16 +351,9 @@ static int iguanair_tx(struct rc_dev *dev, unsigned *txbuf, unsigned count)
337 uint8_t space; 351 uint8_t space;
338 unsigned i, size, periods, bytes; 352 unsigned i, size, periods, bytes;
339 int rc; 353 int rc;
340 struct send_packet *packet;
341 354
342 mutex_lock(&ir->lock); 355 mutex_lock(&ir->lock);
343 356
344 packet = kmalloc(sizeof(*packet) + ir->bufsize, GFP_KERNEL);
345 if (!packet) {
346 rc = -ENOMEM;
347 goto out;
348 }
349
350 /* convert from us to carrier periods */ 357 /* convert from us to carrier periods */
351 for (i = space = size = 0; i < count; i++) { 358 for (i = space = size = 0; i < count; i++) {
352 periods = DIV_ROUND_CLOSEST(txbuf[i] * ir->carrier, 1000000); 359 periods = DIV_ROUND_CLOSEST(txbuf[i] * ir->carrier, 1000000);
@@ -356,11 +363,11 @@ static int iguanair_tx(struct rc_dev *dev, unsigned *txbuf, unsigned count)
356 break; 363 break;
357 } 364 }
358 while (periods > 127) { 365 while (periods > 127) {
359 packet->payload[size++] = 127 | space; 366 ir->packet->payload[size++] = 127 | space;
360 periods -= 127; 367 periods -= 127;
361 } 368 }
362 369
363 packet->payload[size++] = periods | space; 370 ir->packet->payload[size++] = periods | space;
364 space ^= 0x80; 371 space ^= 0x80;
365 } 372 }
366 373
@@ -369,36 +376,19 @@ static int iguanair_tx(struct rc_dev *dev, unsigned *txbuf, unsigned count)
369 goto out; 376 goto out;
370 } 377 }
371 378
372 packet->header.start = 0; 379 ir->packet->header.start = 0;
373 packet->header.direction = DIR_OUT; 380 ir->packet->header.direction = DIR_OUT;
374 packet->header.cmd = CMD_SEND; 381 ir->packet->header.cmd = CMD_SEND;
375 packet->length = size; 382 ir->packet->length = size;
376 packet->channels = ir->channels << 4;
377 packet->busy7 = ir->busy7;
378 packet->busy4 = ir->busy4;
379
380 if (ir->receiver_on) {
381 rc = iguanair_receiver(ir, false);
382 if (rc) {
383 dev_warn(ir->dev, "disable receiver before transmit failed\n");
384 goto out;
385 }
386 }
387 383
388 ir->tx_overflow = false; 384 ir->tx_overflow = false;
389 385
390 rc = iguanair_send(ir, packet, size + 8); 386 rc = iguanair_send(ir, sizeof(*ir->packet) + size);
391 387
392 if (rc == 0 && ir->tx_overflow) 388 if (rc == 0 && ir->tx_overflow)
393 rc = -EOVERFLOW; 389 rc = -EOVERFLOW;
394 390
395 if (ir->receiver_on) {
396 if (iguanair_receiver(ir, true))
397 dev_warn(ir->dev, "re-enable receiver after transmit failed\n");
398 }
399
400out: 391out:
401 kfree(packet);
402 mutex_unlock(&ir->lock); 392 mutex_unlock(&ir->lock);
403 393
404 return rc ? rc : count; 394 return rc ? rc : count;
@@ -411,8 +401,6 @@ static int iguanair_open(struct rc_dev *rdev)
411 401
412 mutex_lock(&ir->lock); 402 mutex_lock(&ir->lock);
413 403
414 BUG_ON(ir->receiver_on);
415
416 rc = iguanair_receiver(ir, true); 404 rc = iguanair_receiver(ir, true);
417 if (rc == 0) 405 if (rc == 0)
418 ir->receiver_on = true; 406 ir->receiver_on = true;
@@ -443,7 +431,7 @@ static int __devinit iguanair_probe(struct usb_interface *intf,
443 struct usb_device *udev = interface_to_usbdev(intf); 431 struct usb_device *udev = interface_to_usbdev(intf);
444 struct iguanair *ir; 432 struct iguanair *ir;
445 struct rc_dev *rc; 433 struct rc_dev *rc;
446 int ret, pipein; 434 int ret, pipein, pipeout;
447 struct usb_host_interface *idesc; 435 struct usb_host_interface *idesc;
448 436
449 ir = kzalloc(sizeof(*ir), GFP_KERNEL); 437 ir = kzalloc(sizeof(*ir), GFP_KERNEL);
@@ -453,11 +441,14 @@ static int __devinit iguanair_probe(struct usb_interface *intf,
453 goto out; 441 goto out;
454 } 442 }
455 443
456 ir->buf_in = usb_alloc_coherent(udev, MAX_PACKET_SIZE, GFP_KERNEL, 444 ir->buf_in = usb_alloc_coherent(udev, MAX_IN_PACKET, GFP_KERNEL,
457 &ir->dma_in); 445 &ir->dma_in);
446 ir->packet = usb_alloc_coherent(udev, MAX_OUT_PACKET, GFP_KERNEL,
447 &ir->dma_out);
458 ir->urb_in = usb_alloc_urb(0, GFP_KERNEL); 448 ir->urb_in = usb_alloc_urb(0, GFP_KERNEL);
449 ir->urb_out = usb_alloc_urb(0, GFP_KERNEL);
459 450
460 if (!ir->buf_in || !ir->urb_in) { 451 if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out) {
461 ret = -ENOMEM; 452 ret = -ENOMEM;
462 goto out; 453 goto out;
463 } 454 }
@@ -472,13 +463,18 @@ static int __devinit iguanair_probe(struct usb_interface *intf,
472 ir->rc = rc; 463 ir->rc = rc;
473 ir->dev = &intf->dev; 464 ir->dev = &intf->dev;
474 ir->udev = udev; 465 ir->udev = udev;
475 ir->pipe_out = usb_sndintpipe(udev,
476 idesc->endpoint[1].desc.bEndpointAddress);
477 mutex_init(&ir->lock); 466 mutex_init(&ir->lock);
467
478 init_completion(&ir->completion); 468 init_completion(&ir->completion);
469 pipeout = usb_sndintpipe(udev,
470 idesc->endpoint[1].desc.bEndpointAddress);
471 usb_fill_int_urb(ir->urb_out, udev, pipeout, ir->packet, MAX_OUT_PACKET,
472 iguanair_irq_out, ir, 1);
473 ir->urb_out->transfer_dma = ir->dma_out;
474 ir->urb_out->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
479 475
480 pipein = usb_rcvintpipe(udev, idesc->endpoint[0].desc.bEndpointAddress); 476 pipein = usb_rcvintpipe(udev, idesc->endpoint[0].desc.bEndpointAddress);
481 usb_fill_int_urb(ir->urb_in, udev, pipein, ir->buf_in, MAX_PACKET_SIZE, 477 usb_fill_int_urb(ir->urb_in, udev, pipein, ir->buf_in, MAX_IN_PACKET,
482 iguanair_rx, ir, 1); 478 iguanair_rx, ir, 1);
483 ir->urb_in->transfer_dma = ir->dma_in; 479 ir->urb_in->transfer_dma = ir->dma_in;
484 ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 480 ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -528,11 +524,14 @@ static int __devinit iguanair_probe(struct usb_interface *intf,
528 return 0; 524 return 0;
529out2: 525out2:
530 usb_kill_urb(ir->urb_in); 526 usb_kill_urb(ir->urb_in);
527 usb_kill_urb(ir->urb_out);
531out: 528out:
532 if (ir) { 529 if (ir) {
533 usb_free_urb(ir->urb_in); 530 usb_free_urb(ir->urb_in);
534 usb_free_coherent(udev, MAX_PACKET_SIZE, ir->buf_in, 531 usb_free_urb(ir->urb_out);
535 ir->dma_in); 532 usb_free_coherent(udev, MAX_IN_PACKET, ir->buf_in, ir->dma_in);
533 usb_free_coherent(udev, MAX_OUT_PACKET, ir->packet,
534 ir->dma_out);
536 } 535 }
537 rc_free_device(rc); 536 rc_free_device(rc);
538 kfree(ir); 537 kfree(ir);
@@ -546,8 +545,11 @@ static void __devexit iguanair_disconnect(struct usb_interface *intf)
546 rc_unregister_device(ir->rc); 545 rc_unregister_device(ir->rc);
547 usb_set_intfdata(intf, NULL); 546 usb_set_intfdata(intf, NULL);
548 usb_kill_urb(ir->urb_in); 547 usb_kill_urb(ir->urb_in);
548 usb_kill_urb(ir->urb_out);
549 usb_free_urb(ir->urb_in); 549 usb_free_urb(ir->urb_in);
550 usb_free_coherent(ir->udev, MAX_PACKET_SIZE, ir->buf_in, ir->dma_in); 550 usb_free_urb(ir->urb_out);
551 usb_free_coherent(ir->udev, MAX_IN_PACKET, ir->buf_in, ir->dma_in);
552 usb_free_coherent(ir->udev, MAX_OUT_PACKET, ir->packet, ir->dma_out);
551 kfree(ir); 553 kfree(ir);
552} 554}
553 555
@@ -565,6 +567,7 @@ static int iguanair_suspend(struct usb_interface *intf, pm_message_t message)
565 } 567 }
566 568
567 usb_kill_urb(ir->urb_in); 569 usb_kill_urb(ir->urb_in);
570 usb_kill_urb(ir->urb_out);
568 571
569 mutex_unlock(&ir->lock); 572 mutex_unlock(&ir->lock);
570 573
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index 569124b03de3..870c93052fd0 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -203,13 +203,13 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
203 /* TX settings */ 203 /* TX settings */
204 case LIRC_SET_TRANSMITTER_MASK: 204 case LIRC_SET_TRANSMITTER_MASK:
205 if (!dev->s_tx_mask) 205 if (!dev->s_tx_mask)
206 return -EINVAL; 206 return -ENOSYS;
207 207
208 return dev->s_tx_mask(dev, val); 208 return dev->s_tx_mask(dev, val);
209 209
210 case LIRC_SET_SEND_CARRIER: 210 case LIRC_SET_SEND_CARRIER:
211 if (!dev->s_tx_carrier) 211 if (!dev->s_tx_carrier)
212 return -EINVAL; 212 return -ENOSYS;
213 213
214 return dev->s_tx_carrier(dev, val); 214 return dev->s_tx_carrier(dev, val);
215 215
diff --git a/drivers/media/rc/keymaps/rc-msi-digivox-ii.c b/drivers/media/rc/keymaps/rc-msi-digivox-ii.c
index c64e9e30045d..2fa71d0d72d7 100644
--- a/drivers/media/rc/keymaps/rc-msi-digivox-ii.c
+++ b/drivers/media/rc/keymaps/rc-msi-digivox-ii.c
@@ -22,24 +22,24 @@
22#include <linux/module.h> 22#include <linux/module.h>
23 23
24static struct rc_map_table msi_digivox_ii[] = { 24static struct rc_map_table msi_digivox_ii[] = {
25 { 0x0002, KEY_2 }, 25 { 0x0302, KEY_2 },
26 { 0x0003, KEY_UP }, /* up */ 26 { 0x0303, KEY_UP }, /* up */
27 { 0x0004, KEY_3 }, 27 { 0x0304, KEY_3 },
28 { 0x0005, KEY_CHANNELDOWN }, 28 { 0x0305, KEY_CHANNELDOWN },
29 { 0x0008, KEY_5 }, 29 { 0x0308, KEY_5 },
30 { 0x0009, KEY_0 }, 30 { 0x0309, KEY_0 },
31 { 0x000b, KEY_8 }, 31 { 0x030b, KEY_8 },
32 { 0x000d, KEY_DOWN }, /* down */ 32 { 0x030d, KEY_DOWN }, /* down */
33 { 0x0010, KEY_9 }, 33 { 0x0310, KEY_9 },
34 { 0x0011, KEY_7 }, 34 { 0x0311, KEY_7 },
35 { 0x0014, KEY_VOLUMEUP }, 35 { 0x0314, KEY_VOLUMEUP },
36 { 0x0015, KEY_CHANNELUP }, 36 { 0x0315, KEY_CHANNELUP },
37 { 0x0016, KEY_OK }, 37 { 0x0316, KEY_OK },
38 { 0x0017, KEY_POWER2 }, 38 { 0x0317, KEY_POWER2 },
39 { 0x001a, KEY_1 }, 39 { 0x031a, KEY_1 },
40 { 0x001c, KEY_4 }, 40 { 0x031c, KEY_4 },
41 { 0x001d, KEY_6 }, 41 { 0x031d, KEY_6 },
42 { 0x001f, KEY_VOLUMEDOWN }, 42 { 0x031f, KEY_VOLUMEDOWN },
43}; 43};
44 44
45static struct rc_map_list msi_digivox_ii_map = { 45static struct rc_map_list msi_digivox_ii_map = {
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 699eef39128b..2ea913a44ae8 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -517,6 +517,9 @@ static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
517 struct nvt_dev *nvt = dev->priv; 517 struct nvt_dev *nvt = dev->priv;
518 u16 val; 518 u16 val;
519 519
520 if (carrier == 0)
521 return -EINVAL;
522
520 nvt_cir_reg_write(nvt, 1, CIR_CP); 523 nvt_cir_reg_write(nvt, 1, CIR_CP);
521 val = 3000000 / (carrier) - 1; 524 val = 3000000 / (carrier) - 1;
522 nvt_cir_reg_write(nvt, val & 0xff, CIR_CC); 525 nvt_cir_reg_write(nvt, val & 0xff, CIR_CC);
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 49731b1a9c57..9f5a17bb5ef5 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -890,6 +890,9 @@ static int redrat3_set_tx_carrier(struct rc_dev *rcdev, u32 carrier)
890 struct device *dev = rr3->dev; 890 struct device *dev = rr3->dev;
891 891
892 rr3_dbg(dev, "Setting modulation frequency to %u", carrier); 892 rr3_dbg(dev, "Setting modulation frequency to %u", carrier);
893 if (carrier == 0)
894 return -EINVAL;
895
893 rr3->carrier = carrier; 896 rr3->carrier = carrier;
894 897
895 return carrier; 898 return carrier;
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 30ae1f24abc3..7c9b5f33113b 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -184,7 +184,7 @@ enum wbcir_txstate {
184}; 184};
185 185
186/* Misc */ 186/* Misc */
187#define WBCIR_NAME "Winbond CIR" 187#define WBCIR_NAME "winbond-cir"
188#define WBCIR_ID_FAMILY 0xF1 /* Family ID for the WPCD376I */ 188#define WBCIR_ID_FAMILY 0xF1 /* Family ID for the WPCD376I */
189#define WBCIR_ID_CHIP 0x04 /* Chip ID for the WPCD376I */ 189#define WBCIR_ID_CHIP 0x04 /* Chip ID for the WPCD376I */
190#define INVALID_SCANCODE 0x7FFFFFFF /* Invalid with all protos */ 190#define INVALID_SCANCODE 0x7FFFFFFF /* Invalid with all protos */
diff --git a/drivers/media/tuners/mt2063.c b/drivers/media/tuners/mt2063.c
index 0ed9091ff48e..2e1a02e360ff 100644
--- a/drivers/media/tuners/mt2063.c
+++ b/drivers/media/tuners/mt2063.c
@@ -245,7 +245,7 @@ struct mt2063_state {
245/* 245/*
246 * mt2063_write - Write data into the I2C bus 246 * mt2063_write - Write data into the I2C bus
247 */ 247 */
248static u32 mt2063_write(struct mt2063_state *state, u8 reg, u8 *data, u32 len) 248static int mt2063_write(struct mt2063_state *state, u8 reg, u8 *data, u32 len)
249{ 249{
250 struct dvb_frontend *fe = state->frontend; 250 struct dvb_frontend *fe = state->frontend;
251 int ret; 251 int ret;
@@ -277,9 +277,9 @@ static u32 mt2063_write(struct mt2063_state *state, u8 reg, u8 *data, u32 len)
277/* 277/*
278 * mt2063_write - Write register data into the I2C bus, caching the value 278 * mt2063_write - Write register data into the I2C bus, caching the value
279 */ 279 */
280static u32 mt2063_setreg(struct mt2063_state *state, u8 reg, u8 val) 280static int mt2063_setreg(struct mt2063_state *state, u8 reg, u8 val)
281{ 281{
282 u32 status; 282 int status;
283 283
284 dprintk(2, "\n"); 284 dprintk(2, "\n");
285 285
@@ -298,10 +298,10 @@ static u32 mt2063_setreg(struct mt2063_state *state, u8 reg, u8 val)
298/* 298/*
299 * mt2063_read - Read data from the I2C bus 299 * mt2063_read - Read data from the I2C bus
300 */ 300 */
301static u32 mt2063_read(struct mt2063_state *state, 301static int mt2063_read(struct mt2063_state *state,
302 u8 subAddress, u8 *pData, u32 cnt) 302 u8 subAddress, u8 *pData, u32 cnt)
303{ 303{
304 u32 status = 0; /* Status to be returned */ 304 int status = 0; /* Status to be returned */
305 struct dvb_frontend *fe = state->frontend; 305 struct dvb_frontend *fe = state->frontend;
306 u32 i = 0; 306 u32 i = 0;
307 307
@@ -816,7 +816,7 @@ static u32 IsSpurInBand(struct MT2063_AvoidSpursData_t *pAS_Info,
816 */ 816 */
817static u32 MT2063_AvoidSpurs(struct MT2063_AvoidSpursData_t *pAS_Info) 817static u32 MT2063_AvoidSpurs(struct MT2063_AvoidSpursData_t *pAS_Info)
818{ 818{
819 u32 status = 0; 819 int status = 0;
820 u32 fm, fp; /* restricted range on LO's */ 820 u32 fm, fp; /* restricted range on LO's */
821 pAS_Info->bSpurAvoided = 0; 821 pAS_Info->bSpurAvoided = 0;
822 pAS_Info->nSpursFound = 0; 822 pAS_Info->nSpursFound = 0;
@@ -935,14 +935,14 @@ static u32 MT2063_AvoidSpurs(struct MT2063_AvoidSpursData_t *pAS_Info)
935 * 935 *
936 * This function returns 0, if no lock, 1 if locked and a value < 1 if error 936 * This function returns 0, if no lock, 1 if locked and a value < 1 if error
937 */ 937 */
938static unsigned int mt2063_lockStatus(struct mt2063_state *state) 938static int mt2063_lockStatus(struct mt2063_state *state)
939{ 939{
940 const u32 nMaxWait = 100; /* wait a maximum of 100 msec */ 940 const u32 nMaxWait = 100; /* wait a maximum of 100 msec */
941 const u32 nPollRate = 2; /* poll status bits every 2 ms */ 941 const u32 nPollRate = 2; /* poll status bits every 2 ms */
942 const u32 nMaxLoops = nMaxWait / nPollRate; 942 const u32 nMaxLoops = nMaxWait / nPollRate;
943 const u8 LO1LK = 0x80; 943 const u8 LO1LK = 0x80;
944 u8 LO2LK = 0x08; 944 u8 LO2LK = 0x08;
945 u32 status; 945 int status;
946 u32 nDelays = 0; 946 u32 nDelays = 0;
947 947
948 dprintk(2, "\n"); 948 dprintk(2, "\n");
@@ -1069,7 +1069,7 @@ static u32 mt2063_get_dnc_output_enable(struct mt2063_state *state,
1069static u32 mt2063_set_dnc_output_enable(struct mt2063_state *state, 1069static u32 mt2063_set_dnc_output_enable(struct mt2063_state *state,
1070 enum MT2063_DNC_Output_Enable nValue) 1070 enum MT2063_DNC_Output_Enable nValue)
1071{ 1071{
1072 u32 status = 0; /* Status to be returned */ 1072 int status = 0; /* Status to be returned */
1073 u8 val = 0; 1073 u8 val = 0;
1074 1074
1075 dprintk(2, "\n"); 1075 dprintk(2, "\n");
@@ -1203,7 +1203,7 @@ static u32 mt2063_set_dnc_output_enable(struct mt2063_state *state,
1203static u32 MT2063_SetReceiverMode(struct mt2063_state *state, 1203static u32 MT2063_SetReceiverMode(struct mt2063_state *state,
1204 enum mt2063_delivery_sys Mode) 1204 enum mt2063_delivery_sys Mode)
1205{ 1205{
1206 u32 status = 0; /* Status to be returned */ 1206 int status = 0; /* Status to be returned */
1207 u8 val; 1207 u8 val;
1208 u32 longval; 1208 u32 longval;
1209 1209
@@ -1345,7 +1345,7 @@ static u32 MT2063_SetReceiverMode(struct mt2063_state *state,
1345static u32 MT2063_ClearPowerMaskBits(struct mt2063_state *state, 1345static u32 MT2063_ClearPowerMaskBits(struct mt2063_state *state,
1346 enum MT2063_Mask_Bits Bits) 1346 enum MT2063_Mask_Bits Bits)
1347{ 1347{
1348 u32 status = 0; 1348 int status = 0;
1349 1349
1350 dprintk(2, "\n"); 1350 dprintk(2, "\n");
1351 Bits = (enum MT2063_Mask_Bits)(Bits & MT2063_ALL_SD); /* Only valid bits for this tuner */ 1351 Bits = (enum MT2063_Mask_Bits)(Bits & MT2063_ALL_SD); /* Only valid bits for this tuner */
@@ -1374,7 +1374,7 @@ static u32 MT2063_ClearPowerMaskBits(struct mt2063_state *state,
1374 */ 1374 */
1375static u32 MT2063_SoftwareShutdown(struct mt2063_state *state, u8 Shutdown) 1375static u32 MT2063_SoftwareShutdown(struct mt2063_state *state, u8 Shutdown)
1376{ 1376{
1377 u32 status; 1377 int status;
1378 1378
1379 dprintk(2, "\n"); 1379 dprintk(2, "\n");
1380 if (Shutdown == 1) 1380 if (Shutdown == 1)
@@ -1540,7 +1540,7 @@ static u32 FindClearTuneFilter(struct mt2063_state *state, u32 f_in)
1540static u32 MT2063_Tune(struct mt2063_state *state, u32 f_in) 1540static u32 MT2063_Tune(struct mt2063_state *state, u32 f_in)
1541{ /* RF input center frequency */ 1541{ /* RF input center frequency */
1542 1542
1543 u32 status = 0; 1543 int status = 0;
1544 u32 LO1; /* 1st LO register value */ 1544 u32 LO1; /* 1st LO register value */
1545 u32 Num1; /* Numerator for LO1 reg. value */ 1545 u32 Num1; /* Numerator for LO1 reg. value */
1546 u32 f_IF1; /* 1st IF requested */ 1546 u32 f_IF1; /* 1st IF requested */
@@ -1803,7 +1803,7 @@ static const u8 MT2063B3_defaults[] = {
1803 1803
1804static int mt2063_init(struct dvb_frontend *fe) 1804static int mt2063_init(struct dvb_frontend *fe)
1805{ 1805{
1806 u32 status; 1806 int status;
1807 struct mt2063_state *state = fe->tuner_priv; 1807 struct mt2063_state *state = fe->tuner_priv;
1808 u8 all_resets = 0xF0; /* reset/load bits */ 1808 u8 all_resets = 0xF0; /* reset/load bits */
1809 const u8 *def = NULL; 1809 const u8 *def = NULL;
@@ -2249,8 +2249,8 @@ struct dvb_frontend *mt2063_attach(struct dvb_frontend *fe,
2249 dprintk(2, "\n"); 2249 dprintk(2, "\n");
2250 2250
2251 state = kzalloc(sizeof(struct mt2063_state), GFP_KERNEL); 2251 state = kzalloc(sizeof(struct mt2063_state), GFP_KERNEL);
2252 if (state == NULL) 2252 if (!state)
2253 goto error; 2253 return NULL;
2254 2254
2255 state->config = config; 2255 state->config = config;
2256 state->i2c = i2c; 2256 state->i2c = i2c;
@@ -2261,18 +2261,15 @@ struct dvb_frontend *mt2063_attach(struct dvb_frontend *fe,
2261 2261
2262 printk(KERN_INFO "%s: Attaching MT2063\n", __func__); 2262 printk(KERN_INFO "%s: Attaching MT2063\n", __func__);
2263 return fe; 2263 return fe;
2264
2265error:
2266 kfree(state);
2267 return NULL;
2268} 2264}
2269EXPORT_SYMBOL_GPL(mt2063_attach); 2265EXPORT_SYMBOL_GPL(mt2063_attach);
2270 2266
2267#if 0
2271/* 2268/*
2272 * Ancillary routines visible outside mt2063 2269 * Ancillary routines visible outside mt2063
2273 * FIXME: Remove them in favor of using standard tuner callbacks 2270 * FIXME: Remove them in favor of using standard tuner callbacks
2274 */ 2271 */
2275unsigned int tuner_MT2063_SoftwareShutdown(struct dvb_frontend *fe) 2272static int tuner_MT2063_SoftwareShutdown(struct dvb_frontend *fe)
2276{ 2273{
2277 struct mt2063_state *state = fe->tuner_priv; 2274 struct mt2063_state *state = fe->tuner_priv;
2278 int err = 0; 2275 int err = 0;
@@ -2285,9 +2282,8 @@ unsigned int tuner_MT2063_SoftwareShutdown(struct dvb_frontend *fe)
2285 2282
2286 return err; 2283 return err;
2287} 2284}
2288EXPORT_SYMBOL_GPL(tuner_MT2063_SoftwareShutdown);
2289 2285
2290unsigned int tuner_MT2063_ClearPowerMaskBits(struct dvb_frontend *fe) 2286static int tuner_MT2063_ClearPowerMaskBits(struct dvb_frontend *fe)
2291{ 2287{
2292 struct mt2063_state *state = fe->tuner_priv; 2288 struct mt2063_state *state = fe->tuner_priv;
2293 int err = 0; 2289 int err = 0;
@@ -2300,7 +2296,7 @@ unsigned int tuner_MT2063_ClearPowerMaskBits(struct dvb_frontend *fe)
2300 2296
2301 return err; 2297 return err;
2302} 2298}
2303EXPORT_SYMBOL_GPL(tuner_MT2063_ClearPowerMaskBits); 2299#endif
2304 2300
2305MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>"); 2301MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
2306MODULE_DESCRIPTION("MT2063 Silicon tuner"); 2302MODULE_DESCRIPTION("MT2063 Silicon tuner");
diff --git a/drivers/media/tuners/mt2063.h b/drivers/media/tuners/mt2063.h
index 3f5cfd93713f..ab24170c1571 100644
--- a/drivers/media/tuners/mt2063.h
+++ b/drivers/media/tuners/mt2063.h
@@ -23,10 +23,6 @@ static inline struct dvb_frontend *mt2063_attach(struct dvb_frontend *fe,
23 return NULL; 23 return NULL;
24} 24}
25 25
26/* FIXME: Should use the standard DVB attachment interfaces */
27unsigned int tuner_MT2063_SoftwareShutdown(struct dvb_frontend *fe);
28unsigned int tuner_MT2063_ClearPowerMaskBits(struct dvb_frontend *fe);
29
30#endif /* CONFIG_DVB_MT2063 */ 26#endif /* CONFIG_DVB_MT2063 */
31 27
32#endif /* __MT2063_H__ */ 28#endif /* __MT2063_H__ */
diff --git a/drivers/media/tuners/tda18271-common.c b/drivers/media/tuners/tda18271-common.c
index 221171eeb0c3..18c77afe2e4f 100644
--- a/drivers/media/tuners/tda18271-common.c
+++ b/drivers/media/tuners/tda18271-common.c
@@ -187,7 +187,8 @@ int tda18271_read_extended(struct dvb_frontend *fe)
187 return (ret == 2 ? 0 : ret); 187 return (ret == 2 ? 0 : ret);
188} 188}
189 189
190int tda18271_write_regs(struct dvb_frontend *fe, int idx, int len) 190static int __tda18271_write_regs(struct dvb_frontend *fe, int idx, int len,
191 bool lock_i2c)
191{ 192{
192 struct tda18271_priv *priv = fe->tuner_priv; 193 struct tda18271_priv *priv = fe->tuner_priv;
193 unsigned char *regs = priv->tda18271_regs; 194 unsigned char *regs = priv->tda18271_regs;
@@ -198,7 +199,6 @@ int tda18271_write_regs(struct dvb_frontend *fe, int idx, int len)
198 199
199 BUG_ON((len == 0) || (idx + len > sizeof(buf))); 200 BUG_ON((len == 0) || (idx + len > sizeof(buf)));
200 201
201
202 switch (priv->small_i2c) { 202 switch (priv->small_i2c) {
203 case TDA18271_03_BYTE_CHUNK_INIT: 203 case TDA18271_03_BYTE_CHUNK_INIT:
204 max = 3; 204 max = 3;
@@ -214,7 +214,19 @@ int tda18271_write_regs(struct dvb_frontend *fe, int idx, int len)
214 max = 39; 214 max = 39;
215 } 215 }
216 216
217 tda18271_i2c_gate_ctrl(fe, 1); 217
218 /*
219 * If lock_i2c is true, it will take the I2C bus for tda18271 private
220 * usage during the entire write ops, as otherwise, bad things could
221 * happen.
222 * During device init, several write operations will happen. So,
223 * tda18271_init_regs controls the I2C lock directly,
224 * disabling lock_i2c here.
225 */
226 if (lock_i2c) {
227 tda18271_i2c_gate_ctrl(fe, 1);
228 i2c_lock_adapter(priv->i2c_props.adap);
229 }
218 while (len) { 230 while (len) {
219 if (max > len) 231 if (max > len)
220 max = len; 232 max = len;
@@ -226,14 +238,17 @@ int tda18271_write_regs(struct dvb_frontend *fe, int idx, int len)
226 msg.len = max + 1; 238 msg.len = max + 1;
227 239
228 /* write registers */ 240 /* write registers */
229 ret = i2c_transfer(priv->i2c_props.adap, &msg, 1); 241 ret = __i2c_transfer(priv->i2c_props.adap, &msg, 1);
230 if (ret != 1) 242 if (ret != 1)
231 break; 243 break;
232 244
233 idx += max; 245 idx += max;
234 len -= max; 246 len -= max;
235 } 247 }
236 tda18271_i2c_gate_ctrl(fe, 0); 248 if (lock_i2c) {
249 i2c_unlock_adapter(priv->i2c_props.adap);
250 tda18271_i2c_gate_ctrl(fe, 0);
251 }
237 252
238 if (ret != 1) 253 if (ret != 1)
239 tda_err("ERROR: idx = 0x%x, len = %d, " 254 tda_err("ERROR: idx = 0x%x, len = %d, "
@@ -242,10 +257,16 @@ int tda18271_write_regs(struct dvb_frontend *fe, int idx, int len)
242 return (ret == 1 ? 0 : ret); 257 return (ret == 1 ? 0 : ret);
243} 258}
244 259
260int tda18271_write_regs(struct dvb_frontend *fe, int idx, int len)
261{
262 return __tda18271_write_regs(fe, idx, len, true);
263}
264
245/*---------------------------------------------------------------------*/ 265/*---------------------------------------------------------------------*/
246 266
247int tda18271_charge_pump_source(struct dvb_frontend *fe, 267static int __tda18271_charge_pump_source(struct dvb_frontend *fe,
248 enum tda18271_pll pll, int force) 268 enum tda18271_pll pll, int force,
269 bool lock_i2c)
249{ 270{
250 struct tda18271_priv *priv = fe->tuner_priv; 271 struct tda18271_priv *priv = fe->tuner_priv;
251 unsigned char *regs = priv->tda18271_regs; 272 unsigned char *regs = priv->tda18271_regs;
@@ -255,9 +276,16 @@ int tda18271_charge_pump_source(struct dvb_frontend *fe,
255 regs[r_cp] &= ~0x20; 276 regs[r_cp] &= ~0x20;
256 regs[r_cp] |= ((force & 1) << 5); 277 regs[r_cp] |= ((force & 1) << 5);
257 278
258 return tda18271_write_regs(fe, r_cp, 1); 279 return __tda18271_write_regs(fe, r_cp, 1, lock_i2c);
280}
281
282int tda18271_charge_pump_source(struct dvb_frontend *fe,
283 enum tda18271_pll pll, int force)
284{
285 return __tda18271_charge_pump_source(fe, pll, force, true);
259} 286}
260 287
288
261int tda18271_init_regs(struct dvb_frontend *fe) 289int tda18271_init_regs(struct dvb_frontend *fe)
262{ 290{
263 struct tda18271_priv *priv = fe->tuner_priv; 291 struct tda18271_priv *priv = fe->tuner_priv;
@@ -267,6 +295,13 @@ int tda18271_init_regs(struct dvb_frontend *fe)
267 i2c_adapter_id(priv->i2c_props.adap), 295 i2c_adapter_id(priv->i2c_props.adap),
268 priv->i2c_props.addr); 296 priv->i2c_props.addr);
269 297
298 /*
299 * Don't let any other I2C transfer to happen at adapter during init,
300 * as those could cause bad things
301 */
302 tda18271_i2c_gate_ctrl(fe, 1);
303 i2c_lock_adapter(priv->i2c_props.adap);
304
270 /* initialize registers */ 305 /* initialize registers */
271 switch (priv->id) { 306 switch (priv->id) {
272 case TDA18271HDC1: 307 case TDA18271HDC1:
@@ -352,28 +387,28 @@ int tda18271_init_regs(struct dvb_frontend *fe)
352 regs[R_EB22] = 0x48; 387 regs[R_EB22] = 0x48;
353 regs[R_EB23] = 0xb0; 388 regs[R_EB23] = 0xb0;
354 389
355 tda18271_write_regs(fe, 0x00, TDA18271_NUM_REGS); 390 __tda18271_write_regs(fe, 0x00, TDA18271_NUM_REGS, false);
356 391
357 /* setup agc1 gain */ 392 /* setup agc1 gain */
358 regs[R_EB17] = 0x00; 393 regs[R_EB17] = 0x00;
359 tda18271_write_regs(fe, R_EB17, 1); 394 __tda18271_write_regs(fe, R_EB17, 1, false);
360 regs[R_EB17] = 0x03; 395 regs[R_EB17] = 0x03;
361 tda18271_write_regs(fe, R_EB17, 1); 396 __tda18271_write_regs(fe, R_EB17, 1, false);
362 regs[R_EB17] = 0x43; 397 regs[R_EB17] = 0x43;
363 tda18271_write_regs(fe, R_EB17, 1); 398 __tda18271_write_regs(fe, R_EB17, 1, false);
364 regs[R_EB17] = 0x4c; 399 regs[R_EB17] = 0x4c;
365 tda18271_write_regs(fe, R_EB17, 1); 400 __tda18271_write_regs(fe, R_EB17, 1, false);
366 401
367 /* setup agc2 gain */ 402 /* setup agc2 gain */
368 if ((priv->id) == TDA18271HDC1) { 403 if ((priv->id) == TDA18271HDC1) {
369 regs[R_EB20] = 0xa0; 404 regs[R_EB20] = 0xa0;
370 tda18271_write_regs(fe, R_EB20, 1); 405 __tda18271_write_regs(fe, R_EB20, 1, false);
371 regs[R_EB20] = 0xa7; 406 regs[R_EB20] = 0xa7;
372 tda18271_write_regs(fe, R_EB20, 1); 407 __tda18271_write_regs(fe, R_EB20, 1, false);
373 regs[R_EB20] = 0xe7; 408 regs[R_EB20] = 0xe7;
374 tda18271_write_regs(fe, R_EB20, 1); 409 __tda18271_write_regs(fe, R_EB20, 1, false);
375 regs[R_EB20] = 0xec; 410 regs[R_EB20] = 0xec;
376 tda18271_write_regs(fe, R_EB20, 1); 411 __tda18271_write_regs(fe, R_EB20, 1, false);
377 } 412 }
378 413
379 /* image rejection calibration */ 414 /* image rejection calibration */
@@ -391,21 +426,21 @@ int tda18271_init_regs(struct dvb_frontend *fe)
391 regs[R_MD2] = 0x08; 426 regs[R_MD2] = 0x08;
392 regs[R_MD3] = 0x00; 427 regs[R_MD3] = 0x00;
393 428
394 tda18271_write_regs(fe, R_EP3, 11); 429 __tda18271_write_regs(fe, R_EP3, 11, false);
395 430
396 if ((priv->id) == TDA18271HDC2) { 431 if ((priv->id) == TDA18271HDC2) {
397 /* main pll cp source on */ 432 /* main pll cp source on */
398 tda18271_charge_pump_source(fe, TDA18271_MAIN_PLL, 1); 433 __tda18271_charge_pump_source(fe, TDA18271_MAIN_PLL, 1, false);
399 msleep(1); 434 msleep(1);
400 435
401 /* main pll cp source off */ 436 /* main pll cp source off */
402 tda18271_charge_pump_source(fe, TDA18271_MAIN_PLL, 0); 437 __tda18271_charge_pump_source(fe, TDA18271_MAIN_PLL, 0, false);
403 } 438 }
404 439
405 msleep(5); /* pll locking */ 440 msleep(5); /* pll locking */
406 441
407 /* launch detector */ 442 /* launch detector */
408 tda18271_write_regs(fe, R_EP1, 1); 443 __tda18271_write_regs(fe, R_EP1, 1, false);
409 msleep(5); /* wanted low measurement */ 444 msleep(5); /* wanted low measurement */
410 445
411 regs[R_EP5] = 0x85; 446 regs[R_EP5] = 0x85;
@@ -413,11 +448,11 @@ int tda18271_init_regs(struct dvb_frontend *fe)
413 regs[R_CD1] = 0x66; 448 regs[R_CD1] = 0x66;
414 regs[R_CD2] = 0x70; 449 regs[R_CD2] = 0x70;
415 450
416 tda18271_write_regs(fe, R_EP3, 7); 451 __tda18271_write_regs(fe, R_EP3, 7, false);
417 msleep(5); /* pll locking */ 452 msleep(5); /* pll locking */
418 453
419 /* launch optimization algorithm */ 454 /* launch optimization algorithm */
420 tda18271_write_regs(fe, R_EP2, 1); 455 __tda18271_write_regs(fe, R_EP2, 1, false);
421 msleep(30); /* image low optimization completion */ 456 msleep(30); /* image low optimization completion */
422 457
423 /* mid-band */ 458 /* mid-band */
@@ -428,11 +463,11 @@ int tda18271_init_regs(struct dvb_frontend *fe)
428 regs[R_MD1] = 0x73; 463 regs[R_MD1] = 0x73;
429 regs[R_MD2] = 0x1a; 464 regs[R_MD2] = 0x1a;
430 465
431 tda18271_write_regs(fe, R_EP3, 11); 466 __tda18271_write_regs(fe, R_EP3, 11, false);
432 msleep(5); /* pll locking */ 467 msleep(5); /* pll locking */
433 468
434 /* launch detector */ 469 /* launch detector */
435 tda18271_write_regs(fe, R_EP1, 1); 470 __tda18271_write_regs(fe, R_EP1, 1, false);
436 msleep(5); /* wanted mid measurement */ 471 msleep(5); /* wanted mid measurement */
437 472
438 regs[R_EP5] = 0x86; 473 regs[R_EP5] = 0x86;
@@ -440,11 +475,11 @@ int tda18271_init_regs(struct dvb_frontend *fe)
440 regs[R_CD1] = 0x66; 475 regs[R_CD1] = 0x66;
441 regs[R_CD2] = 0xa0; 476 regs[R_CD2] = 0xa0;
442 477
443 tda18271_write_regs(fe, R_EP3, 7); 478 __tda18271_write_regs(fe, R_EP3, 7, false);
444 msleep(5); /* pll locking */ 479 msleep(5); /* pll locking */
445 480
446 /* launch optimization algorithm */ 481 /* launch optimization algorithm */
447 tda18271_write_regs(fe, R_EP2, 1); 482 __tda18271_write_regs(fe, R_EP2, 1, false);
448 msleep(30); /* image mid optimization completion */ 483 msleep(30); /* image mid optimization completion */
449 484
450 /* high-band */ 485 /* high-band */
@@ -456,30 +491,33 @@ int tda18271_init_regs(struct dvb_frontend *fe)
456 regs[R_MD1] = 0x71; 491 regs[R_MD1] = 0x71;
457 regs[R_MD2] = 0xcd; 492 regs[R_MD2] = 0xcd;
458 493
459 tda18271_write_regs(fe, R_EP3, 11); 494 __tda18271_write_regs(fe, R_EP3, 11, false);
460 msleep(5); /* pll locking */ 495 msleep(5); /* pll locking */
461 496
462 /* launch detector */ 497 /* launch detector */
463 tda18271_write_regs(fe, R_EP1, 1); 498 __tda18271_write_regs(fe, R_EP1, 1, false);
464 msleep(5); /* wanted high measurement */ 499 msleep(5); /* wanted high measurement */
465 500
466 regs[R_EP5] = 0x87; 501 regs[R_EP5] = 0x87;
467 regs[R_CD1] = 0x65; 502 regs[R_CD1] = 0x65;
468 regs[R_CD2] = 0x50; 503 regs[R_CD2] = 0x50;
469 504
470 tda18271_write_regs(fe, R_EP3, 7); 505 __tda18271_write_regs(fe, R_EP3, 7, false);
471 msleep(5); /* pll locking */ 506 msleep(5); /* pll locking */
472 507
473 /* launch optimization algorithm */ 508 /* launch optimization algorithm */
474 tda18271_write_regs(fe, R_EP2, 1); 509 __tda18271_write_regs(fe, R_EP2, 1, false);
475 msleep(30); /* image high optimization completion */ 510 msleep(30); /* image high optimization completion */
476 511
477 /* return to normal mode */ 512 /* return to normal mode */
478 regs[R_EP4] = 0x64; 513 regs[R_EP4] = 0x64;
479 tda18271_write_regs(fe, R_EP4, 1); 514 __tda18271_write_regs(fe, R_EP4, 1, false);
480 515
481 /* synchronize */ 516 /* synchronize */
482 tda18271_write_regs(fe, R_EP1, 1); 517 __tda18271_write_regs(fe, R_EP1, 1, false);
518
519 i2c_unlock_adapter(priv->i2c_props.adap);
520 tda18271_i2c_gate_ctrl(fe, 0);
483 521
484 return 0; 522 return 0;
485} 523}
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 824f1911ee21..3d7526e28d42 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -500,7 +500,7 @@ static int af9015_read_config(struct dvb_usb_device *d)
500 case 3: 500 case 3:
501 state->af9013_config[i].clock = 25000000; 501 state->af9013_config[i].clock = 25000000;
502 break; 502 break;
503 }; 503 }
504 dev_dbg(&d->udev->dev, "%s: [%d] xtal=%d set clock=%d\n", 504 dev_dbg(&d->udev->dev, "%s: [%d] xtal=%d set clock=%d\n",
505 __func__, i, val, 505 __func__, i, val,
506 state->af9013_config[i].clock); 506 state->af9013_config[i].clock);
@@ -568,7 +568,7 @@ static int af9015_read_config(struct dvb_usb_device *d)
568 "supported, please report!\n", 568 "supported, please report!\n",
569 KBUILD_MODNAME, val); 569 KBUILD_MODNAME, val);
570 return -ENODEV; 570 return -ENODEV;
571 }; 571 }
572 572
573 state->af9013_config[i].tuner = val; 573 state->af9013_config[i].tuner = val;
574 dev_dbg(&d->udev->dev, "%s: [%d] tuner id=%d\n", 574 dev_dbg(&d->udev->dev, "%s: [%d] tuner id=%d\n",
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index aabd3fc03ea7..ea27eaff4e34 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -520,7 +520,7 @@ static int af9035_read_config(struct dvb_usb_device *d)
520 dev_warn(&d->udev->dev, "%s: tuner id=%02x not " \ 520 dev_warn(&d->udev->dev, "%s: tuner id=%02x not " \
521 "supported, please report!", 521 "supported, please report!",
522 KBUILD_MODNAME, tmp); 522 KBUILD_MODNAME, tmp);
523 }; 523 }
524 524
525 /* tuner IF frequency */ 525 /* tuner IF frequency */
526 ret = af9035_rd_reg(d, EEPROM_1_IFFREQ_L + eeprom_shift, &tmp); 526 ret = af9035_rd_reg(d, EEPROM_1_IFFREQ_L + eeprom_shift, &tmp);
diff --git a/drivers/media/usb/dvb-usb/a800.c b/drivers/media/usb/dvb-usb/a800.c
index 8d7fef84afd8..83684ed023cd 100644
--- a/drivers/media/usb/dvb-usb/a800.c
+++ b/drivers/media/usb/dvb-usb/a800.c
@@ -93,7 +93,7 @@ static int a800_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
93 /* call the universal NEC remote processor, to find out the key's state and event */ 93 /* call the universal NEC remote processor, to find out the key's state and event */
94 dvb_usb_nec_rc_key_to_event(d,key,event,state); 94 dvb_usb_nec_rc_key_to_event(d,key,event,state);
95 if (key[0] != 0) 95 if (key[0] != 0)
96 deb_rc("key: %x %x %x %x %x\n",key[0],key[1],key[2],key[3],key[4]); 96 deb_rc("key: %*ph\n", 5, key);
97 ret = 0; 97 ret = 0;
98out: 98out:
99 kfree(key); 99 kfree(key);
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
index 0a98548ecd17..9fd1527494eb 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
@@ -172,8 +172,7 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
172 if (*event != d->last_event) 172 if (*event != d->last_event)
173 st->rc_counter = 0; 173 st->rc_counter = 0;
174 174
175 deb_rc("key: %x %x %x %x %x\n", 175 deb_rc("key: %*ph\n", 5, key);
176 key[0], key[1], key[2], key[3], key[4]);
177 } 176 }
178 return 0; 177 return 0;
179} 178}
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index a76bbb29ca36..af0d4321845b 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -473,7 +473,7 @@ int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
473 dvb_usb_generic_rw(d,&cmd,1,key,5,0); 473 dvb_usb_generic_rw(d,&cmd,1,key,5,0);
474 dvb_usb_nec_rc_key_to_event(d,key,event,state); 474 dvb_usb_nec_rc_key_to_event(d,key,event,state);
475 if (key[0] != 0) 475 if (key[0] != 0)
476 deb_info("key: %x %x %x %x %x\n",key[0],key[1],key[2],key[3],key[4]); 476 deb_info("key: %*ph\n", 5, key);
477 return 0; 477 return 0;
478} 478}
479EXPORT_SYMBOL(dibusb_rc_query); 479EXPORT_SYMBOL(dibusb_rc_query);
diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
index ff34419a4c88..772bde3c5020 100644
--- a/drivers/media/usb/dvb-usb/digitv.c
+++ b/drivers/media/usb/dvb-usb/digitv.c
@@ -253,7 +253,7 @@ static int digitv_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
253 } 253 }
254 254
255 if (key[0] != 0) 255 if (key[0] != 0)
256 deb_rc("key: %x %x %x %x %x\n",key[0],key[1],key[2],key[3],key[4]); 256 deb_rc("key: %*ph\n", 5, key);
257 return 0; 257 return 0;
258} 258}
259 259
diff --git a/drivers/media/usb/dvb-usb/dtt200u.c b/drivers/media/usb/dvb-usb/dtt200u.c
index 66f205c112b2..c357fb3b0a88 100644
--- a/drivers/media/usb/dvb-usb/dtt200u.c
+++ b/drivers/media/usb/dvb-usb/dtt200u.c
@@ -84,7 +84,7 @@ static int dtt200u_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
84 dvb_usb_generic_rw(d,&cmd,1,key,5,0); 84 dvb_usb_generic_rw(d,&cmd,1,key,5,0);
85 dvb_usb_nec_rc_key_to_event(d,key,event,state); 85 dvb_usb_nec_rc_key_to_event(d,key,event,state);
86 if (key[0] != 0) 86 if (key[0] != 0)
87 deb_info("key: %x %x %x %x %x\n",key[0],key[1],key[2],key[3],key[4]); 87 deb_info("key: %*ph\n", 5, key);
88 return 0; 88 return 0;
89} 89}
90 90
diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c
index 288af29a8bb7..661bb75be955 100644
--- a/drivers/media/usb/dvb-usb/m920x.c
+++ b/drivers/media/usb/dvb-usb/m920x.c
@@ -358,7 +358,7 @@ static int m920x_firmware_download(struct usb_device *udev, const struct firmwar
358 358
359 if ((ret = m920x_read(udev, M9206_FILTER, 0x0, 0x8000, read, 4)) != 0) 359 if ((ret = m920x_read(udev, M9206_FILTER, 0x0, 0x8000, read, 4)) != 0)
360 goto done; 360 goto done;
361 deb("%x %x %x %x\n", read[0], read[1], read[2], read[3]); 361 deb("%*ph\n", 4, read);
362 362
363 if ((ret = m920x_read(udev, M9206_FW, 0x0, 0x0, read, 1)) != 0) 363 if ((ret = m920x_read(udev, M9206_FW, 0x0, 0x0, read, 1)) != 0)
364 goto done; 364 goto done;
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index acefaa89cc53..7a8c8c18590f 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -677,6 +677,7 @@ static struct usb_device_id technisat_usb2_id_table[] = {
677 { USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_DVB_S2) }, 677 { USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_DVB_S2) },
678 { 0 } /* Terminating entry */ 678 { 0 } /* Terminating entry */
679}; 679};
680MODULE_DEVICE_TABLE(usb, technisat_usb2_id_table);
680 681
681/* device description */ 682/* device description */
682static struct dvb_usb_device_properties technisat_usb2_devices = { 683static struct dvb_usb_device_properties technisat_usb2_devices = {
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index f7297ae76b48..16a84f9f46d8 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -2203,7 +2203,7 @@ EXPORT_SYMBOL_GPL(em28xx_tuner_callback);
2203 2203
2204static inline void em28xx_set_model(struct em28xx *dev) 2204static inline void em28xx_set_model(struct em28xx *dev)
2205{ 2205{
2206 memcpy(&dev->board, &em28xx_boards[dev->model], sizeof(dev->board)); 2206 dev->board = em28xx_boards[dev->model];
2207 2207
2208 /* Those are the default values for the majority of boards 2208 /* Those are the default values for the majority of boards
2209 Use those values if not specified otherwise at boards entry 2209 Use those values if not specified otherwise at boards entry
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 913e5227897a..13ae821949e9 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -574,18 +574,19 @@ static void pctv_520e_init(struct em28xx *dev)
574 i2c_master_send(&dev->i2c_client, regs[i].r, regs[i].len); 574 i2c_master_send(&dev->i2c_client, regs[i].r, regs[i].len);
575}; 575};
576 576
577static int em28xx_pctv_290e_set_lna(struct dvb_frontend *fe, int val) 577static int em28xx_pctv_290e_set_lna(struct dvb_frontend *fe)
578{ 578{
579 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
579 struct em28xx *dev = fe->dvb->priv; 580 struct em28xx *dev = fe->dvb->priv;
580#ifdef CONFIG_GPIOLIB 581#ifdef CONFIG_GPIOLIB
581 struct em28xx_dvb *dvb = dev->dvb; 582 struct em28xx_dvb *dvb = dev->dvb;
582 int ret; 583 int ret;
583 unsigned long flags; 584 unsigned long flags;
584 585
585 if (val) 586 if (c->lna == 1)
586 flags = GPIOF_OUT_INIT_LOW; 587 flags = GPIOF_OUT_INIT_HIGH; /* enable LNA */
587 else 588 else
588 flags = GPIOF_OUT_INIT_HIGH; 589 flags = GPIOF_OUT_INIT_LOW; /* disable LNA */
589 590
590 ret = gpio_request_one(dvb->lna_gpio, flags, NULL); 591 ret = gpio_request_one(dvb->lna_gpio, flags, NULL);
591 if (ret) 592 if (ret)
@@ -595,8 +596,8 @@ static int em28xx_pctv_290e_set_lna(struct dvb_frontend *fe, int val)
595 596
596 return ret; 597 return ret;
597#else 598#else
598 dev_warn(&dev->udev->dev, "%s: LNA control is disabled\n", 599 dev_warn(&dev->udev->dev, "%s: LNA control is disabled (lna=%u)\n",
599 KBUILD_MODNAME); 600 KBUILD_MODNAME, c->lna);
600 return 0; 601 return 0;
601#endif 602#endif
602} 603}
diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c
index b62740846061..34a26e0cfe77 100644
--- a/drivers/media/usb/stk1160/stk1160-core.c
+++ b/drivers/media/usb/stk1160/stk1160-core.c
@@ -100,12 +100,21 @@ int stk1160_write_reg(struct stk1160 *dev, u16 reg, u16 value)
100 100
101void stk1160_select_input(struct stk1160 *dev) 101void stk1160_select_input(struct stk1160 *dev)
102{ 102{
103 int route;
103 static const u8 gctrl[] = { 104 static const u8 gctrl[] = {
104 0x98, 0x90, 0x88, 0x80 105 0x98, 0x90, 0x88, 0x80, 0x98
105 }; 106 };
106 107
107 if (dev->ctl_input < ARRAY_SIZE(gctrl)) 108 if (dev->ctl_input == STK1160_SVIDEO_INPUT)
109 route = SAA7115_SVIDEO3;
110 else
111 route = SAA7115_COMPOSITE0;
112
113 if (dev->ctl_input < ARRAY_SIZE(gctrl)) {
114 v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_routing,
115 route, 0, 0);
108 stk1160_write_reg(dev, STK1160_GCTRL, gctrl[dev->ctl_input]); 116 stk1160_write_reg(dev, STK1160_GCTRL, gctrl[dev->ctl_input]);
117 }
109} 118}
110 119
111/* TODO: We should break this into pieces */ 120/* TODO: We should break this into pieces */
@@ -351,8 +360,6 @@ static int stk1160_probe(struct usb_interface *interface,
351 360
352 /* i2c reset saa711x */ 361 /* i2c reset saa711x */
353 v4l2_device_call_all(&dev->v4l2_dev, 0, core, reset, 0); 362 v4l2_device_call_all(&dev->v4l2_dev, 0, core, reset, 0);
354 v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_routing,
355 0, 0, 0);
356 v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0); 363 v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
357 364
358 /* reset stk1160 to default values */ 365 /* reset stk1160 to default values */
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index fe6e857969ca..6694f9e2ca57 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -419,7 +419,12 @@ static int vidioc_enum_input(struct file *file, void *priv,
419 if (i->index > STK1160_MAX_INPUT) 419 if (i->index > STK1160_MAX_INPUT)
420 return -EINVAL; 420 return -EINVAL;
421 421
422 sprintf(i->name, "Composite%d", i->index); 422 /* S-Video special handling */
423 if (i->index == STK1160_SVIDEO_INPUT)
424 sprintf(i->name, "S-Video");
425 else
426 sprintf(i->name, "Composite%d", i->index);
427
423 i->type = V4L2_INPUT_TYPE_CAMERA; 428 i->type = V4L2_INPUT_TYPE_CAMERA;
424 i->std = dev->vdev.tvnorms; 429 i->std = dev->vdev.tvnorms;
425 return 0; 430 return 0;
diff --git a/drivers/media/usb/stk1160/stk1160.h b/drivers/media/usb/stk1160/stk1160.h
index 3feba0033f98..68c8707d36ab 100644
--- a/drivers/media/usb/stk1160/stk1160.h
+++ b/drivers/media/usb/stk1160/stk1160.h
@@ -46,7 +46,8 @@
46 46
47#define STK1160_MIN_PKT_SIZE 3072 47#define STK1160_MIN_PKT_SIZE 3072
48 48
49#define STK1160_MAX_INPUT 3 49#define STK1160_MAX_INPUT 4
50#define STK1160_SVIDEO_INPUT 4
50 51
51#define STK1160_I2C_TIMEOUT 100 52#define STK1160_I2C_TIMEOUT 100
52 53
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index 5577381b5bf0..18a91fae6bc1 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -122,21 +122,27 @@ static struct vb2_ops uvc_queue_qops = {
122 .buf_finish = uvc_buffer_finish, 122 .buf_finish = uvc_buffer_finish,
123}; 123};
124 124
125void uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type, 125int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
126 int drop_corrupted) 126 int drop_corrupted)
127{ 127{
128 int ret;
129
128 queue->queue.type = type; 130 queue->queue.type = type;
129 queue->queue.io_modes = VB2_MMAP | VB2_USERPTR; 131 queue->queue.io_modes = VB2_MMAP | VB2_USERPTR;
130 queue->queue.drv_priv = queue; 132 queue->queue.drv_priv = queue;
131 queue->queue.buf_struct_size = sizeof(struct uvc_buffer); 133 queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
132 queue->queue.ops = &uvc_queue_qops; 134 queue->queue.ops = &uvc_queue_qops;
133 queue->queue.mem_ops = &vb2_vmalloc_memops; 135 queue->queue.mem_ops = &vb2_vmalloc_memops;
134 vb2_queue_init(&queue->queue); 136 ret = vb2_queue_init(&queue->queue);
137 if (ret)
138 return ret;
135 139
136 mutex_init(&queue->mutex); 140 mutex_init(&queue->mutex);
137 spin_lock_init(&queue->irqlock); 141 spin_lock_init(&queue->irqlock);
138 INIT_LIST_HEAD(&queue->irqqueue); 142 INIT_LIST_HEAD(&queue->irqqueue);
139 queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0; 143 queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0;
144
145 return 0;
140} 146}
141 147
142/* ----------------------------------------------------------------------------- 148/* -----------------------------------------------------------------------------
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 1c15b4227bdb..57c3076a4625 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1755,7 +1755,9 @@ int uvc_video_init(struct uvc_streaming *stream)
1755 atomic_set(&stream->active, 0); 1755 atomic_set(&stream->active, 0);
1756 1756
1757 /* Initialize the video buffers queue. */ 1757 /* Initialize the video buffers queue. */
1758 uvc_queue_init(&stream->queue, stream->type, !uvc_no_drop_param); 1758 ret = uvc_queue_init(&stream->queue, stream->type, !uvc_no_drop_param);
1759 if (ret)
1760 return ret;
1759 1761
1760 /* Alternate setting 0 should be the default, yet the XBox Live Vision 1762 /* Alternate setting 0 should be the default, yet the XBox Live Vision
1761 * Cam (and possibly other devices) crash or otherwise misbehave if 1763 * Cam (and possibly other devices) crash or otherwise misbehave if
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 3764040475bb..af216ec45e39 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -600,7 +600,7 @@ extern struct uvc_driver uvc_driver;
600extern struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id); 600extern struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id);
601 601
602/* Video buffers queue management. */ 602/* Video buffers queue management. */
603extern void uvc_queue_init(struct uvc_video_queue *queue, 603extern int uvc_queue_init(struct uvc_video_queue *queue,
604 enum v4l2_buf_type type, int drop_corrupted); 604 enum v4l2_buf_type type, int drop_corrupted);
605extern int uvc_alloc_buffers(struct uvc_video_queue *queue, 605extern int uvc_alloc_buffers(struct uvc_video_queue *queue,
606 struct v4l2_requestbuffers *rb); 606 struct v4l2_requestbuffers *rb);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 631cdc0e0bda..f6ee201d9347 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -384,6 +384,25 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
384 "Extended SAR", 384 "Extended SAR",
385 NULL, 385 NULL,
386 }; 386 };
387 static const char * const h264_fp_arrangement_type[] = {
388 "Checkerboard",
389 "Column",
390 "Row",
391 "Side by Side",
392 "Top Bottom",
393 "Temporal",
394 NULL,
395 };
396 static const char * const h264_fmo_map_type[] = {
397 "Interleaved Slices",
398 "Scattered Slices",
399 "Foreground with Leftover",
400 "Box Out",
401 "Raster Scan",
402 "Wipe Scan",
403 "Explicit",
404 NULL,
405 };
387 static const char * const mpeg_mpeg4_level[] = { 406 static const char * const mpeg_mpeg4_level[] = {
388 "0", 407 "0",
389 "0b", 408 "0b",
@@ -508,6 +527,10 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
508 return h264_profile; 527 return h264_profile;
509 case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: 528 case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
510 return vui_sar_idc; 529 return vui_sar_idc;
530 case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
531 return h264_fp_arrangement_type;
532 case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE:
533 return h264_fmo_map_type;
511 case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: 534 case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
512 return mpeg_mpeg4_level; 535 return mpeg_mpeg4_level;
513 case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: 536 case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
@@ -643,6 +666,22 @@ const char *v4l2_ctrl_get_name(u32 id)
643 case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH: return "Horizontal Size of SAR"; 666 case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH: return "Horizontal Size of SAR";
644 case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable"; 667 case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable";
645 case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC"; 668 case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC";
669 case V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING: return "H264 Enable Frame Packing SEI";
670 case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0: return "H264 Set Curr. Frame as Frame0";
671 case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: return "H264 FP Arrangement Type";
672 case V4L2_CID_MPEG_VIDEO_H264_FMO: return "H264 Flexible MB Ordering";
673 case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: return "H264 Map Type for FMO";
674 case V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP: return "H264 FMO Number of Slice Groups";
675 case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION: return "H264 FMO Direction of Change";
676 case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE: return "H264 FMO Size of 1st Slice Grp";
677 case V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH: return "H264 FMO No. of Consecutive MBs";
678 case V4L2_CID_MPEG_VIDEO_H264_ASO: return "H264 Arbitrary Slice Ordering";
679 case V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER: return "H264 ASO Slice Order";
680 case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING: return "Enable H264 Hierarchical Coding";
681 case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: return "H264 Hierarchical Coding Type";
682 case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER:return "H264 Number of HC Layers";
683 case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP:
684 return "H264 Set QP Value for HC Layers";
646 case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value"; 685 case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value";
647 case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value"; 686 case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value";
648 case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value"; 687 case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value";
@@ -657,6 +696,7 @@ const char *v4l2_ctrl_get_name(u32 id)
657 case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size"; 696 case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size";
658 case V4L2_CID_MPEG_VIDEO_DEC_PTS: return "Video Decoder PTS"; 697 case V4L2_CID_MPEG_VIDEO_DEC_PTS: return "Video Decoder PTS";
659 case V4L2_CID_MPEG_VIDEO_DEC_FRAME: return "Video Decoder Frame Count"; 698 case V4L2_CID_MPEG_VIDEO_DEC_FRAME: return "Video Decoder Frame Count";
699 case V4L2_CID_MPEG_VIDEO_VBV_DELAY: return "Initial Delay for VBV Control";
660 700
661 /* CAMERA controls */ 701 /* CAMERA controls */
662 /* Keep the order of the 'case's the same as in videodev2.h! */ 702 /* Keep the order of the 'case's the same as in videodev2.h! */
@@ -749,6 +789,7 @@ const char *v4l2_ctrl_get_name(u32 id)
749 case V4L2_CID_IMAGE_PROC_CLASS: return "Image Processing Controls"; 789 case V4L2_CID_IMAGE_PROC_CLASS: return "Image Processing Controls";
750 case V4L2_CID_LINK_FREQ: return "Link Frequency"; 790 case V4L2_CID_LINK_FREQ: return "Link Frequency";
751 case V4L2_CID_PIXEL_RATE: return "Pixel Rate"; 791 case V4L2_CID_PIXEL_RATE: return "Pixel Rate";
792 case V4L2_CID_TEST_PATTERN: return "Test Pattern";
752 793
753 /* DV controls */ 794 /* DV controls */
754 case V4L2_CID_DV_CLASS: return "Digital Video Controls"; 795 case V4L2_CID_DV_CLASS: return "Digital Video Controls";
@@ -853,6 +894,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
853 case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: 894 case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
854 case V4L2_CID_MPEG_VIDEO_H264_PROFILE: 895 case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
855 case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: 896 case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
897 case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
898 case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE:
856 case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: 899 case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
857 case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: 900 case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
858 case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: 901 case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
@@ -862,6 +905,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
862 case V4L2_CID_DV_TX_MODE: 905 case V4L2_CID_DV_TX_MODE:
863 case V4L2_CID_DV_TX_RGB_RANGE: 906 case V4L2_CID_DV_TX_RGB_RANGE:
864 case V4L2_CID_DV_RX_RGB_RANGE: 907 case V4L2_CID_DV_RX_RGB_RANGE:
908 case V4L2_CID_TEST_PATTERN:
865 *type = V4L2_CTRL_TYPE_MENU; 909 *type = V4L2_CTRL_TYPE_MENU;
866 break; 910 break;
867 case V4L2_CID_LINK_FREQ: 911 case V4L2_CID_LINK_FREQ:
@@ -1648,6 +1692,36 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
1648} 1692}
1649EXPORT_SYMBOL(v4l2_ctrl_new_std_menu); 1693EXPORT_SYMBOL(v4l2_ctrl_new_std_menu);
1650 1694
1695/* Helper function for standard menu controls with driver defined menu */
1696struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
1697 const struct v4l2_ctrl_ops *ops, u32 id, s32 max,
1698 s32 mask, s32 def, const char * const *qmenu)
1699{
1700 enum v4l2_ctrl_type type;
1701 const char *name;
1702 u32 flags;
1703 s32 step;
1704 s32 min;
1705
1706 /* v4l2_ctrl_new_std_menu_items() should only be called for
1707 * standard controls without a standard menu.
1708 */
1709 if (v4l2_ctrl_get_menu(id)) {
1710 handler_set_err(hdl, -EINVAL);
1711 return NULL;
1712 }
1713
1714 v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
1715 if (type != V4L2_CTRL_TYPE_MENU || qmenu == NULL) {
1716 handler_set_err(hdl, -EINVAL);
1717 return NULL;
1718 }
1719 return v4l2_ctrl_new(hdl, ops, id, name, type, 0, max, mask, def,
1720 flags, qmenu, NULL, NULL);
1721
1722}
1723EXPORT_SYMBOL(v4l2_ctrl_new_std_menu_items);
1724
1651/* Helper function for standard integer menu controls */ 1725/* Helper function for standard integer menu controls */
1652struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl, 1726struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
1653 const struct v4l2_ctrl_ops *ops, 1727 const struct v4l2_ctrl_ops *ops,
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 9d3e46c446ad..8f388ff31ebb 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -157,8 +157,7 @@ static const char *v4l2_memory_names[] = {
157 [V4L2_MEMORY_OVERLAY] = "overlay", 157 [V4L2_MEMORY_OVERLAY] = "overlay",
158}; 158};
159 159
160#define prt_names(a, arr) ((((a) >= 0) && ((a) < ARRAY_SIZE(arr))) ? \ 160#define prt_names(a, arr) (((unsigned)(a)) < ARRAY_SIZE(arr) ? arr[a] : "unknown")
161 arr[a] : "unknown")
162 161
163/* ------------------------------------------------------------------ */ 162/* ------------------------------------------------------------------ */
164/* debug help functions */ 163/* debug help functions */
@@ -2188,6 +2187,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
2188 int ret = 0; 2187 int ret = 0;
2189 2188
2190 switch (cmd) { 2189 switch (cmd) {
2190 case VIDIOC_PREPARE_BUF:
2191 case VIDIOC_QUERYBUF: 2191 case VIDIOC_QUERYBUF:
2192 case VIDIOC_QBUF: 2192 case VIDIOC_QBUF:
2193 case VIDIOC_DQBUF: { 2193 case VIDIOC_DQBUF: {
@@ -2211,6 +2211,10 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
2211 struct v4l2_subdev_edid *edid = parg; 2211 struct v4l2_subdev_edid *edid = parg;
2212 2212
2213 if (edid->blocks) { 2213 if (edid->blocks) {
2214 if (edid->blocks > 256) {
2215 ret = -EINVAL;
2216 break;
2217 }
2214 *user_ptr = (void __user *)edid->edid; 2218 *user_ptr = (void __user *)edid->edid;
2215 *kernel_ptr = (void *)&edid->edid; 2219 *kernel_ptr = (void *)&edid->edid;
2216 *array_size = edid->blocks * 128; 2220 *array_size = edid->blocks * 128;
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index e6a26b433e87..432df119af27 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -276,6 +276,9 @@ static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
276 */ 276 */
277static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b) 277static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
278{ 278{
279 if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
280 return 0;
281
279 /* Is memory for copying plane information present? */ 282 /* Is memory for copying plane information present? */
280 if (NULL == b->m.planes) { 283 if (NULL == b->m.planes) {
281 dprintk(1, "Multi-planar buffer passed but " 284 dprintk(1, "Multi-planar buffer passed but "
@@ -331,10 +334,9 @@ static bool __buffers_in_use(struct vb2_queue *q)
331 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be 334 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
332 * returned to userspace 335 * returned to userspace
333 */ 336 */
334static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b) 337static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
335{ 338{
336 struct vb2_queue *q = vb->vb2_queue; 339 struct vb2_queue *q = vb->vb2_queue;
337 int ret;
338 340
339 /* Copy back data such as timestamp, flags, etc. */ 341 /* Copy back data such as timestamp, flags, etc. */
340 memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m)); 342 memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
@@ -342,14 +344,11 @@ static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
342 b->reserved = vb->v4l2_buf.reserved; 344 b->reserved = vb->v4l2_buf.reserved;
343 345
344 if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) { 346 if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
345 ret = __verify_planes_array(vb, b);
346 if (ret)
347 return ret;
348
349 /* 347 /*
350 * Fill in plane-related data if userspace provided an array 348 * Fill in plane-related data if userspace provided an array
351 * for it. The memory and size is verified above. 349 * for it. The caller has already verified memory and size.
352 */ 350 */
351 b->length = vb->num_planes;
353 memcpy(b->m.planes, vb->v4l2_planes, 352 memcpy(b->m.planes, vb->v4l2_planes,
354 b->length * sizeof(struct v4l2_plane)); 353 b->length * sizeof(struct v4l2_plane));
355 } else { 354 } else {
@@ -391,8 +390,6 @@ static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
391 390
392 if (__buffer_in_use(q, vb)) 391 if (__buffer_in_use(q, vb))
393 b->flags |= V4L2_BUF_FLAG_MAPPED; 392 b->flags |= V4L2_BUF_FLAG_MAPPED;
394
395 return 0;
396} 393}
397 394
398/** 395/**
@@ -411,6 +408,7 @@ static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
411int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b) 408int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
412{ 409{
413 struct vb2_buffer *vb; 410 struct vb2_buffer *vb;
411 int ret;
414 412
415 if (b->type != q->type) { 413 if (b->type != q->type) {
416 dprintk(1, "querybuf: wrong buffer type\n"); 414 dprintk(1, "querybuf: wrong buffer type\n");
@@ -422,8 +420,10 @@ int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
422 return -EINVAL; 420 return -EINVAL;
423 } 421 }
424 vb = q->bufs[b->index]; 422 vb = q->bufs[b->index];
425 423 ret = __verify_planes_array(vb, b);
426 return __fill_v4l2_buffer(vb, b); 424 if (!ret)
425 __fill_v4l2_buffer(vb, b);
426 return ret;
427} 427}
428EXPORT_SYMBOL(vb2_querybuf); 428EXPORT_SYMBOL(vb2_querybuf);
429 429
@@ -813,24 +813,16 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
813EXPORT_SYMBOL_GPL(vb2_buffer_done); 813EXPORT_SYMBOL_GPL(vb2_buffer_done);
814 814
815/** 815/**
816 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in 816 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
817 * a v4l2_buffer by the userspace 817 * v4l2_buffer by the userspace. The caller has already verified that struct
818 * v4l2_buffer has a valid number of planes.
818 */ 819 */
819static int __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b, 820static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
820 struct v4l2_plane *v4l2_planes) 821 struct v4l2_plane *v4l2_planes)
821{ 822{
822 unsigned int plane; 823 unsigned int plane;
823 int ret;
824 824
825 if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) { 825 if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
826 /*
827 * Verify that the userspace gave us a valid array for
828 * plane information.
829 */
830 ret = __verify_planes_array(vb, b);
831 if (ret)
832 return ret;
833
834 /* Fill in driver-provided information for OUTPUT types */ 826 /* Fill in driver-provided information for OUTPUT types */
835 if (V4L2_TYPE_IS_OUTPUT(b->type)) { 827 if (V4L2_TYPE_IS_OUTPUT(b->type)) {
836 /* 828 /*
@@ -872,8 +864,6 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
872 vb->v4l2_buf.field = b->field; 864 vb->v4l2_buf.field = b->field;
873 vb->v4l2_buf.timestamp = b->timestamp; 865 vb->v4l2_buf.timestamp = b->timestamp;
874 vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_STATE_FLAGS; 866 vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_STATE_FLAGS;
875
876 return 0;
877} 867}
878 868
879/** 869/**
@@ -888,10 +878,8 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
888 int ret; 878 int ret;
889 int write = !V4L2_TYPE_IS_OUTPUT(q->type); 879 int write = !V4L2_TYPE_IS_OUTPUT(q->type);
890 880
891 /* Verify and copy relevant information provided by the userspace */ 881 /* Copy relevant information provided by the userspace */
892 ret = __fill_vb2_buffer(vb, b, planes); 882 __fill_vb2_buffer(vb, b, planes);
893 if (ret)
894 return ret;
895 883
896 for (plane = 0; plane < vb->num_planes; ++plane) { 884 for (plane = 0; plane < vb->num_planes; ++plane) {
897 /* Skip the plane if already verified */ 885 /* Skip the plane if already verified */
@@ -966,7 +954,8 @@ err:
966 */ 954 */
967static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b) 955static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
968{ 956{
969 return __fill_vb2_buffer(vb, b, vb->v4l2_planes); 957 __fill_vb2_buffer(vb, b, vb->v4l2_planes);
958 return 0;
970} 959}
971 960
972/** 961/**
@@ -1059,7 +1048,9 @@ int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
1059 dprintk(1, "%s(): invalid buffer state %d\n", __func__, vb->state); 1048 dprintk(1, "%s(): invalid buffer state %d\n", __func__, vb->state);
1060 return -EINVAL; 1049 return -EINVAL;
1061 } 1050 }
1062 1051 ret = __verify_planes_array(vb, b);
1052 if (ret < 0)
1053 return ret;
1063 ret = __buf_prepare(vb, b); 1054 ret = __buf_prepare(vb, b);
1064 if (ret < 0) 1055 if (ret < 0)
1065 return ret; 1056 return ret;
@@ -1147,6 +1138,9 @@ int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
1147 ret = -EINVAL; 1138 ret = -EINVAL;
1148 goto unlock; 1139 goto unlock;
1149 } 1140 }
1141 ret = __verify_planes_array(vb, b);
1142 if (ret)
1143 goto unlock;
1150 1144
1151 switch (vb->state) { 1145 switch (vb->state) {
1152 case VB2_BUF_STATE_DEQUEUED: 1146 case VB2_BUF_STATE_DEQUEUED:
@@ -1243,8 +1237,10 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
1243 * the locks or return an error if one occurred. 1237 * the locks or return an error if one occurred.
1244 */ 1238 */
1245 call_qop(q, wait_finish, q); 1239 call_qop(q, wait_finish, q);
1246 if (ret) 1240 if (ret) {
1241 dprintk(1, "Sleep was interrupted\n");
1247 return ret; 1242 return ret;
1243 }
1248 } 1244 }
1249 return 0; 1245 return 0;
1250} 1246}
@@ -1255,7 +1251,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
1255 * Will sleep if required for nonblocking == false. 1251 * Will sleep if required for nonblocking == false.
1256 */ 1252 */
1257static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, 1253static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
1258 int nonblocking) 1254 struct v4l2_buffer *b, int nonblocking)
1259{ 1255{
1260 unsigned long flags; 1256 unsigned long flags;
1261 int ret; 1257 int ret;
@@ -1273,10 +1269,16 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
1273 */ 1269 */
1274 spin_lock_irqsave(&q->done_lock, flags); 1270 spin_lock_irqsave(&q->done_lock, flags);
1275 *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry); 1271 *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
1276 list_del(&(*vb)->done_entry); 1272 /*
1273 * Only remove the buffer from done_list if v4l2_buffer can handle all
1274 * the planes.
1275 */
1276 ret = __verify_planes_array(*vb, b);
1277 if (!ret)
1278 list_del(&(*vb)->done_entry);
1277 spin_unlock_irqrestore(&q->done_lock, flags); 1279 spin_unlock_irqrestore(&q->done_lock, flags);
1278 1280
1279 return 0; 1281 return ret;
1280} 1282}
1281 1283
1282/** 1284/**
@@ -1335,12 +1337,9 @@ int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
1335 dprintk(1, "dqbuf: invalid buffer type\n"); 1337 dprintk(1, "dqbuf: invalid buffer type\n");
1336 return -EINVAL; 1338 return -EINVAL;
1337 } 1339 }
1338 1340 ret = __vb2_get_done_vb(q, &vb, b, nonblocking);
1339 ret = __vb2_get_done_vb(q, &vb, nonblocking); 1341 if (ret < 0)
1340 if (ret < 0) {
1341 dprintk(1, "dqbuf: error getting next done buffer\n");
1342 return ret; 1342 return ret;
1343 }
1344 1343
1345 ret = call_qop(q, buf_finish, vb); 1344 ret = call_qop(q, buf_finish, vb);
1346 if (ret) { 1345 if (ret) {
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 99c73352c430..b151b7c1bd59 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -60,16 +60,6 @@ config ATMEL_PWM
60 purposes including software controlled power-efficient backlights 60 purposes including software controlled power-efficient backlights
61 on LCD displays, motor control, and waveform generation. 61 on LCD displays, motor control, and waveform generation.
62 62
63config AB8500_PWM
64 bool "AB8500 PWM support"
65 depends on AB8500_CORE && ARCH_U8500
66 select HAVE_PWM
67 depends on !PWM
68 help
69 This driver exports functions to enable/disble/config/free Pulse
70 Width Modulation in the Analog Baseband Chip AB8500.
71 It is used by led and backlight driver to control the intensity.
72
73config ATMEL_TCLIB 63config ATMEL_TCLIB
74 bool "Atmel AT32/AT91 Timer/Counter Library" 64 bool "Atmel AT32/AT91 Timer/Counter Library"
75 depends on (AVR32 || ARCH_AT91) 65 depends on (AVR32 || ARCH_AT91)
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index b88df7a350b8..2129377c0de6 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -44,7 +44,6 @@ obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
44obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o 44obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
45obj-$(CONFIG_PCH_PHUB) += pch_phub.o 45obj-$(CONFIG_PCH_PHUB) += pch_phub.o
46obj-y += ti-st/ 46obj-y += ti-st/
47obj-$(CONFIG_AB8500_PWM) += ab8500-pwm.o
48obj-y += lis3lv02d/ 47obj-y += lis3lv02d/
49obj-y += carma/ 48obj-y += carma/
50obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o 49obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 8ac5246e2ab2..06c42cfb7c34 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -26,6 +26,7 @@
26#include <linux/suspend.h> 26#include <linux/suspend.h>
27#include <linux/fault-inject.h> 27#include <linux/fault-inject.h>
28#include <linux/random.h> 28#include <linux/random.h>
29#include <linux/slab.h>
29 30
30#include <linux/mmc/card.h> 31#include <linux/mmc/card.h>
31#include <linux/mmc/host.h> 32#include <linux/mmc/host.h>
@@ -41,6 +42,12 @@
41#include "sd_ops.h" 42#include "sd_ops.h"
42#include "sdio_ops.h" 43#include "sdio_ops.h"
43 44
45/*
46 * Background operations can take a long time, depending on the housekeeping
47 * operations the card has to perform.
48 */
49#define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */
50
44static struct workqueue_struct *workqueue; 51static struct workqueue_struct *workqueue;
45static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; 52static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
46 53
@@ -245,6 +252,70 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
245 host->ops->request(host, mrq); 252 host->ops->request(host, mrq);
246} 253}
247 254
255/**
256 * mmc_start_bkops - start BKOPS for supported cards
257 * @card: MMC card to start BKOPS
258 * @form_exception: A flag to indicate if this function was
259 * called due to an exception raised by the card
260 *
261 * Start background operations whenever requested.
262 * When the urgent BKOPS bit is set in a R1 command response
263 * then background operations should be started immediately.
264*/
265void mmc_start_bkops(struct mmc_card *card, bool from_exception)
266{
267 int err;
268 int timeout;
269 bool use_busy_signal;
270
271 BUG_ON(!card);
272
273 if (!card->ext_csd.bkops_en || mmc_card_doing_bkops(card))
274 return;
275
276 err = mmc_read_bkops_status(card);
277 if (err) {
278 pr_err("%s: Failed to read bkops status: %d\n",
279 mmc_hostname(card->host), err);
280 return;
281 }
282
283 if (!card->ext_csd.raw_bkops_status)
284 return;
285
286 if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
287 from_exception)
288 return;
289
290 mmc_claim_host(card->host);
291 if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
292 timeout = MMC_BKOPS_MAX_TIMEOUT;
293 use_busy_signal = true;
294 } else {
295 timeout = 0;
296 use_busy_signal = false;
297 }
298
299 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
300 EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal);
301 if (err) {
302 pr_warn("%s: Error %d starting bkops\n",
303 mmc_hostname(card->host), err);
304 goto out;
305 }
306
307 /*
308 * For urgent bkops status (LEVEL_2 and more)
309 * bkops executed synchronously, otherwise
310 * the operation is in progress
311 */
312 if (!use_busy_signal)
313 mmc_card_set_doing_bkops(card);
314out:
315 mmc_release_host(card->host);
316}
317EXPORT_SYMBOL(mmc_start_bkops);
318
248static void mmc_wait_done(struct mmc_request *mrq) 319static void mmc_wait_done(struct mmc_request *mrq)
249{ 320{
250 complete(&mrq->completion); 321 complete(&mrq->completion);
@@ -354,6 +425,14 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
354 if (host->areq) { 425 if (host->areq) {
355 mmc_wait_for_req_done(host, host->areq->mrq); 426 mmc_wait_for_req_done(host, host->areq->mrq);
356 err = host->areq->err_check(host->card, host->areq); 427 err = host->areq->err_check(host->card, host->areq);
428 /*
429 * Check BKOPS urgency for each R1 response
430 */
431 if (host->card && mmc_card_mmc(host->card) &&
432 ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
433 (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
434 (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT))
435 mmc_start_bkops(host->card, true);
357 } 436 }
358 437
359 if (!err && areq) 438 if (!err && areq)
@@ -398,7 +477,7 @@ EXPORT_SYMBOL(mmc_wait_for_req);
398 * @card: the MMC card associated with the HPI transfer 477 * @card: the MMC card associated with the HPI transfer
399 * 478 *
400 * Issued High Priority Interrupt, and check for card status 479 * Issued High Priority Interrupt, and check for card status
401 * util out-of prg-state. 480 * until out-of prg-state.
402 */ 481 */
403int mmc_interrupt_hpi(struct mmc_card *card) 482int mmc_interrupt_hpi(struct mmc_card *card)
404{ 483{
@@ -424,8 +503,9 @@ int mmc_interrupt_hpi(struct mmc_card *card)
424 case R1_STATE_IDLE: 503 case R1_STATE_IDLE:
425 case R1_STATE_READY: 504 case R1_STATE_READY:
426 case R1_STATE_STBY: 505 case R1_STATE_STBY:
506 case R1_STATE_TRAN:
427 /* 507 /*
428 * In idle states, HPI is not needed and the caller 508 * In idle and transfer states, HPI is not needed and the caller
429 * can issue the next intended command immediately 509 * can issue the next intended command immediately
430 */ 510 */
431 goto out; 511 goto out;
@@ -489,6 +569,64 @@ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries
489EXPORT_SYMBOL(mmc_wait_for_cmd); 569EXPORT_SYMBOL(mmc_wait_for_cmd);
490 570
491/** 571/**
572 * mmc_stop_bkops - stop ongoing BKOPS
573 * @card: MMC card to check BKOPS
574 *
575 * Send HPI command to stop ongoing background operations to
576 * allow rapid servicing of foreground operations, e.g. read/
577 * writes. Wait until the card comes out of the programming state
578 * to avoid errors in servicing read/write requests.
579 */
580int mmc_stop_bkops(struct mmc_card *card)
581{
582 int err = 0;
583
584 BUG_ON(!card);
585 err = mmc_interrupt_hpi(card);
586
587 /*
588 * If err is EINVAL, we can't issue an HPI.
589 * It should complete the BKOPS.
590 */
591 if (!err || (err == -EINVAL)) {
592 mmc_card_clr_doing_bkops(card);
593 err = 0;
594 }
595
596 return err;
597}
598EXPORT_SYMBOL(mmc_stop_bkops);
599
600int mmc_read_bkops_status(struct mmc_card *card)
601{
602 int err;
603 u8 *ext_csd;
604
605 /*
606 * In future work, we should consider storing the entire ext_csd.
607 */
608 ext_csd = kmalloc(512, GFP_KERNEL);
609 if (!ext_csd) {
610 pr_err("%s: could not allocate buffer to receive the ext_csd.\n",
611 mmc_hostname(card->host));
612 return -ENOMEM;
613 }
614
615 mmc_claim_host(card->host);
616 err = mmc_send_ext_csd(card, ext_csd);
617 mmc_release_host(card->host);
618 if (err)
619 goto out;
620
621 card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
622 card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
623out:
624 kfree(ext_csd);
625 return err;
626}
627EXPORT_SYMBOL(mmc_read_bkops_status);
628
629/**
492 * mmc_set_data_timeout - set the timeout for a data command 630 * mmc_set_data_timeout - set the timeout for a data command
493 * @data: data phase for command 631 * @data: data phase for command
494 * @card: the MMC card associated with the data transfer 632 * @card: the MMC card associated with the data transfer
@@ -975,7 +1113,8 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
975 int tmp; 1113 int tmp;
976 int voltage; 1114 int voltage;
977 1115
978 /* REVISIT mmc_vddrange_to_ocrmask() may have set some 1116 /*
1117 * REVISIT mmc_vddrange_to_ocrmask() may have set some
979 * bits this regulator doesn't quite support ... don't 1118 * bits this regulator doesn't quite support ... don't
980 * be too picky, most cards and regulators are OK with 1119 * be too picky, most cards and regulators are OK with
981 * a 0.1V range goof (it's a small error percentage). 1120 * a 0.1V range goof (it's a small error percentage).
@@ -989,12 +1128,13 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
989 max_uV = min_uV + 100 * 1000; 1128 max_uV = min_uV + 100 * 1000;
990 } 1129 }
991 1130
992 /* avoid needless changes to this voltage; the regulator 1131 /*
993 * might not allow this operation 1132 * If we're using a fixed/static regulator, don't call
1133 * regulator_set_voltage; it would fail.
994 */ 1134 */
995 voltage = regulator_get_voltage(supply); 1135 voltage = regulator_get_voltage(supply);
996 1136
997 if (mmc->caps2 & MMC_CAP2_BROKEN_VOLTAGE) 1137 if (regulator_count_voltages(supply) == 1)
998 min_uV = max_uV = voltage; 1138 min_uV = max_uV = voltage;
999 1139
1000 if (voltage < 0) 1140 if (voltage < 0)
@@ -1133,48 +1273,6 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1133 mmc_host_clk_release(host); 1273 mmc_host_clk_release(host);
1134} 1274}
1135 1275
1136static void mmc_poweroff_notify(struct mmc_host *host)
1137{
1138 struct mmc_card *card;
1139 unsigned int timeout;
1140 unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
1141 int err = 0;
1142
1143 card = host->card;
1144 mmc_claim_host(host);
1145
1146 /*
1147 * Send power notify command only if card
1148 * is mmc and notify state is powered ON
1149 */
1150 if (card && mmc_card_mmc(card) &&
1151 (card->poweroff_notify_state == MMC_POWERED_ON)) {
1152
1153 if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
1154 notify_type = EXT_CSD_POWER_OFF_SHORT;
1155 timeout = card->ext_csd.generic_cmd6_time;
1156 card->poweroff_notify_state = MMC_POWEROFF_SHORT;
1157 } else {
1158 notify_type = EXT_CSD_POWER_OFF_LONG;
1159 timeout = card->ext_csd.power_off_longtime;
1160 card->poweroff_notify_state = MMC_POWEROFF_LONG;
1161 }
1162
1163 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1164 EXT_CSD_POWER_OFF_NOTIFICATION,
1165 notify_type, timeout);
1166
1167 if (err && err != -EBADMSG)
1168 pr_err("Device failed to respond within %d poweroff "
1169 "time. Forcefully powering down the device\n",
1170 timeout);
1171
1172 /* Set the card state to no notification after the poweroff */
1173 card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
1174 }
1175 mmc_release_host(host);
1176}
1177
1178/* 1276/*
1179 * Apply power to the MMC stack. This is a two-stage process. 1277 * Apply power to the MMC stack. This is a two-stage process.
1180 * First, we enable power to the card without the clock running. 1278 * First, we enable power to the card without the clock running.
@@ -1237,8 +1335,6 @@ static void mmc_power_up(struct mmc_host *host)
1237 1335
1238void mmc_power_off(struct mmc_host *host) 1336void mmc_power_off(struct mmc_host *host)
1239{ 1337{
1240 int err = 0;
1241
1242 if (host->ios.power_mode == MMC_POWER_OFF) 1338 if (host->ios.power_mode == MMC_POWER_OFF)
1243 return; 1339 return;
1244 1340
@@ -1247,22 +1343,6 @@ void mmc_power_off(struct mmc_host *host)
1247 host->ios.clock = 0; 1343 host->ios.clock = 0;
1248 host->ios.vdd = 0; 1344 host->ios.vdd = 0;
1249 1345
1250 /*
1251 * For eMMC 4.5 device send AWAKE command before
1252 * POWER_OFF_NOTIFY command, because in sleep state
1253 * eMMC 4.5 devices respond to only RESET and AWAKE cmd
1254 */
1255 if (host->card && mmc_card_is_sleep(host->card) &&
1256 host->bus_ops->resume) {
1257 err = host->bus_ops->resume(host);
1258
1259 if (!err)
1260 mmc_poweroff_notify(host);
1261 else
1262 pr_warning("%s: error %d during resume "
1263 "(continue with poweroff sequence)\n",
1264 mmc_hostname(host), err);
1265 }
1266 1346
1267 /* 1347 /*
1268 * Reset ocr mask to be the highest possible voltage supported for 1348 * Reset ocr mask to be the highest possible voltage supported for
@@ -2052,6 +2132,11 @@ void mmc_rescan(struct work_struct *work)
2052 if (host->rescan_disable) 2132 if (host->rescan_disable)
2053 return; 2133 return;
2054 2134
2135 /* If there is a non-removable card registered, only scan once */
2136 if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
2137 return;
2138 host->rescan_entered = 1;
2139
2055 mmc_bus_get(host); 2140 mmc_bus_get(host);
2056 2141
2057 /* 2142 /*
@@ -2327,9 +2412,14 @@ int mmc_suspend_host(struct mmc_host *host)
2327 2412
2328 mmc_bus_get(host); 2413 mmc_bus_get(host);
2329 if (host->bus_ops && !host->bus_dead) { 2414 if (host->bus_ops && !host->bus_dead) {
2330 2415 if (host->bus_ops->suspend) {
2331 if (host->bus_ops->suspend) 2416 if (mmc_card_doing_bkops(host->card)) {
2417 err = mmc_stop_bkops(host->card);
2418 if (err)
2419 goto out;
2420 }
2332 err = host->bus_ops->suspend(host); 2421 err = host->bus_ops->suspend(host);
2422 }
2333 2423
2334 if (err == -ENOSYS || !host->bus_ops->resume) { 2424 if (err == -ENOSYS || !host->bus_ops->resume) {
2335 /* 2425 /*
@@ -2411,15 +2501,24 @@ int mmc_pm_notify(struct notifier_block *notify_block,
2411 struct mmc_host *host = container_of( 2501 struct mmc_host *host = container_of(
2412 notify_block, struct mmc_host, pm_notify); 2502 notify_block, struct mmc_host, pm_notify);
2413 unsigned long flags; 2503 unsigned long flags;
2414 2504 int err = 0;
2415 2505
2416 switch (mode) { 2506 switch (mode) {
2417 case PM_HIBERNATION_PREPARE: 2507 case PM_HIBERNATION_PREPARE:
2418 case PM_SUSPEND_PREPARE: 2508 case PM_SUSPEND_PREPARE:
2509 if (host->card && mmc_card_mmc(host->card) &&
2510 mmc_card_doing_bkops(host->card)) {
2511 err = mmc_stop_bkops(host->card);
2512 if (err) {
2513 pr_err("%s: didn't stop bkops\n",
2514 mmc_hostname(host));
2515 return err;
2516 }
2517 mmc_card_clr_doing_bkops(host->card);
2518 }
2419 2519
2420 spin_lock_irqsave(&host->lock, flags); 2520 spin_lock_irqsave(&host->lock, flags);
2421 host->rescan_disable = 1; 2521 host->rescan_disable = 1;
2422 host->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
2423 spin_unlock_irqrestore(&host->lock, flags); 2522 spin_unlock_irqrestore(&host->lock, flags);
2424 cancel_delayed_work_sync(&host->detect); 2523 cancel_delayed_work_sync(&host->detect);
2425 2524
@@ -2443,7 +2542,6 @@ int mmc_pm_notify(struct notifier_block *notify_block,
2443 2542
2444 spin_lock_irqsave(&host->lock, flags); 2543 spin_lock_irqsave(&host->lock, flags);
2445 host->rescan_disable = 0; 2544 host->rescan_disable = 0;
2446 host->power_notify_type = MMC_HOST_PW_NOTIFY_LONG;
2447 spin_unlock_irqrestore(&host->lock, flags); 2545 spin_unlock_irqrestore(&host->lock, flags);
2448 mmc_detect_change(host, 0); 2546 mmc_detect_change(host, 0);
2449 2547
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 9ab5b17d488a..d96c643dde1c 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -281,7 +281,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
281 if (err) 281 if (err)
282 goto out_free; 282 goto out_free;
283 283
284 for (i = 511; i >= 0; i--) 284 for (i = 0; i < 512; i++)
285 n += sprintf(buf + n, "%02x", ext_csd[i]); 285 n += sprintf(buf + n, "%02x", ext_csd[i]);
286 n += sprintf(buf + n, "\n"); 286 n += sprintf(buf + n, "\n");
287 BUG_ON(n != EXT_CSD_STR_LEN); 287 BUG_ON(n != EXT_CSD_STR_LEN);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 396b25891bb9..7cc46382fd64 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -463,6 +463,17 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
463 } 463 }
464 464
465 if (card->ext_csd.rev >= 5) { 465 if (card->ext_csd.rev >= 5) {
466 /* check whether the eMMC card supports BKOPS */
467 if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
468 card->ext_csd.bkops = 1;
469 card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
470 card->ext_csd.raw_bkops_status =
471 ext_csd[EXT_CSD_BKOPS_STATUS];
472 if (!card->ext_csd.bkops_en)
473 pr_info("%s: BKOPS_EN bit is not set\n",
474 mmc_hostname(card->host));
475 }
476
466 /* check whether the eMMC card supports HPI */ 477 /* check whether the eMMC card supports HPI */
467 if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) { 478 if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
468 card->ext_csd.hpi = 1; 479 card->ext_csd.hpi = 1;
@@ -996,7 +1007,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
996 * so check for success and update the flag 1007 * so check for success and update the flag
997 */ 1008 */
998 if (!err) 1009 if (!err)
999 card->poweroff_notify_state = MMC_POWERED_ON; 1010 card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
1000 } 1011 }
1001 1012
1002 /* 1013 /*
@@ -1262,6 +1273,35 @@ err:
1262 return err; 1273 return err;
1263} 1274}
1264 1275
1276static int mmc_can_poweroff_notify(const struct mmc_card *card)
1277{
1278 return card &&
1279 mmc_card_mmc(card) &&
1280 (card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
1281}
1282
1283static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
1284{
1285 unsigned int timeout = card->ext_csd.generic_cmd6_time;
1286 int err;
1287
1288 /* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
1289 if (notify_type == EXT_CSD_POWER_OFF_LONG)
1290 timeout = card->ext_csd.power_off_longtime;
1291
1292 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1293 EXT_CSD_POWER_OFF_NOTIFICATION,
1294 notify_type, timeout);
1295 if (err)
1296 pr_err("%s: Power Off Notification timed out, %u\n",
1297 mmc_hostname(card->host), timeout);
1298
1299 /* Disable the power off notification after the switch operation. */
1300 card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;
1301
1302 return err;
1303}
1304
1265/* 1305/*
1266 * Host is being removed. Free up the current card. 1306 * Host is being removed. Free up the current card.
1267 */ 1307 */
@@ -1322,11 +1362,11 @@ static int mmc_suspend(struct mmc_host *host)
1322 BUG_ON(!host->card); 1362 BUG_ON(!host->card);
1323 1363
1324 mmc_claim_host(host); 1364 mmc_claim_host(host);
1325 if (mmc_card_can_sleep(host)) { 1365 if (mmc_can_poweroff_notify(host->card))
1366 err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_SHORT);
1367 else if (mmc_card_can_sleep(host))
1326 err = mmc_card_sleep(host); 1368 err = mmc_card_sleep(host);
1327 if (!err) 1369 else if (!mmc_host_is_spi(host))
1328 mmc_card_set_sleep(host->card);
1329 } else if (!mmc_host_is_spi(host))
1330 err = mmc_deselect_cards(host); 1370 err = mmc_deselect_cards(host);
1331 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); 1371 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
1332 mmc_release_host(host); 1372 mmc_release_host(host);
@@ -1348,11 +1388,7 @@ static int mmc_resume(struct mmc_host *host)
1348 BUG_ON(!host->card); 1388 BUG_ON(!host->card);
1349 1389
1350 mmc_claim_host(host); 1390 mmc_claim_host(host);
1351 if (mmc_card_is_sleep(host->card)) { 1391 err = mmc_init_card(host, host->ocr, host->card);
1352 err = mmc_card_awake(host);
1353 mmc_card_clr_sleep(host->card);
1354 } else
1355 err = mmc_init_card(host, host->ocr, host->card);
1356 mmc_release_host(host); 1392 mmc_release_host(host);
1357 1393
1358 return err; 1394 return err;
@@ -1363,7 +1399,6 @@ static int mmc_power_restore(struct mmc_host *host)
1363 int ret; 1399 int ret;
1364 1400
1365 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); 1401 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
1366 mmc_card_clr_sleep(host->card);
1367 mmc_claim_host(host); 1402 mmc_claim_host(host);
1368 ret = mmc_init_card(host, host->ocr, host->card); 1403 ret = mmc_init_card(host, host->ocr, host->card);
1369 mmc_release_host(host); 1404 mmc_release_host(host);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 0ed2cc5f35b6..a0e172042e65 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -230,6 +230,10 @@ mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
230 return 0; 230 return 0;
231} 231}
232 232
233/*
234 * NOTE: void *buf, caller for the buf is required to use DMA-capable
235 * buffer or on-stack buffer (with some overhead in callee).
236 */
233static int 237static int
234mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, 238mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
235 u32 opcode, void *buf, unsigned len) 239 u32 opcode, void *buf, unsigned len)
@@ -239,13 +243,19 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
239 struct mmc_data data = {0}; 243 struct mmc_data data = {0};
240 struct scatterlist sg; 244 struct scatterlist sg;
241 void *data_buf; 245 void *data_buf;
246 int is_on_stack;
242 247
243 /* dma onto stack is unsafe/nonportable, but callers to this 248 is_on_stack = object_is_on_stack(buf);
244 * routine normally provide temporary on-stack buffers ... 249 if (is_on_stack) {
245 */ 250 /*
246 data_buf = kmalloc(len, GFP_KERNEL); 251 * dma onto stack is unsafe/nonportable, but callers to this
247 if (data_buf == NULL) 252 * routine normally provide temporary on-stack buffers ...
248 return -ENOMEM; 253 */
254 data_buf = kmalloc(len, GFP_KERNEL);
255 if (!data_buf)
256 return -ENOMEM;
257 } else
258 data_buf = buf;
249 259
250 mrq.cmd = &cmd; 260 mrq.cmd = &cmd;
251 mrq.data = &data; 261 mrq.data = &data;
@@ -280,8 +290,10 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
280 290
281 mmc_wait_for_req(host, &mrq); 291 mmc_wait_for_req(host, &mrq);
282 292
283 memcpy(buf, data_buf, len); 293 if (is_on_stack) {
284 kfree(data_buf); 294 memcpy(buf, data_buf, len);
295 kfree(data_buf);
296 }
285 297
286 if (cmd.error) 298 if (cmd.error)
287 return cmd.error; 299 return cmd.error;
@@ -294,24 +306,32 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
294int mmc_send_csd(struct mmc_card *card, u32 *csd) 306int mmc_send_csd(struct mmc_card *card, u32 *csd)
295{ 307{
296 int ret, i; 308 int ret, i;
309 u32 *csd_tmp;
297 310
298 if (!mmc_host_is_spi(card->host)) 311 if (!mmc_host_is_spi(card->host))
299 return mmc_send_cxd_native(card->host, card->rca << 16, 312 return mmc_send_cxd_native(card->host, card->rca << 16,
300 csd, MMC_SEND_CSD); 313 csd, MMC_SEND_CSD);
301 314
302 ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16); 315 csd_tmp = kmalloc(16, GFP_KERNEL);
316 if (!csd_tmp)
317 return -ENOMEM;
318
319 ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
303 if (ret) 320 if (ret)
304 return ret; 321 goto err;
305 322
306 for (i = 0;i < 4;i++) 323 for (i = 0;i < 4;i++)
307 csd[i] = be32_to_cpu(csd[i]); 324 csd[i] = be32_to_cpu(csd_tmp[i]);
308 325
309 return 0; 326err:
327 kfree(csd_tmp);
328 return ret;
310} 329}
311 330
312int mmc_send_cid(struct mmc_host *host, u32 *cid) 331int mmc_send_cid(struct mmc_host *host, u32 *cid)
313{ 332{
314 int ret, i; 333 int ret, i;
334 u32 *cid_tmp;
315 335
316 if (!mmc_host_is_spi(host)) { 336 if (!mmc_host_is_spi(host)) {
317 if (!host->card) 337 if (!host->card)
@@ -320,14 +340,20 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid)
320 cid, MMC_SEND_CID); 340 cid, MMC_SEND_CID);
321 } 341 }
322 342
323 ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16); 343 cid_tmp = kmalloc(16, GFP_KERNEL);
344 if (!cid_tmp)
345 return -ENOMEM;
346
347 ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
324 if (ret) 348 if (ret)
325 return ret; 349 goto err;
326 350
327 for (i = 0;i < 4;i++) 351 for (i = 0;i < 4;i++)
328 cid[i] = be32_to_cpu(cid[i]); 352 cid[i] = be32_to_cpu(cid_tmp[i]);
329 353
330 return 0; 354err:
355 kfree(cid_tmp);
356 return ret;
331} 357}
332 358
333int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd) 359int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
@@ -367,18 +393,19 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
367} 393}
368 394
369/** 395/**
370 * mmc_switch - modify EXT_CSD register 396 * __mmc_switch - modify EXT_CSD register
371 * @card: the MMC card associated with the data transfer 397 * @card: the MMC card associated with the data transfer
372 * @set: cmd set values 398 * @set: cmd set values
373 * @index: EXT_CSD register index 399 * @index: EXT_CSD register index
374 * @value: value to program into EXT_CSD register 400 * @value: value to program into EXT_CSD register
375 * @timeout_ms: timeout (ms) for operation performed by register write, 401 * @timeout_ms: timeout (ms) for operation performed by register write,
376 * timeout of zero implies maximum possible timeout 402 * timeout of zero implies maximum possible timeout
403 * @use_busy_signal: use the busy signal as response type
377 * 404 *
378 * Modifies the EXT_CSD register for selected card. 405 * Modifies the EXT_CSD register for selected card.
379 */ 406 */
380int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, 407int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
381 unsigned int timeout_ms) 408 unsigned int timeout_ms, bool use_busy_signal)
382{ 409{
383 int err; 410 int err;
384 struct mmc_command cmd = {0}; 411 struct mmc_command cmd = {0};
@@ -392,13 +419,23 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
392 (index << 16) | 419 (index << 16) |
393 (value << 8) | 420 (value << 8) |
394 set; 421 set;
395 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 422 cmd.flags = MMC_CMD_AC;
423 if (use_busy_signal)
424 cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
425 else
426 cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
427
428
396 cmd.cmd_timeout_ms = timeout_ms; 429 cmd.cmd_timeout_ms = timeout_ms;
397 430
398 err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); 431 err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
399 if (err) 432 if (err)
400 return err; 433 return err;
401 434
435 /* No need to check card status in case of unblocking command */
436 if (!use_busy_signal)
437 return 0;
438
402 /* Must check status to be sure of no errors */ 439 /* Must check status to be sure of no errors */
403 do { 440 do {
404 err = mmc_send_status(card, &status); 441 err = mmc_send_status(card, &status);
@@ -423,6 +460,13 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
423 460
424 return 0; 461 return 0;
425} 462}
463EXPORT_SYMBOL_GPL(__mmc_switch);
464
465int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
466 unsigned int timeout_ms)
467{
468 return __mmc_switch(card, set, index, value, timeout_ms, true);
469}
426EXPORT_SYMBOL_GPL(mmc_switch); 470EXPORT_SYMBOL_GPL(mmc_switch);
427 471
428int mmc_send_status(struct mmc_card *card, u32 *status) 472int mmc_send_status(struct mmc_card *card, u32 *status)
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 236842ec955a..6bf68799fe97 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -193,14 +193,7 @@ static int sdio_bus_remove(struct device *dev)
193} 193}
194 194
195#ifdef CONFIG_PM 195#ifdef CONFIG_PM
196
197static int pm_no_operation(struct device *dev)
198{
199 return 0;
200}
201
202static const struct dev_pm_ops sdio_bus_pm_ops = { 196static const struct dev_pm_ops sdio_bus_pm_ops = {
203 SET_SYSTEM_SLEEP_PM_OPS(pm_no_operation, pm_no_operation)
204 SET_RUNTIME_PM_OPS( 197 SET_RUNTIME_PM_OPS(
205 pm_generic_runtime_suspend, 198 pm_generic_runtime_suspend,
206 pm_generic_runtime_resume, 199 pm_generic_runtime_resume,
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 058242916cef..08c6b3dfe080 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -100,7 +100,13 @@ int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio)
100 100
101 ctx = host->slot.handler_priv; 101 ctx = host->slot.handler_priv;
102 102
103 return gpio_request_one(gpio, GPIOF_DIR_IN, ctx->ro_label); 103 ret = gpio_request_one(gpio, GPIOF_DIR_IN, ctx->ro_label);
104 if (ret < 0)
105 return ret;
106
107 ctx->ro_gpio = gpio;
108
109 return 0;
104} 110}
105EXPORT_SYMBOL(mmc_gpio_request_ro); 111EXPORT_SYMBOL(mmc_gpio_request_ro);
106 112
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index aa131b32e3b2..9bf10e7bbfaf 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -540,6 +540,15 @@ config MMC_DW_PLTFM
540 540
541 If unsure, say Y. 541 If unsure, say Y.
542 542
543config MMC_DW_EXYNOS
544 tristate "Exynos specific extentions for Synopsys DW Memory Card Interface"
545 depends on MMC_DW
546 select MMC_DW_PLTFM
547 help
548 This selects support for Samsung Exynos SoC specific extensions to the
549 Synopsys DesignWare Memory Card Interface driver. Select this option
550 for platforms based on Exynos4 and Exynos5 SoC's.
551
543config MMC_DW_PCI 552config MMC_DW_PCI
544 tristate "Synopsys Designware MCI support on PCI bus" 553 tristate "Synopsys Designware MCI support on PCI bus"
545 depends on MMC_DW && PCI 554 depends on MMC_DW && PCI
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 8922b06be925..17ad0a7ba40b 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
39obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o 39obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
40obj-$(CONFIG_MMC_DW) += dw_mmc.o 40obj-$(CONFIG_MMC_DW) += dw_mmc.o
41obj-$(CONFIG_MMC_DW_PLTFM) += dw_mmc-pltfm.o 41obj-$(CONFIG_MMC_DW_PLTFM) += dw_mmc-pltfm.o
42obj-$(CONFIG_MMC_DW_EXYNOS) += dw_mmc-exynos.o
42obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o 43obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o
43obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o 44obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
44obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o 45obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h
index ab56f7db5315..c97001e15227 100644
--- a/drivers/mmc/host/atmel-mci-regs.h
+++ b/drivers/mmc/host/atmel-mci-regs.h
@@ -140,6 +140,13 @@
140#define atmci_writel(port,reg,value) \ 140#define atmci_writel(port,reg,value) \
141 __raw_writel((value), (port)->regs + reg) 141 __raw_writel((value), (port)->regs + reg)
142 142
143/* On AVR chips the Peripheral DMA Controller is not connected to MCI. */
144#ifdef CONFIG_AVR32
145# define ATMCI_PDC_CONNECTED 0
146#else
147# define ATMCI_PDC_CONNECTED 1
148#endif
149
143/* 150/*
144 * Fix sconfig's burst size according to atmel MCI. We need to convert them as: 151 * Fix sconfig's burst size according to atmel MCI. We need to convert them as:
145 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. 152 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 852d5fbda630..ddf096e3803f 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -19,6 +19,9 @@
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/ioport.h> 20#include <linux/ioport.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/of_gpio.h>
22#include <linux/platform_device.h> 25#include <linux/platform_device.h>
23#include <linux/scatterlist.h> 26#include <linux/scatterlist.h>
24#include <linux/seq_file.h> 27#include <linux/seq_file.h>
@@ -71,7 +74,7 @@ enum atmci_pdc_buf {
71}; 74};
72 75
73struct atmel_mci_caps { 76struct atmel_mci_caps {
74 bool has_dma; 77 bool has_dma_conf_reg;
75 bool has_pdc; 78 bool has_pdc;
76 bool has_cfg_reg; 79 bool has_cfg_reg;
77 bool has_cstor_reg; 80 bool has_cstor_reg;
@@ -418,7 +421,7 @@ static int atmci_regs_show(struct seq_file *s, void *v)
418 atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]); 421 atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]);
419 atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]); 422 atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]);
420 423
421 if (host->caps.has_dma) { 424 if (host->caps.has_dma_conf_reg) {
422 u32 val; 425 u32 val;
423 426
424 val = buf[ATMCI_DMA / 4]; 427 val = buf[ATMCI_DMA / 4];
@@ -500,6 +503,70 @@ err:
500 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n"); 503 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
501} 504}
502 505
506#if defined(CONFIG_OF)
507static const struct of_device_id atmci_dt_ids[] = {
508 { .compatible = "atmel,hsmci" },
509 { /* sentinel */ }
510};
511
512MODULE_DEVICE_TABLE(of, atmci_dt_ids);
513
514static struct mci_platform_data __devinit*
515atmci_of_init(struct platform_device *pdev)
516{
517 struct device_node *np = pdev->dev.of_node;
518 struct device_node *cnp;
519 struct mci_platform_data *pdata;
520 u32 slot_id;
521
522 if (!np) {
523 dev_err(&pdev->dev, "device node not found\n");
524 return ERR_PTR(-EINVAL);
525 }
526
527 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
528 if (!pdata) {
529 dev_err(&pdev->dev, "could not allocate memory for pdata\n");
530 return ERR_PTR(-ENOMEM);
531 }
532
533 for_each_child_of_node(np, cnp) {
534 if (of_property_read_u32(cnp, "reg", &slot_id)) {
535 dev_warn(&pdev->dev, "reg property is missing for %s\n",
536 cnp->full_name);
537 continue;
538 }
539
540 if (slot_id >= ATMCI_MAX_NR_SLOTS) {
541 dev_warn(&pdev->dev, "can't have more than %d slots\n",
542 ATMCI_MAX_NR_SLOTS);
543 break;
544 }
545
546 if (of_property_read_u32(cnp, "bus-width",
547 &pdata->slot[slot_id].bus_width))
548 pdata->slot[slot_id].bus_width = 1;
549
550 pdata->slot[slot_id].detect_pin =
551 of_get_named_gpio(cnp, "cd-gpios", 0);
552
553 pdata->slot[slot_id].detect_is_active_high =
554 of_property_read_bool(cnp, "cd-inverted");
555
556 pdata->slot[slot_id].wp_pin =
557 of_get_named_gpio(cnp, "wp-gpios", 0);
558 }
559
560 return pdata;
561}
562#else /* CONFIG_OF */
563static inline struct mci_platform_data*
564atmci_of_init(struct platform_device *dev)
565{
566 return ERR_PTR(-EINVAL);
567}
568#endif
569
503static inline unsigned int atmci_get_version(struct atmel_mci *host) 570static inline unsigned int atmci_get_version(struct atmel_mci *host)
504{ 571{
505 return atmci_readl(host, ATMCI_VERSION) & 0x00000fff; 572 return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
@@ -774,7 +841,7 @@ static void atmci_dma_complete(void *arg)
774 841
775 dev_vdbg(&host->pdev->dev, "DMA complete\n"); 842 dev_vdbg(&host->pdev->dev, "DMA complete\n");
776 843
777 if (host->caps.has_dma) 844 if (host->caps.has_dma_conf_reg)
778 /* Disable DMA hardware handshaking on MCI */ 845 /* Disable DMA hardware handshaking on MCI */
779 atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN); 846 atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);
780 847
@@ -961,7 +1028,9 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
961 maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst); 1028 maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst);
962 } 1029 }
963 1030
964 atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) | ATMCI_DMAEN); 1031 if (host->caps.has_dma_conf_reg)
1032 atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) |
1033 ATMCI_DMAEN);
965 1034
966 sglen = dma_map_sg(chan->device->dev, data->sg, 1035 sglen = dma_map_sg(chan->device->dev, data->sg,
967 data->sg_len, direction); 1036 data->sg_len, direction);
@@ -2046,6 +2115,13 @@ static int __init atmci_init_slot(struct atmel_mci *host,
2046 slot->sdc_reg = sdc_reg; 2115 slot->sdc_reg = sdc_reg;
2047 slot->sdio_irq = sdio_irq; 2116 slot->sdio_irq = sdio_irq;
2048 2117
2118 dev_dbg(&mmc->class_dev,
2119 "slot[%u]: bus_width=%u, detect_pin=%d, "
2120 "detect_is_active_high=%s, wp_pin=%d\n",
2121 id, slot_data->bus_width, slot_data->detect_pin,
2122 slot_data->detect_is_active_high ? "true" : "false",
2123 slot_data->wp_pin);
2124
2049 mmc->ops = &atmci_ops; 2125 mmc->ops = &atmci_ops;
2050 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512); 2126 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
2051 mmc->f_max = host->bus_hz / 2; 2127 mmc->f_max = host->bus_hz / 2;
@@ -2169,7 +2245,10 @@ static bool atmci_configure_dma(struct atmel_mci *host)
2169 2245
2170 pdata = host->pdev->dev.platform_data; 2246 pdata = host->pdev->dev.platform_data;
2171 2247
2172 if (pdata && find_slave_dev(pdata->dma_slave)) { 2248 if (!pdata)
2249 return false;
2250
2251 if (pdata->dma_slave && find_slave_dev(pdata->dma_slave)) {
2173 dma_cap_mask_t mask; 2252 dma_cap_mask_t mask;
2174 2253
2175 /* Try to grab a DMA channel */ 2254 /* Try to grab a DMA channel */
@@ -2210,8 +2289,8 @@ static void __init atmci_get_cap(struct atmel_mci *host)
2210 dev_info(&host->pdev->dev, 2289 dev_info(&host->pdev->dev,
2211 "version: 0x%x\n", version); 2290 "version: 0x%x\n", version);
2212 2291
2213 host->caps.has_dma = 0; 2292 host->caps.has_dma_conf_reg = 0;
2214 host->caps.has_pdc = 1; 2293 host->caps.has_pdc = ATMCI_PDC_CONNECTED;
2215 host->caps.has_cfg_reg = 0; 2294 host->caps.has_cfg_reg = 0;
2216 host->caps.has_cstor_reg = 0; 2295 host->caps.has_cstor_reg = 0;
2217 host->caps.has_highspeed = 0; 2296 host->caps.has_highspeed = 0;
@@ -2228,12 +2307,7 @@ static void __init atmci_get_cap(struct atmel_mci *host)
2228 host->caps.has_odd_clk_div = 1; 2307 host->caps.has_odd_clk_div = 1;
2229 case 0x400: 2308 case 0x400:
2230 case 0x300: 2309 case 0x300:
2231#ifdef CONFIG_AT_HDMAC 2310 host->caps.has_dma_conf_reg = 1;
2232 host->caps.has_dma = 1;
2233#else
2234 dev_info(&host->pdev->dev,
2235 "has dma capability but dma engine is not selected, then use pio\n");
2236#endif
2237 host->caps.has_pdc = 0; 2311 host->caps.has_pdc = 0;
2238 host->caps.has_cfg_reg = 1; 2312 host->caps.has_cfg_reg = 1;
2239 host->caps.has_cstor_reg = 1; 2313 host->caps.has_cstor_reg = 1;
@@ -2268,8 +2342,14 @@ static int __init atmci_probe(struct platform_device *pdev)
2268 if (!regs) 2342 if (!regs)
2269 return -ENXIO; 2343 return -ENXIO;
2270 pdata = pdev->dev.platform_data; 2344 pdata = pdev->dev.platform_data;
2271 if (!pdata) 2345 if (!pdata) {
2272 return -ENXIO; 2346 pdata = atmci_of_init(pdev);
2347 if (IS_ERR(pdata)) {
2348 dev_err(&pdev->dev, "platform data not available\n");
2349 return PTR_ERR(pdata);
2350 }
2351 }
2352
2273 irq = platform_get_irq(pdev, 0); 2353 irq = platform_get_irq(pdev, 0);
2274 if (irq < 0) 2354 if (irq < 0)
2275 return irq; 2355 return irq;
@@ -2308,7 +2388,7 @@ static int __init atmci_probe(struct platform_device *pdev)
2308 2388
2309 /* Get MCI capabilities and set operations according to it */ 2389 /* Get MCI capabilities and set operations according to it */
2310 atmci_get_cap(host); 2390 atmci_get_cap(host);
2311 if (host->caps.has_dma && atmci_configure_dma(host)) { 2391 if (atmci_configure_dma(host)) {
2312 host->prepare_data = &atmci_prepare_data_dma; 2392 host->prepare_data = &atmci_prepare_data_dma;
2313 host->submit_data = &atmci_submit_data_dma; 2393 host->submit_data = &atmci_submit_data_dma;
2314 host->stop_transfer = &atmci_stop_transfer_dma; 2394 host->stop_transfer = &atmci_stop_transfer_dma;
@@ -2487,6 +2567,7 @@ static struct platform_driver atmci_driver = {
2487 .driver = { 2567 .driver = {
2488 .name = "atmel_mci", 2568 .name = "atmel_mci",
2489 .pm = ATMCI_PM_OPS, 2569 .pm = ATMCI_PM_OPS,
2570 .of_match_table = of_match_ptr(atmci_dt_ids),
2490 }, 2571 },
2491}; 2572};
2492 2573
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index a17dd7363ceb..b9b463eca1ec 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -24,9 +24,7 @@
24#include <asm/portmux.h> 24#include <asm/portmux.h>
25#include <asm/bfin_sdh.h> 25#include <asm/bfin_sdh.h>
26 26
27#if defined(CONFIG_BF51x) 27#if defined(CONFIG_BF51x) || defined(__ADSPBF60x__)
28#define bfin_read_SDH_PWR_CTL bfin_read_RSI_PWR_CTL
29#define bfin_write_SDH_PWR_CTL bfin_write_RSI_PWR_CTL
30#define bfin_read_SDH_CLK_CTL bfin_read_RSI_CLK_CTL 28#define bfin_read_SDH_CLK_CTL bfin_read_RSI_CLK_CTL
31#define bfin_write_SDH_CLK_CTL bfin_write_RSI_CLK_CTL 29#define bfin_write_SDH_CLK_CTL bfin_write_RSI_CLK_CTL
32#define bfin_write_SDH_ARGUMENT bfin_write_RSI_ARGUMENT 30#define bfin_write_SDH_ARGUMENT bfin_write_RSI_ARGUMENT
@@ -45,8 +43,16 @@
45#define bfin_write_SDH_E_STATUS bfin_write_RSI_E_STATUS 43#define bfin_write_SDH_E_STATUS bfin_write_RSI_E_STATUS
46#define bfin_read_SDH_STATUS bfin_read_RSI_STATUS 44#define bfin_read_SDH_STATUS bfin_read_RSI_STATUS
47#define bfin_write_SDH_MASK0 bfin_write_RSI_MASK0 45#define bfin_write_SDH_MASK0 bfin_write_RSI_MASK0
46#define bfin_write_SDH_E_MASK bfin_write_RSI_E_MASK
48#define bfin_read_SDH_CFG bfin_read_RSI_CFG 47#define bfin_read_SDH_CFG bfin_read_RSI_CFG
49#define bfin_write_SDH_CFG bfin_write_RSI_CFG 48#define bfin_write_SDH_CFG bfin_write_RSI_CFG
49# if defined(__ADSPBF60x__)
50# define bfin_read_SDH_BLK_SIZE bfin_read_RSI_BLKSZ
51# define bfin_write_SDH_BLK_SIZE bfin_write_RSI_BLKSZ
52# else
53# define bfin_read_SDH_PWR_CTL bfin_read_RSI_PWR_CTL
54# define bfin_write_SDH_PWR_CTL bfin_write_RSI_PWR_CTL
55# endif
50#endif 56#endif
51 57
52struct sdh_host { 58struct sdh_host {
@@ -62,6 +68,7 @@ struct sdh_host {
62 dma_addr_t sg_dma; 68 dma_addr_t sg_dma;
63 int dma_len; 69 int dma_len;
64 70
71 unsigned long sclk;
65 unsigned int imask; 72 unsigned int imask;
66 unsigned int power_mode; 73 unsigned int power_mode;
67 unsigned int clk_div; 74 unsigned int clk_div;
@@ -127,11 +134,15 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
127 /* Only supports power-of-2 block size */ 134 /* Only supports power-of-2 block size */
128 if (data->blksz & (data->blksz - 1)) 135 if (data->blksz & (data->blksz - 1))
129 return -EINVAL; 136 return -EINVAL;
137#ifndef RSI_BLKSZ
130 data_ctl |= ((ffs(data->blksz) - 1) << 4); 138 data_ctl |= ((ffs(data->blksz) - 1) << 4);
139#else
140 bfin_write_SDH_BLK_SIZE(data->blksz);
141#endif
131 142
132 bfin_write_SDH_DATA_CTL(data_ctl); 143 bfin_write_SDH_DATA_CTL(data_ctl);
133 /* the time of a host clock period in ns */ 144 /* the time of a host clock period in ns */
134 cycle_ns = 1000000000 / (get_sclk() / (2 * (host->clk_div + 1))); 145 cycle_ns = 1000000000 / (host->sclk / (2 * (host->clk_div + 1)));
135 timeout = data->timeout_ns / cycle_ns; 146 timeout = data->timeout_ns / cycle_ns;
136 timeout += data->timeout_clks; 147 timeout += data->timeout_clks;
137 bfin_write_SDH_DATA_TIMER(timeout); 148 bfin_write_SDH_DATA_TIMER(timeout);
@@ -145,8 +156,13 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
145 156
146 sdh_enable_stat_irq(host, (DAT_CRC_FAIL | DAT_TIME_OUT | DAT_END)); 157 sdh_enable_stat_irq(host, (DAT_CRC_FAIL | DAT_TIME_OUT | DAT_END));
147 host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); 158 host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir);
148#if defined(CONFIG_BF54x) 159#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
149 dma_cfg |= DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_32 | DMAEN; 160 dma_cfg |= DMAFLOW_ARRAY | RESTART | WDSIZE_32 | DMAEN;
161# ifdef RSI_BLKSZ
162 dma_cfg |= PSIZE_32 | NDSIZE_3;
163# else
164 dma_cfg |= NDSIZE_5;
165# endif
150 { 166 {
151 struct scatterlist *sg; 167 struct scatterlist *sg;
152 int i; 168 int i;
@@ -156,7 +172,7 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
156 host->sg_cpu[i].x_count = sg_dma_len(sg) / 4; 172 host->sg_cpu[i].x_count = sg_dma_len(sg) / 4;
157 host->sg_cpu[i].x_modify = 4; 173 host->sg_cpu[i].x_modify = 4;
158 dev_dbg(mmc_dev(host->mmc), "%d: start_addr:0x%lx, " 174 dev_dbg(mmc_dev(host->mmc), "%d: start_addr:0x%lx, "
159 "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n", 175 "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
160 i, host->sg_cpu[i].start_addr, 176 i, host->sg_cpu[i].start_addr,
161 host->sg_cpu[i].cfg, host->sg_cpu[i].x_count, 177 host->sg_cpu[i].cfg, host->sg_cpu[i].x_count,
162 host->sg_cpu[i].x_modify); 178 host->sg_cpu[i].x_modify);
@@ -172,6 +188,7 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
172 set_dma_curr_desc_addr(host->dma_ch, (unsigned long *)host->sg_dma); 188 set_dma_curr_desc_addr(host->dma_ch, (unsigned long *)host->sg_dma);
173 set_dma_x_count(host->dma_ch, 0); 189 set_dma_x_count(host->dma_ch, 0);
174 set_dma_x_modify(host->dma_ch, 0); 190 set_dma_x_modify(host->dma_ch, 0);
191 SSYNC();
175 set_dma_config(host->dma_ch, dma_cfg); 192 set_dma_config(host->dma_ch, dma_cfg);
176#elif defined(CONFIG_BF51x) 193#elif defined(CONFIG_BF51x)
177 /* RSI DMA doesn't work in array mode */ 194 /* RSI DMA doesn't work in array mode */
@@ -179,6 +196,7 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
179 set_dma_start_addr(host->dma_ch, sg_dma_address(&data->sg[0])); 196 set_dma_start_addr(host->dma_ch, sg_dma_address(&data->sg[0]));
180 set_dma_x_count(host->dma_ch, length / 4); 197 set_dma_x_count(host->dma_ch, length / 4);
181 set_dma_x_modify(host->dma_ch, 4); 198 set_dma_x_modify(host->dma_ch, 4);
199 SSYNC();
182 set_dma_config(host->dma_ch, dma_cfg); 200 set_dma_config(host->dma_ch, dma_cfg);
183#endif 201#endif
184 bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E); 202 bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E);
@@ -296,7 +314,6 @@ static int sdh_data_done(struct sdh_host *host, unsigned int stat)
296 else 314 else
297 data->bytes_xfered = 0; 315 data->bytes_xfered = 0;
298 316
299 sdh_disable_stat_irq(host, DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN);
300 bfin_write_SDH_STATUS_CLR(DAT_END_STAT | DAT_TIMEOUT_STAT | \ 317 bfin_write_SDH_STATUS_CLR(DAT_END_STAT | DAT_TIMEOUT_STAT | \
301 DAT_CRC_FAIL_STAT | DAT_BLK_END_STAT | RX_OVERRUN | TX_UNDERRUN); 318 DAT_CRC_FAIL_STAT | DAT_BLK_END_STAT | RX_OVERRUN | TX_UNDERRUN);
302 bfin_write_SDH_DATA_CTL(0); 319 bfin_write_SDH_DATA_CTL(0);
@@ -321,74 +338,115 @@ static void sdh_request(struct mmc_host *mmc, struct mmc_request *mrq)
321 dev_dbg(mmc_dev(host->mmc), "%s enter, mrp:%p, cmd:%p\n", __func__, mrq, mrq->cmd); 338 dev_dbg(mmc_dev(host->mmc), "%s enter, mrp:%p, cmd:%p\n", __func__, mrq, mrq->cmd);
322 WARN_ON(host->mrq != NULL); 339 WARN_ON(host->mrq != NULL);
323 340
341 spin_lock(&host->lock);
324 host->mrq = mrq; 342 host->mrq = mrq;
325 host->data = mrq->data; 343 host->data = mrq->data;
326 344
327 if (mrq->data && mrq->data->flags & MMC_DATA_READ) { 345 if (mrq->data && mrq->data->flags & MMC_DATA_READ) {
328 ret = sdh_setup_data(host, mrq->data); 346 ret = sdh_setup_data(host, mrq->data);
329 if (ret) 347 if (ret)
330 return; 348 goto data_err;
331 } 349 }
332 350
333 sdh_start_cmd(host, mrq->cmd); 351 sdh_start_cmd(host, mrq->cmd);
352data_err:
353 spin_unlock(&host->lock);
334} 354}
335 355
336static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 356static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
337{ 357{
338 struct sdh_host *host; 358 struct sdh_host *host;
339 unsigned long flags;
340 u16 clk_ctl = 0; 359 u16 clk_ctl = 0;
360#ifndef RSI_BLKSZ
341 u16 pwr_ctl = 0; 361 u16 pwr_ctl = 0;
362#endif
342 u16 cfg; 363 u16 cfg;
343 host = mmc_priv(mmc); 364 host = mmc_priv(mmc);
344 365
345 spin_lock_irqsave(&host->lock, flags); 366 spin_lock(&host->lock);
346 if (ios->clock) {
347 unsigned long sys_clk, ios_clk;
348 unsigned char clk_div;
349 ios_clk = 2 * ios->clock;
350 sys_clk = get_sclk();
351 clk_div = sys_clk / ios_clk;
352 if (sys_clk % ios_clk == 0)
353 clk_div -= 1;
354 clk_div = min_t(unsigned char, clk_div, 0xFF);
355 clk_ctl |= clk_div;
356 clk_ctl |= CLK_E;
357 host->clk_div = clk_div;
358 } else
359 sdh_stop_clock(host);
360
361 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
362#ifdef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
363 pwr_ctl |= ROD_CTL;
364#else
365 pwr_ctl |= SD_CMD_OD | ROD_CTL;
366#endif
367 367
368 if (ios->bus_width == MMC_BUS_WIDTH_4) { 368 cfg = bfin_read_SDH_CFG();
369 cfg = bfin_read_SDH_CFG(); 369 cfg |= MWE;
370 switch (ios->bus_width) {
371 case MMC_BUS_WIDTH_4:
372#ifndef RSI_BLKSZ
370 cfg &= ~PD_SDDAT3; 373 cfg &= ~PD_SDDAT3;
374#endif
371 cfg |= PUP_SDDAT3; 375 cfg |= PUP_SDDAT3;
372 /* Enable 4 bit SDIO */ 376 /* Enable 4 bit SDIO */
373 cfg |= (SD4E | MWE); 377 cfg |= SD4E;
374 bfin_write_SDH_CFG(cfg); 378 clk_ctl |= WIDE_BUS_4;
375 clk_ctl |= WIDE_BUS; 379 break;
376 } else { 380 case MMC_BUS_WIDTH_8:
377 cfg = bfin_read_SDH_CFG(); 381#ifndef RSI_BLKSZ
378 cfg |= MWE; 382 cfg &= ~PD_SDDAT3;
379 bfin_write_SDH_CFG(cfg); 383#endif
384 cfg |= PUP_SDDAT3;
385 /* Disable 4 bit SDIO */
386 cfg &= ~SD4E;
387 clk_ctl |= BYTE_BUS_8;
388 break;
389 default:
390 cfg &= ~PUP_SDDAT3;
391 /* Disable 4 bit SDIO */
392 cfg &= ~SD4E;
380 } 393 }
381 394
382 bfin_write_SDH_CLK_CTL(clk_ctl);
383
384 host->power_mode = ios->power_mode; 395 host->power_mode = ios->power_mode;
385 if (ios->power_mode == MMC_POWER_ON) 396#ifndef RSI_BLKSZ
397 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
398 pwr_ctl |= ROD_CTL;
399# ifndef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
400 pwr_ctl |= SD_CMD_OD;
401# endif
402 }
403
404 if (ios->power_mode != MMC_POWER_OFF)
386 pwr_ctl |= PWR_ON; 405 pwr_ctl |= PWR_ON;
406 else
407 pwr_ctl &= ~PWR_ON;
387 408
388 bfin_write_SDH_PWR_CTL(pwr_ctl); 409 bfin_write_SDH_PWR_CTL(pwr_ctl);
410#else
411# ifndef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
412 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
413 cfg |= SD_CMD_OD;
414 else
415 cfg &= ~SD_CMD_OD;
416# endif
417
418
419 if (ios->power_mode != MMC_POWER_OFF)
420 cfg |= PWR_ON;
421 else
422 cfg &= ~PWR_ON;
423
424 bfin_write_SDH_CFG(cfg);
425#endif
389 SSYNC(); 426 SSYNC();
390 427
391 spin_unlock_irqrestore(&host->lock, flags); 428 if (ios->power_mode == MMC_POWER_ON && ios->clock) {
429 unsigned char clk_div;
430 clk_div = (get_sclk() / ios->clock - 1) / 2;
431 clk_div = min_t(unsigned char, clk_div, 0xFF);
432 clk_ctl |= clk_div;
433 clk_ctl |= CLK_E;
434 host->clk_div = clk_div;
435 bfin_write_SDH_CLK_CTL(clk_ctl);
436
437 } else
438 sdh_stop_clock(host);
439
440 /* set up sdh interrupt mask*/
441 if (ios->power_mode == MMC_POWER_ON)
442 bfin_write_SDH_MASK0(DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL |
443 RX_OVERRUN | TX_UNDERRUN | CMD_SENT | CMD_RESP_END |
444 CMD_TIME_OUT | CMD_CRC_FAIL);
445 else
446 bfin_write_SDH_MASK0(0);
447 SSYNC();
448
449 spin_unlock(&host->lock);
392 450
393 dev_dbg(mmc_dev(host->mmc), "SDH: clk_div = 0x%x actual clock:%ld expected clock:%d\n", 451 dev_dbg(mmc_dev(host->mmc), "SDH: clk_div = 0x%x actual clock:%ld expected clock:%d\n",
394 host->clk_div, 452 host->clk_div,
@@ -405,7 +463,7 @@ static irqreturn_t sdh_dma_irq(int irq, void *devid)
405{ 463{
406 struct sdh_host *host = devid; 464 struct sdh_host *host = devid;
407 465
408 dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04x\n", __func__, 466 dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04lx\n", __func__,
409 get_dma_curr_irqstat(host->dma_ch)); 467 get_dma_curr_irqstat(host->dma_ch));
410 clear_dma_irqstat(host->dma_ch); 468 clear_dma_irqstat(host->dma_ch);
411 SSYNC(); 469 SSYNC();
@@ -420,6 +478,9 @@ static irqreturn_t sdh_stat_irq(int irq, void *devid)
420 int handled = 0; 478 int handled = 0;
421 479
422 dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__); 480 dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__);
481
482 spin_lock(&host->lock);
483
423 status = bfin_read_SDH_E_STATUS(); 484 status = bfin_read_SDH_E_STATUS();
424 if (status & SD_CARD_DET) { 485 if (status & SD_CARD_DET) {
425 mmc_detect_change(host->mmc, 0); 486 mmc_detect_change(host->mmc, 0);
@@ -437,11 +498,30 @@ static irqreturn_t sdh_stat_irq(int irq, void *devid)
437 if (status & (DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN)) 498 if (status & (DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN))
438 handled |= sdh_data_done(host, status); 499 handled |= sdh_data_done(host, status);
439 500
501 spin_unlock(&host->lock);
502
440 dev_dbg(mmc_dev(host->mmc), "%s exit\n\n", __func__); 503 dev_dbg(mmc_dev(host->mmc), "%s exit\n\n", __func__);
441 504
442 return IRQ_RETVAL(handled); 505 return IRQ_RETVAL(handled);
443} 506}
444 507
508static void sdh_reset(void)
509{
510#if defined(CONFIG_BF54x)
511 /* Secure Digital Host shares DMA with Nand controller */
512 bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1);
513#endif
514
515 bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN);
516 SSYNC();
517
518 /* Disable card inserting detection pin. set MMC_CAP_NEEDS_POLL, and
519 * mmc stack will do the detection.
520 */
521 bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3));
522 SSYNC();
523}
524
445static int __devinit sdh_probe(struct platform_device *pdev) 525static int __devinit sdh_probe(struct platform_device *pdev)
446{ 526{
447 struct mmc_host *mmc; 527 struct mmc_host *mmc;
@@ -462,8 +542,16 @@ static int __devinit sdh_probe(struct platform_device *pdev)
462 } 542 }
463 543
464 mmc->ops = &sdh_ops; 544 mmc->ops = &sdh_ops;
465 mmc->max_segs = 32; 545#if defined(CONFIG_BF51x)
546 mmc->max_segs = 1;
547#else
548 mmc->max_segs = PAGE_SIZE / sizeof(struct dma_desc_array);
549#endif
550#ifdef RSI_BLKSZ
551 mmc->max_seg_size = -1;
552#else
466 mmc->max_seg_size = 1 << 16; 553 mmc->max_seg_size = 1 << 16;
554#endif
467 mmc->max_blk_size = 1 << 11; 555 mmc->max_blk_size = 1 << 11;
468 mmc->max_blk_count = 1 << 11; 556 mmc->max_blk_count = 1 << 11;
469 mmc->max_req_size = PAGE_SIZE; 557 mmc->max_req_size = PAGE_SIZE;
@@ -473,6 +561,7 @@ static int __devinit sdh_probe(struct platform_device *pdev)
473 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NEEDS_POLL; 561 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NEEDS_POLL;
474 host = mmc_priv(mmc); 562 host = mmc_priv(mmc);
475 host->mmc = mmc; 563 host->mmc = mmc;
564 host->sclk = get_sclk();
476 565
477 spin_lock_init(&host->lock); 566 spin_lock_init(&host->lock);
478 host->irq = drv_data->irq_int0; 567 host->irq = drv_data->irq_int0;
@@ -497,7 +586,6 @@ static int __devinit sdh_probe(struct platform_device *pdev)
497 } 586 }
498 587
499 platform_set_drvdata(pdev, mmc); 588 platform_set_drvdata(pdev, mmc);
500 mmc_add_host(mmc);
501 589
502 ret = request_irq(host->irq, sdh_stat_irq, 0, "SDH Status IRQ", host); 590 ret = request_irq(host->irq, sdh_stat_irq, 0, "SDH Status IRQ", host);
503 if (ret) { 591 if (ret) {
@@ -510,20 +598,10 @@ static int __devinit sdh_probe(struct platform_device *pdev)
510 dev_err(&pdev->dev, "unable to request peripheral pins\n"); 598 dev_err(&pdev->dev, "unable to request peripheral pins\n");
511 goto out4; 599 goto out4;
512 } 600 }
513#if defined(CONFIG_BF54x)
514 /* Secure Digital Host shares DMA with Nand controller */
515 bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1);
516#endif
517
518 bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN);
519 SSYNC();
520 601
521 /* Disable card inserting detection pin. set MMC_CAP_NEES_POLL, and 602 sdh_reset();
522 * mmc stack will do the detection.
523 */
524 bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3));
525 SSYNC();
526 603
604 mmc_add_host(mmc);
527 return 0; 605 return 0;
528 606
529out4: 607out4:
@@ -571,7 +649,6 @@ static int sdh_suspend(struct platform_device *dev, pm_message_t state)
571 if (mmc) 649 if (mmc)
572 ret = mmc_suspend_host(mmc); 650 ret = mmc_suspend_host(mmc);
573 651
574 bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON);
575 peripheral_free_list(drv_data->pin_req); 652 peripheral_free_list(drv_data->pin_req);
576 653
577 return ret; 654 return ret;
@@ -589,16 +666,7 @@ static int sdh_resume(struct platform_device *dev)
589 return ret; 666 return ret;
590 } 667 }
591 668
592 bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() | PWR_ON); 669 sdh_reset();
593#if defined(CONFIG_BF54x)
594 /* Secure Digital Host shares DMA with Nand controller */
595 bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1);
596#endif
597 bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN);
598 SSYNC();
599
600 bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3));
601 SSYNC();
602 670
603 if (mmc) 671 if (mmc)
604 ret = mmc_resume_host(mmc); 672 ret = mmc_resume_host(mmc);
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 3dfd3473269d..20636772c09b 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -30,11 +30,12 @@
30#include <linux/io.h> 30#include <linux/io.h>
31#include <linux/irq.h> 31#include <linux/irq.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/dmaengine.h>
33#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
35#include <linux/edma.h>
34#include <linux/mmc/mmc.h> 36#include <linux/mmc/mmc.h>
35 37
36#include <linux/platform_data/mmc-davinci.h> 38#include <linux/platform_data/mmc-davinci.h>
37#include <mach/edma.h>
38 39
39/* 40/*
40 * Register Definitions 41 * Register Definitions
@@ -200,21 +201,13 @@ struct mmc_davinci_host {
200 u32 bytes_left; 201 u32 bytes_left;
201 202
202 u32 rxdma, txdma; 203 u32 rxdma, txdma;
204 struct dma_chan *dma_tx;
205 struct dma_chan *dma_rx;
203 bool use_dma; 206 bool use_dma;
204 bool do_dma; 207 bool do_dma;
205 bool sdio_int; 208 bool sdio_int;
206 bool active_request; 209 bool active_request;
207 210
208 /* Scatterlist DMA uses one or more parameter RAM entries:
209 * the main one (associated with rxdma or txdma) plus zero or
210 * more links. The entries for a given transfer differ only
211 * by memory buffer (address, length) and link field.
212 */
213 struct edmacc_param tx_template;
214 struct edmacc_param rx_template;
215 unsigned n_link;
216 u32 links[MAX_NR_SG - 1];
217
218 /* For PIO we walk scatterlists one segment at a time. */ 211 /* For PIO we walk scatterlists one segment at a time. */
219 unsigned int sg_len; 212 unsigned int sg_len;
220 struct scatterlist *sg; 213 struct scatterlist *sg;
@@ -410,153 +403,74 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host,
410 403
411static void davinci_abort_dma(struct mmc_davinci_host *host) 404static void davinci_abort_dma(struct mmc_davinci_host *host)
412{ 405{
413 int sync_dev; 406 struct dma_chan *sync_dev;
414 407
415 if (host->data_dir == DAVINCI_MMC_DATADIR_READ) 408 if (host->data_dir == DAVINCI_MMC_DATADIR_READ)
416 sync_dev = host->rxdma; 409 sync_dev = host->dma_rx;
417 else 410 else
418 sync_dev = host->txdma; 411 sync_dev = host->dma_tx;
419
420 edma_stop(sync_dev);
421 edma_clean_channel(sync_dev);
422}
423
424static void
425mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data);
426
427static void mmc_davinci_dma_cb(unsigned channel, u16 ch_status, void *data)
428{
429 if (DMA_COMPLETE != ch_status) {
430 struct mmc_davinci_host *host = data;
431
432 /* Currently means: DMA Event Missed, or "null" transfer
433 * request was seen. In the future, TC errors (like bad
434 * addresses) might be presented too.
435 */
436 dev_warn(mmc_dev(host->mmc), "DMA %s error\n",
437 (host->data->flags & MMC_DATA_WRITE)
438 ? "write" : "read");
439 host->data->error = -EIO;
440 mmc_davinci_xfer_done(host, host->data);
441 }
442}
443
444/* Set up tx or rx template, to be modified and updated later */
445static void __init mmc_davinci_dma_setup(struct mmc_davinci_host *host,
446 bool tx, struct edmacc_param *template)
447{
448 unsigned sync_dev;
449 const u16 acnt = 4;
450 const u16 bcnt = rw_threshold >> 2;
451 const u16 ccnt = 0;
452 u32 src_port = 0;
453 u32 dst_port = 0;
454 s16 src_bidx, dst_bidx;
455 s16 src_cidx, dst_cidx;
456
457 /*
458 * A-B Sync transfer: each DMA request is for one "frame" of
459 * rw_threshold bytes, broken into "acnt"-size chunks repeated
460 * "bcnt" times. Each segment needs "ccnt" such frames; since
461 * we tell the block layer our mmc->max_seg_size limit, we can
462 * trust (later) that it's within bounds.
463 *
464 * The FIFOs are read/written in 4-byte chunks (acnt == 4) and
465 * EDMA will optimize memory operations to use larger bursts.
466 */
467 if (tx) {
468 sync_dev = host->txdma;
469
470 /* src_prt, ccnt, and link to be set up later */
471 src_bidx = acnt;
472 src_cidx = acnt * bcnt;
473
474 dst_port = host->mem_res->start + DAVINCI_MMCDXR;
475 dst_bidx = 0;
476 dst_cidx = 0;
477 } else {
478 sync_dev = host->rxdma;
479
480 src_port = host->mem_res->start + DAVINCI_MMCDRR;
481 src_bidx = 0;
482 src_cidx = 0;
483
484 /* dst_prt, ccnt, and link to be set up later */
485 dst_bidx = acnt;
486 dst_cidx = acnt * bcnt;
487 }
488
489 /*
490 * We can't use FIFO mode for the FIFOs because MMC FIFO addresses
491 * are not 256-bit (32-byte) aligned. So we use INCR, and the W8BIT
492 * parameter is ignored.
493 */
494 edma_set_src(sync_dev, src_port, INCR, W8BIT);
495 edma_set_dest(sync_dev, dst_port, INCR, W8BIT);
496 412
497 edma_set_src_index(sync_dev, src_bidx, src_cidx); 413 dmaengine_terminate_all(sync_dev);
498 edma_set_dest_index(sync_dev, dst_bidx, dst_cidx);
499
500 edma_set_transfer_params(sync_dev, acnt, bcnt, ccnt, 8, ABSYNC);
501
502 edma_read_slot(sync_dev, template);
503
504 /* don't bother with irqs or chaining */
505 template->opt |= EDMA_CHAN_SLOT(sync_dev) << 12;
506} 414}
507 415
508static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host, 416static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
509 struct mmc_data *data) 417 struct mmc_data *data)
510{ 418{
511 struct edmacc_param *template; 419 struct dma_chan *chan;
512 int channel, slot; 420 struct dma_async_tx_descriptor *desc;
513 unsigned link; 421 int ret = 0;
514 struct scatterlist *sg;
515 unsigned sg_len;
516 unsigned bytes_left = host->bytes_left;
517 const unsigned shift = ffs(rw_threshold) - 1;
518 422
519 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 423 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
520 template = &host->tx_template; 424 struct dma_slave_config dma_tx_conf = {
521 channel = host->txdma; 425 .direction = DMA_MEM_TO_DEV,
426 .dst_addr = host->mem_res->start + DAVINCI_MMCDXR,
427 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
428 .dst_maxburst =
429 rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
430 };
431 chan = host->dma_tx;
432 dmaengine_slave_config(host->dma_tx, &dma_tx_conf);
433
434 desc = dmaengine_prep_slave_sg(host->dma_tx,
435 data->sg,
436 host->sg_len,
437 DMA_MEM_TO_DEV,
438 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
439 if (!desc) {
440 dev_dbg(mmc_dev(host->mmc),
441 "failed to allocate DMA TX descriptor");
442 ret = -1;
443 goto out;
444 }
522 } else { 445 } else {
523 template = &host->rx_template; 446 struct dma_slave_config dma_rx_conf = {
524 channel = host->rxdma; 447 .direction = DMA_DEV_TO_MEM,
525 } 448 .src_addr = host->mem_res->start + DAVINCI_MMCDRR,
526 449 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
527 /* We know sg_len and ccnt will never be out of range because 450 .src_maxburst =
528 * we told the mmc layer which in turn tells the block layer 451 rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
529 * to ensure that it only hands us one scatterlist segment 452 };
530 * per EDMA PARAM entry. Update the PARAM 453 chan = host->dma_rx;
531 * entries needed for each segment of this scatterlist. 454 dmaengine_slave_config(host->dma_rx, &dma_rx_conf);
532 */ 455
533 for (slot = channel, link = 0, sg = data->sg, sg_len = host->sg_len; 456 desc = dmaengine_prep_slave_sg(host->dma_rx,
534 sg_len-- != 0 && bytes_left; 457 data->sg,
535 sg = sg_next(sg), slot = host->links[link++]) { 458 host->sg_len,
536 u32 buf = sg_dma_address(sg); 459 DMA_DEV_TO_MEM,
537 unsigned count = sg_dma_len(sg); 460 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
538 461 if (!desc) {
539 template->link_bcntrld = sg_len 462 dev_dbg(mmc_dev(host->mmc),
540 ? (EDMA_CHAN_SLOT(host->links[link]) << 5) 463 "failed to allocate DMA RX descriptor");
541 : 0xffff; 464 ret = -1;
542 465 goto out;
543 if (count > bytes_left) 466 }
544 count = bytes_left;
545 bytes_left -= count;
546
547 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
548 template->src = buf;
549 else
550 template->dst = buf;
551 template->ccnt = count >> shift;
552
553 edma_write_slot(slot, template);
554 } 467 }
555 468
556 if (host->version == MMC_CTLR_VERSION_2) 469 dmaengine_submit(desc);
557 edma_clear_event(channel); 470 dma_async_issue_pending(chan);
558 471
559 edma_start(channel); 472out:
473 return ret;
560} 474}
561 475
562static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, 476static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
@@ -564,6 +478,7 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
564{ 478{
565 int i; 479 int i;
566 int mask = rw_threshold - 1; 480 int mask = rw_threshold - 1;
481 int ret = 0;
567 482
568 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 483 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
569 ((data->flags & MMC_DATA_WRITE) 484 ((data->flags & MMC_DATA_WRITE)
@@ -583,70 +498,48 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
583 } 498 }
584 499
585 host->do_dma = 1; 500 host->do_dma = 1;
586 mmc_davinci_send_dma_request(host, data); 501 ret = mmc_davinci_send_dma_request(host, data);
587 502
588 return 0; 503 return ret;
589} 504}
590 505
591static void __init_or_module 506static void __init_or_module
592davinci_release_dma_channels(struct mmc_davinci_host *host) 507davinci_release_dma_channels(struct mmc_davinci_host *host)
593{ 508{
594 unsigned i;
595
596 if (!host->use_dma) 509 if (!host->use_dma)
597 return; 510 return;
598 511
599 for (i = 0; i < host->n_link; i++) 512 dma_release_channel(host->dma_tx);
600 edma_free_slot(host->links[i]); 513 dma_release_channel(host->dma_rx);
601
602 edma_free_channel(host->txdma);
603 edma_free_channel(host->rxdma);
604} 514}
605 515
606static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) 516static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
607{ 517{
608 u32 link_size; 518 int r;
609 int r, i; 519 dma_cap_mask_t mask;
610 520
611 /* Acquire master DMA write channel */ 521 dma_cap_zero(mask);
612 r = edma_alloc_channel(host->txdma, mmc_davinci_dma_cb, host, 522 dma_cap_set(DMA_SLAVE, mask);
613 EVENTQ_DEFAULT); 523
614 if (r < 0) { 524 host->dma_tx =
615 dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n", 525 dma_request_channel(mask, edma_filter_fn, &host->txdma);
616 "tx", r); 526 if (!host->dma_tx) {
617 return r; 527 dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n");
618 } 528 return -ENODEV;
619 mmc_davinci_dma_setup(host, true, &host->tx_template);
620
621 /* Acquire master DMA read channel */
622 r = edma_alloc_channel(host->rxdma, mmc_davinci_dma_cb, host,
623 EVENTQ_DEFAULT);
624 if (r < 0) {
625 dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n",
626 "rx", r);
627 goto free_master_write;
628 } 529 }
629 mmc_davinci_dma_setup(host, false, &host->rx_template);
630 530
631 /* Allocate parameter RAM slots, which will later be bound to a 531 host->dma_rx =
632 * channel as needed to handle a scatterlist. 532 dma_request_channel(mask, edma_filter_fn, &host->rxdma);
633 */ 533 if (!host->dma_rx) {
634 link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links)); 534 dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n");
635 for (i = 0; i < link_size; i++) { 535 r = -ENODEV;
636 r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY); 536 goto free_master_write;
637 if (r < 0) {
638 dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n",
639 r);
640 break;
641 }
642 host->links[i] = r;
643 } 537 }
644 host->n_link = i;
645 538
646 return 0; 539 return 0;
647 540
648free_master_write: 541free_master_write:
649 edma_free_channel(host->txdma); 542 dma_release_channel(host->dma_tx);
650 543
651 return r; 544 return r;
652} 545}
@@ -1359,7 +1252,7 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
1359 * Each hw_seg uses one EDMA parameter RAM slot, always one 1252 * Each hw_seg uses one EDMA parameter RAM slot, always one
1360 * channel and then usually some linked slots. 1253 * channel and then usually some linked slots.
1361 */ 1254 */
1362 mmc->max_segs = 1 + host->n_link; 1255 mmc->max_segs = MAX_NR_SG;
1363 1256
1364 /* EDMA limit per hw segment (one or two MBytes) */ 1257 /* EDMA limit per hw segment (one or two MBytes) */
1365 mmc->max_seg_size = MAX_CCNT * rw_threshold; 1258 mmc->max_seg_size = MAX_CCNT * rw_threshold;
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
new file mode 100644
index 000000000000..660bbc528862
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -0,0 +1,253 @@
1/*
2 * Exynos Specific Extensions for Synopsys DW Multimedia Card Interface driver
3 *
4 * Copyright (C) 2012, Samsung Electronics Co., Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/platform_device.h>
14#include <linux/clk.h>
15#include <linux/mmc/host.h>
16#include <linux/mmc/dw_mmc.h>
17#include <linux/of.h>
18#include <linux/of_gpio.h>
19
20#include "dw_mmc.h"
21#include "dw_mmc-pltfm.h"
22
23#define NUM_PINS(x) (x + 2)
24
25#define SDMMC_CLKSEL 0x09C
26#define SDMMC_CLKSEL_CCLK_SAMPLE(x) (((x) & 7) << 0)
27#define SDMMC_CLKSEL_CCLK_DRIVE(x) (((x) & 7) << 16)
28#define SDMMC_CLKSEL_CCLK_DIVIDER(x) (((x) & 7) << 24)
29#define SDMMC_CLKSEL_GET_DRV_WD3(x) (((x) >> 16) & 0x7)
30#define SDMMC_CLKSEL_TIMING(x, y, z) (SDMMC_CLKSEL_CCLK_SAMPLE(x) | \
31 SDMMC_CLKSEL_CCLK_DRIVE(y) | \
32 SDMMC_CLKSEL_CCLK_DIVIDER(z))
33
34#define SDMMC_CMD_USE_HOLD_REG BIT(29)
35
36#define EXYNOS4210_FIXED_CIU_CLK_DIV 2
37#define EXYNOS4412_FIXED_CIU_CLK_DIV 4
38
39/* Variations in Exynos specific dw-mshc controller */
40enum dw_mci_exynos_type {
41 DW_MCI_TYPE_EXYNOS4210,
42 DW_MCI_TYPE_EXYNOS4412,
43 DW_MCI_TYPE_EXYNOS5250,
44};
45
46/* Exynos implementation specific driver private data */
47struct dw_mci_exynos_priv_data {
48 enum dw_mci_exynos_type ctrl_type;
49 u8 ciu_div;
50 u32 sdr_timing;
51 u32 ddr_timing;
52};
53
54static struct dw_mci_exynos_compatible {
55 char *compatible;
56 enum dw_mci_exynos_type ctrl_type;
57} exynos_compat[] = {
58 {
59 .compatible = "samsung,exynos4210-dw-mshc",
60 .ctrl_type = DW_MCI_TYPE_EXYNOS4210,
61 }, {
62 .compatible = "samsung,exynos4412-dw-mshc",
63 .ctrl_type = DW_MCI_TYPE_EXYNOS4412,
64 }, {
65 .compatible = "samsung,exynos5250-dw-mshc",
66 .ctrl_type = DW_MCI_TYPE_EXYNOS5250,
67 },
68};
69
70static int dw_mci_exynos_priv_init(struct dw_mci *host)
71{
72 struct dw_mci_exynos_priv_data *priv;
73 int idx;
74
75 priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
76 if (!priv) {
77 dev_err(host->dev, "mem alloc failed for private data\n");
78 return -ENOMEM;
79 }
80
81 for (idx = 0; idx < ARRAY_SIZE(exynos_compat); idx++) {
82 if (of_device_is_compatible(host->dev->of_node,
83 exynos_compat[idx].compatible))
84 priv->ctrl_type = exynos_compat[idx].ctrl_type;
85 }
86
87 host->priv = priv;
88 return 0;
89}
90
91static int dw_mci_exynos_setup_clock(struct dw_mci *host)
92{
93 struct dw_mci_exynos_priv_data *priv = host->priv;
94
95 if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5250)
96 host->bus_hz /= (priv->ciu_div + 1);
97 else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412)
98 host->bus_hz /= EXYNOS4412_FIXED_CIU_CLK_DIV;
99 else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210)
100 host->bus_hz /= EXYNOS4210_FIXED_CIU_CLK_DIV;
101
102 return 0;
103}
104
105static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr)
106{
107 /*
108 * Exynos4412 and Exynos5250 extends the use of CMD register with the
109 * use of bit 29 (which is reserved on standard MSHC controllers) for
110 * optionally bypassing the HOLD register for command and data. The
111 * HOLD register should be bypassed in case there is no phase shift
112 * applied on CMD/DATA that is sent to the card.
113 */
114 if (SDMMC_CLKSEL_GET_DRV_WD3(mci_readl(host, CLKSEL)))
115 *cmdr |= SDMMC_CMD_USE_HOLD_REG;
116}
117
118static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios)
119{
120 struct dw_mci_exynos_priv_data *priv = host->priv;
121
122 if (ios->timing == MMC_TIMING_UHS_DDR50)
123 mci_writel(host, CLKSEL, priv->ddr_timing);
124 else
125 mci_writel(host, CLKSEL, priv->sdr_timing);
126}
127
128static int dw_mci_exynos_parse_dt(struct dw_mci *host)
129{
130 struct dw_mci_exynos_priv_data *priv = host->priv;
131 struct device_node *np = host->dev->of_node;
132 u32 timing[2];
133 u32 div = 0;
134 int ret;
135
136 of_property_read_u32(np, "samsung,dw-mshc-ciu-div", &div);
137 priv->ciu_div = div;
138
139 ret = of_property_read_u32_array(np,
140 "samsung,dw-mshc-sdr-timing", timing, 2);
141 if (ret)
142 return ret;
143
144 priv->sdr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div);
145
146 ret = of_property_read_u32_array(np,
147 "samsung,dw-mshc-ddr-timing", timing, 2);
148 if (ret)
149 return ret;
150
151 priv->ddr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div);
152 return 0;
153}
154
155static int dw_mci_exynos_setup_bus(struct dw_mci *host,
156 struct device_node *slot_np, u8 bus_width)
157{
158 int idx, gpio, ret;
159
160 if (!slot_np)
161 return -EINVAL;
162
163 /* cmd + clock + bus-width pins */
164 for (idx = 0; idx < NUM_PINS(bus_width); idx++) {
165 gpio = of_get_gpio(slot_np, idx);
166 if (!gpio_is_valid(gpio)) {
167 dev_err(host->dev, "invalid gpio: %d\n", gpio);
168 return -EINVAL;
169 }
170
171 ret = devm_gpio_request(host->dev, gpio, "dw-mci-bus");
172 if (ret) {
173 dev_err(host->dev, "gpio [%d] request failed\n", gpio);
174 return -EBUSY;
175 }
176 }
177
178 gpio = of_get_named_gpio(slot_np, "wp-gpios", 0);
179 if (gpio_is_valid(gpio)) {
180 if (devm_gpio_request(host->dev, gpio, "dw-mci-wp"))
181 dev_info(host->dev, "gpio [%d] request failed\n",
182 gpio);
183 } else {
184 dev_info(host->dev, "wp gpio not available");
185 host->pdata->quirks |= DW_MCI_QUIRK_NO_WRITE_PROTECT;
186 }
187
188 if (host->pdata->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
189 return 0;
190
191 gpio = of_get_named_gpio(slot_np, "samsung,cd-pinmux-gpio", 0);
192 if (gpio_is_valid(gpio)) {
193 if (devm_gpio_request(host->dev, gpio, "dw-mci-cd"))
194 dev_err(host->dev, "gpio [%d] request failed\n", gpio);
195 } else {
196 dev_info(host->dev, "cd gpio not available");
197 }
198
199 return 0;
200}
201
202/* Exynos5250 controller specific capabilities */
203static unsigned long exynos5250_dwmmc_caps[4] = {
204 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR |
205 MMC_CAP_8_BIT_DATA | MMC_CAP_CMD23,
206 MMC_CAP_CMD23,
207 MMC_CAP_CMD23,
208 MMC_CAP_CMD23,
209};
210
211static struct dw_mci_drv_data exynos5250_drv_data = {
212 .caps = exynos5250_dwmmc_caps,
213 .init = dw_mci_exynos_priv_init,
214 .setup_clock = dw_mci_exynos_setup_clock,
215 .prepare_command = dw_mci_exynos_prepare_command,
216 .set_ios = dw_mci_exynos_set_ios,
217 .parse_dt = dw_mci_exynos_parse_dt,
218 .setup_bus = dw_mci_exynos_setup_bus,
219};
220
221static const struct of_device_id dw_mci_exynos_match[] = {
222 { .compatible = "samsung,exynos5250-dw-mshc",
223 .data = (void *)&exynos5250_drv_data, },
224 {},
225};
226MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match);
227
228int dw_mci_exynos_probe(struct platform_device *pdev)
229{
230 struct dw_mci_drv_data *drv_data;
231 const struct of_device_id *match;
232
233 match = of_match_node(dw_mci_exynos_match, pdev->dev.of_node);
234 drv_data = match->data;
235 return dw_mci_pltfm_register(pdev, drv_data);
236}
237
238static struct platform_driver dw_mci_exynos_pltfm_driver = {
239 .probe = dw_mci_exynos_probe,
240 .remove = __exit_p(dw_mci_pltfm_remove),
241 .driver = {
242 .name = "dwmmc_exynos",
243 .of_match_table = of_match_ptr(dw_mci_exynos_match),
244 .pm = &dw_mci_pltfm_pmops,
245 },
246};
247
248module_platform_driver(dw_mci_exynos_pltfm_driver);
249
250MODULE_DESCRIPTION("Samsung Specific DW-MSHC Driver Extension");
251MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com");
252MODULE_LICENSE("GPL v2");
253MODULE_ALIAS("platform:dwmmc-exynos");
diff --git a/drivers/mmc/host/dw_mmc-pci.c b/drivers/mmc/host/dw_mmc-pci.c
index dc0d25a013e0..edb37e9135ae 100644
--- a/drivers/mmc/host/dw_mmc-pci.c
+++ b/drivers/mmc/host/dw_mmc-pci.c
@@ -59,7 +59,7 @@ static int __devinit dw_mci_pci_probe(struct pci_dev *pdev,
59 59
60 host->irq = pdev->irq; 60 host->irq = pdev->irq;
61 host->irq_flags = IRQF_SHARED; 61 host->irq_flags = IRQF_SHARED;
62 host->dev = pdev->dev; 62 host->dev = &pdev->dev;
63 host->pdata = &pci_board_data; 63 host->pdata = &pci_board_data;
64 64
65 host->regs = pci_iomap(pdev, PCI_BAR_NO, COMPLETE_BAR); 65 host->regs = pci_iomap(pdev, PCI_BAR_NO, COMPLETE_BAR);
@@ -140,18 +140,7 @@ static struct pci_driver dw_mci_pci_driver = {
140 }, 140 },
141}; 141};
142 142
143static int __init dw_mci_init(void) 143module_pci_driver(dw_mci_pci_driver);
144{
145 return pci_register_driver(&dw_mci_pci_driver);
146}
147
148static void __exit dw_mci_exit(void)
149{
150 pci_unregister_driver(&dw_mci_pci_driver);
151}
152
153module_init(dw_mci_init);
154module_exit(dw_mci_exit);
155 144
156MODULE_DESCRIPTION("DW Multimedia Card PCI Interface driver"); 145MODULE_DESCRIPTION("DW Multimedia Card PCI Interface driver");
157MODULE_AUTHOR("Shashidhar Hiremath <shashidharh@vayavyalabs.com>"); 146MODULE_AUTHOR("Shashidhar Hiremath <shashidharh@vayavyalabs.com>");
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index 92ec3eb3aae7..c960ca7ffbe6 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -19,59 +19,63 @@
19#include <linux/mmc/host.h> 19#include <linux/mmc/host.h>
20#include <linux/mmc/mmc.h> 20#include <linux/mmc/mmc.h>
21#include <linux/mmc/dw_mmc.h> 21#include <linux/mmc/dw_mmc.h>
22#include <linux/of.h>
23
22#include "dw_mmc.h" 24#include "dw_mmc.h"
23 25
24static int dw_mci_pltfm_probe(struct platform_device *pdev) 26int dw_mci_pltfm_register(struct platform_device *pdev,
27 struct dw_mci_drv_data *drv_data)
25{ 28{
26 struct dw_mci *host; 29 struct dw_mci *host;
27 struct resource *regs; 30 struct resource *regs;
28 int ret; 31 int ret;
29 32
30 host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL); 33 host = devm_kzalloc(&pdev->dev, sizeof(struct dw_mci), GFP_KERNEL);
31 if (!host) 34 if (!host)
32 return -ENOMEM; 35 return -ENOMEM;
33 36
34 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 37 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
35 if (!regs) { 38 if (!regs)
36 ret = -ENXIO; 39 return -ENXIO;
37 goto err_free;
38 }
39 40
40 host->irq = platform_get_irq(pdev, 0); 41 host->irq = platform_get_irq(pdev, 0);
41 if (host->irq < 0) { 42 if (host->irq < 0)
42 ret = host->irq; 43 return host->irq;
43 goto err_free;
44 }
45 44
46 host->dev = pdev->dev; 45 host->drv_data = drv_data;
46 host->dev = &pdev->dev;
47 host->irq_flags = 0; 47 host->irq_flags = 0;
48 host->pdata = pdev->dev.platform_data; 48 host->pdata = pdev->dev.platform_data;
49 ret = -ENOMEM; 49 host->regs = devm_request_and_ioremap(&pdev->dev, regs);
50 host->regs = ioremap(regs->start, resource_size(regs));
51 if (!host->regs) 50 if (!host->regs)
52 goto err_free; 51 return -ENOMEM;
52
53 if (host->drv_data->init) {
54 ret = host->drv_data->init(host);
55 if (ret)
56 return ret;
57 }
58
53 platform_set_drvdata(pdev, host); 59 platform_set_drvdata(pdev, host);
54 ret = dw_mci_probe(host); 60 ret = dw_mci_probe(host);
55 if (ret)
56 goto err_out;
57 return ret;
58err_out:
59 iounmap(host->regs);
60err_free:
61 kfree(host);
62 return ret; 61 return ret;
63} 62}
63EXPORT_SYMBOL_GPL(dw_mci_pltfm_register);
64 64
65static int __exit dw_mci_pltfm_remove(struct platform_device *pdev) 65static int __devinit dw_mci_pltfm_probe(struct platform_device *pdev)
66{
67 return dw_mci_pltfm_register(pdev, NULL);
68}
69
70static int __devexit dw_mci_pltfm_remove(struct platform_device *pdev)
66{ 71{
67 struct dw_mci *host = platform_get_drvdata(pdev); 72 struct dw_mci *host = platform_get_drvdata(pdev);
68 73
69 platform_set_drvdata(pdev, NULL); 74 platform_set_drvdata(pdev, NULL);
70 dw_mci_remove(host); 75 dw_mci_remove(host);
71 iounmap(host->regs);
72 kfree(host);
73 return 0; 76 return 0;
74} 77}
78EXPORT_SYMBOL_GPL(dw_mci_pltfm_remove);
75 79
76#ifdef CONFIG_PM_SLEEP 80#ifdef CONFIG_PM_SLEEP
77/* 81/*
@@ -105,12 +109,20 @@ static int dw_mci_pltfm_resume(struct device *dev)
105#define dw_mci_pltfm_resume NULL 109#define dw_mci_pltfm_resume NULL
106#endif /* CONFIG_PM_SLEEP */ 110#endif /* CONFIG_PM_SLEEP */
107 111
108static SIMPLE_DEV_PM_OPS(dw_mci_pltfm_pmops, dw_mci_pltfm_suspend, dw_mci_pltfm_resume); 112SIMPLE_DEV_PM_OPS(dw_mci_pltfm_pmops, dw_mci_pltfm_suspend, dw_mci_pltfm_resume);
113EXPORT_SYMBOL_GPL(dw_mci_pltfm_pmops);
114
115static const struct of_device_id dw_mci_pltfm_match[] = {
116 { .compatible = "snps,dw-mshc", },
117 {},
118};
119MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match);
109 120
110static struct platform_driver dw_mci_pltfm_driver = { 121static struct platform_driver dw_mci_pltfm_driver = {
111 .remove = __exit_p(dw_mci_pltfm_remove), 122 .remove = __exit_p(dw_mci_pltfm_remove),
112 .driver = { 123 .driver = {
113 .name = "dw_mmc", 124 .name = "dw_mmc",
125 .of_match_table = of_match_ptr(dw_mci_pltfm_match),
114 .pm = &dw_mci_pltfm_pmops, 126 .pm = &dw_mci_pltfm_pmops,
115 }, 127 },
116}; 128};
diff --git a/drivers/mmc/host/dw_mmc-pltfm.h b/drivers/mmc/host/dw_mmc-pltfm.h
new file mode 100644
index 000000000000..301f24541fc2
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc-pltfm.h
@@ -0,0 +1,20 @@
1/*
2 * Synopsys DesignWare Multimedia Card Interface Platform driver
3 *
4 * Copyright (C) 2012, Samsung Electronics Co., Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef _DW_MMC_PLTFM_H_
13#define _DW_MMC_PLTFM_H_
14
15extern int dw_mci_pltfm_register(struct platform_device *pdev,
16 struct dw_mci_drv_data *drv_data);
17extern int __devexit dw_mci_pltfm_remove(struct platform_device *pdev);
18extern const struct dev_pm_ops dw_mci_pltfm_pmops;
19
20#endif /* _DW_MMC_PLTFM_H_ */
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index af40d227bece..c2828f35c3b8 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -33,6 +33,7 @@
33#include <linux/bitops.h> 33#include <linux/bitops.h>
34#include <linux/regulator/consumer.h> 34#include <linux/regulator/consumer.h>
35#include <linux/workqueue.h> 35#include <linux/workqueue.h>
36#include <linux/of.h>
36 37
37#include "dw_mmc.h" 38#include "dw_mmc.h"
38 39
@@ -230,6 +231,7 @@ static void dw_mci_set_timeout(struct dw_mci *host)
230static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) 231static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
231{ 232{
232 struct mmc_data *data; 233 struct mmc_data *data;
234 struct dw_mci_slot *slot = mmc_priv(mmc);
233 u32 cmdr; 235 u32 cmdr;
234 cmd->error = -EINPROGRESS; 236 cmd->error = -EINPROGRESS;
235 237
@@ -259,6 +261,9 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
259 cmdr |= SDMMC_CMD_DAT_WR; 261 cmdr |= SDMMC_CMD_DAT_WR;
260 } 262 }
261 263
264 if (slot->host->drv_data->prepare_command)
265 slot->host->drv_data->prepare_command(slot->host, &cmdr);
266
262 return cmdr; 267 return cmdr;
263} 268}
264 269
@@ -266,7 +271,7 @@ static void dw_mci_start_command(struct dw_mci *host,
266 struct mmc_command *cmd, u32 cmd_flags) 271 struct mmc_command *cmd, u32 cmd_flags)
267{ 272{
268 host->cmd = cmd; 273 host->cmd = cmd;
269 dev_vdbg(&host->dev, 274 dev_vdbg(host->dev,
270 "start command: ARGR=0x%08x CMDR=0x%08x\n", 275 "start command: ARGR=0x%08x CMDR=0x%08x\n",
271 cmd->arg, cmd_flags); 276 cmd->arg, cmd_flags);
272 277
@@ -308,7 +313,7 @@ static void dw_mci_dma_cleanup(struct dw_mci *host)
308 313
309 if (data) 314 if (data)
310 if (!data->host_cookie) 315 if (!data->host_cookie)
311 dma_unmap_sg(&host->dev, 316 dma_unmap_sg(host->dev,
312 data->sg, 317 data->sg,
313 data->sg_len, 318 data->sg_len,
314 dw_mci_get_dma_dir(data)); 319 dw_mci_get_dma_dir(data));
@@ -334,7 +339,7 @@ static void dw_mci_idmac_complete_dma(struct dw_mci *host)
334{ 339{
335 struct mmc_data *data = host->data; 340 struct mmc_data *data = host->data;
336 341
337 dev_vdbg(&host->dev, "DMA complete\n"); 342 dev_vdbg(host->dev, "DMA complete\n");
338 343
339 host->dma_ops->cleanup(host); 344 host->dma_ops->cleanup(host);
340 345
@@ -405,23 +410,11 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
405static int dw_mci_idmac_init(struct dw_mci *host) 410static int dw_mci_idmac_init(struct dw_mci *host)
406{ 411{
407 struct idmac_desc *p; 412 struct idmac_desc *p;
408 int i, dma_support; 413 int i;
409 414
410 /* Number of descriptors in the ring buffer */ 415 /* Number of descriptors in the ring buffer */
411 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc); 416 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
412 417
413 /* Check if Hardware Configuration Register has support for DMA */
414 dma_support = (mci_readl(host, HCON) >> 16) & 0x3;
415
416 if (!dma_support || dma_support > 2) {
417 dev_err(&host->dev,
418 "Host Controller does not support IDMA Tx.\n");
419 host->dma_ops = NULL;
420 return -ENODEV;
421 }
422
423 dev_info(&host->dev, "Using internal DMA controller.\n");
424
425 /* Forward link the descriptor list */ 418 /* Forward link the descriptor list */
426 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) 419 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
427 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1)); 420 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
@@ -476,7 +469,7 @@ static int dw_mci_pre_dma_transfer(struct dw_mci *host,
476 return -EINVAL; 469 return -EINVAL;
477 } 470 }
478 471
479 sg_len = dma_map_sg(&host->dev, 472 sg_len = dma_map_sg(host->dev,
480 data->sg, 473 data->sg,
481 data->sg_len, 474 data->sg_len,
482 dw_mci_get_dma_dir(data)); 475 dw_mci_get_dma_dir(data));
@@ -519,7 +512,7 @@ static void dw_mci_post_req(struct mmc_host *mmc,
519 return; 512 return;
520 513
521 if (data->host_cookie) 514 if (data->host_cookie)
522 dma_unmap_sg(&slot->host->dev, 515 dma_unmap_sg(slot->host->dev,
523 data->sg, 516 data->sg,
524 data->sg_len, 517 data->sg_len,
525 dw_mci_get_dma_dir(data)); 518 dw_mci_get_dma_dir(data));
@@ -545,7 +538,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
545 538
546 host->using_dma = 1; 539 host->using_dma = 1;
547 540
548 dev_vdbg(&host->dev, 541 dev_vdbg(host->dev,
549 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", 542 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
550 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma, 543 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
551 sg_len); 544 sg_len);
@@ -814,6 +807,9 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
814 slot->clock = ios->clock; 807 slot->clock = ios->clock;
815 } 808 }
816 809
810 if (slot->host->drv_data->set_ios)
811 slot->host->drv_data->set_ios(slot->host, ios);
812
817 switch (ios->power_mode) { 813 switch (ios->power_mode) {
818 case MMC_POWER_UP: 814 case MMC_POWER_UP:
819 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags); 815 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
@@ -830,7 +826,9 @@ static int dw_mci_get_ro(struct mmc_host *mmc)
830 struct dw_mci_board *brd = slot->host->pdata; 826 struct dw_mci_board *brd = slot->host->pdata;
831 827
832 /* Use platform get_ro function, else try on board write protect */ 828 /* Use platform get_ro function, else try on board write protect */
833 if (brd->get_ro) 829 if (brd->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT)
830 read_only = 0;
831 else if (brd->get_ro)
834 read_only = brd->get_ro(slot->id); 832 read_only = brd->get_ro(slot->id);
835 else 833 else
836 read_only = 834 read_only =
@@ -939,12 +937,12 @@ static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
939 slot = list_entry(host->queue.next, 937 slot = list_entry(host->queue.next,
940 struct dw_mci_slot, queue_node); 938 struct dw_mci_slot, queue_node);
941 list_del(&slot->queue_node); 939 list_del(&slot->queue_node);
942 dev_vdbg(&host->dev, "list not empty: %s is next\n", 940 dev_vdbg(host->dev, "list not empty: %s is next\n",
943 mmc_hostname(slot->mmc)); 941 mmc_hostname(slot->mmc));
944 host->state = STATE_SENDING_CMD; 942 host->state = STATE_SENDING_CMD;
945 dw_mci_start_request(host, slot); 943 dw_mci_start_request(host, slot);
946 } else { 944 } else {
947 dev_vdbg(&host->dev, "list empty\n"); 945 dev_vdbg(host->dev, "list empty\n");
948 host->state = STATE_IDLE; 946 host->state = STATE_IDLE;
949 } 947 }
950 948
@@ -1083,7 +1081,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
1083 data->bytes_xfered = 0; 1081 data->bytes_xfered = 0;
1084 data->error = -ETIMEDOUT; 1082 data->error = -ETIMEDOUT;
1085 } else { 1083 } else {
1086 dev_err(&host->dev, 1084 dev_err(host->dev,
1087 "data FIFO error " 1085 "data FIFO error "
1088 "(status=%08x)\n", 1086 "(status=%08x)\n",
1089 status); 1087 status);
@@ -1767,12 +1765,60 @@ static void dw_mci_work_routine_card(struct work_struct *work)
1767 } 1765 }
1768} 1766}
1769 1767
1770static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) 1768#ifdef CONFIG_OF
1769/* given a slot id, find out the device node representing that slot */
1770static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
1771{
1772 struct device_node *np;
1773 const __be32 *addr;
1774 int len;
1775
1776 if (!dev || !dev->of_node)
1777 return NULL;
1778
1779 for_each_child_of_node(dev->of_node, np) {
1780 addr = of_get_property(np, "reg", &len);
1781 if (!addr || (len < sizeof(int)))
1782 continue;
1783 if (be32_to_cpup(addr) == slot)
1784 return np;
1785 }
1786 return NULL;
1787}
1788
1789/* find out bus-width for a given slot */
1790static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
1791{
1792 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
1793 u32 bus_wd = 1;
1794
1795 if (!np)
1796 return 1;
1797
1798 if (of_property_read_u32(np, "bus-width", &bus_wd))
1799 dev_err(dev, "bus-width property not found, assuming width"
1800 " as 1\n");
1801 return bus_wd;
1802}
1803#else /* CONFIG_OF */
1804static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
1805{
1806 return 1;
1807}
1808static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
1809{
1810 return NULL;
1811}
1812#endif /* CONFIG_OF */
1813
1814static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1771{ 1815{
1772 struct mmc_host *mmc; 1816 struct mmc_host *mmc;
1773 struct dw_mci_slot *slot; 1817 struct dw_mci_slot *slot;
1818 int ctrl_id, ret;
1819 u8 bus_width;
1774 1820
1775 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->dev); 1821 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
1776 if (!mmc) 1822 if (!mmc)
1777 return -ENOMEM; 1823 return -ENOMEM;
1778 1824
@@ -1780,6 +1826,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1780 slot->id = id; 1826 slot->id = id;
1781 slot->mmc = mmc; 1827 slot->mmc = mmc;
1782 slot->host = host; 1828 slot->host = host;
1829 host->slot[id] = slot;
1783 1830
1784 mmc->ops = &dw_mci_ops; 1831 mmc->ops = &dw_mci_ops;
1785 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510); 1832 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
@@ -1800,21 +1847,44 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1800 if (host->pdata->caps) 1847 if (host->pdata->caps)
1801 mmc->caps = host->pdata->caps; 1848 mmc->caps = host->pdata->caps;
1802 1849
1850 if (host->dev->of_node) {
1851 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
1852 if (ctrl_id < 0)
1853 ctrl_id = 0;
1854 } else {
1855 ctrl_id = to_platform_device(host->dev)->id;
1856 }
1857 if (host->drv_data && host->drv_data->caps)
1858 mmc->caps |= host->drv_data->caps[ctrl_id];
1859
1803 if (host->pdata->caps2) 1860 if (host->pdata->caps2)
1804 mmc->caps2 = host->pdata->caps2; 1861 mmc->caps2 = host->pdata->caps2;
1805 1862
1806 if (host->pdata->get_bus_wd) 1863 if (host->pdata->get_bus_wd)
1807 if (host->pdata->get_bus_wd(slot->id) >= 4) 1864 bus_width = host->pdata->get_bus_wd(slot->id);
1808 mmc->caps |= MMC_CAP_4_BIT_DATA; 1865 else if (host->dev->of_node)
1866 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
1867 else
1868 bus_width = 1;
1869
1870 if (host->drv_data->setup_bus) {
1871 struct device_node *slot_np;
1872 slot_np = dw_mci_of_find_slot_node(host->dev, slot->id);
1873 ret = host->drv_data->setup_bus(host, slot_np, bus_width);
1874 if (ret)
1875 goto err_setup_bus;
1876 }
1877
1878 switch (bus_width) {
1879 case 8:
1880 mmc->caps |= MMC_CAP_8_BIT_DATA;
1881 case 4:
1882 mmc->caps |= MMC_CAP_4_BIT_DATA;
1883 }
1809 1884
1810 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) 1885 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
1811 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 1886 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
1812 1887
1813 if (mmc->caps2 & MMC_CAP2_POWEROFF_NOTIFY)
1814 mmc->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
1815 else
1816 mmc->power_notify_type = MMC_HOST_PW_NOTIFY_NONE;
1817
1818 if (host->pdata->blk_settings) { 1888 if (host->pdata->blk_settings) {
1819 mmc->max_segs = host->pdata->blk_settings->max_segs; 1889 mmc->max_segs = host->pdata->blk_settings->max_segs;
1820 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size; 1890 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
@@ -1850,7 +1920,6 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1850 else 1920 else
1851 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1921 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1852 1922
1853 host->slot[id] = slot;
1854 mmc_add_host(mmc); 1923 mmc_add_host(mmc);
1855 1924
1856#if defined(CONFIG_DEBUG_FS) 1925#if defined(CONFIG_DEBUG_FS)
@@ -1867,6 +1936,10 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1867 queue_work(host->card_workqueue, &host->card_work); 1936 queue_work(host->card_workqueue, &host->card_work);
1868 1937
1869 return 0; 1938 return 0;
1939
1940err_setup_bus:
1941 mmc_free_host(mmc);
1942 return -EINVAL;
1870} 1943}
1871 1944
1872static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id) 1945static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
@@ -1884,10 +1957,10 @@ static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
1884static void dw_mci_init_dma(struct dw_mci *host) 1957static void dw_mci_init_dma(struct dw_mci *host)
1885{ 1958{
1886 /* Alloc memory for sg translation */ 1959 /* Alloc memory for sg translation */
1887 host->sg_cpu = dma_alloc_coherent(&host->dev, PAGE_SIZE, 1960 host->sg_cpu = dma_alloc_coherent(host->dev, PAGE_SIZE,
1888 &host->sg_dma, GFP_KERNEL); 1961 &host->sg_dma, GFP_KERNEL);
1889 if (!host->sg_cpu) { 1962 if (!host->sg_cpu) {
1890 dev_err(&host->dev, "%s: could not alloc DMA memory\n", 1963 dev_err(host->dev, "%s: could not alloc DMA memory\n",
1891 __func__); 1964 __func__);
1892 goto no_dma; 1965 goto no_dma;
1893 } 1966 }
@@ -1895,6 +1968,7 @@ static void dw_mci_init_dma(struct dw_mci *host)
1895 /* Determine which DMA interface to use */ 1968 /* Determine which DMA interface to use */
1896#ifdef CONFIG_MMC_DW_IDMAC 1969#ifdef CONFIG_MMC_DW_IDMAC
1897 host->dma_ops = &dw_mci_idmac_ops; 1970 host->dma_ops = &dw_mci_idmac_ops;
1971 dev_info(&host->dev, "Using internal DMA controller.\n");
1898#endif 1972#endif
1899 1973
1900 if (!host->dma_ops) 1974 if (!host->dma_ops)
@@ -1903,12 +1977,12 @@ static void dw_mci_init_dma(struct dw_mci *host)
1903 if (host->dma_ops->init && host->dma_ops->start && 1977 if (host->dma_ops->init && host->dma_ops->start &&
1904 host->dma_ops->stop && host->dma_ops->cleanup) { 1978 host->dma_ops->stop && host->dma_ops->cleanup) {
1905 if (host->dma_ops->init(host)) { 1979 if (host->dma_ops->init(host)) {
1906 dev_err(&host->dev, "%s: Unable to initialize " 1980 dev_err(host->dev, "%s: Unable to initialize "
1907 "DMA Controller.\n", __func__); 1981 "DMA Controller.\n", __func__);
1908 goto no_dma; 1982 goto no_dma;
1909 } 1983 }
1910 } else { 1984 } else {
1911 dev_err(&host->dev, "DMA initialization not found.\n"); 1985 dev_err(host->dev, "DMA initialization not found.\n");
1912 goto no_dma; 1986 goto no_dma;
1913 } 1987 }
1914 1988
@@ -1916,7 +1990,7 @@ static void dw_mci_init_dma(struct dw_mci *host)
1916 return; 1990 return;
1917 1991
1918no_dma: 1992no_dma:
1919 dev_info(&host->dev, "Using PIO mode.\n"); 1993 dev_info(host->dev, "Using PIO mode.\n");
1920 host->use_dma = 0; 1994 host->use_dma = 0;
1921 return; 1995 return;
1922} 1996}
@@ -1942,30 +2016,133 @@ static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
1942 return false; 2016 return false;
1943} 2017}
1944 2018
2019#ifdef CONFIG_OF
2020static struct dw_mci_of_quirks {
2021 char *quirk;
2022 int id;
2023} of_quirks[] = {
2024 {
2025 .quirk = "supports-highspeed",
2026 .id = DW_MCI_QUIRK_HIGHSPEED,
2027 }, {
2028 .quirk = "broken-cd",
2029 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2030 },
2031};
2032
2033static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2034{
2035 struct dw_mci_board *pdata;
2036 struct device *dev = host->dev;
2037 struct device_node *np = dev->of_node;
2038 int idx, ret;
2039
2040 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2041 if (!pdata) {
2042 dev_err(dev, "could not allocate memory for pdata\n");
2043 return ERR_PTR(-ENOMEM);
2044 }
2045
2046 /* find out number of slots supported */
2047 if (of_property_read_u32(dev->of_node, "num-slots",
2048 &pdata->num_slots)) {
2049 dev_info(dev, "num-slots property not found, "
2050 "assuming 1 slot is available\n");
2051 pdata->num_slots = 1;
2052 }
2053
2054 /* get quirks */
2055 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2056 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2057 pdata->quirks |= of_quirks[idx].id;
2058
2059 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2060 dev_info(dev, "fifo-depth property not found, using "
2061 "value of FIFOTH register as default\n");
2062
2063 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2064
2065 if (host->drv_data->parse_dt) {
2066 ret = host->drv_data->parse_dt(host);
2067 if (ret)
2068 return ERR_PTR(ret);
2069 }
2070
2071 return pdata;
2072}
2073
2074#else /* CONFIG_OF */
2075static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2076{
2077 return ERR_PTR(-EINVAL);
2078}
2079#endif /* CONFIG_OF */
2080
1945int dw_mci_probe(struct dw_mci *host) 2081int dw_mci_probe(struct dw_mci *host)
1946{ 2082{
1947 int width, i, ret = 0; 2083 int width, i, ret = 0;
1948 u32 fifo_size; 2084 u32 fifo_size;
2085 int init_slots = 0;
1949 2086
1950 if (!host->pdata || !host->pdata->init) { 2087 if (!host->pdata) {
1951 dev_err(&host->dev, 2088 host->pdata = dw_mci_parse_dt(host);
1952 "Platform data must supply init function\n"); 2089 if (IS_ERR(host->pdata)) {
1953 return -ENODEV; 2090 dev_err(host->dev, "platform data not available\n");
2091 return -EINVAL;
2092 }
1954 } 2093 }
1955 2094
1956 if (!host->pdata->select_slot && host->pdata->num_slots > 1) { 2095 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
1957 dev_err(&host->dev, 2096 dev_err(host->dev,
1958 "Platform data must supply select_slot function\n"); 2097 "Platform data must supply select_slot function\n");
1959 return -ENODEV; 2098 return -ENODEV;
1960 } 2099 }
1961 2100
1962 if (!host->pdata->bus_hz) { 2101 host->biu_clk = clk_get(host->dev, "biu");
1963 dev_err(&host->dev, 2102 if (IS_ERR(host->biu_clk)) {
2103 dev_dbg(host->dev, "biu clock not available\n");
2104 } else {
2105 ret = clk_prepare_enable(host->biu_clk);
2106 if (ret) {
2107 dev_err(host->dev, "failed to enable biu clock\n");
2108 clk_put(host->biu_clk);
2109 return ret;
2110 }
2111 }
2112
2113 host->ciu_clk = clk_get(host->dev, "ciu");
2114 if (IS_ERR(host->ciu_clk)) {
2115 dev_dbg(host->dev, "ciu clock not available\n");
2116 } else {
2117 ret = clk_prepare_enable(host->ciu_clk);
2118 if (ret) {
2119 dev_err(host->dev, "failed to enable ciu clock\n");
2120 clk_put(host->ciu_clk);
2121 goto err_clk_biu;
2122 }
2123 }
2124
2125 if (IS_ERR(host->ciu_clk))
2126 host->bus_hz = host->pdata->bus_hz;
2127 else
2128 host->bus_hz = clk_get_rate(host->ciu_clk);
2129
2130 if (host->drv_data->setup_clock) {
2131 ret = host->drv_data->setup_clock(host);
2132 if (ret) {
2133 dev_err(host->dev,
2134 "implementation specific clock setup failed\n");
2135 goto err_clk_ciu;
2136 }
2137 }
2138
2139 if (!host->bus_hz) {
2140 dev_err(host->dev,
1964 "Platform data must supply bus speed\n"); 2141 "Platform data must supply bus speed\n");
1965 return -ENODEV; 2142 ret = -ENODEV;
2143 goto err_clk_ciu;
1966 } 2144 }
1967 2145
1968 host->bus_hz = host->pdata->bus_hz;
1969 host->quirks = host->pdata->quirks; 2146 host->quirks = host->pdata->quirks;
1970 2147
1971 spin_lock_init(&host->lock); 2148 spin_lock_init(&host->lock);
@@ -1998,7 +2175,7 @@ int dw_mci_probe(struct dw_mci *host)
1998 } 2175 }
1999 2176
2000 /* Reset all blocks */ 2177 /* Reset all blocks */
2001 if (!mci_wait_reset(&host->dev, host)) 2178 if (!mci_wait_reset(host->dev, host))
2002 return -ENODEV; 2179 return -ENODEV;
2003 2180
2004 host->dma_ops = host->pdata->dma_ops; 2181 host->dma_ops = host->pdata->dma_ops;
@@ -2054,10 +2231,18 @@ int dw_mci_probe(struct dw_mci *host)
2054 /* We need at least one slot to succeed */ 2231 /* We need at least one slot to succeed */
2055 for (i = 0; i < host->num_slots; i++) { 2232 for (i = 0; i < host->num_slots; i++) {
2056 ret = dw_mci_init_slot(host, i); 2233 ret = dw_mci_init_slot(host, i);
2057 if (ret) { 2234 if (ret)
2058 ret = -ENODEV; 2235 dev_dbg(host->dev, "slot %d init failed\n", i);
2059 goto err_init_slot; 2236 else
2060 } 2237 init_slots++;
2238 }
2239
2240 if (init_slots) {
2241 dev_info(host->dev, "%d slots initialized\n", init_slots);
2242 } else {
2243 dev_dbg(host->dev, "attempted to initialize %d slots, "
2244 "but failed on all\n", host->num_slots);
2245 goto err_init_slot;
2061 } 2246 }
2062 2247
2063 /* 2248 /*
@@ -2065,7 +2250,7 @@ int dw_mci_probe(struct dw_mci *host)
2065 * Need to check the version-id and set data-offset for DATA register. 2250 * Need to check the version-id and set data-offset for DATA register.
2066 */ 2251 */
2067 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); 2252 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2068 dev_info(&host->dev, "Version ID is %04x\n", host->verid); 2253 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2069 2254
2070 if (host->verid < DW_MMC_240A) 2255 if (host->verid < DW_MMC_240A)
2071 host->data_offset = DATA_OFFSET; 2256 host->data_offset = DATA_OFFSET;
@@ -2082,22 +2267,16 @@ int dw_mci_probe(struct dw_mci *host)
2082 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); 2267 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2083 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ 2268 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2084 2269
2085 dev_info(&host->dev, "DW MMC controller at irq %d, " 2270 dev_info(host->dev, "DW MMC controller at irq %d, "
2086 "%d bit host data width, " 2271 "%d bit host data width, "
2087 "%u deep fifo\n", 2272 "%u deep fifo\n",
2088 host->irq, width, fifo_size); 2273 host->irq, width, fifo_size);
2089 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) 2274 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2090 dev_info(&host->dev, "Internal DMAC interrupt fix enabled.\n"); 2275 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
2091 2276
2092 return 0; 2277 return 0;
2093 2278
2094err_init_slot: 2279err_init_slot:
2095 /* De-init any initialized slots */
2096 while (i > 0) {
2097 if (host->slot[i])
2098 dw_mci_cleanup_slot(host->slot[i], i);
2099 i--;
2100 }
2101 free_irq(host->irq, host); 2280 free_irq(host->irq, host);
2102 2281
2103err_workqueue: 2282err_workqueue:
@@ -2106,13 +2285,24 @@ err_workqueue:
2106err_dmaunmap: 2285err_dmaunmap:
2107 if (host->use_dma && host->dma_ops->exit) 2286 if (host->use_dma && host->dma_ops->exit)
2108 host->dma_ops->exit(host); 2287 host->dma_ops->exit(host);
2109 dma_free_coherent(&host->dev, PAGE_SIZE, 2288 dma_free_coherent(host->dev, PAGE_SIZE,
2110 host->sg_cpu, host->sg_dma); 2289 host->sg_cpu, host->sg_dma);
2111 2290
2112 if (host->vmmc) { 2291 if (host->vmmc) {
2113 regulator_disable(host->vmmc); 2292 regulator_disable(host->vmmc);
2114 regulator_put(host->vmmc); 2293 regulator_put(host->vmmc);
2115 } 2294 }
2295
2296err_clk_ciu:
2297 if (!IS_ERR(host->ciu_clk)) {
2298 clk_disable_unprepare(host->ciu_clk);
2299 clk_put(host->ciu_clk);
2300 }
2301err_clk_biu:
2302 if (!IS_ERR(host->biu_clk)) {
2303 clk_disable_unprepare(host->biu_clk);
2304 clk_put(host->biu_clk);
2305 }
2116 return ret; 2306 return ret;
2117} 2307}
2118EXPORT_SYMBOL(dw_mci_probe); 2308EXPORT_SYMBOL(dw_mci_probe);
@@ -2125,7 +2315,7 @@ void dw_mci_remove(struct dw_mci *host)
2125 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 2315 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2126 2316
2127 for (i = 0; i < host->num_slots; i++) { 2317 for (i = 0; i < host->num_slots; i++) {
2128 dev_dbg(&host->dev, "remove slot %d\n", i); 2318 dev_dbg(host->dev, "remove slot %d\n", i);
2129 if (host->slot[i]) 2319 if (host->slot[i])
2130 dw_mci_cleanup_slot(host->slot[i], i); 2320 dw_mci_cleanup_slot(host->slot[i], i);
2131 } 2321 }
@@ -2136,7 +2326,7 @@ void dw_mci_remove(struct dw_mci *host)
2136 2326
2137 free_irq(host->irq, host); 2327 free_irq(host->irq, host);
2138 destroy_workqueue(host->card_workqueue); 2328 destroy_workqueue(host->card_workqueue);
2139 dma_free_coherent(&host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); 2329 dma_free_coherent(host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
2140 2330
2141 if (host->use_dma && host->dma_ops->exit) 2331 if (host->use_dma && host->dma_ops->exit)
2142 host->dma_ops->exit(host); 2332 host->dma_ops->exit(host);
@@ -2146,6 +2336,12 @@ void dw_mci_remove(struct dw_mci *host)
2146 regulator_put(host->vmmc); 2336 regulator_put(host->vmmc);
2147 } 2337 }
2148 2338
2339 if (!IS_ERR(host->ciu_clk))
2340 clk_disable_unprepare(host->ciu_clk);
2341 if (!IS_ERR(host->biu_clk))
2342 clk_disable_unprepare(host->biu_clk);
2343 clk_put(host->ciu_clk);
2344 clk_put(host->biu_clk);
2149} 2345}
2150EXPORT_SYMBOL(dw_mci_remove); 2346EXPORT_SYMBOL(dw_mci_remove);
2151 2347
@@ -2188,7 +2384,7 @@ int dw_mci_resume(struct dw_mci *host)
2188 if (host->vmmc) 2384 if (host->vmmc)
2189 regulator_enable(host->vmmc); 2385 regulator_enable(host->vmmc);
2190 2386
2191 if (!mci_wait_reset(&host->dev, host)) { 2387 if (!mci_wait_reset(host->dev, host)) {
2192 ret = -ENODEV; 2388 ret = -ENODEV;
2193 return ret; 2389 return ret;
2194 } 2390 }
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 15c27e17c23f..53b8fd987e47 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -182,4 +182,28 @@ extern int dw_mci_suspend(struct dw_mci *host);
182extern int dw_mci_resume(struct dw_mci *host); 182extern int dw_mci_resume(struct dw_mci *host);
183#endif 183#endif
184 184
185/**
186 * dw_mci driver data - dw-mshc implementation specific driver data.
187 * @caps: mmc subsystem specified capabilities of the controller(s).
188 * @init: early implementation specific initialization.
189 * @setup_clock: implementation specific clock configuration.
190 * @prepare_command: handle CMD register extensions.
191 * @set_ios: handle bus specific extensions.
192 * @parse_dt: parse implementation specific device tree properties.
193 * @setup_bus: initialize io-interface
194 *
195 * Provide controller implementation specific extensions. The usage of this
196 * data structure is fully optional and usage of each member in this structure
197 * is optional as well.
198 */
199struct dw_mci_drv_data {
200 unsigned long *caps;
201 int (*init)(struct dw_mci *host);
202 int (*setup_clock)(struct dw_mci *host);
203 void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
204 void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
205 int (*parse_dt)(struct dw_mci *host);
206 int (*setup_bus)(struct dw_mci *host,
207 struct device_node *slot_np, u8 bus_width);
208};
185#endif /* _DW_MMC_H_ */ 209#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 273306c68d58..a600eabbd6c3 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1532,20 +1532,7 @@ static struct spi_driver mmc_spi_driver = {
1532 .remove = __devexit_p(mmc_spi_remove), 1532 .remove = __devexit_p(mmc_spi_remove),
1533}; 1533};
1534 1534
1535 1535module_spi_driver(mmc_spi_driver);
1536static int __init mmc_spi_init(void)
1537{
1538 return spi_register_driver(&mmc_spi_driver);
1539}
1540module_init(mmc_spi_init);
1541
1542
1543static void __exit mmc_spi_exit(void)
1544{
1545 spi_unregister_driver(&mmc_spi_driver);
1546}
1547module_exit(mmc_spi_exit);
1548
1549 1536
1550MODULE_AUTHOR("Mike Lavender, David Brownell, " 1537MODULE_AUTHOR("Mike Lavender, David Brownell, "
1551 "Hans-Peter Nilsson, Jan Nikitenko"); 1538 "Hans-Peter Nilsson, Jan Nikitenko");
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 50ff19a62368..edc3e9baf0e7 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1309,14 +1309,10 @@ static int __devinit mmci_probe(struct amba_device *dev,
1309 goto host_free; 1309 goto host_free;
1310 } 1310 }
1311 1311
1312 ret = clk_prepare(host->clk); 1312 ret = clk_prepare_enable(host->clk);
1313 if (ret) 1313 if (ret)
1314 goto clk_free; 1314 goto clk_free;
1315 1315
1316 ret = clk_enable(host->clk);
1317 if (ret)
1318 goto clk_unprep;
1319
1320 host->plat = plat; 1316 host->plat = plat;
1321 host->variant = variant; 1317 host->variant = variant;
1322 host->mclk = clk_get_rate(host->clk); 1318 host->mclk = clk_get_rate(host->clk);
@@ -1515,9 +1511,7 @@ static int __devinit mmci_probe(struct amba_device *dev,
1515 err_gpio_cd: 1511 err_gpio_cd:
1516 iounmap(host->base); 1512 iounmap(host->base);
1517 clk_disable: 1513 clk_disable:
1518 clk_disable(host->clk); 1514 clk_disable_unprepare(host->clk);
1519 clk_unprep:
1520 clk_unprepare(host->clk);
1521 clk_free: 1515 clk_free:
1522 clk_put(host->clk); 1516 clk_put(host->clk);
1523 host_free: 1517 host_free:
@@ -1564,8 +1558,7 @@ static int __devexit mmci_remove(struct amba_device *dev)
1564 gpio_free(host->gpio_cd); 1558 gpio_free(host->gpio_cd);
1565 1559
1566 iounmap(host->base); 1560 iounmap(host->base);
1567 clk_disable(host->clk); 1561 clk_disable_unprepare(host->clk);
1568 clk_unprepare(host->clk);
1569 clk_put(host->clk); 1562 clk_put(host->clk);
1570 1563
1571 if (host->vcc) 1564 if (host->vcc)
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 7b1161de01d6..565c2e4fac75 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -44,6 +44,7 @@
44#include <mach/hardware.h> 44#include <mach/hardware.h>
45 45
46#define DRIVER_NAME "mxc-mmc" 46#define DRIVER_NAME "mxc-mmc"
47#define MXCMCI_TIMEOUT_MS 10000
47 48
48#define MMC_REG_STR_STP_CLK 0x00 49#define MMC_REG_STR_STP_CLK 0x00
49#define MMC_REG_STATUS 0x04 50#define MMC_REG_STATUS 0x04
@@ -150,6 +151,8 @@ struct mxcmci_host {
150 int dmareq; 151 int dmareq;
151 struct dma_slave_config dma_slave_config; 152 struct dma_slave_config dma_slave_config;
152 struct imx_dma_data dma_data; 153 struct imx_dma_data dma_data;
154
155 struct timer_list watchdog;
153}; 156};
154 157
155static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios); 158static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
@@ -271,9 +274,32 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
271 dmaengine_submit(host->desc); 274 dmaengine_submit(host->desc);
272 dma_async_issue_pending(host->dma); 275 dma_async_issue_pending(host->dma);
273 276
277 mod_timer(&host->watchdog, jiffies + msecs_to_jiffies(MXCMCI_TIMEOUT_MS));
278
274 return 0; 279 return 0;
275} 280}
276 281
282static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat);
283static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat);
284
285static void mxcmci_dma_callback(void *data)
286{
287 struct mxcmci_host *host = data;
288 u32 stat;
289
290 del_timer(&host->watchdog);
291
292 stat = readl(host->base + MMC_REG_STATUS);
293 writel(stat & ~STATUS_DATA_TRANS_DONE, host->base + MMC_REG_STATUS);
294
295 dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
296
297 if (stat & STATUS_READ_OP_DONE)
298 writel(STATUS_READ_OP_DONE, host->base + MMC_REG_STATUS);
299
300 mxcmci_data_done(host, stat);
301}
302
277static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd, 303static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
278 unsigned int cmdat) 304 unsigned int cmdat)
279{ 305{
@@ -305,8 +331,14 @@ static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
305 331
306 int_cntr = INT_END_CMD_RES_EN; 332 int_cntr = INT_END_CMD_RES_EN;
307 333
308 if (mxcmci_use_dma(host)) 334 if (mxcmci_use_dma(host)) {
309 int_cntr |= INT_READ_OP_EN | INT_WRITE_OP_DONE_EN; 335 if (host->dma_dir == DMA_FROM_DEVICE) {
336 host->desc->callback = mxcmci_dma_callback;
337 host->desc->callback_param = host;
338 } else {
339 int_cntr |= INT_WRITE_OP_DONE_EN;
340 }
341 }
310 342
311 spin_lock_irqsave(&host->lock, flags); 343 spin_lock_irqsave(&host->lock, flags);
312 if (host->use_sdio) 344 if (host->use_sdio)
@@ -345,11 +377,9 @@ static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
345 struct mmc_data *data = host->data; 377 struct mmc_data *data = host->data;
346 int data_error; 378 int data_error;
347 379
348 if (mxcmci_use_dma(host)) { 380 if (mxcmci_use_dma(host))
349 dmaengine_terminate_all(host->dma);
350 dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len, 381 dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
351 host->dma_dir); 382 host->dma_dir);
352 }
353 383
354 if (stat & STATUS_ERR_MASK) { 384 if (stat & STATUS_ERR_MASK) {
355 dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", 385 dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
@@ -624,8 +654,10 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)
624 mxcmci_cmd_done(host, stat); 654 mxcmci_cmd_done(host, stat);
625 655
626 if (mxcmci_use_dma(host) && 656 if (mxcmci_use_dma(host) &&
627 (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE))) 657 (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE))) {
658 del_timer(&host->watchdog);
628 mxcmci_data_done(host, stat); 659 mxcmci_data_done(host, stat);
660 }
629 661
630 if (host->default_irq_mask && 662 if (host->default_irq_mask &&
631 (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL))) 663 (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
@@ -836,6 +868,34 @@ static bool filter(struct dma_chan *chan, void *param)
836 return true; 868 return true;
837} 869}
838 870
871static void mxcmci_watchdog(unsigned long data)
872{
873 struct mmc_host *mmc = (struct mmc_host *)data;
874 struct mxcmci_host *host = mmc_priv(mmc);
875 struct mmc_request *req = host->req;
876 unsigned int stat = readl(host->base + MMC_REG_STATUS);
877
878 if (host->dma_dir == DMA_FROM_DEVICE) {
879 dmaengine_terminate_all(host->dma);
880 dev_err(mmc_dev(host->mmc),
881 "%s: read time out (status = 0x%08x)\n",
882 __func__, stat);
883 } else {
884 dev_err(mmc_dev(host->mmc),
885 "%s: write time out (status = 0x%08x)\n",
886 __func__, stat);
887 mxcmci_softreset(host);
888 }
889
890 /* Mark transfer as erroneus and inform the upper layers */
891
892 host->data->error = -ETIMEDOUT;
893 host->req = NULL;
894 host->cmd = NULL;
895 host->data = NULL;
896 mmc_request_done(host->mmc, req);
897}
898
839static const struct mmc_host_ops mxcmci_ops = { 899static const struct mmc_host_ops mxcmci_ops = {
840 .request = mxcmci_request, 900 .request = mxcmci_request,
841 .set_ios = mxcmci_set_ios, 901 .set_ios = mxcmci_set_ios,
@@ -968,6 +1028,10 @@ static int mxcmci_probe(struct platform_device *pdev)
968 1028
969 mmc_add_host(mmc); 1029 mmc_add_host(mmc);
970 1030
1031 init_timer(&host->watchdog);
1032 host->watchdog.function = &mxcmci_watchdog;
1033 host->watchdog.data = (unsigned long)mmc;
1034
971 return 0; 1035 return 0;
972 1036
973out_free_irq: 1037out_free_irq:
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index bb4c2bf04d09..80d1e6d4b0ae 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -525,7 +525,7 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
525 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, 525 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
526 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); 526 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
527 writel(BM_SSP_CTRL1_SDIO_IRQ_EN, 527 writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
528 host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET); 528 ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_SET);
529 } else { 529 } else {
530 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, 530 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
531 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); 531 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index c6259a829544..48ad361613ef 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -27,16 +27,10 @@
27#include <linux/mmc/card.h> 27#include <linux/mmc/card.h>
28#include <linux/clk.h> 28#include <linux/clk.h>
29#include <linux/scatterlist.h> 29#include <linux/scatterlist.h>
30#include <linux/i2c/tps65010.h>
31#include <linux/slab.h> 30#include <linux/slab.h>
32 31
33#include <asm/io.h>
34#include <asm/irq.h>
35
36#include <plat/mmc.h> 32#include <plat/mmc.h>
37#include <asm/gpio.h>
38#include <plat/dma.h> 33#include <plat/dma.h>
39#include <plat/fpga.h>
40 34
41#define OMAP_MMC_REG_CMD 0x00 35#define OMAP_MMC_REG_CMD 0x00
42#define OMAP_MMC_REG_ARGL 0x01 36#define OMAP_MMC_REG_ARGL 0x01
@@ -105,7 +99,6 @@ struct mmc_omap_slot {
105 u16 saved_con; 99 u16 saved_con;
106 u16 bus_mode; 100 u16 bus_mode;
107 unsigned int fclk_freq; 101 unsigned int fclk_freq;
108 unsigned powered:1;
109 102
110 struct tasklet_struct cover_tasklet; 103 struct tasklet_struct cover_tasklet;
111 struct timer_list cover_timer; 104 struct timer_list cover_timer;
@@ -137,7 +130,6 @@ struct mmc_omap_host {
137 unsigned int phys_base; 130 unsigned int phys_base;
138 int irq; 131 int irq;
139 unsigned char bus_mode; 132 unsigned char bus_mode;
140 unsigned char hw_bus_mode;
141 unsigned int reg_shift; 133 unsigned int reg_shift;
142 134
143 struct work_struct cmd_abort_work; 135 struct work_struct cmd_abort_work;
@@ -695,22 +687,29 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
695 host->buffer += nwords; 687 host->buffer += nwords;
696} 688}
697 689
698static inline void mmc_omap_report_irq(u16 status) 690#ifdef CONFIG_MMC_DEBUG
691static void mmc_omap_report_irq(struct mmc_omap_host *host, u16 status)
699{ 692{
700 static const char *mmc_omap_status_bits[] = { 693 static const char *mmc_omap_status_bits[] = {
701 "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO", 694 "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO",
702 "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR" 695 "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR"
703 }; 696 };
704 int i, c = 0; 697 int i;
698 char res[64], *buf = res;
699
700 buf += sprintf(buf, "MMC IRQ 0x%x:", status);
705 701
706 for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++) 702 for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
707 if (status & (1 << i)) { 703 if (status & (1 << i))
708 if (c) 704 buf += sprintf(buf, " %s", mmc_omap_status_bits[i]);
709 printk(" "); 705 dev_vdbg(mmc_dev(host->mmc), "%s\n", res);
710 printk("%s", mmc_omap_status_bits[i]);
711 c++;
712 }
713} 706}
707#else
708static void mmc_omap_report_irq(struct mmc_omap_host *host, u16 status)
709{
710}
711#endif
712
714 713
715static irqreturn_t mmc_omap_irq(int irq, void *dev_id) 714static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
716{ 715{
@@ -744,12 +743,10 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
744 cmd = host->cmd->opcode; 743 cmd = host->cmd->opcode;
745 else 744 else
746 cmd = -1; 745 cmd = -1;
747#ifdef CONFIG_MMC_DEBUG
748 dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ", 746 dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",
749 status, cmd); 747 status, cmd);
750 mmc_omap_report_irq(status); 748 mmc_omap_report_irq(host, status);
751 printk("\n"); 749
752#endif
753 if (host->total_bytes_left) { 750 if (host->total_bytes_left) {
754 if ((status & OMAP_MMC_STAT_A_FULL) || 751 if ((status & OMAP_MMC_STAT_A_FULL) ||
755 (status & OMAP_MMC_STAT_END_OF_DATA)) 752 (status & OMAP_MMC_STAT_END_OF_DATA))
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 38adc330c007..54bfd0cc106b 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -35,7 +35,6 @@
35#include <linux/mmc/core.h> 35#include <linux/mmc/core.h>
36#include <linux/mmc/mmc.h> 36#include <linux/mmc/mmc.h>
37#include <linux/io.h> 37#include <linux/io.h>
38#include <linux/semaphore.h>
39#include <linux/gpio.h> 38#include <linux/gpio.h>
40#include <linux/regulator/consumer.h> 39#include <linux/regulator/consumer.h>
41#include <linux/pm_runtime.h> 40#include <linux/pm_runtime.h>
@@ -44,7 +43,6 @@
44#include <plat/cpu.h> 43#include <plat/cpu.h>
45 44
46/* OMAP HSMMC Host Controller Registers */ 45/* OMAP HSMMC Host Controller Registers */
47#define OMAP_HSMMC_SYSCONFIG 0x0010
48#define OMAP_HSMMC_SYSSTATUS 0x0014 46#define OMAP_HSMMC_SYSSTATUS 0x0014
49#define OMAP_HSMMC_CON 0x002C 47#define OMAP_HSMMC_CON 0x002C
50#define OMAP_HSMMC_BLK 0x0104 48#define OMAP_HSMMC_BLK 0x0104
@@ -161,8 +159,6 @@ struct omap_hsmmc_host {
161 unsigned int dma_sg_idx; 159 unsigned int dma_sg_idx;
162 unsigned char bus_mode; 160 unsigned char bus_mode;
163 unsigned char power_mode; 161 unsigned char power_mode;
164 u32 *buffer;
165 u32 bytesleft;
166 int suspended; 162 int suspended;
167 int irq; 163 int irq;
168 int use_dma, dma_ch; 164 int use_dma, dma_ch;
@@ -171,7 +167,6 @@ struct omap_hsmmc_host {
171 int slot_id; 167 int slot_id;
172 int response_busy; 168 int response_busy;
173 int context_loss; 169 int context_loss;
174 int vdd;
175 int protect_card; 170 int protect_card;
176 int reqs_blocked; 171 int reqs_blocked;
177 int use_reg; 172 int use_reg;
@@ -300,12 +295,12 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
300 struct regulator *reg; 295 struct regulator *reg;
301 int ocr_value = 0; 296 int ocr_value = 0;
302 297
303 mmc_slot(host).set_power = omap_hsmmc_set_power;
304
305 reg = regulator_get(host->dev, "vmmc"); 298 reg = regulator_get(host->dev, "vmmc");
306 if (IS_ERR(reg)) { 299 if (IS_ERR(reg)) {
307 dev_dbg(host->dev, "vmmc regulator missing\n"); 300 dev_dbg(host->dev, "vmmc regulator missing\n");
301 return PTR_ERR(reg);
308 } else { 302 } else {
303 mmc_slot(host).set_power = omap_hsmmc_set_power;
309 host->vcc = reg; 304 host->vcc = reg;
310 ocr_value = mmc_regulator_get_ocrmask(reg); 305 ocr_value = mmc_regulator_get_ocrmask(reg);
311 if (!mmc_slot(host).ocr_mask) { 306 if (!mmc_slot(host).ocr_mask) {
@@ -495,7 +490,7 @@ static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
495 unsigned long regval; 490 unsigned long regval;
496 unsigned long timeout; 491 unsigned long timeout;
497 492
498 dev_dbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock); 493 dev_vdbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock);
499 494
500 omap_hsmmc_stop_clock(host); 495 omap_hsmmc_stop_clock(host);
501 496
@@ -579,21 +574,8 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
579 if (host->context_loss == context_loss) 574 if (host->context_loss == context_loss)
580 return 1; 575 return 1;
581 576
582 /* Wait for hardware reset */ 577 if (!OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE)
583 timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); 578 return 1;
584 while ((OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) != RESETDONE
585 && time_before(jiffies, timeout))
586 ;
587
588 /* Do software reset */
589 OMAP_HSMMC_WRITE(host->base, SYSCONFIG, SOFTRESET);
590 timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
591 while ((OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) != RESETDONE
592 && time_before(jiffies, timeout))
593 ;
594
595 OMAP_HSMMC_WRITE(host->base, SYSCONFIG,
596 OMAP_HSMMC_READ(host->base, SYSCONFIG) | AUTOIDLE);
597 579
598 if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { 580 if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
599 if (host->power_mode != MMC_POWER_OFF && 581 if (host->power_mode != MMC_POWER_OFF &&
@@ -745,7 +727,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
745{ 727{
746 int cmdreg = 0, resptype = 0, cmdtype = 0; 728 int cmdreg = 0, resptype = 0, cmdtype = 0;
747 729
748 dev_dbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n", 730 dev_vdbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n",
749 mmc_hostname(host->mmc), cmd->opcode, cmd->arg); 731 mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
750 host->cmd = cmd; 732 host->cmd = cmd;
751 733
@@ -934,7 +916,7 @@ static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status)
934 buf += len; 916 buf += len;
935 } 917 }
936 918
937 dev_dbg(mmc_dev(host->mmc), "%s\n", res); 919 dev_vdbg(mmc_dev(host->mmc), "%s\n", res);
938} 920}
939#else 921#else
940static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, 922static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host,
@@ -981,72 +963,40 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
981 __func__); 963 __func__);
982} 964}
983 965
966static void hsmmc_command_incomplete(struct omap_hsmmc_host *host, int err)
967{
968 omap_hsmmc_reset_controller_fsm(host, SRC);
969 host->cmd->error = err;
970
971 if (host->data) {
972 omap_hsmmc_reset_controller_fsm(host, SRD);
973 omap_hsmmc_dma_cleanup(host, err);
974 }
975
976}
977
984static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status) 978static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
985{ 979{
986 struct mmc_data *data; 980 struct mmc_data *data;
987 int end_cmd = 0, end_trans = 0; 981 int end_cmd = 0, end_trans = 0;
988 982
989 if (!host->req_in_progress) {
990 do {
991 OMAP_HSMMC_WRITE(host->base, STAT, status);
992 /* Flush posted write */
993 status = OMAP_HSMMC_READ(host->base, STAT);
994 } while (status & INT_EN_MASK);
995 return;
996 }
997
998 data = host->data; 983 data = host->data;
999 dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status); 984 dev_vdbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
1000 985
1001 if (status & ERR) { 986 if (status & ERR) {
1002 omap_hsmmc_dbg_report_irq(host, status); 987 omap_hsmmc_dbg_report_irq(host, status);
1003 if ((status & CMD_TIMEOUT) || 988 if (status & (CMD_TIMEOUT | DATA_TIMEOUT))
1004 (status & CMD_CRC)) { 989 hsmmc_command_incomplete(host, -ETIMEDOUT);
1005 if (host->cmd) { 990 else if (status & (CMD_CRC | DATA_CRC))
1006 if (status & CMD_TIMEOUT) { 991 hsmmc_command_incomplete(host, -EILSEQ);
1007 omap_hsmmc_reset_controller_fsm(host, 992
1008 SRC); 993 end_cmd = 1;
1009 host->cmd->error = -ETIMEDOUT; 994 if (host->data || host->response_busy) {
1010 } else { 995 end_trans = 1;
1011 host->cmd->error = -EILSEQ; 996 host->response_busy = 0;
1012 }
1013 end_cmd = 1;
1014 }
1015 if (host->data || host->response_busy) {
1016 if (host->data)
1017 omap_hsmmc_dma_cleanup(host,
1018 -ETIMEDOUT);
1019 host->response_busy = 0;
1020 omap_hsmmc_reset_controller_fsm(host, SRD);
1021 }
1022 }
1023 if ((status & DATA_TIMEOUT) ||
1024 (status & DATA_CRC)) {
1025 if (host->data || host->response_busy) {
1026 int err = (status & DATA_TIMEOUT) ?
1027 -ETIMEDOUT : -EILSEQ;
1028
1029 if (host->data)
1030 omap_hsmmc_dma_cleanup(host, err);
1031 else
1032 host->mrq->cmd->error = err;
1033 host->response_busy = 0;
1034 omap_hsmmc_reset_controller_fsm(host, SRD);
1035 end_trans = 1;
1036 }
1037 }
1038 if (status & CARD_ERR) {
1039 dev_dbg(mmc_dev(host->mmc),
1040 "Ignoring card err CMD%d\n", host->cmd->opcode);
1041 if (host->cmd)
1042 end_cmd = 1;
1043 if (host->data)
1044 end_trans = 1;
1045 } 997 }
1046 } 998 }
1047 999
1048 OMAP_HSMMC_WRITE(host->base, STAT, status);
1049
1050 if (end_cmd || ((status & CC) && host->cmd)) 1000 if (end_cmd || ((status & CC) && host->cmd))
1051 omap_hsmmc_cmd_done(host, host->cmd); 1001 omap_hsmmc_cmd_done(host, host->cmd);
1052 if ((end_trans || (status & TC)) && host->mrq) 1002 if ((end_trans || (status & TC)) && host->mrq)
@@ -1062,11 +1012,13 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
1062 int status; 1012 int status;
1063 1013
1064 status = OMAP_HSMMC_READ(host->base, STAT); 1014 status = OMAP_HSMMC_READ(host->base, STAT);
1065 do { 1015 while (status & INT_EN_MASK && host->req_in_progress) {
1066 omap_hsmmc_do_irq(host, status); 1016 omap_hsmmc_do_irq(host, status);
1017
1067 /* Flush posted write */ 1018 /* Flush posted write */
1019 OMAP_HSMMC_WRITE(host->base, STAT, status);
1068 status = OMAP_HSMMC_READ(host->base, STAT); 1020 status = OMAP_HSMMC_READ(host->base, STAT);
1069 } while (status & INT_EN_MASK); 1021 }
1070 1022
1071 return IRQ_HANDLED; 1023 return IRQ_HANDLED;
1072} 1024}
@@ -1501,12 +1453,10 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1501 case MMC_POWER_OFF: 1453 case MMC_POWER_OFF:
1502 mmc_slot(host).set_power(host->dev, host->slot_id, 1454 mmc_slot(host).set_power(host->dev, host->slot_id,
1503 0, 0); 1455 0, 0);
1504 host->vdd = 0;
1505 break; 1456 break;
1506 case MMC_POWER_UP: 1457 case MMC_POWER_UP:
1507 mmc_slot(host).set_power(host->dev, host->slot_id, 1458 mmc_slot(host).set_power(host->dev, host->slot_id,
1508 1, ios->vdd); 1459 1, ios->vdd);
1509 host->vdd = ios->vdd;
1510 break; 1460 break;
1511 case MMC_POWER_ON: 1461 case MMC_POWER_ON:
1512 do_send_init_stream = 1; 1462 do_send_init_stream = 1;
@@ -1598,10 +1548,6 @@ static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
1598 value = OMAP_HSMMC_READ(host->base, CAPA); 1548 value = OMAP_HSMMC_READ(host->base, CAPA);
1599 OMAP_HSMMC_WRITE(host->base, CAPA, value | capa); 1549 OMAP_HSMMC_WRITE(host->base, CAPA, value | capa);
1600 1550
1601 /* Set the controller to AUTO IDLE mode */
1602 value = OMAP_HSMMC_READ(host->base, SYSCONFIG);
1603 OMAP_HSMMC_WRITE(host->base, SYSCONFIG, value | AUTOIDLE);
1604
1605 /* Set SD bus power bit */ 1551 /* Set SD bus power bit */
1606 set_sd_bus_power(host); 1552 set_sd_bus_power(host);
1607} 1553}
@@ -1659,8 +1605,6 @@ static int omap_hsmmc_regs_show(struct seq_file *s, void *data)
1659 1605
1660 pm_runtime_get_sync(host->dev); 1606 pm_runtime_get_sync(host->dev);
1661 1607
1662 seq_printf(s, "SYSCONFIG:\t0x%08x\n",
1663 OMAP_HSMMC_READ(host->base, SYSCONFIG));
1664 seq_printf(s, "CON:\t\t0x%08x\n", 1608 seq_printf(s, "CON:\t\t0x%08x\n",
1665 OMAP_HSMMC_READ(host->base, CON)); 1609 OMAP_HSMMC_READ(host->base, CON));
1666 seq_printf(s, "HCTL:\t\t0x%08x\n", 1610 seq_printf(s, "HCTL:\t\t0x%08x\n",
@@ -2105,8 +2049,7 @@ static int omap_hsmmc_suspend(struct device *dev)
2105 if (ret) { 2049 if (ret) {
2106 host->suspended = 0; 2050 host->suspended = 0;
2107 if (host->pdata->resume) { 2051 if (host->pdata->resume) {
2108 ret = host->pdata->resume(dev, host->slot_id); 2052 if (host->pdata->resume(dev, host->slot_id))
2109 if (ret)
2110 dev_dbg(dev, "Unmask interrupt failed\n"); 2053 dev_dbg(dev, "Unmask interrupt failed\n");
2111 } 2054 }
2112 goto err; 2055 goto err;
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index ca3915dac03d..3f9d6d577a91 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -30,6 +30,9 @@
30#include <linux/regulator/consumer.h> 30#include <linux/regulator/consumer.h>
31#include <linux/gpio.h> 31#include <linux/gpio.h>
32#include <linux/gfp.h> 32#include <linux/gfp.h>
33#include <linux/of.h>
34#include <linux/of_gpio.h>
35#include <linux/of_device.h>
33 36
34#include <asm/sizes.h> 37#include <asm/sizes.h>
35 38
@@ -573,6 +576,50 @@ static irqreturn_t pxamci_detect_irq(int irq, void *devid)
573 return IRQ_HANDLED; 576 return IRQ_HANDLED;
574} 577}
575 578
579#ifdef CONFIG_OF
580static const struct of_device_id pxa_mmc_dt_ids[] = {
581 { .compatible = "marvell,pxa-mmc" },
582 { }
583};
584
585MODULE_DEVICE_TABLE(of, pxa_mmc_dt_ids);
586
587static int __devinit pxamci_of_init(struct platform_device *pdev)
588{
589 struct device_node *np = pdev->dev.of_node;
590 struct pxamci_platform_data *pdata;
591 u32 tmp;
592
593 if (!np)
594 return 0;
595
596 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
597 if (!pdata)
598 return -ENOMEM;
599
600 pdata->gpio_card_detect =
601 of_get_named_gpio(np, "cd-gpios", 0);
602 pdata->gpio_card_ro =
603 of_get_named_gpio(np, "wp-gpios", 0);
604
605 /* pxa-mmc specific */
606 pdata->gpio_power =
607 of_get_named_gpio(np, "pxa-mmc,gpio-power", 0);
608
609 if (of_property_read_u32(np, "pxa-mmc,detect-delay-ms", &tmp) == 0)
610 pdata->detect_delay_ms = tmp;
611
612 pdev->dev.platform_data = pdata;
613
614 return 0;
615}
616#else
617static int __devinit pxamci_of_init(struct platform_device *pdev)
618{
619 return 0;
620}
621#endif
622
576static int pxamci_probe(struct platform_device *pdev) 623static int pxamci_probe(struct platform_device *pdev)
577{ 624{
578 struct mmc_host *mmc; 625 struct mmc_host *mmc;
@@ -580,6 +627,10 @@ static int pxamci_probe(struct platform_device *pdev)
580 struct resource *r, *dmarx, *dmatx; 627 struct resource *r, *dmarx, *dmatx;
581 int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1; 628 int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
582 629
630 ret = pxamci_of_init(pdev);
631 if (ret)
632 return ret;
633
583 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 634 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
584 irq = platform_get_irq(pdev, 0); 635 irq = platform_get_irq(pdev, 0);
585 if (!r || irq < 0) 636 if (!r || irq < 0)
@@ -866,6 +917,7 @@ static struct platform_driver pxamci_driver = {
866 .driver = { 917 .driver = {
867 .name = DRIVER_NAME, 918 .name = DRIVER_NAME,
868 .owner = THIS_MODULE, 919 .owner = THIS_MODULE,
920 .of_match_table = of_match_ptr(pxa_mmc_dt_ids),
869#ifdef CONFIG_PM 921#ifdef CONFIG_PM
870 .pm = &pxamci_pm_ops, 922 .pm = &pxamci_pm_ops,
871#endif 923#endif
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index a6e53a1ebb08..90140eb03e36 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -24,6 +24,7 @@
24#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/mmc/host.h> 26#include <linux/mmc/host.h>
27#include <linux/of.h>
27 28
28#include "sdhci-pltfm.h" 29#include "sdhci-pltfm.h"
29 30
@@ -126,11 +127,18 @@ static int __devexit sdhci_dove_remove(struct platform_device *pdev)
126 return sdhci_pltfm_unregister(pdev); 127 return sdhci_pltfm_unregister(pdev);
127} 128}
128 129
130static const struct of_device_id sdhci_dove_of_match_table[] __devinitdata = {
131 { .compatible = "marvell,dove-sdhci", },
132 {}
133};
134MODULE_DEVICE_TABLE(of, sdhci_dove_of_match_table);
135
129static struct platform_driver sdhci_dove_driver = { 136static struct platform_driver sdhci_dove_driver = {
130 .driver = { 137 .driver = {
131 .name = "sdhci-dove", 138 .name = "sdhci-dove",
132 .owner = THIS_MODULE, 139 .owner = THIS_MODULE,
133 .pm = SDHCI_PLTFM_PMOPS, 140 .pm = SDHCI_PLTFM_PMOPS,
141 .of_match_table = of_match_ptr(sdhci_dove_of_match_table),
134 }, 142 },
135 .probe = sdhci_dove_probe, 143 .probe = sdhci_dove_probe,
136 .remove = __devexit_p(sdhci_dove_remove), 144 .remove = __devexit_p(sdhci_dove_remove),
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index f8eb1fb0c921..ae5fcbfa1eef 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -21,6 +21,32 @@
21#include "sdhci-pltfm.h" 21#include "sdhci-pltfm.h"
22#include "sdhci-esdhc.h" 22#include "sdhci-esdhc.h"
23 23
24#define VENDOR_V_22 0x12
25static u32 esdhc_readl(struct sdhci_host *host, int reg)
26{
27 u32 ret;
28
29 ret = in_be32(host->ioaddr + reg);
30 /*
31 * The bit of ADMA flag in eSDHC is not compatible with standard
32 * SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is
33 * supported by eSDHC.
34 * And for many FSL eSDHC controller, the reset value of field
35 * SDHCI_CAN_DO_ADMA1 is one, but some of them can't support ADMA,
36 * only these vendor version is greater than 2.2/0x12 support ADMA.
37 * For FSL eSDHC, must aligned 4-byte, so use 0xFC to read the
38 * the verdor version number, oxFE is SDHCI_HOST_VERSION.
39 */
40 if ((reg == SDHCI_CAPABILITIES) && (ret & SDHCI_CAN_DO_ADMA1)) {
41 u32 tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
42 tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
43 if (tmp > VENDOR_V_22)
44 ret |= SDHCI_CAN_DO_ADMA2;
45 }
46
47 return ret;
48}
49
24static u16 esdhc_readw(struct sdhci_host *host, int reg) 50static u16 esdhc_readw(struct sdhci_host *host, int reg)
25{ 51{
26 u16 ret; 52 u16 ret;
@@ -144,7 +170,7 @@ static void esdhc_of_resume(struct sdhci_host *host)
144#endif 170#endif
145 171
146static struct sdhci_ops sdhci_esdhc_ops = { 172static struct sdhci_ops sdhci_esdhc_ops = {
147 .read_l = sdhci_be32bs_readl, 173 .read_l = esdhc_readl,
148 .read_w = esdhc_readw, 174 .read_w = esdhc_readw,
149 .read_b = esdhc_readb, 175 .read_b = esdhc_readb,
150 .write_l = sdhci_be32bs_writel, 176 .write_l = sdhci_be32bs_writel,
@@ -161,9 +187,13 @@ static struct sdhci_ops sdhci_esdhc_ops = {
161}; 187};
162 188
163static struct sdhci_pltfm_data sdhci_esdhc_pdata = { 189static struct sdhci_pltfm_data sdhci_esdhc_pdata = {
164 /* card detection could be handled via GPIO */ 190 /*
191 * card detection could be handled via GPIO
192 * eSDHC cannot support End Attribute in NOP ADMA descriptor
193 */
165 .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION 194 .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
166 | SDHCI_QUIRK_NO_CARD_NO_RESET, 195 | SDHCI_QUIRK_NO_CARD_NO_RESET
196 | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
167 .ops = &sdhci_esdhc_ops, 197 .ops = &sdhci_esdhc_ops,
168}; 198};
169 199
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 9722d43d6140..4bb74b042a06 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -1476,24 +1476,7 @@ static struct pci_driver sdhci_driver = {
1476 }, 1476 },
1477}; 1477};
1478 1478
1479/*****************************************************************************\ 1479module_pci_driver(sdhci_driver);
1480 * *
1481 * Driver init/exit *
1482 * *
1483\*****************************************************************************/
1484
1485static int __init sdhci_drv_init(void)
1486{
1487 return pci_register_driver(&sdhci_driver);
1488}
1489
1490static void __exit sdhci_drv_exit(void)
1491{
1492 pci_unregister_driver(&sdhci_driver);
1493}
1494
1495module_init(sdhci_drv_init);
1496module_exit(sdhci_drv_exit);
1497 1480
1498MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); 1481MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
1499MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver"); 1482MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver");
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index d9a4ef4f1ed0..65551a9709cc 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -75,6 +75,9 @@ void sdhci_get_of_property(struct platform_device *pdev)
75 if (sdhci_of_wp_inverted(np)) 75 if (sdhci_of_wp_inverted(np))
76 host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT; 76 host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
77 77
78 if (of_get_property(np, "broken-cd", NULL))
79 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
80
78 if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc")) 81 if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc"))
79 host->quirks |= SDHCI_QUIRK_BROKEN_DMA; 82 host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
80 83
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index b6ee8857e226..8e63a9c04e31 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -197,7 +197,7 @@ static int __devinit sdhci_pxav2_probe(struct platform_device *pdev)
197 goto err_clk_get; 197 goto err_clk_get;
198 } 198 }
199 pltfm_host->clk = clk; 199 pltfm_host->clk = clk;
200 clk_enable(clk); 200 clk_prepare_enable(clk);
201 201
202 host->quirks = SDHCI_QUIRK_BROKEN_ADMA 202 host->quirks = SDHCI_QUIRK_BROKEN_ADMA
203 | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL 203 | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
@@ -239,7 +239,7 @@ static int __devinit sdhci_pxav2_probe(struct platform_device *pdev)
239 return 0; 239 return 0;
240 240
241err_add_host: 241err_add_host:
242 clk_disable(clk); 242 clk_disable_unprepare(clk);
243 clk_put(clk); 243 clk_put(clk);
244err_clk_get: 244err_clk_get:
245 sdhci_pltfm_free(pdev); 245 sdhci_pltfm_free(pdev);
@@ -255,7 +255,7 @@ static int __devexit sdhci_pxav2_remove(struct platform_device *pdev)
255 255
256 sdhci_remove_host(host, 1); 256 sdhci_remove_host(host, 1);
257 257
258 clk_disable(pltfm_host->clk); 258 clk_disable_unprepare(pltfm_host->clk);
259 clk_put(pltfm_host->clk); 259 clk_put(pltfm_host->clk);
260 sdhci_pltfm_free(pdev); 260 sdhci_pltfm_free(pdev);
261 kfree(pxa); 261 kfree(pxa);
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 07fe3834fe0b..e918a2bb3af1 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -24,12 +24,14 @@
24#include <linux/gpio.h> 24#include <linux/gpio.h>
25#include <linux/mmc/card.h> 25#include <linux/mmc/card.h>
26#include <linux/mmc/host.h> 26#include <linux/mmc/host.h>
27#include <linux/mmc/slot-gpio.h>
27#include <linux/platform_data/pxa_sdhci.h> 28#include <linux/platform_data/pxa_sdhci.h>
28#include <linux/slab.h> 29#include <linux/slab.h>
29#include <linux/delay.h> 30#include <linux/delay.h>
30#include <linux/module.h> 31#include <linux/module.h>
31#include <linux/of.h> 32#include <linux/of.h>
32#include <linux/of_device.h> 33#include <linux/of_device.h>
34#include <linux/of_gpio.h>
33 35
34#include "sdhci.h" 36#include "sdhci.h"
35#include "sdhci-pltfm.h" 37#include "sdhci-pltfm.h"
@@ -182,6 +184,7 @@ static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
182 struct device_node *np = dev->of_node; 184 struct device_node *np = dev->of_node;
183 u32 bus_width; 185 u32 bus_width;
184 u32 clk_delay_cycles; 186 u32 clk_delay_cycles;
187 enum of_gpio_flags gpio_flags;
185 188
186 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); 189 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
187 if (!pdata) 190 if (!pdata)
@@ -198,6 +201,10 @@ static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
198 if (clk_delay_cycles > 0) 201 if (clk_delay_cycles > 0)
199 pdata->clk_delay_cycles = clk_delay_cycles; 202 pdata->clk_delay_cycles = clk_delay_cycles;
200 203
204 pdata->ext_cd_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &gpio_flags);
205 if (gpio_flags != OF_GPIO_ACTIVE_LOW)
206 pdata->host_caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
207
201 return pdata; 208 return pdata;
202} 209}
203#else 210#else
@@ -231,14 +238,14 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
231 pltfm_host = sdhci_priv(host); 238 pltfm_host = sdhci_priv(host);
232 pltfm_host->priv = pxa; 239 pltfm_host->priv = pxa;
233 240
234 clk = clk_get(dev, "PXA-SDHCLK"); 241 clk = clk_get(dev, NULL);
235 if (IS_ERR(clk)) { 242 if (IS_ERR(clk)) {
236 dev_err(dev, "failed to get io clock\n"); 243 dev_err(dev, "failed to get io clock\n");
237 ret = PTR_ERR(clk); 244 ret = PTR_ERR(clk);
238 goto err_clk_get; 245 goto err_clk_get;
239 } 246 }
240 pltfm_host->clk = clk; 247 pltfm_host->clk = clk;
241 clk_enable(clk); 248 clk_prepare_enable(clk);
242 249
243 host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL 250 host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
244 | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC 251 | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
@@ -266,12 +273,25 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
266 host->quirks |= pdata->quirks; 273 host->quirks |= pdata->quirks;
267 if (pdata->host_caps) 274 if (pdata->host_caps)
268 host->mmc->caps |= pdata->host_caps; 275 host->mmc->caps |= pdata->host_caps;
276 if (pdata->host_caps2)
277 host->mmc->caps2 |= pdata->host_caps2;
269 if (pdata->pm_caps) 278 if (pdata->pm_caps)
270 host->mmc->pm_caps |= pdata->pm_caps; 279 host->mmc->pm_caps |= pdata->pm_caps;
280
281 if (gpio_is_valid(pdata->ext_cd_gpio)) {
282 ret = mmc_gpio_request_cd(host->mmc, pdata->ext_cd_gpio);
283 if (ret) {
284 dev_err(mmc_dev(host->mmc),
285 "failed to allocate card detect gpio\n");
286 goto err_cd_req;
287 }
288 }
271 } 289 }
272 290
273 host->ops = &pxav3_sdhci_ops; 291 host->ops = &pxav3_sdhci_ops;
274 292
293 sdhci_get_of_property(pdev);
294
275 ret = sdhci_add_host(host); 295 ret = sdhci_add_host(host);
276 if (ret) { 296 if (ret) {
277 dev_err(&pdev->dev, "failed to add host\n"); 297 dev_err(&pdev->dev, "failed to add host\n");
@@ -283,8 +303,10 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
283 return 0; 303 return 0;
284 304
285err_add_host: 305err_add_host:
286 clk_disable(clk); 306 clk_disable_unprepare(clk);
287 clk_put(clk); 307 clk_put(clk);
308 mmc_gpio_free_cd(host->mmc);
309err_cd_req:
288err_clk_get: 310err_clk_get:
289 sdhci_pltfm_free(pdev); 311 sdhci_pltfm_free(pdev);
290 kfree(pxa); 312 kfree(pxa);
@@ -296,11 +318,16 @@ static int __devexit sdhci_pxav3_remove(struct platform_device *pdev)
296 struct sdhci_host *host = platform_get_drvdata(pdev); 318 struct sdhci_host *host = platform_get_drvdata(pdev);
297 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 319 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
298 struct sdhci_pxa *pxa = pltfm_host->priv; 320 struct sdhci_pxa *pxa = pltfm_host->priv;
321 struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
299 322
300 sdhci_remove_host(host, 1); 323 sdhci_remove_host(host, 1);
301 324
302 clk_disable(pltfm_host->clk); 325 clk_disable_unprepare(pltfm_host->clk);
303 clk_put(pltfm_host->clk); 326 clk_put(pltfm_host->clk);
327
328 if (gpio_is_valid(pdata->ext_cd_gpio))
329 mmc_gpio_free_cd(host->mmc);
330
304 sdhci_pltfm_free(pdev); 331 sdhci_pltfm_free(pdev);
305 kfree(pxa); 332 kfree(pxa);
306 333
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index a50c205ea208..2903949594c6 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -34,6 +34,9 @@
34 34
35#define MAX_BUS_CLK (4) 35#define MAX_BUS_CLK (4)
36 36
37/* Number of gpio's used is max data bus width + command and clock lines */
38#define NUM_GPIOS(x) (x + 2)
39
37/** 40/**
38 * struct sdhci_s3c - S3C SDHCI instance 41 * struct sdhci_s3c - S3C SDHCI instance
39 * @host: The SDHCI host created 42 * @host: The SDHCI host created
@@ -41,6 +44,7 @@
41 * @ioarea: The resource created when we claimed the IO area. 44 * @ioarea: The resource created when we claimed the IO area.
42 * @pdata: The platform data for this controller. 45 * @pdata: The platform data for this controller.
43 * @cur_clk: The index of the current bus clock. 46 * @cur_clk: The index of the current bus clock.
47 * @gpios: List of gpio numbers parsed from device tree.
44 * @clk_io: The clock for the internal bus interface. 48 * @clk_io: The clock for the internal bus interface.
45 * @clk_bus: The clocks that are available for the SD/MMC bus clock. 49 * @clk_bus: The clocks that are available for the SD/MMC bus clock.
46 */ 50 */
@@ -52,6 +56,7 @@ struct sdhci_s3c {
52 unsigned int cur_clk; 56 unsigned int cur_clk;
53 int ext_cd_irq; 57 int ext_cd_irq;
54 int ext_cd_gpio; 58 int ext_cd_gpio;
59 int *gpios;
55 60
56 struct clk *clk_io; 61 struct clk *clk_io;
57 struct clk *clk_bus[MAX_BUS_CLK]; 62 struct clk *clk_bus[MAX_BUS_CLK];
@@ -166,7 +171,7 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
166 dev_dbg(&ourhost->pdev->dev, "clk %d: rate %ld, want %d, got %ld\n", 171 dev_dbg(&ourhost->pdev->dev, "clk %d: rate %ld, want %d, got %ld\n",
167 src, rate, wanted, rate / div); 172 src, rate, wanted, rate / div);
168 173
169 return (wanted - (rate / div)); 174 return wanted - (rate / div);
170} 175}
171 176
172/** 177/**
@@ -203,10 +208,12 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
203 best_src, clock, best); 208 best_src, clock, best);
204 209
205 /* select the new clock source */ 210 /* select the new clock source */
206
207 if (ourhost->cur_clk != best_src) { 211 if (ourhost->cur_clk != best_src) {
208 struct clk *clk = ourhost->clk_bus[best_src]; 212 struct clk *clk = ourhost->clk_bus[best_src];
209 213
214 clk_enable(clk);
215 clk_disable(ourhost->clk_bus[ourhost->cur_clk]);
216
210 /* turn clock off to card before changing clock source */ 217 /* turn clock off to card before changing clock source */
211 writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL); 218 writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
212 219
@@ -288,6 +295,7 @@ static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host)
288static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock) 295static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
289{ 296{
290 struct sdhci_s3c *ourhost = to_s3c(host); 297 struct sdhci_s3c *ourhost = to_s3c(host);
298 struct device *dev = &ourhost->pdev->dev;
291 unsigned long timeout; 299 unsigned long timeout;
292 u16 clk = 0; 300 u16 clk = 0;
293 301
@@ -309,8 +317,8 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
309 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) 317 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
310 & SDHCI_CLOCK_INT_STABLE)) { 318 & SDHCI_CLOCK_INT_STABLE)) {
311 if (timeout == 0) { 319 if (timeout == 0) {
312 printk(KERN_ERR "%s: Internal clock never " 320 dev_err(dev, "%s: Internal clock never stabilised.\n",
313 "stabilised.\n", mmc_hostname(host->mmc)); 321 mmc_hostname(host->mmc));
314 return; 322 return;
315 } 323 }
316 timeout--; 324 timeout--;
@@ -404,7 +412,9 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc)
404 if (sc->ext_cd_irq && 412 if (sc->ext_cd_irq &&
405 request_threaded_irq(sc->ext_cd_irq, NULL, 413 request_threaded_irq(sc->ext_cd_irq, NULL,
406 sdhci_s3c_gpio_card_detect_thread, 414 sdhci_s3c_gpio_card_detect_thread,
407 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 415 IRQF_TRIGGER_RISING |
416 IRQF_TRIGGER_FALLING |
417 IRQF_ONESHOT,
408 dev_name(dev), sc) == 0) { 418 dev_name(dev), sc) == 0) {
409 int status = gpio_get_value(sc->ext_cd_gpio); 419 int status = gpio_get_value(sc->ext_cd_gpio);
410 if (pdata->ext_cd_gpio_invert) 420 if (pdata->ext_cd_gpio_invert)
@@ -419,9 +429,121 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc)
419 } 429 }
420} 430}
421 431
432#ifdef CONFIG_OF
433static int __devinit sdhci_s3c_parse_dt(struct device *dev,
434 struct sdhci_host *host, struct s3c_sdhci_platdata *pdata)
435{
436 struct device_node *node = dev->of_node;
437 struct sdhci_s3c *ourhost = to_s3c(host);
438 u32 max_width;
439 int gpio, cnt, ret;
440
441 /* if the bus-width property is not specified, assume width as 1 */
442 if (of_property_read_u32(node, "bus-width", &max_width))
443 max_width = 1;
444 pdata->max_width = max_width;
445
446 ourhost->gpios = devm_kzalloc(dev, NUM_GPIOS(pdata->max_width) *
447 sizeof(int), GFP_KERNEL);
448 if (!ourhost->gpios)
449 return -ENOMEM;
450
451 /* get the card detection method */
452 if (of_get_property(node, "broken-cd", 0)) {
453 pdata->cd_type = S3C_SDHCI_CD_NONE;
454 goto setup_bus;
455 }
456
457 if (of_get_property(node, "non-removable", 0)) {
458 pdata->cd_type = S3C_SDHCI_CD_PERMANENT;
459 goto setup_bus;
460 }
461
462 gpio = of_get_named_gpio(node, "cd-gpios", 0);
463 if (gpio_is_valid(gpio)) {
464 pdata->cd_type = S3C_SDHCI_CD_GPIO;
465 goto found_cd;
466 } else if (gpio != -ENOENT) {
467 dev_err(dev, "invalid card detect gpio specified\n");
468 return -EINVAL;
469 }
470
471 gpio = of_get_named_gpio(node, "samsung,cd-pinmux-gpio", 0);
472 if (gpio_is_valid(gpio)) {
473 pdata->cd_type = S3C_SDHCI_CD_INTERNAL;
474 goto found_cd;
475 } else if (gpio != -ENOENT) {
476 dev_err(dev, "invalid card detect gpio specified\n");
477 return -EINVAL;
478 }
479
480 dev_info(dev, "assuming no card detect line available\n");
481 pdata->cd_type = S3C_SDHCI_CD_NONE;
482
483 found_cd:
484 if (pdata->cd_type == S3C_SDHCI_CD_GPIO) {
485 pdata->ext_cd_gpio = gpio;
486 ourhost->ext_cd_gpio = -1;
487 if (of_get_property(node, "cd-inverted", NULL))
488 pdata->ext_cd_gpio_invert = 1;
489 } else if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
490 ret = gpio_request(gpio, "sdhci-cd");
491 if (ret) {
492 dev_err(dev, "card detect gpio request failed\n");
493 return -EINVAL;
494 }
495 ourhost->ext_cd_gpio = gpio;
496 }
497
498 setup_bus:
499 /* get the gpios for command, clock and data lines */
500 for (cnt = 0; cnt < NUM_GPIOS(pdata->max_width); cnt++) {
501 gpio = of_get_gpio(node, cnt);
502 if (!gpio_is_valid(gpio)) {
503 dev_err(dev, "invalid gpio[%d]\n", cnt);
504 goto err_free_dt_cd_gpio;
505 }
506 ourhost->gpios[cnt] = gpio;
507 }
508
509 for (cnt = 0; cnt < NUM_GPIOS(pdata->max_width); cnt++) {
510 ret = gpio_request(ourhost->gpios[cnt], "sdhci-gpio");
511 if (ret) {
512 dev_err(dev, "gpio[%d] request failed\n", cnt);
513 goto err_free_dt_gpios;
514 }
515 }
516
517 return 0;
518
519 err_free_dt_gpios:
520 while (--cnt >= 0)
521 gpio_free(ourhost->gpios[cnt]);
522 err_free_dt_cd_gpio:
523 if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL)
524 gpio_free(ourhost->ext_cd_gpio);
525 return -EINVAL;
526}
527#else
528static int __devinit sdhci_s3c_parse_dt(struct device *dev,
529 struct sdhci_host *host, struct s3c_sdhci_platdata *pdata)
530{
531 return -EINVAL;
532}
533#endif
534
535static const struct of_device_id sdhci_s3c_dt_match[];
536
422static inline struct sdhci_s3c_drv_data *sdhci_s3c_get_driver_data( 537static inline struct sdhci_s3c_drv_data *sdhci_s3c_get_driver_data(
423 struct platform_device *pdev) 538 struct platform_device *pdev)
424{ 539{
540#ifdef CONFIG_OF
541 if (pdev->dev.of_node) {
542 const struct of_device_id *match;
543 match = of_match_node(sdhci_s3c_dt_match, pdev->dev.of_node);
544 return (struct sdhci_s3c_drv_data *)match->data;
545 }
546#endif
425 return (struct sdhci_s3c_drv_data *) 547 return (struct sdhci_s3c_drv_data *)
426 platform_get_device_id(pdev)->driver_data; 548 platform_get_device_id(pdev)->driver_data;
427} 549}
@@ -436,7 +558,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
436 struct resource *res; 558 struct resource *res;
437 int ret, irq, ptr, clks; 559 int ret, irq, ptr, clks;
438 560
439 if (!pdev->dev.platform_data) { 561 if (!pdev->dev.platform_data && !pdev->dev.of_node) {
440 dev_err(dev, "no device data specified\n"); 562 dev_err(dev, "no device data specified\n");
441 return -ENOENT; 563 return -ENOENT;
442 } 564 }
@@ -452,21 +574,28 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
452 dev_err(dev, "sdhci_alloc_host() failed\n"); 574 dev_err(dev, "sdhci_alloc_host() failed\n");
453 return PTR_ERR(host); 575 return PTR_ERR(host);
454 } 576 }
577 sc = sdhci_priv(host);
455 578
456 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 579 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
457 if (!pdata) { 580 if (!pdata) {
458 ret = -ENOMEM; 581 ret = -ENOMEM;
459 goto err_io_clk; 582 goto err_pdata;
583 }
584
585 if (pdev->dev.of_node) {
586 ret = sdhci_s3c_parse_dt(&pdev->dev, host, pdata);
587 if (ret)
588 goto err_pdata;
589 } else {
590 memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
591 sc->ext_cd_gpio = -1; /* invalid gpio number */
460 } 592 }
461 memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
462 593
463 drv_data = sdhci_s3c_get_driver_data(pdev); 594 drv_data = sdhci_s3c_get_driver_data(pdev);
464 sc = sdhci_priv(host);
465 595
466 sc->host = host; 596 sc->host = host;
467 sc->pdev = pdev; 597 sc->pdev = pdev;
468 sc->pdata = pdata; 598 sc->pdata = pdata;
469 sc->ext_cd_gpio = -1; /* invalid gpio number */
470 599
471 platform_set_drvdata(pdev, host); 600 platform_set_drvdata(pdev, host);
472 601
@@ -486,9 +615,8 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
486 615
487 snprintf(name, 14, "mmc_busclk.%d", ptr); 616 snprintf(name, 14, "mmc_busclk.%d", ptr);
488 clk = clk_get(dev, name); 617 clk = clk_get(dev, name);
489 if (IS_ERR(clk)) { 618 if (IS_ERR(clk))
490 continue; 619 continue;
491 }
492 620
493 clks++; 621 clks++;
494 sc->clk_bus[ptr] = clk; 622 sc->clk_bus[ptr] = clk;
@@ -499,8 +627,6 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
499 */ 627 */
500 sc->cur_clk = ptr; 628 sc->cur_clk = ptr;
501 629
502 clk_enable(clk);
503
504 dev_info(dev, "clock source %d: %s (%ld Hz)\n", 630 dev_info(dev, "clock source %d: %s (%ld Hz)\n",
505 ptr, name, clk_get_rate(clk)); 631 ptr, name, clk_get_rate(clk));
506 } 632 }
@@ -511,6 +637,10 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
511 goto err_no_busclks; 637 goto err_no_busclks;
512 } 638 }
513 639
640#ifndef CONFIG_PM_RUNTIME
641 clk_enable(sc->clk_bus[sc->cur_clk]);
642#endif
643
514 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 644 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
515 host->ioaddr = devm_request_and_ioremap(&pdev->dev, res); 645 host->ioaddr = devm_request_and_ioremap(&pdev->dev, res);
516 if (!host->ioaddr) { 646 if (!host->ioaddr) {
@@ -616,12 +746,17 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
616 gpio_is_valid(pdata->ext_cd_gpio)) 746 gpio_is_valid(pdata->ext_cd_gpio))
617 sdhci_s3c_setup_card_detect_gpio(sc); 747 sdhci_s3c_setup_card_detect_gpio(sc);
618 748
749#ifdef CONFIG_PM_RUNTIME
750 clk_disable(sc->clk_io);
751#endif
619 return 0; 752 return 0;
620 753
621 err_req_regs: 754 err_req_regs:
755#ifndef CONFIG_PM_RUNTIME
756 clk_disable(sc->clk_bus[sc->cur_clk]);
757#endif
622 for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) { 758 for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
623 if (sc->clk_bus[ptr]) { 759 if (sc->clk_bus[ptr]) {
624 clk_disable(sc->clk_bus[ptr]);
625 clk_put(sc->clk_bus[ptr]); 760 clk_put(sc->clk_bus[ptr]);
626 } 761 }
627 } 762 }
@@ -631,6 +766,12 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
631 clk_put(sc->clk_io); 766 clk_put(sc->clk_io);
632 767
633 err_io_clk: 768 err_io_clk:
769 for (ptr = 0; ptr < NUM_GPIOS(sc->pdata->max_width); ptr++)
770 gpio_free(sc->gpios[ptr]);
771 if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL)
772 gpio_free(sc->ext_cd_gpio);
773
774 err_pdata:
634 sdhci_free_host(host); 775 sdhci_free_host(host);
635 776
636 return ret; 777 return ret;
@@ -638,9 +779,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
638 779
639static int __devexit sdhci_s3c_remove(struct platform_device *pdev) 780static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
640{ 781{
641 struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data;
642 struct sdhci_host *host = platform_get_drvdata(pdev); 782 struct sdhci_host *host = platform_get_drvdata(pdev);
643 struct sdhci_s3c *sc = sdhci_priv(host); 783 struct sdhci_s3c *sc = sdhci_priv(host);
784 struct s3c_sdhci_platdata *pdata = sc->pdata;
644 int ptr; 785 int ptr;
645 786
646 if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_cleanup) 787 if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_cleanup)
@@ -652,19 +793,30 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
652 if (gpio_is_valid(sc->ext_cd_gpio)) 793 if (gpio_is_valid(sc->ext_cd_gpio))
653 gpio_free(sc->ext_cd_gpio); 794 gpio_free(sc->ext_cd_gpio);
654 795
796#ifdef CONFIG_PM_RUNTIME
797 clk_enable(sc->clk_io);
798#endif
655 sdhci_remove_host(host, 1); 799 sdhci_remove_host(host, 1);
656 800
801 pm_runtime_dont_use_autosuspend(&pdev->dev);
657 pm_runtime_disable(&pdev->dev); 802 pm_runtime_disable(&pdev->dev);
658 803
659 for (ptr = 0; ptr < 3; ptr++) { 804#ifndef CONFIG_PM_RUNTIME
805 clk_disable(sc->clk_bus[sc->cur_clk]);
806#endif
807 for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
660 if (sc->clk_bus[ptr]) { 808 if (sc->clk_bus[ptr]) {
661 clk_disable(sc->clk_bus[ptr]);
662 clk_put(sc->clk_bus[ptr]); 809 clk_put(sc->clk_bus[ptr]);
663 } 810 }
664 } 811 }
665 clk_disable(sc->clk_io); 812 clk_disable(sc->clk_io);
666 clk_put(sc->clk_io); 813 clk_put(sc->clk_io);
667 814
815 if (pdev->dev.of_node) {
816 for (ptr = 0; ptr < NUM_GPIOS(sc->pdata->max_width); ptr++)
817 gpio_free(sc->gpios[ptr]);
818 }
819
668 sdhci_free_host(host); 820 sdhci_free_host(host);
669 platform_set_drvdata(pdev, NULL); 821 platform_set_drvdata(pdev, NULL);
670 822
@@ -691,15 +843,28 @@ static int sdhci_s3c_resume(struct device *dev)
691static int sdhci_s3c_runtime_suspend(struct device *dev) 843static int sdhci_s3c_runtime_suspend(struct device *dev)
692{ 844{
693 struct sdhci_host *host = dev_get_drvdata(dev); 845 struct sdhci_host *host = dev_get_drvdata(dev);
846 struct sdhci_s3c *ourhost = to_s3c(host);
847 struct clk *busclk = ourhost->clk_io;
848 int ret;
849
850 ret = sdhci_runtime_suspend_host(host);
694 851
695 return sdhci_runtime_suspend_host(host); 852 clk_disable(ourhost->clk_bus[ourhost->cur_clk]);
853 clk_disable(busclk);
854 return ret;
696} 855}
697 856
698static int sdhci_s3c_runtime_resume(struct device *dev) 857static int sdhci_s3c_runtime_resume(struct device *dev)
699{ 858{
700 struct sdhci_host *host = dev_get_drvdata(dev); 859 struct sdhci_host *host = dev_get_drvdata(dev);
860 struct sdhci_s3c *ourhost = to_s3c(host);
861 struct clk *busclk = ourhost->clk_io;
862 int ret;
701 863
702 return sdhci_runtime_resume_host(host); 864 clk_enable(busclk);
865 clk_enable(ourhost->clk_bus[ourhost->cur_clk]);
866 ret = sdhci_runtime_resume_host(host);
867 return ret;
703} 868}
704#endif 869#endif
705 870
@@ -737,6 +902,16 @@ static struct platform_device_id sdhci_s3c_driver_ids[] = {
737}; 902};
738MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids); 903MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids);
739 904
905#ifdef CONFIG_OF
906static const struct of_device_id sdhci_s3c_dt_match[] = {
907 { .compatible = "samsung,s3c6410-sdhci", },
908 { .compatible = "samsung,exynos4210-sdhci",
909 .data = (void *)EXYNOS4_SDHCI_DRV_DATA },
910 {},
911};
912MODULE_DEVICE_TABLE(of, sdhci_s3c_dt_match);
913#endif
914
740static struct platform_driver sdhci_s3c_driver = { 915static struct platform_driver sdhci_s3c_driver = {
741 .probe = sdhci_s3c_probe, 916 .probe = sdhci_s3c_probe,
742 .remove = __devexit_p(sdhci_s3c_remove), 917 .remove = __devexit_p(sdhci_s3c_remove),
@@ -744,6 +919,7 @@ static struct platform_driver sdhci_s3c_driver = {
744 .driver = { 919 .driver = {
745 .owner = THIS_MODULE, 920 .owner = THIS_MODULE,
746 .name = "s3c-sdhci", 921 .name = "s3c-sdhci",
922 .of_match_table = of_match_ptr(sdhci_s3c_dt_match),
747 .pm = SDHCI_S3C_PMOPS, 923 .pm = SDHCI_S3C_PMOPS,
748 }, 924 },
749}; 925};
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 423da8194cd8..6be89c032deb 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -20,6 +20,8 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/irq.h> 22#include <linux/irq.h>
23#include <linux/of.h>
24#include <linux/of_gpio.h>
23#include <linux/platform_device.h> 25#include <linux/platform_device.h>
24#include <linux/pm.h> 26#include <linux/pm.h>
25#include <linux/slab.h> 27#include <linux/slab.h>
@@ -68,8 +70,42 @@ static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id)
68 return IRQ_HANDLED; 70 return IRQ_HANDLED;
69} 71}
70 72
73#ifdef CONFIG_OF
74static struct sdhci_plat_data * __devinit
75sdhci_probe_config_dt(struct platform_device *pdev)
76{
77 struct device_node *np = pdev->dev.of_node;
78 struct sdhci_plat_data *pdata = NULL;
79 int cd_gpio;
80
81 cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
82 if (!gpio_is_valid(cd_gpio))
83 cd_gpio = -1;
84
85 /* If pdata is required */
86 if (cd_gpio != -1) {
87 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
88 if (!pdata) {
89 dev_err(&pdev->dev, "DT: kzalloc failed\n");
90 return ERR_PTR(-ENOMEM);
91 }
92 }
93
94 pdata->card_int_gpio = cd_gpio;
95
96 return pdata;
97}
98#else
99static struct sdhci_plat_data * __devinit
100sdhci_probe_config_dt(struct platform_device *pdev)
101{
102 return ERR_PTR(-ENOSYS);
103}
104#endif
105
71static int __devinit sdhci_probe(struct platform_device *pdev) 106static int __devinit sdhci_probe(struct platform_device *pdev)
72{ 107{
108 struct device_node *np = pdev->dev.of_node;
73 struct sdhci_host *host; 109 struct sdhci_host *host;
74 struct resource *iomem; 110 struct resource *iomem;
75 struct spear_sdhci *sdhci; 111 struct spear_sdhci *sdhci;
@@ -104,14 +140,22 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
104 goto err; 140 goto err;
105 } 141 }
106 142
107 ret = clk_enable(sdhci->clk); 143 ret = clk_prepare_enable(sdhci->clk);
108 if (ret) { 144 if (ret) {
109 dev_dbg(&pdev->dev, "Error enabling clock\n"); 145 dev_dbg(&pdev->dev, "Error enabling clock\n");
110 goto put_clk; 146 goto put_clk;
111 } 147 }
112 148
113 /* overwrite platform_data */ 149 if (np) {
114 sdhci->data = dev_get_platdata(&pdev->dev); 150 sdhci->data = sdhci_probe_config_dt(pdev);
151 if (IS_ERR(sdhci->data)) {
152 dev_err(&pdev->dev, "DT: Failed to get pdata\n");
153 return -ENODEV;
154 }
155 } else {
156 sdhci->data = dev_get_platdata(&pdev->dev);
157 }
158
115 pdev->dev.platform_data = sdhci; 159 pdev->dev.platform_data = sdhci;
116 160
117 if (pdev->dev.parent) 161 if (pdev->dev.parent)
@@ -216,7 +260,7 @@ set_drvdata:
216free_host: 260free_host:
217 sdhci_free_host(host); 261 sdhci_free_host(host);
218disable_clk: 262disable_clk:
219 clk_disable(sdhci->clk); 263 clk_disable_unprepare(sdhci->clk);
220put_clk: 264put_clk:
221 clk_put(sdhci->clk); 265 clk_put(sdhci->clk);
222err: 266err:
@@ -238,7 +282,7 @@ static int __devexit sdhci_remove(struct platform_device *pdev)
238 282
239 sdhci_remove_host(host, dead); 283 sdhci_remove_host(host, dead);
240 sdhci_free_host(host); 284 sdhci_free_host(host);
241 clk_disable(sdhci->clk); 285 clk_disable_unprepare(sdhci->clk);
242 clk_put(sdhci->clk); 286 clk_put(sdhci->clk);
243 287
244 return 0; 288 return 0;
@@ -253,7 +297,7 @@ static int sdhci_suspend(struct device *dev)
253 297
254 ret = sdhci_suspend_host(host); 298 ret = sdhci_suspend_host(host);
255 if (!ret) 299 if (!ret)
256 clk_disable(sdhci->clk); 300 clk_disable_unprepare(sdhci->clk);
257 301
258 return ret; 302 return ret;
259} 303}
@@ -264,7 +308,7 @@ static int sdhci_resume(struct device *dev)
264 struct spear_sdhci *sdhci = dev_get_platdata(dev); 308 struct spear_sdhci *sdhci = dev_get_platdata(dev);
265 int ret; 309 int ret;
266 310
267 ret = clk_enable(sdhci->clk); 311 ret = clk_prepare_enable(sdhci->clk);
268 if (ret) { 312 if (ret) {
269 dev_dbg(dev, "Resume: Error enabling clock\n"); 313 dev_dbg(dev, "Resume: Error enabling clock\n");
270 return ret; 314 return ret;
@@ -276,11 +320,20 @@ static int sdhci_resume(struct device *dev)
276 320
277static SIMPLE_DEV_PM_OPS(sdhci_pm_ops, sdhci_suspend, sdhci_resume); 321static SIMPLE_DEV_PM_OPS(sdhci_pm_ops, sdhci_suspend, sdhci_resume);
278 322
323#ifdef CONFIG_OF
324static const struct of_device_id sdhci_spear_id_table[] = {
325 { .compatible = "st,spear300-sdhci" },
326 {}
327};
328MODULE_DEVICE_TABLE(of, sdhci_spear_id_table);
329#endif
330
279static struct platform_driver sdhci_driver = { 331static struct platform_driver sdhci_driver = {
280 .driver = { 332 .driver = {
281 .name = "sdhci", 333 .name = "sdhci",
282 .owner = THIS_MODULE, 334 .owner = THIS_MODULE,
283 .pm = &sdhci_pm_ops, 335 .pm = &sdhci_pm_ops,
336 .of_match_table = of_match_ptr(sdhci_spear_id_table),
284 }, 337 },
285 .probe = sdhci_probe, 338 .probe = sdhci_probe,
286 .remove = __devexit_p(sdhci_remove), 339 .remove = __devexit_p(sdhci_remove),
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index d43e7462941f..f9eb91623701 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -27,7 +27,6 @@
27 27
28#include <asm/gpio.h> 28#include <asm/gpio.h>
29 29
30#include <mach/gpio-tegra.h>
31#include <linux/platform_data/mmc-sdhci-tegra.h> 30#include <linux/platform_data/mmc-sdhci-tegra.h>
32 31
33#include "sdhci-pltfm.h" 32#include "sdhci-pltfm.h"
@@ -257,10 +256,9 @@ static int __devinit sdhci_tegra_probe(struct platform_device *pdev)
257 int rc; 256 int rc;
258 257
259 match = of_match_device(sdhci_tegra_dt_match, &pdev->dev); 258 match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
260 if (match) 259 if (!match)
261 soc_data = match->data; 260 return -EINVAL;
262 else 261 soc_data = match->data;
263 soc_data = &soc_data_tegra20;
264 262
265 host = sdhci_pltfm_init(pdev, soc_data->pdata); 263 host = sdhci_pltfm_init(pdev, soc_data->pdata);
266 if (IS_ERR(host)) 264 if (IS_ERR(host))
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9a11dc39921c..7922adb42386 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -28,6 +28,7 @@
28#include <linux/mmc/mmc.h> 28#include <linux/mmc/mmc.h>
29#include <linux/mmc/host.h> 29#include <linux/mmc/host.h>
30#include <linux/mmc/card.h> 30#include <linux/mmc/card.h>
31#include <linux/mmc/slot-gpio.h>
31 32
32#include "sdhci.h" 33#include "sdhci.h"
33 34
@@ -1293,6 +1294,13 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1293 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 1294 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
1294 SDHCI_CARD_PRESENT; 1295 SDHCI_CARD_PRESENT;
1295 1296
1297 /* If we're using a cd-gpio, testing the presence bit might fail. */
1298 if (!present) {
1299 int ret = mmc_gpio_get_cd(host->mmc);
1300 if (ret > 0)
1301 present = true;
1302 }
1303
1296 if (!present || host->flags & SDHCI_DEVICE_DEAD) { 1304 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1297 host->mrq->cmd->error = -ENOMEDIUM; 1305 host->mrq->cmd->error = -ENOMEDIUM;
1298 tasklet_schedule(&host->finish_tasklet); 1306 tasklet_schedule(&host->finish_tasklet);
@@ -1597,57 +1605,65 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1597 spin_unlock_irqrestore(&host->lock, flags); 1605 spin_unlock_irqrestore(&host->lock, flags);
1598} 1606}
1599 1607
1600static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, 1608static int sdhci_do_3_3v_signal_voltage_switch(struct sdhci_host *host,
1601 struct mmc_ios *ios) 1609 u16 ctrl)
1602{ 1610{
1603 u8 pwr; 1611 int ret;
1604 u16 clk, ctrl;
1605 u32 present_state;
1606 1612
1607 /* 1613 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1608 * Signal Voltage Switching is only applicable for Host Controllers 1614 ctrl &= ~SDHCI_CTRL_VDD_180;
1609 * v3.00 and above. 1615 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1610 */
1611 if (host->version < SDHCI_SPEC_300)
1612 return 0;
1613 1616
1614 /* 1617 if (host->vqmmc) {
1615 * We first check whether the request is to set signalling voltage 1618 ret = regulator_set_voltage(host->vqmmc, 3300000, 3300000);
1616 * to 3.3V. If so, we change the voltage to 3.3V and return quickly. 1619 if (ret) {
1617 */ 1620 pr_warning("%s: Switching to 3.3V signalling voltage "
1621 " failed\n", mmc_hostname(host->mmc));
1622 return -EIO;
1623 }
1624 }
1625 /* Wait for 5ms */
1626 usleep_range(5000, 5500);
1627
1628 /* 3.3V regulator output should be stable within 5 ms */
1618 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1629 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1619 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { 1630 if (!(ctrl & SDHCI_CTRL_VDD_180))
1620 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ 1631 return 0;
1621 ctrl &= ~SDHCI_CTRL_VDD_180;
1622 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1623 1632
1624 /* Wait for 5ms */ 1633 pr_warning("%s: 3.3V regulator output did not became stable\n",
1625 usleep_range(5000, 5500); 1634 mmc_hostname(host->mmc));
1626 1635
1627 /* 3.3V regulator output should be stable within 5 ms */ 1636 return -EIO;
1628 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1637}
1629 if (!(ctrl & SDHCI_CTRL_VDD_180))
1630 return 0;
1631 else {
1632 pr_info(DRIVER_NAME ": Switching to 3.3V "
1633 "signalling voltage failed\n");
1634 return -EIO;
1635 }
1636 } else if (!(ctrl & SDHCI_CTRL_VDD_180) &&
1637 (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)) {
1638 /* Stop SDCLK */
1639 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1640 clk &= ~SDHCI_CLOCK_CARD_EN;
1641 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1642 1638
1643 /* Check whether DAT[3:0] is 0000 */ 1639static int sdhci_do_1_8v_signal_voltage_switch(struct sdhci_host *host,
1644 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); 1640 u16 ctrl)
1645 if (!((present_state & SDHCI_DATA_LVL_MASK) >> 1641{
1646 SDHCI_DATA_LVL_SHIFT)) { 1642 u8 pwr;
1647 /* 1643 u16 clk;
1648 * Enable 1.8V Signal Enable in the Host Control2 1644 u32 present_state;
1649 * register 1645 int ret;
1650 */ 1646
1647 /* Stop SDCLK */
1648 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1649 clk &= ~SDHCI_CLOCK_CARD_EN;
1650 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1651
1652 /* Check whether DAT[3:0] is 0000 */
1653 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1654 if (!((present_state & SDHCI_DATA_LVL_MASK) >>
1655 SDHCI_DATA_LVL_SHIFT)) {
1656 /*
1657 * Enable 1.8V Signal Enable in the Host Control2
1658 * register
1659 */
1660 if (host->vqmmc)
1661 ret = regulator_set_voltage(host->vqmmc,
1662 1800000, 1800000);
1663 else
1664 ret = 0;
1665
1666 if (!ret) {
1651 ctrl |= SDHCI_CTRL_VDD_180; 1667 ctrl |= SDHCI_CTRL_VDD_180;
1652 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 1668 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1653 1669
@@ -1656,7 +1672,7 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
1656 1672
1657 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1673 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1658 if (ctrl & SDHCI_CTRL_VDD_180) { 1674 if (ctrl & SDHCI_CTRL_VDD_180) {
1659 /* Provide SDCLK again and wait for 1ms*/ 1675 /* Provide SDCLK again and wait for 1ms */
1660 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1676 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1661 clk |= SDHCI_CLOCK_CARD_EN; 1677 clk |= SDHCI_CLOCK_CARD_EN;
1662 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1678 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
@@ -1673,29 +1689,55 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
1673 return 0; 1689 return 0;
1674 } 1690 }
1675 } 1691 }
1692 }
1676 1693
1677 /* 1694 /*
1678 * If we are here, that means the switch to 1.8V signaling 1695 * If we are here, that means the switch to 1.8V signaling
1679 * failed. We power cycle the card, and retry initialization 1696 * failed. We power cycle the card, and retry initialization
1680 * sequence by setting S18R to 0. 1697 * sequence by setting S18R to 0.
1681 */ 1698 */
1682 pwr = sdhci_readb(host, SDHCI_POWER_CONTROL); 1699 pwr = sdhci_readb(host, SDHCI_POWER_CONTROL);
1683 pwr &= ~SDHCI_POWER_ON; 1700 pwr &= ~SDHCI_POWER_ON;
1684 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 1701 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1685 if (host->vmmc) 1702 if (host->vmmc)
1686 regulator_disable(host->vmmc); 1703 regulator_disable(host->vmmc);
1687 1704
1688 /* Wait for 1ms as per the spec */ 1705 /* Wait for 1ms as per the spec */
1689 usleep_range(1000, 1500); 1706 usleep_range(1000, 1500);
1690 pwr |= SDHCI_POWER_ON; 1707 pwr |= SDHCI_POWER_ON;
1691 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 1708 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1692 if (host->vmmc) 1709 if (host->vmmc)
1693 regulator_enable(host->vmmc); 1710 regulator_enable(host->vmmc);
1694 1711
1695 pr_info(DRIVER_NAME ": Switching to 1.8V signalling " 1712 pr_warning("%s: Switching to 1.8V signalling voltage failed, "
1696 "voltage failed, retrying with S18R set to 0\n"); 1713 "retrying with S18R set to 0\n", mmc_hostname(host->mmc));
1697 return -EAGAIN; 1714
1698 } else 1715 return -EAGAIN;
1716}
1717
1718static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
1719 struct mmc_ios *ios)
1720{
1721 u16 ctrl;
1722
1723 /*
1724 * Signal Voltage Switching is only applicable for Host Controllers
1725 * v3.00 and above.
1726 */
1727 if (host->version < SDHCI_SPEC_300)
1728 return 0;
1729
1730 /*
1731 * We first check whether the request is to set signalling voltage
1732 * to 3.3V. If so, we change the voltage to 3.3V and return quickly.
1733 */
1734 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1735 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1736 return sdhci_do_3_3v_signal_voltage_switch(host, ctrl);
1737 else if (!(ctrl & SDHCI_CTRL_VDD_180) &&
1738 (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180))
1739 return sdhci_do_1_8v_signal_voltage_switch(host, ctrl);
1740 else
1699 /* No signal voltage switch required */ 1741 /* No signal voltage switch required */
1700 return 0; 1742 return 0;
1701} 1743}
@@ -2802,6 +2844,18 @@ int sdhci_add_host(struct sdhci_host *host)
2802 !(host->mmc->caps & MMC_CAP_NONREMOVABLE)) 2844 !(host->mmc->caps & MMC_CAP_NONREMOVABLE))
2803 mmc->caps |= MMC_CAP_NEEDS_POLL; 2845 mmc->caps |= MMC_CAP_NEEDS_POLL;
2804 2846
2847 /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
2848 host->vqmmc = regulator_get(mmc_dev(mmc), "vqmmc");
2849 if (IS_ERR(host->vqmmc)) {
2850 pr_info("%s: no vqmmc regulator found\n", mmc_hostname(mmc));
2851 host->vqmmc = NULL;
2852 }
2853 else if (regulator_is_supported_voltage(host->vqmmc, 1800000, 1800000))
2854 regulator_enable(host->vqmmc);
2855 else
2856 caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
2857 SDHCI_SUPPORT_DDR50);
2858
2805 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */ 2859 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
2806 if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 2860 if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
2807 SDHCI_SUPPORT_DDR50)) 2861 SDHCI_SUPPORT_DDR50))
@@ -2832,15 +2886,6 @@ int sdhci_add_host(struct sdhci_host *host)
2832 if (caps[1] & SDHCI_DRIVER_TYPE_D) 2886 if (caps[1] & SDHCI_DRIVER_TYPE_D)
2833 mmc->caps |= MMC_CAP_DRIVER_TYPE_D; 2887 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
2834 2888
2835 /*
2836 * If Power Off Notify capability is enabled by the host,
2837 * set notify to short power off notify timeout value.
2838 */
2839 if (mmc->caps2 & MMC_CAP2_POWEROFF_NOTIFY)
2840 mmc->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
2841 else
2842 mmc->power_notify_type = MMC_HOST_PW_NOTIFY_NONE;
2843
2844 /* Initial value for re-tuning timer count */ 2889 /* Initial value for re-tuning timer count */
2845 host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >> 2890 host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
2846 SDHCI_RETUNING_TIMER_COUNT_SHIFT; 2891 SDHCI_RETUNING_TIMER_COUNT_SHIFT;
@@ -2862,7 +2907,8 @@ int sdhci_add_host(struct sdhci_host *host)
2862 if (IS_ERR(host->vmmc)) { 2907 if (IS_ERR(host->vmmc)) {
2863 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc)); 2908 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
2864 host->vmmc = NULL; 2909 host->vmmc = NULL;
2865 } 2910 } else
2911 regulator_enable(host->vmmc);
2866 2912
2867#ifdef CONFIG_REGULATOR 2913#ifdef CONFIG_REGULATOR
2868 if (host->vmmc) { 2914 if (host->vmmc) {
@@ -3119,8 +3165,15 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
3119 tasklet_kill(&host->card_tasklet); 3165 tasklet_kill(&host->card_tasklet);
3120 tasklet_kill(&host->finish_tasklet); 3166 tasklet_kill(&host->finish_tasklet);
3121 3167
3122 if (host->vmmc) 3168 if (host->vmmc) {
3169 regulator_disable(host->vmmc);
3123 regulator_put(host->vmmc); 3170 regulator_put(host->vmmc);
3171 }
3172
3173 if (host->vqmmc) {
3174 regulator_disable(host->vqmmc);
3175 regulator_put(host->vqmmc);
3176 }
3124 3177
3125 kfree(host->adma_desc); 3178 kfree(host->adma_desc);
3126 kfree(host->align_buffer); 3179 kfree(host->align_buffer);
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 5d8142773fac..11d2bc3b51d5 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1213,7 +1213,9 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
1213 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE); 1213 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE);
1214 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); 1214 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
1215 } else if (state & INT_DTRANE) { 1215 } else if (state & INT_DTRANE) {
1216 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_DTRANE); 1216 sh_mmcif_writel(host->addr, MMCIF_CE_INT,
1217 ~(INT_CMD12DRE | INT_CMD12RBE |
1218 INT_CMD12CRE | INT_DTRANE));
1217 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); 1219 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
1218 } else if (state & INT_CMD12RBE) { 1220 } else if (state & INT_CMD12RBE) {
1219 sh_mmcif_writel(host->addr, MMCIF_CE_INT, 1221 sh_mmcif_writel(host->addr, MMCIF_CE_INT,
@@ -1229,6 +1231,10 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
1229 host->sd_error = true; 1231 host->sd_error = true;
1230 dev_dbg(&host->pd->dev, "int err state = %08x\n", state); 1232 dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
1231 } 1233 }
1234 if (host->state == STATE_IDLE) {
1235 dev_info(&host->pd->dev, "Spurious IRQ status 0x%x", state);
1236 return IRQ_HANDLED;
1237 }
1232 if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) { 1238 if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
1233 if (!host->dma_active) 1239 if (!host->dma_active)
1234 return IRQ_WAKE_THREAD; 1240 return IRQ_WAKE_THREAD;
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 4b83c43f950d..f18becef156d 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1337,21 +1337,7 @@ static struct pci_driver via_sd_driver = {
1337 .resume = via_sd_resume, 1337 .resume = via_sd_resume,
1338}; 1338};
1339 1339
1340static int __init via_sd_drv_init(void) 1340module_pci_driver(via_sd_driver);
1341{
1342 pr_info(DRV_NAME ": VIA SD/MMC Card Reader driver "
1343 "(C) 2008 VIA Technologies, Inc.\n");
1344
1345 return pci_register_driver(&via_sd_driver);
1346}
1347
1348static void __exit via_sd_drv_exit(void)
1349{
1350 pci_unregister_driver(&via_sd_driver);
1351}
1352
1353module_init(via_sd_drv_init);
1354module_exit(via_sd_drv_exit);
1355 1341
1356MODULE_LICENSE("GPL"); 1342MODULE_LICENSE("GPL");
1357MODULE_AUTHOR("VIA Technologies Inc."); 1343MODULE_AUTHOR("VIA Technologies Inc.");
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index 58eab9ac1d01..d5655a63eda4 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -2358,9 +2358,9 @@ error5:
2358 * which is contained at the end of struct mmc 2358 * which is contained at the end of struct mmc
2359 */ 2359 */
2360error4: 2360error4:
2361 usb_free_urb(command_out_urb);
2362error1:
2363 usb_free_urb(command_res_urb); 2361 usb_free_urb(command_res_urb);
2362error1:
2363 usb_free_urb(command_out_urb);
2364error0: 2364error0:
2365 return retval; 2365 return retval;
2366} 2366}
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 8f52fc858e48..5a5cd2ace4a6 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -240,7 +240,7 @@ static int parse_cmdline(char *devname, char *szstart, char *szlength)
240 240
241 if (*(szlength) != '+') { 241 if (*(szlength) != '+') {
242 devlength = simple_strtoul(szlength, &buffer, 0); 242 devlength = simple_strtoul(szlength, &buffer, 0);
243 devlength = handle_unit(devlength, buffer) - devstart; 243 devlength = handle_unit(devlength, buffer);
244 if (devlength < devstart) 244 if (devlength < devstart)
245 goto err_out; 245 goto err_out;
246 246
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index d5ece6ea6f98..1a03b7f673ce 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2990,6 +2990,7 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
2990 * ID to decide what to do. 2990 * ID to decide what to do.
2991 */ 2991 */
2992 if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG && 2992 if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
2993 (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
2993 id_data[5] != 0x00) { 2994 id_data[5] != 0x00) {
2994 /* Calc pagesize */ 2995 /* Calc pagesize */
2995 mtd->writesize = 2048 << (extid & 0x03); 2996 mtd->writesize = 2048 << (extid & 0x03);
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 7153e0d27101..b3f41f200622 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -3694,7 +3694,7 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int
3694 * flexonenand_set_boundary - Writes the SLC boundary 3694 * flexonenand_set_boundary - Writes the SLC boundary
3695 * @param mtd - mtd info structure 3695 * @param mtd - mtd info structure
3696 */ 3696 */
3697int flexonenand_set_boundary(struct mtd_info *mtd, int die, 3697static int flexonenand_set_boundary(struct mtd_info *mtd, int die,
3698 int boundary, int lock) 3698 int boundary, int lock)
3699{ 3699{
3700 struct onenand_chip *this = mtd->priv; 3700 struct onenand_chip *this = mtd->priv;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b721902bb6b4..b2530b002125 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1519,7 +1519,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1519 /* no need to lock since we're protected by rtnl_lock */ 1519 /* no need to lock since we're protected by rtnl_lock */
1520 if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) { 1520 if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
1521 pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name); 1521 pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
1522 if (bond_vlan_used(bond)) { 1522 if (vlan_uses_dev(bond_dev)) {
1523 pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n", 1523 pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
1524 bond_dev->name, slave_dev->name, bond_dev->name); 1524 bond_dev->name, slave_dev->name, bond_dev->name);
1525 return -EPERM; 1525 return -EPERM;
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index dc15d248443f..ef8d2a080d17 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1060,7 +1060,7 @@ static ssize_t bonding_store_primary(struct device *d,
1060 goto out; 1060 goto out;
1061 } 1061 }
1062 1062
1063 sscanf(buf, "%16s", ifname); /* IFNAMSIZ */ 1063 sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
1064 1064
1065 /* check to see if we are clearing primary */ 1065 /* check to see if we are clearing primary */
1066 if (!strlen(ifname) || buf[0] == '\n') { 1066 if (!strlen(ifname) || buf[0] == '\n') {
@@ -1237,7 +1237,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
1237 goto out; 1237 goto out;
1238 } 1238 }
1239 1239
1240 sscanf(buf, "%16s", ifname); /* IFNAMSIZ */ 1240 sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
1241 1241
1242 /* check to see if we are clearing active */ 1242 /* check to see if we are clearing active */
1243 if (!strlen(ifname) || buf[0] == '\n') { 1243 if (!strlen(ifname) || buf[0] == '\n') {
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index c78ecfca1e45..a412bf6d73ef 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -144,9 +144,22 @@
144 144
145#define FLEXCAN_MB_CODE_MASK (0xf0ffffff) 145#define FLEXCAN_MB_CODE_MASK (0xf0ffffff)
146 146
147/* FLEXCAN hardware feature flags */ 147/*
148 * FLEXCAN hardware feature flags
149 *
150 * Below is some version info we got:
151 * SOC Version IP-Version Glitch- [TR]WRN_INT
152 * Filter? connected?
153 * MX25 FlexCAN2 03.00.00.00 no no
154 * MX28 FlexCAN2 03.00.04.00 yes yes
155 * MX35 FlexCAN2 03.00.00.00 no no
156 * MX53 FlexCAN2 03.00.00.00 yes no
157 * MX6s FlexCAN3 10.00.12.00 yes yes
158 *
159 * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
160 */
148#define FLEXCAN_HAS_V10_FEATURES BIT(1) /* For core version >= 10 */ 161#define FLEXCAN_HAS_V10_FEATURES BIT(1) /* For core version >= 10 */
149#define FLEXCAN_HAS_BROKEN_ERR_STATE BIT(2) /* Broken error state handling */ 162#define FLEXCAN_HAS_BROKEN_ERR_STATE BIT(2) /* [TR]WRN_INT not connected */
150 163
151/* Structure of the message buffer */ 164/* Structure of the message buffer */
152struct flexcan_mb { 165struct flexcan_mb {
@@ -205,7 +218,7 @@ static struct flexcan_devtype_data fsl_p1010_devtype_data = {
205}; 218};
206static struct flexcan_devtype_data fsl_imx28_devtype_data; 219static struct flexcan_devtype_data fsl_imx28_devtype_data;
207static struct flexcan_devtype_data fsl_imx6q_devtype_data = { 220static struct flexcan_devtype_data fsl_imx6q_devtype_data = {
208 .features = FLEXCAN_HAS_V10_FEATURES | FLEXCAN_HAS_BROKEN_ERR_STATE, 221 .features = FLEXCAN_HAS_V10_FEATURES,
209}; 222};
210 223
211static const struct can_bittiming_const flexcan_bittiming_const = { 224static const struct can_bittiming_const flexcan_bittiming_const = {
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index f5b82aeb2540..6525dbcca4e3 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -30,9 +30,10 @@
30 30
31#include "sja1000.h" 31#include "sja1000.h"
32 32
33MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); 33MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
34MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI family cards"); 34MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI family cards");
35MODULE_SUPPORTED_DEVICE("PEAK PCAN PCI/PCIe/PCIeC miniPCI CAN cards"); 35MODULE_SUPPORTED_DEVICE("PEAK PCAN PCI/PCIe/PCIeC miniPCI CAN cards");
36MODULE_SUPPORTED_DEVICE("PEAK PCAN miniPCIe/cPCI PC/104+ PCI/104e CAN Cards");
36MODULE_LICENSE("GPL v2"); 37MODULE_LICENSE("GPL v2");
37 38
38#define DRV_NAME "peak_pci" 39#define DRV_NAME "peak_pci"
@@ -64,7 +65,11 @@ struct peak_pci_chan {
64#define PEAK_PCI_DEVICE_ID 0x0001 /* for PCI/PCIe slot cards */ 65#define PEAK_PCI_DEVICE_ID 0x0001 /* for PCI/PCIe slot cards */
65#define PEAK_PCIEC_DEVICE_ID 0x0002 /* for ExpressCard slot cards */ 66#define PEAK_PCIEC_DEVICE_ID 0x0002 /* for ExpressCard slot cards */
66#define PEAK_PCIE_DEVICE_ID 0x0003 /* for nextgen PCIe slot cards */ 67#define PEAK_PCIE_DEVICE_ID 0x0003 /* for nextgen PCIe slot cards */
67#define PEAK_MPCI_DEVICE_ID 0x0008 /* The miniPCI slot cards */ 68#define PEAK_CPCI_DEVICE_ID 0x0004 /* for nextgen cPCI slot cards */
69#define PEAK_MPCI_DEVICE_ID 0x0005 /* for nextgen miniPCI slot cards */
70#define PEAK_PC_104P_DEVICE_ID 0x0006 /* PCAN-PC/104+ cards */
71#define PEAK_PCI_104E_DEVICE_ID 0x0007 /* PCAN-PCI/104 Express cards */
72#define PEAK_MPCIE_DEVICE_ID 0x0008 /* The miniPCIe slot cards */
68 73
69#define PEAK_PCI_CHAN_MAX 4 74#define PEAK_PCI_CHAN_MAX 4
70 75
@@ -76,6 +81,10 @@ static DEFINE_PCI_DEVICE_TABLE(peak_pci_tbl) = {
76 {PEAK_PCI_VENDOR_ID, PEAK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, 81 {PEAK_PCI_VENDOR_ID, PEAK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
77 {PEAK_PCI_VENDOR_ID, PEAK_PCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, 82 {PEAK_PCI_VENDOR_ID, PEAK_PCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
78 {PEAK_PCI_VENDOR_ID, PEAK_MPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, 83 {PEAK_PCI_VENDOR_ID, PEAK_MPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
84 {PEAK_PCI_VENDOR_ID, PEAK_MPCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
85 {PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
86 {PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
87 {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
79#ifdef CONFIG_CAN_PEAK_PCIEC 88#ifdef CONFIG_CAN_PEAK_PCIEC
80 {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, 89 {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
81#endif 90#endif
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 64d0d9c1afa2..3491d4312fc9 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1845,6 +1845,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
1845 if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){ 1845 if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){
1846 printk(KERN_ERR "amd8111e: No Power Management capability, " 1846 printk(KERN_ERR "amd8111e: No Power Management capability, "
1847 "exiting.\n"); 1847 "exiting.\n");
1848 err = -ENODEV;
1848 goto err_free_reg; 1849 goto err_free_reg;
1849 } 1850 }
1850 1851
@@ -1852,6 +1853,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
1852 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) { 1853 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) {
1853 printk(KERN_ERR "amd8111e: DMA not supported," 1854 printk(KERN_ERR "amd8111e: DMA not supported,"
1854 "exiting.\n"); 1855 "exiting.\n");
1856 err = -ENODEV;
1855 goto err_free_reg; 1857 goto err_free_reg;
1856 } 1858 }
1857 1859
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 397596b078d9..f195acfa2df7 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1174,8 +1174,10 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1174 snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 1174 snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1175 pdev->name, aup->mac_id); 1175 pdev->name, aup->mac_id);
1176 aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); 1176 aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1177 if (aup->mii_bus->irq == NULL) 1177 if (aup->mii_bus->irq == NULL) {
1178 err = -ENOMEM;
1178 goto err_out; 1179 goto err_out;
1180 }
1179 1181
1180 for (i = 0; i < PHY_MAX_ADDR; ++i) 1182 for (i = 0; i < PHY_MAX_ADDR; ++i)
1181 aup->mii_bus->irq[i] = PHY_POLL; 1183 aup->mii_bus->irq[i] = PHY_POLL;
@@ -1190,7 +1192,8 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1190 goto err_mdiobus_reg; 1192 goto err_mdiobus_reg;
1191 } 1193 }
1192 1194
1193 if (au1000_mii_probe(dev) != 0) 1195 err = au1000_mii_probe(dev);
1196 if (err != 0)
1194 goto err_out; 1197 goto err_out;
1195 1198
1196 pDBfree = NULL; 1199 pDBfree = NULL;
@@ -1205,6 +1208,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1205 } 1208 }
1206 aup->pDBfree = pDBfree; 1209 aup->pDBfree = pDBfree;
1207 1210
1211 err = -ENODEV;
1208 for (i = 0; i < NUM_RX_DMA; i++) { 1212 for (i = 0; i < NUM_RX_DMA; i++) {
1209 pDB = au1000_GetFreeDB(aup); 1213 pDB = au1000_GetFreeDB(aup);
1210 if (!pDB) 1214 if (!pDB)
@@ -1213,6 +1217,8 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1213 aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; 1217 aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1214 aup->rx_db_inuse[i] = pDB; 1218 aup->rx_db_inuse[i] = pDB;
1215 } 1219 }
1220
1221 err = -ENODEV;
1216 for (i = 0; i < NUM_TX_DMA; i++) { 1222 for (i = 0; i < NUM_TX_DMA; i++) {
1217 pDB = au1000_GetFreeDB(aup); 1223 pDB = au1000_GetFreeDB(aup);
1218 if (!pDB) 1224 if (!pDB)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 24220992413f..4833b6a9031c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2957,9 +2957,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2957 skb_shinfo(skb)->nr_frags + 2957 skb_shinfo(skb)->nr_frags +
2958 BDS_PER_TX_PKT + 2958 BDS_PER_TX_PKT +
2959 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) { 2959 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2960 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; 2960 /* Handle special storage cases separately */
2961 netif_tx_stop_queue(txq); 2961 if (txdata->tx_ring_size != 0) {
2962 BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); 2962 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2963 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
2964 netif_tx_stop_queue(txq);
2965 }
2966
2963 return NETDEV_TX_BUSY; 2967 return NETDEV_TX_BUSY;
2964 } 2968 }
2965 2969
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index e2e45ee5df33..6dd0dd076cc5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -137,7 +137,16 @@
137#define LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD 137#define LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD
138#define LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD 138#define LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD
139 139
140 140#define LINK_UPDATE_MASK \
141 (LINK_STATUS_SPEED_AND_DUPLEX_MASK | \
142 LINK_STATUS_LINK_UP | \
143 LINK_STATUS_PHYSICAL_LINK_FLAG | \
144 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | \
145 LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | \
146 LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | \
147 LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK | \
148 LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE | \
149 LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
141 150
142#define SFP_EEPROM_CON_TYPE_ADDR 0x2 151#define SFP_EEPROM_CON_TYPE_ADDR 0x2
143 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 152 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
@@ -3295,6 +3304,21 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
3295 DEFAULT_PHY_DEV_ADDR); 3304 DEFAULT_PHY_DEV_ADDR);
3296} 3305}
3297 3306
3307static void bnx2x_xgxs_specific_func(struct bnx2x_phy *phy,
3308 struct link_params *params,
3309 u32 action)
3310{
3311 struct bnx2x *bp = params->bp;
3312 switch (action) {
3313 case PHY_INIT:
3314 /* Set correct devad */
3315 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + params->port*0x18, 0);
3316 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18,
3317 phy->def_md_devad);
3318 break;
3319 }
3320}
3321
3298static void bnx2x_xgxs_deassert(struct link_params *params) 3322static void bnx2x_xgxs_deassert(struct link_params *params)
3299{ 3323{
3300 struct bnx2x *bp = params->bp; 3324 struct bnx2x *bp = params->bp;
@@ -3309,10 +3333,8 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
3309 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); 3333 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3310 udelay(500); 3334 udelay(500);
3311 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); 3335 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
3312 3336 bnx2x_xgxs_specific_func(&params->phy[INT_PHY], params,
3313 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0); 3337 PHY_INIT);
3314 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
3315 params->phy[INT_PHY].def_md_devad);
3316} 3338}
3317 3339
3318static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, 3340static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
@@ -3545,14 +3567,11 @@ static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
3545static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, 3567static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3546 struct link_params *params, 3568 struct link_params *params,
3547 struct link_vars *vars) { 3569 struct link_vars *vars) {
3548 u16 val16 = 0, lane, i; 3570 u16 lane, i, cl72_ctrl, an_adv = 0;
3571 u16 ucode_ver;
3549 struct bnx2x *bp = params->bp; 3572 struct bnx2x *bp = params->bp;
3550 static struct bnx2x_reg_set reg_set[] = { 3573 static struct bnx2x_reg_set reg_set[] = {
3551 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, 3574 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
3552 {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
3553 {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0},
3554 {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff},
3555 {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555},
3556 {MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0}, 3575 {MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0},
3557 {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415}, 3576 {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415},
3558 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190}, 3577 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190},
@@ -3565,12 +3584,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3565 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, 3584 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
3566 reg_set[i].val); 3585 reg_set[i].val);
3567 3586
3587 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3588 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl);
3589 cl72_ctrl &= 0xf8ff;
3590 cl72_ctrl |= 0x3800;
3591 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3592 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl);
3593
3568 /* Check adding advertisement for 1G KX */ 3594 /* Check adding advertisement for 1G KX */
3569 if (((vars->line_speed == SPEED_AUTO_NEG) && 3595 if (((vars->line_speed == SPEED_AUTO_NEG) &&
3570 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || 3596 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
3571 (vars->line_speed == SPEED_1000)) { 3597 (vars->line_speed == SPEED_1000)) {
3572 u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2; 3598 u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
3573 val16 |= (1<<5); 3599 an_adv |= (1<<5);
3574 3600
3575 /* Enable CL37 1G Parallel Detect */ 3601 /* Enable CL37 1G Parallel Detect */
3576 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1); 3602 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1);
@@ -3580,11 +3606,14 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3580 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || 3606 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
3581 (vars->line_speed == SPEED_10000)) { 3607 (vars->line_speed == SPEED_10000)) {
3582 /* Check adding advertisement for 10G KR */ 3608 /* Check adding advertisement for 10G KR */
3583 val16 |= (1<<7); 3609 an_adv |= (1<<7);
3584 /* Enable 10G Parallel Detect */ 3610 /* Enable 10G Parallel Detect */
3611 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
3612 MDIO_AER_BLOCK_AER_REG, 0);
3613
3585 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3614 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3586 MDIO_WC_REG_PAR_DET_10G_CTRL, 1); 3615 MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
3587 3616 bnx2x_set_aer_mmd(params, phy);
3588 DP(NETIF_MSG_LINK, "Advertize 10G\n"); 3617 DP(NETIF_MSG_LINK, "Advertize 10G\n");
3589 } 3618 }
3590 3619
@@ -3604,7 +3633,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3604 3633
3605 /* Advertised speeds */ 3634 /* Advertised speeds */
3606 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3635 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3607 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); 3636 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, an_adv);
3608 3637
3609 /* Advertised and set FEC (Forward Error Correction) */ 3638 /* Advertised and set FEC (Forward Error Correction) */
3610 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3639 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
@@ -3628,9 +3657,10 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3628 /* Set KR Autoneg Work-Around flag for Warpcore version older than D108 3657 /* Set KR Autoneg Work-Around flag for Warpcore version older than D108
3629 */ 3658 */
3630 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3659 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3631 MDIO_WC_REG_UC_INFO_B1_VERSION, &val16); 3660 MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver);
3632 if (val16 < 0xd108) { 3661 if (ucode_ver < 0xd108) {
3633 DP(NETIF_MSG_LINK, "Enable AN KR work-around\n"); 3662 DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n",
3663 ucode_ver);
3634 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; 3664 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
3635 } 3665 }
3636 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3666 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
@@ -3651,21 +3681,16 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
3651 struct link_vars *vars) 3681 struct link_vars *vars)
3652{ 3682{
3653 struct bnx2x *bp = params->bp; 3683 struct bnx2x *bp = params->bp;
3654 u16 i; 3684 u16 val16, i, lane;
3655 static struct bnx2x_reg_set reg_set[] = { 3685 static struct bnx2x_reg_set reg_set[] = {
3656 /* Disable Autoneg */ 3686 /* Disable Autoneg */
3657 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, 3687 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
3658 {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
3659 {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 3688 {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
3660 0x3f00}, 3689 0x3f00},
3661 {MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0}, 3690 {MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0},
3662 {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0}, 3691 {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0},
3663 {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1}, 3692 {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1},
3664 {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa}, 3693 {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa},
3665 /* Disable CL36 PCS Tx */
3666 {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0},
3667 /* Double Wide Single Data Rate @ pll rate */
3668 {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF},
3669 /* Leave cl72 training enable, needed for KR */ 3694 /* Leave cl72 training enable, needed for KR */
3670 {MDIO_PMA_DEVAD, 3695 {MDIO_PMA_DEVAD,
3671 MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150, 3696 MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
@@ -3676,11 +3701,24 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
3676 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, 3701 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
3677 reg_set[i].val); 3702 reg_set[i].val);
3678 3703
3679 /* Leave CL72 enabled */ 3704 lane = bnx2x_get_warpcore_lane(phy, params);
3680 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3705 /* Global registers */
3681 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 3706 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
3682 0x3800); 3707 MDIO_AER_BLOCK_AER_REG, 0);
3708 /* Disable CL36 PCS Tx */
3709 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3710 MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16);
3711 val16 &= ~(0x0011 << lane);
3712 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3713 MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16);
3683 3714
3715 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3716 MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16);
3717 val16 |= (0x0303 << (lane << 1));
3718 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3719 MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16);
3720 /* Restore AER */
3721 bnx2x_set_aer_mmd(params, phy);
3684 /* Set speed via PMA/PMD register */ 3722 /* Set speed via PMA/PMD register */
3685 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 3723 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
3686 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040); 3724 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
@@ -4303,7 +4341,7 @@ static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
4303 struct link_params *params) 4341 struct link_params *params)
4304{ 4342{
4305 struct bnx2x *bp = params->bp; 4343 struct bnx2x *bp = params->bp;
4306 u16 val16; 4344 u16 val16, lane;
4307 bnx2x_sfp_e3_set_transmitter(params, phy, 0); 4345 bnx2x_sfp_e3_set_transmitter(params, phy, 0);
4308 bnx2x_set_mdio_clk(bp, params->chip_id, params->port); 4346 bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
4309 bnx2x_set_aer_mmd(params, phy); 4347 bnx2x_set_aer_mmd(params, phy);
@@ -4340,6 +4378,30 @@ static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
4340 MDIO_WC_REG_XGXSBLK1_LANECTRL2, 4378 MDIO_WC_REG_XGXSBLK1_LANECTRL2,
4341 val16 & 0xff00); 4379 val16 & 0xff00);
4342 4380
4381 lane = bnx2x_get_warpcore_lane(phy, params);
4382 /* Disable CL36 PCS Tx */
4383 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
4384 MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16);
4385 val16 |= (0x11 << lane);
4386 if (phy->flags & FLAGS_WC_DUAL_MODE)
4387 val16 |= (0x22 << lane);
4388 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4389 MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16);
4390
4391 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
4392 MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16);
4393 val16 &= ~(0x0303 << (lane << 1));
4394 val16 |= (0x0101 << (lane << 1));
4395 if (phy->flags & FLAGS_WC_DUAL_MODE) {
4396 val16 &= ~(0x0c0c << (lane << 1));
4397 val16 |= (0x0404 << (lane << 1));
4398 }
4399
4400 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4401 MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16);
4402 /* Restore AER */
4403 bnx2x_set_aer_mmd(params, phy);
4404
4343} 4405}
4344 4406
4345static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy, 4407static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
@@ -6296,15 +6358,7 @@ static int bnx2x_update_link_down(struct link_params *params,
6296 vars->mac_type = MAC_TYPE_NONE; 6358 vars->mac_type = MAC_TYPE_NONE;
6297 6359
6298 /* Update shared memory */ 6360 /* Update shared memory */
6299 vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK | 6361 vars->link_status &= ~LINK_UPDATE_MASK;
6300 LINK_STATUS_LINK_UP |
6301 LINK_STATUS_PHYSICAL_LINK_FLAG |
6302 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE |
6303 LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK |
6304 LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK |
6305 LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK |
6306 LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE |
6307 LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE);
6308 vars->line_speed = 0; 6362 vars->line_speed = 0;
6309 bnx2x_update_mng(params, vars->link_status); 6363 bnx2x_update_mng(params, vars->link_status);
6310 6364
@@ -6452,6 +6506,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6452 u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed; 6506 u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
6453 u8 active_external_phy = INT_PHY; 6507 u8 active_external_phy = INT_PHY;
6454 vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; 6508 vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
6509 vars->link_status &= ~LINK_UPDATE_MASK;
6455 for (phy_index = INT_PHY; phy_index < params->num_phys; 6510 for (phy_index = INT_PHY; phy_index < params->num_phys;
6456 phy_index++) { 6511 phy_index++) {
6457 phy_vars[phy_index].flow_ctrl = 0; 6512 phy_vars[phy_index].flow_ctrl = 0;
@@ -7579,7 +7634,7 @@ static void bnx2x_warpcore_power_module(struct link_params *params,
7579static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, 7634static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7580 struct link_params *params, 7635 struct link_params *params,
7581 u16 addr, u8 byte_cnt, 7636 u16 addr, u8 byte_cnt,
7582 u8 *o_buf) 7637 u8 *o_buf, u8 is_init)
7583{ 7638{
7584 int rc = 0; 7639 int rc = 0;
7585 u8 i, j = 0, cnt = 0; 7640 u8 i, j = 0, cnt = 0;
@@ -7596,10 +7651,10 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7596 /* 4 byte aligned address */ 7651 /* 4 byte aligned address */
7597 addr32 = addr & (~0x3); 7652 addr32 = addr & (~0x3);
7598 do { 7653 do {
7599 if (cnt == I2C_WA_PWR_ITER) { 7654 if ((!is_init) && (cnt == I2C_WA_PWR_ITER)) {
7600 bnx2x_warpcore_power_module(params, phy, 0); 7655 bnx2x_warpcore_power_module(params, phy, 0);
7601 /* Note that 100us are not enough here */ 7656 /* Note that 100us are not enough here */
7602 usleep_range(1000,1000); 7657 usleep_range(1000, 2000);
7603 bnx2x_warpcore_power_module(params, phy, 1); 7658 bnx2x_warpcore_power_module(params, phy, 1);
7604 } 7659 }
7605 rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt, 7660 rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
@@ -7719,7 +7774,7 @@ int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7719 break; 7774 break;
7720 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 7775 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7721 rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr, 7776 rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr,
7722 byte_cnt, o_buf); 7777 byte_cnt, o_buf, 0);
7723 break; 7778 break;
7724 } 7779 }
7725 return rc; 7780 return rc;
@@ -7923,6 +7978,7 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
7923 7978
7924{ 7979{
7925 u8 val; 7980 u8 val;
7981 int rc;
7926 struct bnx2x *bp = params->bp; 7982 struct bnx2x *bp = params->bp;
7927 u16 timeout; 7983 u16 timeout;
7928 /* Initialization time after hot-plug may take up to 300ms for 7984 /* Initialization time after hot-plug may take up to 300ms for
@@ -7930,8 +7986,14 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
7930 */ 7986 */
7931 7987
7932 for (timeout = 0; timeout < 60; timeout++) { 7988 for (timeout = 0; timeout < 60; timeout++) {
7933 if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val) 7989 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
7934 == 0) { 7990 rc = bnx2x_warpcore_read_sfp_module_eeprom(phy,
7991 params, 1,
7992 1, &val, 1);
7993 else
7994 rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1,
7995 &val);
7996 if (rc == 0) {
7935 DP(NETIF_MSG_LINK, 7997 DP(NETIF_MSG_LINK,
7936 "SFP+ module initialization took %d ms\n", 7998 "SFP+ module initialization took %d ms\n",
7937 timeout * 5); 7999 timeout * 5);
@@ -7939,7 +8001,8 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
7939 } 8001 }
7940 usleep_range(5000, 10000); 8002 usleep_range(5000, 10000);
7941 } 8003 }
7942 return -EINVAL; 8004 rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val);
8005 return rc;
7943} 8006}
7944 8007
7945static void bnx2x_8727_power_module(struct bnx2x *bp, 8008static void bnx2x_8727_power_module(struct bnx2x *bp,
@@ -10993,7 +11056,7 @@ static struct bnx2x_phy phy_xgxs = {
10993 .format_fw_ver = (format_fw_ver_t)NULL, 11056 .format_fw_ver = (format_fw_ver_t)NULL,
10994 .hw_reset = (hw_reset_t)NULL, 11057 .hw_reset = (hw_reset_t)NULL,
10995 .set_link_led = (set_link_led_t)NULL, 11058 .set_link_led = (set_link_led_t)NULL,
10996 .phy_specific_func = (phy_specific_func_t)NULL 11059 .phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func
10997}; 11060};
10998static struct bnx2x_phy phy_warpcore = { 11061static struct bnx2x_phy phy_warpcore = {
10999 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, 11062 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
@@ -11465,6 +11528,11 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
11465 phy->media_type = ETH_PHY_BASE_T; 11528 phy->media_type = ETH_PHY_BASE_T;
11466 break; 11529 break;
11467 case PORT_HW_CFG_NET_SERDES_IF_XFI: 11530 case PORT_HW_CFG_NET_SERDES_IF_XFI:
11531 phy->supported &= (SUPPORTED_1000baseT_Full |
11532 SUPPORTED_10000baseT_Full |
11533 SUPPORTED_FIBRE |
11534 SUPPORTED_Pause |
11535 SUPPORTED_Asym_Pause);
11468 phy->media_type = ETH_PHY_XFP_FIBER; 11536 phy->media_type = ETH_PHY_XFP_FIBER;
11469 break; 11537 break;
11470 case PORT_HW_CFG_NET_SERDES_IF_SFI: 11538 case PORT_HW_CFG_NET_SERDES_IF_SFI:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index d5648fc666bd..bd1fd3d87c24 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -6794,8 +6794,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
6794 6794
6795 bnx2x_init_block(bp, BLOCK_DORQ, init_phase); 6795 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
6796 6796
6797 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
6798
6797 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { 6799 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
6798 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
6799 6800
6800 if (IS_MF(bp)) 6801 if (IS_MF(bp))
6801 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); 6802 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
@@ -11902,7 +11903,15 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11902 /* disable FCOE L2 queue for E1x */ 11903 /* disable FCOE L2 queue for E1x */
11903 if (CHIP_IS_E1x(bp)) 11904 if (CHIP_IS_E1x(bp))
11904 bp->flags |= NO_FCOE_FLAG; 11905 bp->flags |= NO_FCOE_FLAG;
11905 11906 /* disable FCOE for 57840 device, until FW supports it */
11907 switch (ent->driver_data) {
11908 case BCM57840_O:
11909 case BCM57840_4_10:
11910 case BCM57840_2_20:
11911 case BCM57840_MFO:
11912 case BCM57840_MF:
11913 bp->flags |= NO_FCOE_FLAG;
11914 }
11906#endif 11915#endif
11907 11916
11908 11917
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 71971a161bd1..614981c02264 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -126,7 +126,7 @@ static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
126 /* Check if this request is ok */ 126 /* Check if this request is ok */
127 rc = o->validate(bp, o->owner, elem); 127 rc = o->validate(bp, o->owner, elem);
128 if (rc) { 128 if (rc) {
129 BNX2X_ERR("Preamble failed: %d\n", rc); 129 DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
130 goto free_and_exit; 130 goto free_and_exit;
131 } 131 }
132 } 132 }
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 2b4b4f529ab4..16814b34d4b6 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -375,7 +375,6 @@ struct xgmac_priv {
375 unsigned int tx_tail; 375 unsigned int tx_tail;
376 376
377 void __iomem *base; 377 void __iomem *base;
378 struct sk_buff_head rx_recycle;
379 unsigned int dma_buf_sz; 378 unsigned int dma_buf_sz;
380 dma_addr_t dma_rx_phy; 379 dma_addr_t dma_rx_phy;
381 dma_addr_t dma_tx_phy; 380 dma_addr_t dma_tx_phy;
@@ -672,9 +671,7 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
672 p = priv->dma_rx + entry; 671 p = priv->dma_rx + entry;
673 672
674 if (priv->rx_skbuff[entry] == NULL) { 673 if (priv->rx_skbuff[entry] == NULL) {
675 skb = __skb_dequeue(&priv->rx_recycle); 674 skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
676 if (skb == NULL)
677 skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
678 if (unlikely(skb == NULL)) 675 if (unlikely(skb == NULL))
679 break; 676 break;
680 677
@@ -887,17 +884,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
887 desc_get_buf_len(p), DMA_TO_DEVICE); 884 desc_get_buf_len(p), DMA_TO_DEVICE);
888 } 885 }
889 886
890 /* 887 dev_kfree_skb(skb);
891 * If there's room in the queue (limit it to size)
892 * we add this skb back into the pool,
893 * if it's the right size.
894 */
895 if ((skb_queue_len(&priv->rx_recycle) <
896 DMA_RX_RING_SZ) &&
897 skb_recycle_check(skb, priv->dma_buf_sz))
898 __skb_queue_head(&priv->rx_recycle, skb);
899 else
900 dev_kfree_skb(skb);
901 } 888 }
902 889
903 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) > 890 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
@@ -1016,7 +1003,6 @@ static int xgmac_open(struct net_device *dev)
1016 dev->dev_addr); 1003 dev->dev_addr);
1017 } 1004 }
1018 1005
1019 skb_queue_head_init(&priv->rx_recycle);
1020 memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats)); 1006 memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));
1021 1007
1022 /* Initialize the XGMAC and descriptors */ 1008 /* Initialize the XGMAC and descriptors */
@@ -1053,7 +1039,6 @@ static int xgmac_stop(struct net_device *dev)
1053 napi_disable(&priv->napi); 1039 napi_disable(&priv->napi);
1054 1040
1055 writel(0, priv->base + XGMAC_DMA_INTR_ENA); 1041 writel(0, priv->base + XGMAC_DMA_INTR_ENA);
1056 skb_queue_purge(&priv->rx_recycle);
1057 1042
1058 /* Disable the MAC core */ 1043 /* Disable the MAC core */
1059 xgmac_mac_disable(priv->base); 1044 xgmac_mac_disable(priv->base);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 31752b24434e..378988b5709a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -251,6 +251,8 @@ struct adapter_params {
251 unsigned char rev; /* chip revision */ 251 unsigned char rev; /* chip revision */
252 unsigned char offload; 252 unsigned char offload;
253 253
254 unsigned char bypass;
255
254 unsigned int ofldq_wr_cred; 256 unsigned int ofldq_wr_cred;
255}; 257};
256 258
@@ -642,6 +644,23 @@ extern int dbfifo_int_thresh;
642#define for_each_port(adapter, iter) \ 644#define for_each_port(adapter, iter) \
643 for (iter = 0; iter < (adapter)->params.nports; ++iter) 645 for (iter = 0; iter < (adapter)->params.nports; ++iter)
644 646
647static inline int is_bypass(struct adapter *adap)
648{
649 return adap->params.bypass;
650}
651
652static inline int is_bypass_device(int device)
653{
654 /* this should be set based upon device capabilities */
655 switch (device) {
656 case 0x440b:
657 case 0x440c:
658 return 1;
659 default:
660 return 0;
661 }
662}
663
645static inline unsigned int core_ticks_per_usec(const struct adapter *adap) 664static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
646{ 665{
647 return adap->params.vpd.cclk / 1000; 666 return adap->params.vpd.cclk / 1000;
@@ -696,6 +715,7 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable);
696int get_vpd_params(struct adapter *adapter, struct vpd_params *p); 715int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
697int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); 716int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
698unsigned int t4_flash_cfg_addr(struct adapter *adapter); 717unsigned int t4_flash_cfg_addr(struct adapter *adapter);
718int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
699int t4_check_fw_version(struct adapter *adapter); 719int t4_check_fw_version(struct adapter *adapter);
700int t4_prep_adapter(struct adapter *adapter); 720int t4_prep_adapter(struct adapter *adapter);
701int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); 721int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6b9f6bb2f7ed..0df1284df497 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -443,7 +443,10 @@ int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
443module_param(dbfifo_int_thresh, int, 0644); 443module_param(dbfifo_int_thresh, int, 0644);
444MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold"); 444MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
445 445
446int dbfifo_drain_delay = 1000; /* usecs to sleep while draining the dbfifo */ 446/*
447 * usecs to sleep while draining the dbfifo
448 */
449static int dbfifo_drain_delay = 1000;
447module_param(dbfifo_drain_delay, int, 0644); 450module_param(dbfifo_drain_delay, int, 0644);
448MODULE_PARM_DESC(dbfifo_drain_delay, 451MODULE_PARM_DESC(dbfifo_drain_delay,
449 "usecs to sleep while draining the dbfifo"); 452 "usecs to sleep while draining the dbfifo");
@@ -636,7 +639,7 @@ static void name_msix_vecs(struct adapter *adap)
636static int request_msix_queue_irqs(struct adapter *adap) 639static int request_msix_queue_irqs(struct adapter *adap)
637{ 640{
638 struct sge *s = &adap->sge; 641 struct sge *s = &adap->sge;
639 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2; 642 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;
640 643
641 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, 644 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
642 adap->msix_info[1].desc, &s->fw_evtq); 645 adap->msix_info[1].desc, &s->fw_evtq);
@@ -644,56 +647,60 @@ static int request_msix_queue_irqs(struct adapter *adap)
644 return err; 647 return err;
645 648
646 for_each_ethrxq(s, ethqidx) { 649 for_each_ethrxq(s, ethqidx) {
647 err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, 650 err = request_irq(adap->msix_info[msi_index].vec,
648 adap->msix_info[msi].desc, 651 t4_sge_intr_msix, 0,
652 adap->msix_info[msi_index].desc,
649 &s->ethrxq[ethqidx].rspq); 653 &s->ethrxq[ethqidx].rspq);
650 if (err) 654 if (err)
651 goto unwind; 655 goto unwind;
652 msi++; 656 msi_index++;
653 } 657 }
654 for_each_ofldrxq(s, ofldqidx) { 658 for_each_ofldrxq(s, ofldqidx) {
655 err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, 659 err = request_irq(adap->msix_info[msi_index].vec,
656 adap->msix_info[msi].desc, 660 t4_sge_intr_msix, 0,
661 adap->msix_info[msi_index].desc,
657 &s->ofldrxq[ofldqidx].rspq); 662 &s->ofldrxq[ofldqidx].rspq);
658 if (err) 663 if (err)
659 goto unwind; 664 goto unwind;
660 msi++; 665 msi_index++;
661 } 666 }
662 for_each_rdmarxq(s, rdmaqidx) { 667 for_each_rdmarxq(s, rdmaqidx) {
663 err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, 668 err = request_irq(adap->msix_info[msi_index].vec,
664 adap->msix_info[msi].desc, 669 t4_sge_intr_msix, 0,
670 adap->msix_info[msi_index].desc,
665 &s->rdmarxq[rdmaqidx].rspq); 671 &s->rdmarxq[rdmaqidx].rspq);
666 if (err) 672 if (err)
667 goto unwind; 673 goto unwind;
668 msi++; 674 msi_index++;
669 } 675 }
670 return 0; 676 return 0;
671 677
672unwind: 678unwind:
673 while (--rdmaqidx >= 0) 679 while (--rdmaqidx >= 0)
674 free_irq(adap->msix_info[--msi].vec, 680 free_irq(adap->msix_info[--msi_index].vec,
675 &s->rdmarxq[rdmaqidx].rspq); 681 &s->rdmarxq[rdmaqidx].rspq);
676 while (--ofldqidx >= 0) 682 while (--ofldqidx >= 0)
677 free_irq(adap->msix_info[--msi].vec, 683 free_irq(adap->msix_info[--msi_index].vec,
678 &s->ofldrxq[ofldqidx].rspq); 684 &s->ofldrxq[ofldqidx].rspq);
679 while (--ethqidx >= 0) 685 while (--ethqidx >= 0)
680 free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq); 686 free_irq(adap->msix_info[--msi_index].vec,
687 &s->ethrxq[ethqidx].rspq);
681 free_irq(adap->msix_info[1].vec, &s->fw_evtq); 688 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
682 return err; 689 return err;
683} 690}
684 691
685static void free_msix_queue_irqs(struct adapter *adap) 692static void free_msix_queue_irqs(struct adapter *adap)
686{ 693{
687 int i, msi = 2; 694 int i, msi_index = 2;
688 struct sge *s = &adap->sge; 695 struct sge *s = &adap->sge;
689 696
690 free_irq(adap->msix_info[1].vec, &s->fw_evtq); 697 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
691 for_each_ethrxq(s, i) 698 for_each_ethrxq(s, i)
692 free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq); 699 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
693 for_each_ofldrxq(s, i) 700 for_each_ofldrxq(s, i)
694 free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq); 701 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
695 for_each_rdmarxq(s, i) 702 for_each_rdmarxq(s, i)
696 free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq); 703 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
697} 704}
698 705
699/** 706/**
@@ -2535,9 +2542,8 @@ static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
2535 2542
2536 ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8); 2543 ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
2537 if (!ret) { 2544 if (!ret) {
2538 indices = be64_to_cpu(indices); 2545 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
2539 *cidx = (indices >> 25) & 0xffff; 2546 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
2540 *pidx = (indices >> 9) & 0xffff;
2541 } 2547 }
2542 return ret; 2548 return ret;
2543} 2549}
@@ -3410,16 +3416,6 @@ static int adap_init0_config(struct adapter *adapter, int reset)
3410 finicsum, cfcsum); 3416 finicsum, cfcsum);
3411 3417
3412 /* 3418 /*
3413 * If we're a pure NIC driver then disable all offloading facilities.
3414 * This will allow the firmware to optimize aspects of the hardware
3415 * configuration which will result in improved performance.
3416 */
3417 caps_cmd.ofldcaps = 0;
3418 caps_cmd.iscsicaps = 0;
3419 caps_cmd.rdmacaps = 0;
3420 caps_cmd.fcoecaps = 0;
3421
3422 /*
3423 * And now tell the firmware to use the configuration we just loaded. 3419 * And now tell the firmware to use the configuration we just loaded.
3424 */ 3420 */
3425 caps_cmd.op_to_write = 3421 caps_cmd.op_to_write =
@@ -3507,18 +3503,6 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
3507 if (ret < 0) 3503 if (ret < 0)
3508 goto bye; 3504 goto bye;
3509 3505
3510#ifndef CONFIG_CHELSIO_T4_OFFLOAD
3511 /*
3512 * If we're a pure NIC driver then disable all offloading facilities.
3513 * This will allow the firmware to optimize aspects of the hardware
3514 * configuration which will result in improved performance.
3515 */
3516 caps_cmd.ofldcaps = 0;
3517 caps_cmd.iscsicaps = 0;
3518 caps_cmd.rdmacaps = 0;
3519 caps_cmd.fcoecaps = 0;
3520#endif
3521
3522 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) { 3506 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
3523 if (!vf_acls) 3507 if (!vf_acls)
3524 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); 3508 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
@@ -3634,10 +3618,10 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
3634 * field selections will fit in the 36-bit budget. 3618 * field selections will fit in the 36-bit budget.
3635 */ 3619 */
3636 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) { 3620 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
3637 int i, bits = 0; 3621 int j, bits = 0;
3638 3622
3639 for (i = TP_VLAN_PRI_MAP_FIRST; i <= TP_VLAN_PRI_MAP_LAST; i++) 3623 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
3640 switch (tp_vlan_pri_map & (1 << i)) { 3624 switch (tp_vlan_pri_map & (1 << j)) {
3641 case 0: 3625 case 0:
3642 /* compressed filter field not enabled */ 3626 /* compressed filter field not enabled */
3643 break; 3627 break;
@@ -3739,6 +3723,7 @@ static int adap_init0(struct adapter *adap)
3739 u32 v, port_vec; 3723 u32 v, port_vec;
3740 enum dev_state state; 3724 enum dev_state state;
3741 u32 params[7], val[7]; 3725 u32 params[7], val[7];
3726 struct fw_caps_config_cmd caps_cmd;
3742 int reset = 1, j; 3727 int reset = 1, j;
3743 3728
3744 /* 3729 /*
@@ -3892,6 +3877,9 @@ static int adap_init0(struct adapter *adap)
3892 goto bye; 3877 goto bye;
3893 } 3878 }
3894 3879
3880 if (is_bypass_device(adap->pdev->device))
3881 adap->params.bypass = 1;
3882
3895 /* 3883 /*
3896 * Grab some of our basic fundamental operating parameters. 3884 * Grab some of our basic fundamental operating parameters.
3897 */ 3885 */
@@ -3934,13 +3922,12 @@ static int adap_init0(struct adapter *adap)
3934 adap->tids.aftid_end = val[1]; 3922 adap->tids.aftid_end = val[1];
3935 } 3923 }
3936 3924
3937#ifdef CONFIG_CHELSIO_T4_OFFLOAD
3938 /* 3925 /*
3939 * Get device capabilities so we can determine what resources we need 3926 * Get device capabilities so we can determine what resources we need
3940 * to manage. 3927 * to manage.
3941 */ 3928 */
3942 memset(&caps_cmd, 0, sizeof(caps_cmd)); 3929 memset(&caps_cmd, 0, sizeof(caps_cmd));
3943 caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3930 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3944 FW_CMD_REQUEST | FW_CMD_READ); 3931 FW_CMD_REQUEST | FW_CMD_READ);
3945 caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd)); 3932 caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
3946 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), 3933 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
@@ -3985,15 +3972,6 @@ static int adap_init0(struct adapter *adap)
3985 adap->vres.ddp.size = val[4] - val[3] + 1; 3972 adap->vres.ddp.size = val[4] - val[3] + 1;
3986 adap->params.ofldq_wr_cred = val[5]; 3973 adap->params.ofldq_wr_cred = val[5];
3987 3974
3988 params[0] = FW_PARAM_PFVF(ETHOFLD_START);
3989 params[1] = FW_PARAM_PFVF(ETHOFLD_END);
3990 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
3991 params, val);
3992 if ((val[0] != val[1]) && (ret >= 0)) {
3993 adap->tids.uotid_base = val[0];
3994 adap->tids.nuotids = val[1] - val[0] + 1;
3995 }
3996
3997 adap->params.offload = 1; 3975 adap->params.offload = 1;
3998 } 3976 }
3999 if (caps_cmd.rdmacaps) { 3977 if (caps_cmd.rdmacaps) {
@@ -4042,7 +4020,6 @@ static int adap_init0(struct adapter *adap)
4042 } 4020 }
4043#undef FW_PARAM_PFVF 4021#undef FW_PARAM_PFVF
4044#undef FW_PARAM_DEV 4022#undef FW_PARAM_DEV
4045#endif /* CONFIG_CHELSIO_T4_OFFLOAD */
4046 4023
4047 /* 4024 /*
4048 * These are finalized by FW initialization, load their values now. 4025 * These are finalized by FW initialization, load their values now.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 1b899fea1a91..39bec73ff87c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -102,6 +102,9 @@ struct tid_info {
102 unsigned int ftid_base; 102 unsigned int ftid_base;
103 unsigned int aftid_base; 103 unsigned int aftid_base;
104 unsigned int aftid_end; 104 unsigned int aftid_end;
105 /* Server filter region */
106 unsigned int sftid_base;
107 unsigned int nsftids;
105 108
106 spinlock_t atid_lock ____cacheline_aligned_in_smp; 109 spinlock_t atid_lock ____cacheline_aligned_in_smp;
107 union aopen_entry *afree; 110 union aopen_entry *afree;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 137a24438d9c..32eec15fe4c2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -380,9 +380,11 @@ static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
380 /* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */ 380 /* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */
381 for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) { 381 for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) {
382 if (dir) 382 if (dir)
383 *data++ = t4_read_reg(adap, (MEMWIN0_BASE + i)); 383 *data++ = (__force __be32) t4_read_reg(adap,
384 (MEMWIN0_BASE + i));
384 else 385 else
385 t4_write_reg(adap, (MEMWIN0_BASE + i), *data++); 386 t4_write_reg(adap, (MEMWIN0_BASE + i),
387 (__force u32) *data++);
386 } 388 }
387 389
388 return 0; 390 return 0;
@@ -417,7 +419,7 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
417 if ((addr & 0x3) || (len & 0x3)) 419 if ((addr & 0x3) || (len & 0x3))
418 return -EINVAL; 420 return -EINVAL;
419 421
420 data = vmalloc(MEMWIN0_APERTURE/sizeof(__be32)); 422 data = vmalloc(MEMWIN0_APERTURE);
421 if (!data) 423 if (!data)
422 return -ENOMEM; 424 return -ENOMEM;
423 425
@@ -744,7 +746,7 @@ static int t4_read_flash(struct adapter *adapter, unsigned int addr,
744 if (ret) 746 if (ret)
745 return ret; 747 return ret;
746 if (byte_oriented) 748 if (byte_oriented)
747 *data = htonl(*data); 749 *data = (__force __u32) (htonl(*data));
748 } 750 }
749 return 0; 751 return 0;
750} 752}
@@ -992,7 +994,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
992 int ret, addr; 994 int ret, addr;
993 unsigned int i; 995 unsigned int i;
994 u8 first_page[SF_PAGE_SIZE]; 996 u8 first_page[SF_PAGE_SIZE];
995 const u32 *p = (const u32 *)fw_data; 997 const __be32 *p = (const __be32 *)fw_data;
996 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; 998 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
997 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; 999 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
998 unsigned int fw_img_start = adap->params.sf_fw_start; 1000 unsigned int fw_img_start = adap->params.sf_fw_start;
@@ -2315,7 +2317,8 @@ int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
2315 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET); 2317 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
2316 2318
2317 for (i = 0; i < len; i += 4) 2319 for (i = 0; i < len; i += 4)
2318 *data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i)); 2320 *data++ = (__force __be32) t4_read_reg(adap,
2321 (MEMWIN0_BASE + off + i));
2319 2322
2320 return 0; 2323 return 0;
2321} 2324}
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 4d6fe604fa64..d23755ea9bc7 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -446,13 +446,17 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
446 /* Allocate Tx/Rx descriptor memory */ 446 /* Allocate Tx/Rx descriptor memory */
447 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * 447 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
448 DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); 448 DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
449 if (!db->desc_pool_ptr) 449 if (!db->desc_pool_ptr) {
450 err = -ENOMEM;
450 goto err_out_res; 451 goto err_out_res;
452 }
451 453
452 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * 454 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
453 TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); 455 TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
454 if (!db->buf_pool_ptr) 456 if (!db->buf_pool_ptr) {
457 err = -ENOMEM;
455 goto err_out_free_desc; 458 goto err_out_free_desc;
459 }
456 460
457 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; 461 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
458 db->first_tx_desc_dma = db->desc_pool_dma_ptr; 462 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
@@ -462,8 +466,10 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
462 db->chip_id = ent->driver_data; 466 db->chip_id = ent->driver_data;
463 /* IO type range. */ 467 /* IO type range. */
464 db->ioaddr = pci_iomap(pdev, 0, 0); 468 db->ioaddr = pci_iomap(pdev, 0, 0);
465 if (!db->ioaddr) 469 if (!db->ioaddr) {
470 err = -ENOMEM;
466 goto err_out_free_buf; 471 goto err_out_free_buf;
472 }
467 473
468 db->chip_revision = pdev->revision; 474 db->chip_revision = pdev->revision;
469 db->wol_mode = 0; 475 db->wol_mode = 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index eb3f2cb3b93b..d1b6cc587639 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2129,8 +2129,11 @@ void be_detect_error(struct be_adapter *adapter)
2129 ue_hi = (ue_hi & ~ue_hi_mask); 2129 ue_hi = (ue_hi & ~ue_hi_mask);
2130 } 2130 }
2131 2131
2132 if (ue_lo || ue_hi || 2132 /* On certain platforms BE hardware can indicate spurious UEs.
2133 sliport_status & SLIPORT_STATUS_ERR_MASK) { 2133 * Allow the h/w to stop working completely in case of a real UE.
2134 * Hence not setting the hw_error for UE detection.
2135 */
2136 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2134 adapter->hw_error = true; 2137 adapter->hw_error = true;
2135 dev_err(&adapter->pdev->dev, 2138 dev_err(&adapter->pdev->dev,
2136 "Error detected in the card\n"); 2139 "Error detected in the card\n");
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index a1b52ec3b930..1d03dcdd5e56 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1765,7 +1765,6 @@ static void free_skb_resources(struct gfar_private *priv)
1765 sizeof(struct rxbd8) * priv->total_rx_ring_size, 1765 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1766 priv->tx_queue[0]->tx_bd_base, 1766 priv->tx_queue[0]->tx_bd_base,
1767 priv->tx_queue[0]->tx_bd_dma_base); 1767 priv->tx_queue[0]->tx_bd_dma_base);
1768 skb_queue_purge(&priv->rx_recycle);
1769} 1768}
1770 1769
1771void gfar_start(struct net_device *dev) 1770void gfar_start(struct net_device *dev)
@@ -1943,8 +1942,6 @@ static int gfar_enet_open(struct net_device *dev)
1943 1942
1944 enable_napi(priv); 1943 enable_napi(priv);
1945 1944
1946 skb_queue_head_init(&priv->rx_recycle);
1947
1948 /* Initialize a bunch of registers */ 1945 /* Initialize a bunch of registers */
1949 init_registers(dev); 1946 init_registers(dev);
1950 1947
@@ -2533,16 +2530,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2533 2530
2534 bytes_sent += skb->len; 2531 bytes_sent += skb->len;
2535 2532
2536 /* If there's room in the queue (limit it to rx_buffer_size) 2533 dev_kfree_skb_any(skb);
2537 * we add this skb back into the pool, if it's the right size
2538 */
2539 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
2540 skb_recycle_check(skb, priv->rx_buffer_size +
2541 RXBUF_ALIGNMENT)) {
2542 gfar_align_skb(skb);
2543 skb_queue_head(&priv->rx_recycle, skb);
2544 } else
2545 dev_kfree_skb_any(skb);
2546 2534
2547 tx_queue->tx_skbuff[skb_dirtytx] = NULL; 2535 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2548 2536
@@ -2608,7 +2596,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2608static struct sk_buff *gfar_alloc_skb(struct net_device *dev) 2596static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
2609{ 2597{
2610 struct gfar_private *priv = netdev_priv(dev); 2598 struct gfar_private *priv = netdev_priv(dev);
2611 struct sk_buff *skb = NULL; 2599 struct sk_buff *skb;
2612 2600
2613 skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); 2601 skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2614 if (!skb) 2602 if (!skb)
@@ -2621,14 +2609,7 @@ static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
2621 2609
2622struct sk_buff *gfar_new_skb(struct net_device *dev) 2610struct sk_buff *gfar_new_skb(struct net_device *dev)
2623{ 2611{
2624 struct gfar_private *priv = netdev_priv(dev); 2612 return gfar_alloc_skb(dev);
2625 struct sk_buff *skb = NULL;
2626
2627 skb = skb_dequeue(&priv->rx_recycle);
2628 if (!skb)
2629 skb = gfar_alloc_skb(dev);
2630
2631 return skb;
2632} 2613}
2633 2614
2634static inline void count_errors(unsigned short status, struct net_device *dev) 2615static inline void count_errors(unsigned short status, struct net_device *dev)
@@ -2787,7 +2768,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2787 if (unlikely(!newskb)) 2768 if (unlikely(!newskb))
2788 newskb = skb; 2769 newskb = skb;
2789 else if (skb) 2770 else if (skb)
2790 skb_queue_head(&priv->rx_recycle, skb); 2771 dev_kfree_skb(skb);
2791 } else { 2772 } else {
2792 /* Increment the number of packets */ 2773 /* Increment the number of packets */
2793 rx_queue->stats.rx_packets++; 2774 rx_queue->stats.rx_packets++;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 4141ef2ddafc..22eabc13ca99 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1080,8 +1080,6 @@ struct gfar_private {
1080 1080
1081 u32 cur_filer_idx; 1081 u32 cur_filer_idx;
1082 1082
1083 struct sk_buff_head rx_recycle;
1084
1085 /* RX queue filer rule set*/ 1083 /* RX queue filer rule set*/
1086 struct ethtool_rx_list rx_list; 1084 struct ethtool_rx_list rx_list;
1087 struct mutex rx_queue_access; 1085 struct mutex rx_queue_access;
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index b9db0e040563..2e5daee0438a 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -478,7 +478,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
478 pr_err("no resource\n"); 478 pr_err("no resource\n");
479 goto no_resource; 479 goto no_resource;
480 } 480 }
481 if (request_resource(&ioport_resource, etsects->rsrc)) { 481 if (request_resource(&iomem_resource, etsects->rsrc)) {
482 pr_err("resource busy\n"); 482 pr_err("resource busy\n");
483 goto no_resource; 483 goto no_resource;
484 } 484 }
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 164288439220..0a70bb55d1b0 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -209,14 +209,12 @@ static struct list_head *dequeue(struct list_head *lh)
209static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, 209static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
210 u8 __iomem *bd) 210 u8 __iomem *bd)
211{ 211{
212 struct sk_buff *skb = NULL; 212 struct sk_buff *skb;
213 213
214 skb = __skb_dequeue(&ugeth->rx_recycle); 214 skb = netdev_alloc_skb(ugeth->ndev,
215 ugeth->ug_info->uf_info.max_rx_buf_length +
216 UCC_GETH_RX_DATA_BUF_ALIGNMENT);
215 if (!skb) 217 if (!skb)
216 skb = netdev_alloc_skb(ugeth->ndev,
217 ugeth->ug_info->uf_info.max_rx_buf_length +
218 UCC_GETH_RX_DATA_BUF_ALIGNMENT);
219 if (skb == NULL)
220 return NULL; 218 return NULL;
221 219
222 /* We need the data buffer to be aligned properly. We will reserve 220 /* We need the data buffer to be aligned properly. We will reserve
@@ -2020,8 +2018,6 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
2020 iounmap(ugeth->ug_regs); 2018 iounmap(ugeth->ug_regs);
2021 ugeth->ug_regs = NULL; 2019 ugeth->ug_regs = NULL;
2022 } 2020 }
2023
2024 skb_queue_purge(&ugeth->rx_recycle);
2025} 2021}
2026 2022
2027static void ucc_geth_set_multi(struct net_device *dev) 2023static void ucc_geth_set_multi(struct net_device *dev)
@@ -2230,8 +2226,6 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2230 return -ENOMEM; 2226 return -ENOMEM;
2231 } 2227 }
2232 2228
2233 skb_queue_head_init(&ugeth->rx_recycle);
2234
2235 return 0; 2229 return 0;
2236} 2230}
2237 2231
@@ -3274,12 +3268,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
3274 if (netif_msg_rx_err(ugeth)) 3268 if (netif_msg_rx_err(ugeth))
3275 ugeth_err("%s, %d: ERROR!!! skb - 0x%08x", 3269 ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
3276 __func__, __LINE__, (u32) skb); 3270 __func__, __LINE__, (u32) skb);
3277 if (skb) { 3271 dev_kfree_skb(skb);
3278 skb->data = skb->head + NET_SKB_PAD;
3279 skb->len = 0;
3280 skb_reset_tail_pointer(skb);
3281 __skb_queue_head(&ugeth->rx_recycle, skb);
3282 }
3283 3272
3284 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; 3273 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
3285 dev->stats.rx_dropped++; 3274 dev->stats.rx_dropped++;
@@ -3349,13 +3338,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
3349 3338
3350 dev->stats.tx_packets++; 3339 dev->stats.tx_packets++;
3351 3340
3352 if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN && 3341 dev_kfree_skb(skb);
3353 skb_recycle_check(skb,
3354 ugeth->ug_info->uf_info.max_rx_buf_length +
3355 UCC_GETH_RX_DATA_BUF_ALIGNMENT))
3356 __skb_queue_head(&ugeth->rx_recycle, skb);
3357 else
3358 dev_kfree_skb(skb);
3359 3342
3360 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; 3343 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
3361 ugeth->skb_dirtytx[txQ] = 3344 ugeth->skb_dirtytx[txQ] =
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index f71b3e7b12de..75f337163ce3 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -1214,8 +1214,6 @@ struct ucc_geth_private {
1214 /* index of the first skb which hasn't been transmitted yet. */ 1214 /* index of the first skb which hasn't been transmitted yet. */
1215 u16 skb_dirtytx[NUM_TX_QUEUES]; 1215 u16 skb_dirtytx[NUM_TX_QUEUES];
1216 1216
1217 struct sk_buff_head rx_recycle;
1218
1219 struct ugeth_mii_info *mii_info; 1217 struct ugeth_mii_info *mii_info;
1220 struct phy_device *phydev; 1218 struct phy_device *phydev;
1221 phy_interface_t phy_interface; 1219 phy_interface_t phy_interface;
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index cb3356c9af80..04668b47a1df 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -175,13 +175,13 @@ struct e1000_info;
175/* 175/*
176 * in the case of WTHRESH, it appears at least the 82571/2 hardware 176 * in the case of WTHRESH, it appears at least the 82571/2 hardware
177 * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when 177 * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
178 * WTHRESH=4, and since we want 64 bytes at a time written back, set 178 * WTHRESH=4, so a setting of 5 gives the most efficient bus
179 * it to 5 179 * utilization but to avoid possible Tx stalls, set it to 1
180 */ 180 */
181#define E1000_TXDCTL_DMA_BURST_ENABLE \ 181#define E1000_TXDCTL_DMA_BURST_ENABLE \
182 (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \ 182 (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
183 E1000_TXDCTL_COUNT_DESC | \ 183 E1000_TXDCTL_COUNT_DESC | \
184 (5 << 16) | /* wthresh must be +1 more than desired */\ 184 (1 << 16) | /* wthresh must be +1 more than desired */\
185 (1 << 8) | /* hthresh */ \ 185 (1 << 8) | /* hthresh */ \
186 0x1f) /* pthresh */ 186 0x1f) /* pthresh */
187 187
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index ed5b40985edb..d37bfd96c987 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -412,6 +412,8 @@ enum e1e_registers {
412#define E1000_DEV_ID_PCH2_LV_V 0x1503 412#define E1000_DEV_ID_PCH2_LV_V 0x1503
413#define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A 413#define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A
414#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B 414#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B
415#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A
416#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559
415 417
416#define E1000_REVISION_4 4 418#define E1000_REVISION_4 4
417 419
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index fb659dd8db03..f444eb0b76d8 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2831,7 +2831,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2831 * set up some performance related parameters to encourage the 2831 * set up some performance related parameters to encourage the
2832 * hardware to use the bus more efficiently in bursts, depends 2832 * hardware to use the bus more efficiently in bursts, depends
2833 * on the tx_int_delay to be enabled, 2833 * on the tx_int_delay to be enabled,
2834 * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time 2834 * wthresh = 1 ==> burst write is disabled to avoid Tx stalls
2835 * hthresh = 1 ==> prefetch when one or more available 2835 * hthresh = 1 ==> prefetch when one or more available
2836 * pthresh = 0x1f ==> prefetch if internal cache 31 or less 2836 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
2837 * BEWARE: this seems to work but should be considered first if 2837 * BEWARE: this seems to work but should be considered first if
@@ -6558,6 +6558,8 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
6558 6558
6559 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt }, 6559 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
6560 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt }, 6560 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
6561 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
6562 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
6561 6563
6562 { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ 6564 { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
6563}; 6565};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 5bd26763554c..30efc9f0f47a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -410,7 +410,7 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
410#define IXGBE_TX_CTXTDESC(R, i) \ 410#define IXGBE_TX_CTXTDESC(R, i) \
411 (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) 411 (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
412 412
413#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 413#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */
414#ifdef IXGBE_FCOE 414#ifdef IXGBE_FCOE
415/* Use 3K as the baby jumbo frame size for FCoE */ 415/* Use 3K as the baby jumbo frame size for FCoE */
416#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072 416#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 56b20d17d0e4..116f0e901bee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2673,6 +2673,9 @@ static int ixgbe_get_ts_info(struct net_device *dev,
2673 case ixgbe_mac_X540: 2673 case ixgbe_mac_X540:
2674 case ixgbe_mac_82599EB: 2674 case ixgbe_mac_82599EB:
2675 info->so_timestamping = 2675 info->so_timestamping =
2676 SOF_TIMESTAMPING_TX_SOFTWARE |
2677 SOF_TIMESTAMPING_RX_SOFTWARE |
2678 SOF_TIMESTAMPING_SOFTWARE |
2676 SOF_TIMESTAMPING_TX_HARDWARE | 2679 SOF_TIMESTAMPING_TX_HARDWARE |
2677 SOF_TIMESTAMPING_RX_HARDWARE | 2680 SOF_TIMESTAMPING_RX_HARDWARE |
2678 SOF_TIMESTAMPING_RAW_HARDWARE; 2681 SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 383b4e1cd175..4a9c9c285685 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -175,7 +175,7 @@ struct ixgbevf_q_vector {
175#define IXGBEVF_TX_CTXTDESC(R, i) \ 175#define IXGBEVF_TX_CTXTDESC(R, i) \
176 (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) 176 (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
177 177
178#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 178#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */
179 179
180#define OTHER_VECTOR 1 180#define OTHER_VECTOR 1
181#define NON_Q_VECTORS (OTHER_VECTOR) 181#define NON_Q_VECTORS (OTHER_VECTOR)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 0ee9bd4819f4..de1ad506665d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1747,6 +1747,7 @@ err_tx_ring_allocation:
1747 **/ 1747 **/
1748static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) 1748static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
1749{ 1749{
1750 struct net_device *netdev = adapter->netdev;
1750 int err = 0; 1751 int err = 0;
1751 int vector, v_budget; 1752 int vector, v_budget;
1752 1753
@@ -1775,6 +1776,12 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
1775 1776
1776 ixgbevf_acquire_msix_vectors(adapter, v_budget); 1777 ixgbevf_acquire_msix_vectors(adapter, v_budget);
1777 1778
1779 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
1780 if (err)
1781 goto out;
1782
1783 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
1784
1778out: 1785out:
1779 return err; 1786 return err;
1780} 1787}
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index c911d883c27e..f8064df10cc4 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -27,6 +27,7 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/pci.h> 29#include <linux/pci.h>
30#include <linux/pci-aspm.h>
30#include <linux/netdevice.h> 31#include <linux/netdevice.h>
31#include <linux/etherdevice.h> 32#include <linux/etherdevice.h>
32#include <linux/ethtool.h> 33#include <linux/ethtool.h>
@@ -2973,6 +2974,9 @@ jme_init_one(struct pci_dev *pdev,
2973 /* 2974 /*
2974 * set up PCI device basics 2975 * set up PCI device basics
2975 */ 2976 */
2977 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
2978 PCIE_LINK_STATE_CLKPM);
2979
2976 rc = pci_enable_device(pdev); 2980 rc = pci_enable_device(pdev);
2977 if (rc) { 2981 if (rc) {
2978 pr_err("Cannot enable PCI device\n"); 2982 pr_err("Cannot enable PCI device\n");
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 087b9e0669f1..84c13263c514 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -412,7 +412,6 @@ struct mv643xx_eth_private {
412 u8 work_rx_refill; 412 u8 work_rx_refill;
413 413
414 int skb_size; 414 int skb_size;
415 struct sk_buff_head rx_recycle;
416 415
417 /* 416 /*
418 * RX state. 417 * RX state.
@@ -673,9 +672,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
673 struct rx_desc *rx_desc; 672 struct rx_desc *rx_desc;
674 int size; 673 int size;
675 674
676 skb = __skb_dequeue(&mp->rx_recycle); 675 skb = netdev_alloc_skb(mp->dev, mp->skb_size);
677 if (skb == NULL)
678 skb = netdev_alloc_skb(mp->dev, mp->skb_size);
679 676
680 if (skb == NULL) { 677 if (skb == NULL) {
681 mp->oom = 1; 678 mp->oom = 1;
@@ -989,14 +986,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
989 desc->byte_cnt, DMA_TO_DEVICE); 986 desc->byte_cnt, DMA_TO_DEVICE);
990 } 987 }
991 988
992 if (skb != NULL) { 989 dev_kfree_skb(skb);
993 if (skb_queue_len(&mp->rx_recycle) <
994 mp->rx_ring_size &&
995 skb_recycle_check(skb, mp->skb_size))
996 __skb_queue_head(&mp->rx_recycle, skb);
997 else
998 dev_kfree_skb(skb);
999 }
1000 } 990 }
1001 991
1002 __netif_tx_unlock(nq); 992 __netif_tx_unlock(nq);
@@ -2349,8 +2339,6 @@ static int mv643xx_eth_open(struct net_device *dev)
2349 2339
2350 napi_enable(&mp->napi); 2340 napi_enable(&mp->napi);
2351 2341
2352 skb_queue_head_init(&mp->rx_recycle);
2353
2354 mp->int_mask = INT_EXT; 2342 mp->int_mask = INT_EXT;
2355 2343
2356 for (i = 0; i < mp->rxq_count; i++) { 2344 for (i = 0; i < mp->rxq_count; i++) {
@@ -2445,8 +2433,6 @@ static int mv643xx_eth_stop(struct net_device *dev)
2445 mib_counters_update(mp); 2433 mib_counters_update(mp);
2446 del_timer_sync(&mp->mib_counters_timer); 2434 del_timer_sync(&mp->mib_counters_timer);
2447 2435
2448 skb_queue_purge(&mp->rx_recycle);
2449
2450 for (i = 0; i < mp->rxq_count; i++) 2436 for (i = 0; i < mp->rxq_count; i++)
2451 rxq_deinit(mp->rxq + i); 2437 rxq_deinit(mp->rxq + i);
2452 for (i = 0; i < mp->txq_count; i++) 2438 for (i = 0; i < mp->txq_count; i++)
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 5a30bf823099..9b9c2ac5c4c2 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3189,7 +3189,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
3189 if (work_done < to_do) { 3189 if (work_done < to_do) {
3190 unsigned long flags; 3190 unsigned long flags;
3191 3191
3192 napi_gro_flush(napi); 3192 napi_gro_flush(napi, false);
3193 spin_lock_irqsave(&hw->hw_lock, flags); 3193 spin_lock_irqsave(&hw->hw_lock, flags);
3194 __napi_complete(napi); 3194 __napi_complete(napi);
3195 hw->intr_mask |= napimask[skge->port]; 3195 hw->intr_mask |= napimask[skge->port];
@@ -3945,8 +3945,10 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3945 skge_board_name(hw), hw->chip_rev); 3945 skge_board_name(hw), hw->chip_rev);
3946 3946
3947 dev = skge_devinit(hw, 0, using_dac); 3947 dev = skge_devinit(hw, 0, using_dac);
3948 if (!dev) 3948 if (!dev) {
3949 err = -ENOMEM;
3949 goto err_out_led_off; 3950 goto err_out_led_off;
3951 }
3950 3952
3951 /* Some motherboards are broken and has zero in ROM. */ 3953 /* Some motherboards are broken and has zero in ROM. */
3952 if (!is_valid_ether_addr(dev->dev_addr)) 3954 if (!is_valid_ether_addr(dev->dev_addr))
@@ -4153,6 +4155,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = {
4153 DMI_MATCH(DMI_BOARD_NAME, "nForce"), 4155 DMI_MATCH(DMI_BOARD_NAME, "nForce"),
4154 }, 4156 },
4155 }, 4157 },
4158 {
4159 .ident = "ASUS P5NSLI",
4160 .matches = {
4161 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
4162 DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
4163 },
4164 },
4156 {} 4165 {}
4157}; 4166};
4158 4167
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 2b0748dba8b8..78946feab4a2 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4924,6 +4924,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4924 4924
4925 if (~reg == 0) { 4925 if (~reg == 0) {
4926 dev_err(&pdev->dev, "PCI configuration read error\n"); 4926 dev_err(&pdev->dev, "PCI configuration read error\n");
4927 err = -EIO;
4927 goto err_out; 4928 goto err_out;
4928 } 4929 }
4929 4930
@@ -4993,8 +4994,10 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4993 hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING); 4994 hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING);
4994 hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le), 4995 hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
4995 &hw->st_dma); 4996 &hw->st_dma);
4996 if (!hw->st_le) 4997 if (!hw->st_le) {
4998 err = -ENOMEM;
4997 goto err_out_reset; 4999 goto err_out_reset;
5000 }
4998 5001
4999 dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n", 5002 dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
5000 sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev); 5003 sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c10e3a6de09f..b35094c590ba 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -143,7 +143,6 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
143 mlx4_bf_free(mdev->dev, &ring->bf); 143 mlx4_bf_free(mdev->dev, &ring->bf);
144 mlx4_qp_remove(mdev->dev, &ring->qp); 144 mlx4_qp_remove(mdev->dev, &ring->qp);
145 mlx4_qp_free(mdev->dev, &ring->qp); 145 mlx4_qp_free(mdev->dev, &ring->qp);
146 mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
147 mlx4_en_unmap_buffer(&ring->wqres.buf); 146 mlx4_en_unmap_buffer(&ring->wqres.buf);
148 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); 147 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
149 kfree(ring->bounce_buf); 148 kfree(ring->bounce_buf);
@@ -712,7 +711,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
712 if (bounce) 711 if (bounce)
713 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); 712 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
714 713
715 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) { 714 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) {
716 *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn); 715 *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
717 op_own |= htonl((bf_index & 0xffff) << 8); 716 op_own |= htonl((bf_index & 0xffff) << 8);
718 /* Ensure new descirptor hits memory 717 /* Ensure new descirptor hits memory
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 51c764901ad2..b84a88bc44dc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -329,9 +329,6 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
329 ctx = &priv->mfunc.master.slave_state[slave]; 329 ctx = &priv->mfunc.master.slave_state[slave];
330 spin_lock_irqsave(&ctx->lock, flags); 330 spin_lock_irqsave(&ctx->lock, flags);
331 331
332 mlx4_dbg(dev, "%s: slave: %d, current state: %d new event :%d\n",
333 __func__, slave, cur_state, event);
334
335 switch (cur_state) { 332 switch (cur_state) {
336 case SLAVE_PORT_DOWN: 333 case SLAVE_PORT_DOWN:
337 if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event) 334 if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
@@ -366,9 +363,6 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
366 goto out; 363 goto out;
367 } 364 }
368 ret = mlx4_get_slave_port_state(dev, slave, port); 365 ret = mlx4_get_slave_port_state(dev, slave, port);
369 mlx4_dbg(dev, "%s: slave: %d, current state: %d new event"
370 " :%d gen_event: %d\n",
371 __func__, slave, cur_state, event, *gen_event);
372 366
373out: 367out:
374 spin_unlock_irqrestore(&ctx->lock, flags); 368 spin_unlock_irqrestore(&ctx->lock, flags);
@@ -843,6 +837,18 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
843 return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4); 837 return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
844} 838}
845 839
840static void mlx4_unmap_uar(struct mlx4_dev *dev)
841{
842 struct mlx4_priv *priv = mlx4_priv(dev);
843 int i;
844
845 for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
846 if (priv->eq_table.uar_map[i]) {
847 iounmap(priv->eq_table.uar_map[i]);
848 priv->eq_table.uar_map[i] = NULL;
849 }
850}
851
846static int mlx4_create_eq(struct mlx4_dev *dev, int nent, 852static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
847 u8 intr, struct mlx4_eq *eq) 853 u8 intr, struct mlx4_eq *eq)
848{ 854{
@@ -1207,6 +1213,7 @@ err_out_unmap:
1207 mlx4_free_irqs(dev); 1213 mlx4_free_irqs(dev);
1208 1214
1209err_out_bitmap: 1215err_out_bitmap:
1216 mlx4_unmap_uar(dev);
1210 mlx4_bitmap_cleanup(&priv->eq_table.bitmap); 1217 mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
1211 1218
1212err_out_free: 1219err_out_free:
@@ -1231,10 +1238,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
1231 if (!mlx4_is_slave(dev)) 1238 if (!mlx4_is_slave(dev))
1232 mlx4_unmap_clr_int(dev); 1239 mlx4_unmap_clr_int(dev);
1233 1240
1234 for (i = 0; i < mlx4_num_eq_uar(dev); ++i) 1241 mlx4_unmap_uar(dev);
1235 if (priv->eq_table.uar_map[i])
1236 iounmap(priv->eq_table.uar_map[i]);
1237
1238 mlx4_bitmap_cleanup(&priv->eq_table.bitmap); 1242 mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
1239 1243
1240 kfree(priv->eq_table.uar_map); 1244 kfree(priv->eq_table.uar_map);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 80df2ab0177c..2aa80afd98d2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1405,7 +1405,10 @@ unmap_bf:
1405 unmap_bf_area(dev); 1405 unmap_bf_area(dev);
1406 1406
1407err_close: 1407err_close:
1408 mlx4_close_hca(dev); 1408 if (mlx4_is_slave(dev))
1409 mlx4_slave_exit(dev);
1410 else
1411 mlx4_CLOSE_HCA(dev, 0);
1409 1412
1410err_free_icm: 1413err_free_icm:
1411 if (!mlx4_is_slave(dev)) 1414 if (!mlx4_is_slave(dev))
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 926c911c0ac4..b05705f50f0f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -330,9 +330,6 @@ static void update_pkey_index(struct mlx4_dev *dev, int slave,
330 330
331 new_index = priv->virt2phys_pkey[slave][port - 1][orig_index]; 331 new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
332 *(u8 *)(inbox->buf + 35) = new_index; 332 *(u8 *)(inbox->buf + 35) = new_index;
333
334 mlx4_dbg(dev, "port = %d, orig pkey index = %d, "
335 "new pkey index = %d\n", port, orig_index, new_index);
336} 333}
337 334
338static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, 335static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
@@ -351,9 +348,6 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
351 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) 348 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
352 qp_ctx->alt_path.mgid_index = slave & 0x7F; 349 qp_ctx->alt_path.mgid_index = slave & 0x7F;
353 } 350 }
354
355 mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
356 slave, qp_ctx->pri_path.mgid_index);
357} 351}
358 352
359static int mpt_mask(struct mlx4_dev *dev) 353static int mpt_mask(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 5b61d12f8b91..dbaaa99a0d43 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -947,8 +947,8 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
947 i = register_netdev(dev); 947 i = register_netdev(dev);
948 if (i) 948 if (i)
949 goto err_register_netdev; 949 goto err_register_netdev;
950 950 i = NATSEMI_CREATE_FILE(pdev, dspcfg_workaround);
951 if (NATSEMI_CREATE_FILE(pdev, dspcfg_workaround)) 951 if (i)
952 goto err_create_file; 952 goto err_create_file;
953 953
954 if (netif_msg_drv(np)) { 954 if (netif_msg_drv(np)) {
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index e01c0a07a93a..7dfe88398d7d 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -205,6 +205,7 @@ static int __init sonic_probe1(struct net_device *dev)
205 if (lp->descriptors == NULL) { 205 if (lp->descriptors == NULL) {
206 printk(KERN_ERR "%s: couldn't alloc DMA memory for " 206 printk(KERN_ERR "%s: couldn't alloc DMA memory for "
207 " descriptors.\n", dev_name(lp->device)); 207 " descriptors.\n", dev_name(lp->device));
208 err = -ENOMEM;
208 goto out; 209 goto out;
209 } 210 }
210 211
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 53743f7a2ca9..af8b4142088c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1524,6 +1524,7 @@ static int lpc_eth_drv_remove(struct platform_device *pdev)
1524 pldat->dma_buff_base_p); 1524 pldat->dma_buff_base_p);
1525 free_irq(ndev->irq, ndev); 1525 free_irq(ndev->irq, ndev);
1526 iounmap(pldat->net_base); 1526 iounmap(pldat->net_base);
1527 mdiobus_unregister(pldat->mii_bus);
1527 mdiobus_free(pldat->mii_bus); 1528 mdiobus_free(pldat->mii_bus);
1528 clk_disable(pldat->clk); 1529 clk_disable(pldat->clk);
1529 clk_put(pldat->clk); 1530 clk_put(pldat->clk);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index 97302419a377..5296cc8d3cba 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -26,6 +26,9 @@ if PCH_GBE
26config PCH_PTP 26config PCH_PTP
27 bool "PCH PTP clock support" 27 bool "PCH PTP clock support"
28 default n 28 default n
29 depends on EXPERIMENTAL
30 select PPS
31 select PTP_1588_CLOCK
29 select PTP_1588_CLOCK_PCH 32 select PTP_1588_CLOCK_PCH
30 ---help--- 33 ---help---
31 Say Y here if you want to use Precision Time Protocol (PTP) in the 34 Say Y here if you want to use Precision Time Protocol (PTP) in the
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index b2a94d02a521..4c4fe5b1a29a 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -339,26 +339,6 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
339} 339}
340 340
341/** 341/**
342 * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
343 * @reg: Pointer of register
344 * @busy: Busy bit
345 */
346static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
347{
348 u32 tmp;
349 int ret = -1;
350 /* wait busy */
351 tmp = 20;
352 while ((ioread32(reg) & bit) && --tmp)
353 udelay(5);
354 if (!tmp)
355 pr_err("Error: busy bit is not cleared\n");
356 else
357 ret = 0;
358 return ret;
359}
360
361/**
362 * pch_gbe_mac_mar_set - Set MAC address register 342 * pch_gbe_mac_mar_set - Set MAC address register
363 * @hw: Pointer to the HW structure 343 * @hw: Pointer to the HW structure
364 * @addr: Pointer to the MAC address 344 * @addr: Pointer to the MAC address
@@ -409,15 +389,20 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
409 return; 389 return;
410} 390}
411 391
412static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw) 392static void pch_gbe_disable_mac_rx(struct pch_gbe_hw *hw)
413{ 393{
414 /* Read the MAC addresses. and store to the private data */ 394 u32 rctl;
415 pch_gbe_mac_read_mac_addr(hw); 395 /* Disables Receive MAC */
416 iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET); 396 rctl = ioread32(&hw->reg->MAC_RX_EN);
417 pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST); 397 iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
418 /* Setup the MAC addresses */ 398}
419 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); 399
420 return; 400static void pch_gbe_enable_mac_rx(struct pch_gbe_hw *hw)
401{
402 u32 rctl;
403 /* Enables Receive MAC */
404 rctl = ioread32(&hw->reg->MAC_RX_EN);
405 iowrite32((rctl | PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
421} 406}
422 407
423/** 408/**
@@ -913,7 +898,7 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
913static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter) 898static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
914{ 899{
915 struct pch_gbe_hw *hw = &adapter->hw; 900 struct pch_gbe_hw *hw = &adapter->hw;
916 u32 rdba, rdlen, rctl, rxdma; 901 u32 rdba, rdlen, rxdma;
917 902
918 pr_debug("dma adr = 0x%08llx size = 0x%08x\n", 903 pr_debug("dma adr = 0x%08llx size = 0x%08x\n",
919 (unsigned long long)adapter->rx_ring->dma, 904 (unsigned long long)adapter->rx_ring->dma,
@@ -921,9 +906,7 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
921 906
922 pch_gbe_mac_force_mac_fc(hw); 907 pch_gbe_mac_force_mac_fc(hw);
923 908
924 /* Disables Receive MAC */ 909 pch_gbe_disable_mac_rx(hw);
925 rctl = ioread32(&hw->reg->MAC_RX_EN);
926 iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
927 910
928 /* Disables Receive DMA */ 911 /* Disables Receive DMA */
929 rxdma = ioread32(&hw->reg->DMA_CTRL); 912 rxdma = ioread32(&hw->reg->DMA_CTRL);
@@ -1316,38 +1299,17 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
1316 spin_unlock_irqrestore(&adapter->stats_lock, flags); 1299 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1317} 1300}
1318 1301
1319static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter) 1302static void pch_gbe_disable_dma_rx(struct pch_gbe_hw *hw)
1320{ 1303{
1321 struct pch_gbe_hw *hw = &adapter->hw;
1322 u32 rxdma; 1304 u32 rxdma;
1323 u16 value;
1324 int ret;
1325 1305
1326 /* Disable Receive DMA */ 1306 /* Disable Receive DMA */
1327 rxdma = ioread32(&hw->reg->DMA_CTRL); 1307 rxdma = ioread32(&hw->reg->DMA_CTRL);
1328 rxdma &= ~PCH_GBE_RX_DMA_EN; 1308 rxdma &= ~PCH_GBE_RX_DMA_EN;
1329 iowrite32(rxdma, &hw->reg->DMA_CTRL); 1309 iowrite32(rxdma, &hw->reg->DMA_CTRL);
1330 /* Wait Rx DMA BUS is IDLE */
1331 ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
1332 if (ret) {
1333 /* Disable Bus master */
1334 pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
1335 value &= ~PCI_COMMAND_MASTER;
1336 pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
1337 /* Stop Receive */
1338 pch_gbe_mac_reset_rx(hw);
1339 /* Enable Bus master */
1340 value |= PCI_COMMAND_MASTER;
1341 pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
1342 } else {
1343 /* Stop Receive */
1344 pch_gbe_mac_reset_rx(hw);
1345 }
1346 /* reprogram multicast address register after reset */
1347 pch_gbe_set_multi(adapter->netdev);
1348} 1310}
1349 1311
1350static void pch_gbe_start_receive(struct pch_gbe_hw *hw) 1312static void pch_gbe_enable_dma_rx(struct pch_gbe_hw *hw)
1351{ 1313{
1352 u32 rxdma; 1314 u32 rxdma;
1353 1315
@@ -1355,9 +1317,6 @@ static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
1355 rxdma = ioread32(&hw->reg->DMA_CTRL); 1317 rxdma = ioread32(&hw->reg->DMA_CTRL);
1356 rxdma |= PCH_GBE_RX_DMA_EN; 1318 rxdma |= PCH_GBE_RX_DMA_EN;
1357 iowrite32(rxdma, &hw->reg->DMA_CTRL); 1319 iowrite32(rxdma, &hw->reg->DMA_CTRL);
1358 /* Enables Receive */
1359 iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
1360 return;
1361} 1320}
1362 1321
1363/** 1322/**
@@ -1393,7 +1352,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
1393 int_en = ioread32(&hw->reg->INT_EN); 1352 int_en = ioread32(&hw->reg->INT_EN);
1394 iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), 1353 iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
1395 &hw->reg->INT_EN); 1354 &hw->reg->INT_EN);
1396 pch_gbe_stop_receive(adapter); 1355 pch_gbe_disable_dma_rx(&adapter->hw);
1397 int_st |= ioread32(&hw->reg->INT_ST); 1356 int_st |= ioread32(&hw->reg->INT_ST);
1398 int_st = int_st & ioread32(&hw->reg->INT_EN); 1357 int_st = int_st & ioread32(&hw->reg->INT_EN);
1399 } 1358 }
@@ -1971,12 +1930,12 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
1971 struct net_device *netdev = adapter->netdev; 1930 struct net_device *netdev = adapter->netdev;
1972 struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; 1931 struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
1973 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; 1932 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
1974 int err; 1933 int err = -EINVAL;
1975 1934
1976 /* Ensure we have a valid MAC */ 1935 /* Ensure we have a valid MAC */
1977 if (!is_valid_ether_addr(adapter->hw.mac.addr)) { 1936 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
1978 pr_err("Error: Invalid MAC address\n"); 1937 pr_err("Error: Invalid MAC address\n");
1979 return -EINVAL; 1938 goto out;
1980 } 1939 }
1981 1940
1982 /* hardware has been reset, we need to reload some things */ 1941 /* hardware has been reset, we need to reload some things */
@@ -1989,18 +1948,19 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
1989 1948
1990 err = pch_gbe_request_irq(adapter); 1949 err = pch_gbe_request_irq(adapter);
1991 if (err) { 1950 if (err) {
1992 pr_err("Error: can't bring device up\n"); 1951 pr_err("Error: can't bring device up - irq request failed\n");
1993 return err; 1952 goto out;
1994 } 1953 }
1995 err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count); 1954 err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
1996 if (err) { 1955 if (err) {
1997 pr_err("Error: can't bring device up\n"); 1956 pr_err("Error: can't bring device up - alloc rx buffers pool failed\n");
1998 return err; 1957 goto freeirq;
1999 } 1958 }
2000 pch_gbe_alloc_tx_buffers(adapter, tx_ring); 1959 pch_gbe_alloc_tx_buffers(adapter, tx_ring);
2001 pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count); 1960 pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
2002 adapter->tx_queue_len = netdev->tx_queue_len; 1961 adapter->tx_queue_len = netdev->tx_queue_len;
2003 pch_gbe_start_receive(&adapter->hw); 1962 pch_gbe_enable_dma_rx(&adapter->hw);
1963 pch_gbe_enable_mac_rx(&adapter->hw);
2004 1964
2005 mod_timer(&adapter->watchdog_timer, jiffies); 1965 mod_timer(&adapter->watchdog_timer, jiffies);
2006 1966
@@ -2009,6 +1969,11 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
2009 netif_start_queue(adapter->netdev); 1969 netif_start_queue(adapter->netdev);
2010 1970
2011 return 0; 1971 return 0;
1972
1973freeirq:
1974 pch_gbe_free_irq(adapter);
1975out:
1976 return err;
2012} 1977}
2013 1978
2014/** 1979/**
@@ -2405,7 +2370,6 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
2405 int work_done = 0; 2370 int work_done = 0;
2406 bool poll_end_flag = false; 2371 bool poll_end_flag = false;
2407 bool cleaned = false; 2372 bool cleaned = false;
2408 u32 int_en;
2409 2373
2410 pr_debug("budget : %d\n", budget); 2374 pr_debug("budget : %d\n", budget);
2411 2375
@@ -2422,19 +2386,13 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
2422 2386
2423 if (poll_end_flag) { 2387 if (poll_end_flag) {
2424 napi_complete(napi); 2388 napi_complete(napi);
2425 if (adapter->rx_stop_flag) {
2426 adapter->rx_stop_flag = false;
2427 pch_gbe_start_receive(&adapter->hw);
2428 }
2429 pch_gbe_irq_enable(adapter); 2389 pch_gbe_irq_enable(adapter);
2430 } else 2390 }
2431 if (adapter->rx_stop_flag) { 2391
2432 adapter->rx_stop_flag = false; 2392 if (adapter->rx_stop_flag) {
2433 pch_gbe_start_receive(&adapter->hw); 2393 adapter->rx_stop_flag = false;
2434 int_en = ioread32(&adapter->hw.reg->INT_EN); 2394 pch_gbe_enable_dma_rx(&adapter->hw);
2435 iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), 2395 }
2436 &adapter->hw.reg->INT_EN);
2437 }
2438 2396
2439 pr_debug("poll_end_flag : %d work_done : %d budget : %d\n", 2397 pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
2440 poll_end_flag, work_done, budget); 2398 poll_end_flag, work_done, budget);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index df09b1cb742f..6407d0d77e81 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -2525,6 +2525,13 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2525 qdev->req_q_size = 2525 qdev->req_q_size =
2526 (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); 2526 (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
2527 2527
2528 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2529
2530 /* The barrier is required to ensure request and response queue
2531 * addr writes to the registers.
2532 */
2533 wmb();
2534
2528 qdev->req_q_virt_addr = 2535 qdev->req_q_virt_addr =
2529 pci_alloc_consistent(qdev->pdev, 2536 pci_alloc_consistent(qdev->pdev,
2530 (size_t) qdev->req_q_size, 2537 (size_t) qdev->req_q_size,
@@ -2536,8 +2543,6 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2536 return -ENOMEM; 2543 return -ENOMEM;
2537 } 2544 }
2538 2545
2539 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2540
2541 qdev->rsp_q_virt_addr = 2546 qdev->rsp_q_virt_addr =
2542 pci_alloc_consistent(qdev->pdev, 2547 pci_alloc_consistent(qdev->pdev,
2543 (size_t) qdev->rsp_q_size, 2548 (size_t) qdev->rsp_q_size,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 473ce134ca63..24ad17ec7fcd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1601,7 +1601,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1601 adapter->netdev = netdev; 1601 adapter->netdev = netdev;
1602 adapter->pdev = pdev; 1602 adapter->pdev = pdev;
1603 1603
1604 if (qlcnic_alloc_adapter_resources(adapter)) 1604 err = qlcnic_alloc_adapter_resources(adapter);
1605 if (err)
1605 goto err_out_free_netdev; 1606 goto err_out_free_netdev;
1606 1607
1607 adapter->dev_rst_time = jiffies; 1608 adapter->dev_rst_time = jiffies;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 995d0cfc4c06..1c818254b7be 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -563,7 +563,7 @@ rx_next:
563 if (cpr16(IntrStatus) & cp_rx_intr_mask) 563 if (cpr16(IntrStatus) & cp_rx_intr_mask)
564 goto rx_status_loop; 564 goto rx_status_loop;
565 565
566 napi_gro_flush(napi); 566 napi_gro_flush(napi, false);
567 spin_lock_irqsave(&cp->lock, flags); 567 spin_lock_irqsave(&cp->lock, flags);
568 __napi_complete(napi); 568 __napi_complete(napi);
569 cpw16_f(IntrMask, cp_intr_mask); 569 cpw16_f(IntrMask, cp_intr_mask);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index bad8f2eec9b4..c8bfea0524dd 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2438,6 +2438,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2438 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2438 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2439 if (!rtsu) { 2439 if (!rtsu) {
2440 dev_err(&pdev->dev, "Not found TSU resource\n"); 2440 dev_err(&pdev->dev, "Not found TSU resource\n");
2441 ret = -ENODEV;
2441 goto out_release; 2442 goto out_release;
2442 } 2443 }
2443 mdp->tsu_addr = ioremap(rtsu->start, 2444 mdp->tsu_addr = ioremap(rtsu->start,
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 5b3dd028ce85..0767043f44a4 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -640,8 +640,7 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
640 evt = list_entry(cursor, struct efx_ptp_event_rx, 640 evt = list_entry(cursor, struct efx_ptp_event_rx,
641 link); 641 link);
642 if (time_after(jiffies, evt->expiry)) { 642 if (time_after(jiffies, evt->expiry)) {
643 list_del(&evt->link); 643 list_move(&evt->link, &ptp->evt_free_list);
644 list_add(&evt->link, &ptp->evt_free_list);
645 netif_warn(efx, hw, efx->net_dev, 644 netif_warn(efx, hw, efx->net_dev,
646 "PTP rx event dropped\n"); 645 "PTP rx event dropped\n");
647 } 646 }
@@ -684,8 +683,7 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
684 683
685 match->state = PTP_PACKET_STATE_MATCHED; 684 match->state = PTP_PACKET_STATE_MATCHED;
686 rc = PTP_PACKET_STATE_MATCHED; 685 rc = PTP_PACKET_STATE_MATCHED;
687 list_del(&evt->link); 686 list_move(&evt->link, &ptp->evt_free_list);
688 list_add(&evt->link, &ptp->evt_free_list);
689 break; 687 break;
690 } 688 }
691 } 689 }
@@ -820,8 +818,7 @@ static int efx_ptp_stop(struct efx_nic *efx)
820 /* Drop any pending receive events */ 818 /* Drop any pending receive events */
821 spin_lock_bh(&efx->ptp_data->evt_lock); 819 spin_lock_bh(&efx->ptp_data->evt_lock);
822 list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { 820 list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
823 list_del(cursor); 821 list_move(cursor, &efx->ptp_data->evt_free_list);
824 list_add(cursor, &efx->ptp_data->evt_free_list);
825 } 822 }
826 spin_unlock_bh(&efx->ptp_data->evt_lock); 823 spin_unlock_bh(&efx->ptp_data->evt_lock);
827 824
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 203d9c6ec23a..fb9f6b38511f 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -478,8 +478,10 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
478 478
479 /* IO region. */ 479 /* IO region. */
480 ioaddr = pci_iomap(pci_dev, 0, 0); 480 ioaddr = pci_iomap(pci_dev, 0, 0);
481 if (!ioaddr) 481 if (!ioaddr) {
482 ret = -ENOMEM;
482 goto err_out_cleardev; 483 goto err_out_cleardev;
484 }
483 485
484 sis_priv = netdev_priv(net_dev); 486 sis_priv = netdev_priv(net_dev);
485 sis_priv->ioaddr = ioaddr; 487 sis_priv->ioaddr = ioaddr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index e872e1da3137..7d51a65ab099 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -50,7 +50,6 @@ struct stmmac_priv {
50 unsigned int dirty_rx; 50 unsigned int dirty_rx;
51 struct sk_buff **rx_skbuff; 51 struct sk_buff **rx_skbuff;
52 dma_addr_t *rx_skbuff_dma; 52 dma_addr_t *rx_skbuff_dma;
53 struct sk_buff_head rx_recycle;
54 53
55 struct net_device *dev; 54 struct net_device *dev;
56 dma_addr_t dma_rx_phy; 55 dma_addr_t dma_rx_phy;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 3be88331d17a..c6cdbc4eb05e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -747,18 +747,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
747 priv->hw->ring->clean_desc3(p); 747 priv->hw->ring->clean_desc3(p);
748 748
749 if (likely(skb != NULL)) { 749 if (likely(skb != NULL)) {
750 /* 750 dev_kfree_skb(skb);
751 * If there's room in the queue (limit it to size)
752 * we add this skb back into the pool,
753 * if it's the right size.
754 */
755 if ((skb_queue_len(&priv->rx_recycle) <
756 priv->dma_rx_size) &&
757 skb_recycle_check(skb, priv->dma_buf_sz))
758 __skb_queue_head(&priv->rx_recycle, skb);
759 else
760 dev_kfree_skb(skb);
761
762 priv->tx_skbuff[entry] = NULL; 751 priv->tx_skbuff[entry] = NULL;
763 } 752 }
764 753
@@ -1169,7 +1158,6 @@ static int stmmac_open(struct net_device *dev)
1169 priv->eee_enabled = stmmac_eee_init(priv); 1158 priv->eee_enabled = stmmac_eee_init(priv);
1170 1159
1171 napi_enable(&priv->napi); 1160 napi_enable(&priv->napi);
1172 skb_queue_head_init(&priv->rx_recycle);
1173 netif_start_queue(dev); 1161 netif_start_queue(dev);
1174 1162
1175 return 0; 1163 return 0;
@@ -1222,7 +1210,6 @@ static int stmmac_release(struct net_device *dev)
1222 kfree(priv->tm); 1210 kfree(priv->tm);
1223#endif 1211#endif
1224 napi_disable(&priv->napi); 1212 napi_disable(&priv->napi);
1225 skb_queue_purge(&priv->rx_recycle);
1226 1213
1227 /* Free the IRQ lines */ 1214 /* Free the IRQ lines */
1228 free_irq(dev->irq, dev); 1215 free_irq(dev->irq, dev);
@@ -1388,10 +1375,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1388 if (likely(priv->rx_skbuff[entry] == NULL)) { 1375 if (likely(priv->rx_skbuff[entry] == NULL)) {
1389 struct sk_buff *skb; 1376 struct sk_buff *skb;
1390 1377
1391 skb = __skb_dequeue(&priv->rx_recycle); 1378 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
1392 if (skb == NULL)
1393 skb = netdev_alloc_skb_ip_align(priv->dev,
1394 bfsize);
1395 1379
1396 if (unlikely(skb == NULL)) 1380 if (unlikely(skb == NULL))
1397 break; 1381 break;
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 8419bf385e08..275b430aeb75 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9788,6 +9788,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
9788 9788
9789 if (!pci_is_pcie(pdev)) { 9789 if (!pci_is_pcie(pdev)) {
9790 dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n"); 9790 dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
9791 err = -ENODEV;
9791 goto err_out_free_res; 9792 goto err_out_free_res;
9792 } 9793 }
9793 9794
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 9ae12d0c9632..6c8695ec7cb9 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2963,7 +2963,8 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
2963 goto err_out_iounmap; 2963 goto err_out_iounmap;
2964 } 2964 }
2965 2965
2966 if (gem_get_device_address(gp)) 2966 err = gem_get_device_address(gp);
2967 if (err)
2967 goto err_out_free_consistent; 2968 goto err_out_free_consistent;
2968 2969
2969 dev->netdev_ops = &gem_netdev_ops; 2970 dev->netdev_ops = &gem_netdev_ops;
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index b26cbda5efa9..2c41894d5472 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -5,7 +5,7 @@
5config NET_VENDOR_TI 5config NET_VENDOR_TI
6 bool "Texas Instruments (TI) devices" 6 bool "Texas Instruments (TI) devices"
7 default y 7 default y
8 depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3)) 8 depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX))
9 ---help--- 9 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y 10 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from 11 and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 4e2a1628484d..4e9810013850 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -1334,11 +1334,11 @@ static int tso_count_edescs(struct sk_buff *skb)
1334{ 1334{
1335 struct skb_shared_info *sh = skb_shinfo(skb); 1335 struct skb_shared_info *sh = skb_shinfo(skb);
1336 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1336 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1337 unsigned int data_len = skb->data_len + skb->hdr_len - sh_len; 1337 unsigned int data_len = skb->len - sh_len;
1338 unsigned int p_len = sh->gso_size; 1338 unsigned int p_len = sh->gso_size;
1339 long f_id = -1; /* id of the current fragment */ 1339 long f_id = -1; /* id of the current fragment */
1340 long f_size = skb->hdr_len; /* size of the current fragment */ 1340 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
1341 long f_used = sh_len; /* bytes used from the current fragment */ 1341 long f_used = 0; /* bytes used from the current fragment */
1342 long n; /* size of the current piece of payload */ 1342 long n; /* size of the current piece of payload */
1343 int num_edescs = 0; 1343 int num_edescs = 0;
1344 int segment; 1344 int segment;
@@ -1353,7 +1353,7 @@ static int tso_count_edescs(struct sk_buff *skb)
1353 /* Advance as needed. */ 1353 /* Advance as needed. */
1354 while (f_used >= f_size) { 1354 while (f_used >= f_size) {
1355 f_id++; 1355 f_id++;
1356 f_size = sh->frags[f_id].size; 1356 f_size = skb_frag_size(&sh->frags[f_id]);
1357 f_used = 0; 1357 f_used = 0;
1358 } 1358 }
1359 1359
@@ -1384,13 +1384,13 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
1384 struct iphdr *ih; 1384 struct iphdr *ih;
1385 struct tcphdr *th; 1385 struct tcphdr *th;
1386 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1386 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1387 unsigned int data_len = skb->data_len + skb->hdr_len - sh_len; 1387 unsigned int data_len = skb->len - sh_len;
1388 unsigned char *data = skb->data; 1388 unsigned char *data = skb->data;
1389 unsigned int ih_off, th_off, p_len; 1389 unsigned int ih_off, th_off, p_len;
1390 unsigned int isum_seed, tsum_seed, id, seq; 1390 unsigned int isum_seed, tsum_seed, id, seq;
1391 long f_id = -1; /* id of the current fragment */ 1391 long f_id = -1; /* id of the current fragment */
1392 long f_size = skb->hdr_len; /* size of the current fragment */ 1392 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
1393 long f_used = sh_len; /* bytes used from the current fragment */ 1393 long f_used = 0; /* bytes used from the current fragment */
1394 long n; /* size of the current piece of payload */ 1394 long n; /* size of the current piece of payload */
1395 int segment; 1395 int segment;
1396 1396
@@ -1405,7 +1405,7 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
1405 isum_seed = ((0xFFFF - ih->check) + 1405 isum_seed = ((0xFFFF - ih->check) +
1406 (0xFFFF - ih->tot_len) + 1406 (0xFFFF - ih->tot_len) +
1407 (0xFFFF - ih->id)); 1407 (0xFFFF - ih->id));
1408 tsum_seed = th->check + (0xFFFF ^ htons(sh_len + data_len)); 1408 tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
1409 id = ntohs(ih->id); 1409 id = ntohs(ih->id);
1410 seq = ntohl(th->seq); 1410 seq = ntohl(th->seq);
1411 1411
@@ -1444,7 +1444,7 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
1444 /* Advance as needed. */ 1444 /* Advance as needed. */
1445 while (f_used >= f_size) { 1445 while (f_used >= f_size) {
1446 f_id++; 1446 f_id++;
1447 f_size = sh->frags[f_id].size; 1447 f_size = skb_frag_size(&sh->frags[f_id]);
1448 f_used = 0; 1448 f_used = 0;
1449 } 1449 }
1450 1450
@@ -1478,14 +1478,14 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
1478 struct tile_net_priv *priv = netdev_priv(dev); 1478 struct tile_net_priv *priv = netdev_priv(dev);
1479 struct skb_shared_info *sh = skb_shinfo(skb); 1479 struct skb_shared_info *sh = skb_shinfo(skb);
1480 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1480 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1481 unsigned int data_len = skb->data_len + skb->hdr_len - sh_len; 1481 unsigned int data_len = skb->len - sh_len;
1482 unsigned int p_len = sh->gso_size; 1482 unsigned int p_len = sh->gso_size;
1483 gxio_mpipe_edesc_t edesc_head = { { 0 } }; 1483 gxio_mpipe_edesc_t edesc_head = { { 0 } };
1484 gxio_mpipe_edesc_t edesc_body = { { 0 } }; 1484 gxio_mpipe_edesc_t edesc_body = { { 0 } };
1485 long f_id = -1; /* id of the current fragment */ 1485 long f_id = -1; /* id of the current fragment */
1486 long f_size = skb->hdr_len; /* size of the current fragment */ 1486 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
1487 long f_used = sh_len; /* bytes used from the current fragment */ 1487 long f_used = 0; /* bytes used from the current fragment */
1488 void *f_data = skb->data; 1488 void *f_data = skb->data + sh_len;
1489 long n; /* size of the current piece of payload */ 1489 long n; /* size of the current piece of payload */
1490 unsigned long tx_packets = 0, tx_bytes = 0; 1490 unsigned long tx_packets = 0, tx_bytes = 0;
1491 unsigned int csum_start; 1491 unsigned int csum_start;
@@ -1516,15 +1516,18 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
1516 1516
1517 /* Egress the payload. */ 1517 /* Egress the payload. */
1518 while (p_used < p_len) { 1518 while (p_used < p_len) {
1519 void *va;
1519 1520
1520 /* Advance as needed. */ 1521 /* Advance as needed. */
1521 while (f_used >= f_size) { 1522 while (f_used >= f_size) {
1522 f_id++; 1523 f_id++;
1523 f_size = sh->frags[f_id].size; 1524 f_size = skb_frag_size(&sh->frags[f_id]);
1524 f_used = 0;
1525 f_data = tile_net_frag_buf(&sh->frags[f_id]); 1525 f_data = tile_net_frag_buf(&sh->frags[f_id]);
1526 f_used = 0;
1526 } 1527 }
1527 1528
1529 va = f_data + f_used;
1530
1528 /* Use bytes from the current fragment. */ 1531 /* Use bytes from the current fragment. */
1529 n = p_len - p_used; 1532 n = p_len - p_used;
1530 if (n > f_size - f_used) 1533 if (n > f_size - f_used)
@@ -1533,7 +1536,7 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
1533 p_used += n; 1536 p_used += n;
1534 1537
1535 /* Egress a piece of the payload. */ 1538 /* Egress a piece of the payload. */
1536 edesc_body.va = va_to_tile_io_addr(f_data) + f_used; 1539 edesc_body.va = va_to_tile_io_addr(va);
1537 edesc_body.xfer_size = n; 1540 edesc_body.xfer_size = n;
1538 edesc_body.bound = !(p_used < p_len); 1541 edesc_body.bound = !(p_used < p_len);
1539 gxio_mpipe_equeue_put_at(equeue, edesc_body, slot); 1542 gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 30087ca23a0f..6e4d4b62c9a8 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -459,8 +459,10 @@ static int irtty_open(struct tty_struct *tty)
459 459
460 /* allocate private device info block */ 460 /* allocate private device info block */
461 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 461 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
462 if (!priv) 462 if (!priv) {
463 ret = -ENOMEM;
463 goto out_put; 464 goto out_put;
465 }
464 466
465 priv->magic = IRTTY_MAGIC; 467 priv->magic = IRTTY_MAGIC;
466 priv->tty = tty; 468 priv->tty = tty;
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 1a00b5990cb8..f07c340990da 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -920,8 +920,10 @@ static int mcs_probe(struct usb_interface *intf,
920 920
921 ndev->netdev_ops = &mcs_netdev_ops; 921 ndev->netdev_ops = &mcs_netdev_ops;
922 922
923 if (!intf->cur_altsetting) 923 if (!intf->cur_altsetting) {
924 ret = -ENOMEM;
924 goto error2; 925 goto error2;
926 }
925 927
926 ret = mcs_find_endpoints(mcs, intf->cur_altsetting->endpoint, 928 ret = mcs_find_endpoints(mcs, intf->cur_altsetting->endpoint,
927 intf->cur_altsetting->desc.bNumEndpoints); 929 intf->cur_altsetting->desc.bNumEndpoints);
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 002a442bf73f..858de05bdb7d 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -846,8 +846,10 @@ static int pxa_irda_probe(struct platform_device *pdev)
846 goto err_mem_2; 846 goto err_mem_2;
847 847
848 dev = alloc_irdadev(sizeof(struct pxa_irda)); 848 dev = alloc_irdadev(sizeof(struct pxa_irda));
849 if (!dev) 849 if (!dev) {
850 err = -ENOMEM;
850 goto err_mem_3; 851 goto err_mem_3;
852 }
851 853
852 SET_NETDEV_DEV(dev, &pdev->dev); 854 SET_NETDEV_DEV(dev, &pdev->dev);
853 si = netdev_priv(dev); 855 si = netdev_priv(dev);
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index e25067552b20..42fde9ed23e1 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -940,8 +940,10 @@ static int sa1100_irda_probe(struct platform_device *pdev)
940 goto err_mem_3; 940 goto err_mem_3;
941 941
942 dev = alloc_irdadev(sizeof(struct sa1100_irda)); 942 dev = alloc_irdadev(sizeof(struct sa1100_irda));
943 if (!dev) 943 if (!dev) {
944 err = -ENOMEM;
944 goto err_mem_4; 945 goto err_mem_4;
946 }
945 947
946 SET_NETDEV_DEV(dev, &pdev->dev); 948 SET_NETDEV_DEV(dev, &pdev->dev);
947 949
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index eb315b8d07a3..4b746d9bd8e7 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -808,8 +808,8 @@ static int __devinit sh_irda_probe(struct platform_device *pdev)
808 goto err_mem_4; 808 goto err_mem_4;
809 809
810 platform_set_drvdata(pdev, ndev); 810 platform_set_drvdata(pdev, ndev);
811 811 err = request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self);
812 if (request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self)) { 812 if (err) {
813 dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n"); 813 dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
814 goto err_mem_4; 814 goto err_mem_4;
815 } 815 }
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 795109425568..624ac1939e85 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -741,6 +741,7 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
741 self->clk = clk_get(&pdev->dev, clk_name); 741 self->clk = clk_get(&pdev->dev, clk_name);
742 if (IS_ERR(self->clk)) { 742 if (IS_ERR(self->clk)) {
743 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); 743 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
744 err = -ENODEV;
744 goto err_mem_3; 745 goto err_mem_3;
745 } 746 }
746 747
@@ -760,8 +761,8 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
760 goto err_mem_4; 761 goto err_mem_4;
761 762
762 platform_set_drvdata(pdev, ndev); 763 platform_set_drvdata(pdev, ndev);
763 764 err = request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self);
764 if (request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self)) { 765 if (err) {
765 dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n"); 766 dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
766 goto err_mem_4; 767 goto err_mem_4;
767 } 768 }
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 983bbf4d5ef6..961f0b293913 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -15,6 +15,11 @@ if PHYLIB
15 15
16comment "MII PHY device drivers" 16comment "MII PHY device drivers"
17 17
18config AT803X_PHY
19 tristate "Drivers for Atheros AT803X PHYs"
20 ---help---
21 Currently supports the AT8030 and AT8035 model
22
18config AMD_PHY 23config AMD_PHY
19 tristate "Drivers for the AMD PHYs" 24 tristate "Drivers for the AMD PHYs"
20 ---help--- 25 ---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 426674debae4..9645e389a58d 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_STE10XP) += ste10Xp.o
25obj-$(CONFIG_MICREL_PHY) += micrel.o 25obj-$(CONFIG_MICREL_PHY) += micrel.o
26obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o 26obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
27obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o 27obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
28obj-$(CONFIG_AT803X_PHY) += at803x.o
28obj-$(CONFIG_AMD_PHY) += amd.o 29obj-$(CONFIG_AMD_PHY) += amd.o
29obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o 30obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
30obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o 31obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
new file mode 100644
index 000000000000..45cbc10de01c
--- /dev/null
+++ b/drivers/net/phy/at803x.c
@@ -0,0 +1,176 @@
1/*
2 * drivers/net/phy/at803x.c
3 *
4 * Driver for Atheros 803x PHY
5 *
6 * Author: Matus Ujhelyi <ujhelyi.m@gmail.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/phy.h>
15#include <linux/module.h>
16#include <linux/string.h>
17#include <linux/netdevice.h>
18#include <linux/etherdevice.h>
19
20#define AT803X_INTR_ENABLE 0x12
21#define AT803X_INTR_STATUS 0x13
22#define AT803X_WOL_ENABLE 0x01
23#define AT803X_DEVICE_ADDR 0x03
24#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C
25#define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B
26#define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A
27#define AT803X_MMD_ACCESS_CONTROL 0x0D
28#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E
29#define AT803X_FUNC_DATA 0x4003
30
31MODULE_DESCRIPTION("Atheros 803x PHY driver");
32MODULE_AUTHOR("Matus Ujhelyi");
33MODULE_LICENSE("GPL");
34
35static void at803x_set_wol_mac_addr(struct phy_device *phydev)
36{
37 struct net_device *ndev = phydev->attached_dev;
38 const u8 *mac;
39 unsigned int i, offsets[] = {
40 AT803X_LOC_MAC_ADDR_32_47_OFFSET,
41 AT803X_LOC_MAC_ADDR_16_31_OFFSET,
42 AT803X_LOC_MAC_ADDR_0_15_OFFSET,
43 };
44
45 if (!ndev)
46 return;
47
48 mac = (const u8 *) ndev->dev_addr;
49
50 if (!is_valid_ether_addr(mac))
51 return;
52
53 for (i = 0; i < 3; i++) {
54 phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
55 AT803X_DEVICE_ADDR);
56 phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA,
57 offsets[i]);
58 phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
59 AT803X_FUNC_DATA);
60 phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA,
61 mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
62 }
63}
64
65static int at803x_config_init(struct phy_device *phydev)
66{
67 int val;
68 u32 features;
69 int status;
70
71 features = SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_AUI |
72 SUPPORTED_FIBRE | SUPPORTED_BNC;
73
74 val = phy_read(phydev, MII_BMSR);
75 if (val < 0)
76 return val;
77
78 if (val & BMSR_ANEGCAPABLE)
79 features |= SUPPORTED_Autoneg;
80 if (val & BMSR_100FULL)
81 features |= SUPPORTED_100baseT_Full;
82 if (val & BMSR_100HALF)
83 features |= SUPPORTED_100baseT_Half;
84 if (val & BMSR_10FULL)
85 features |= SUPPORTED_10baseT_Full;
86 if (val & BMSR_10HALF)
87 features |= SUPPORTED_10baseT_Half;
88
89 if (val & BMSR_ESTATEN) {
90 val = phy_read(phydev, MII_ESTATUS);
91 if (val < 0)
92 return val;
93
94 if (val & ESTATUS_1000_TFULL)
95 features |= SUPPORTED_1000baseT_Full;
96 if (val & ESTATUS_1000_THALF)
97 features |= SUPPORTED_1000baseT_Half;
98 }
99
100 phydev->supported = features;
101 phydev->advertising = features;
102
103 /* enable WOL */
104 at803x_set_wol_mac_addr(phydev);
105 status = phy_write(phydev, AT803X_INTR_ENABLE, AT803X_WOL_ENABLE);
106 status = phy_read(phydev, AT803X_INTR_STATUS);
107
108 return 0;
109}
110
111/* ATHEROS 8035 */
112static struct phy_driver at8035_driver = {
113 .phy_id = 0x004dd072,
114 .name = "Atheros 8035 ethernet",
115 .phy_id_mask = 0xffffffef,
116 .config_init = at803x_config_init,
117 .features = PHY_GBIT_FEATURES,
118 .flags = PHY_HAS_INTERRUPT,
119 .config_aneg = &genphy_config_aneg,
120 .read_status = &genphy_read_status,
121 .driver = {
122 .owner = THIS_MODULE,
123 },
124};
125
126/* ATHEROS 8030 */
127static struct phy_driver at8030_driver = {
128 .phy_id = 0x004dd076,
129 .name = "Atheros 8030 ethernet",
130 .phy_id_mask = 0xffffffef,
131 .config_init = at803x_config_init,
132 .features = PHY_GBIT_FEATURES,
133 .flags = PHY_HAS_INTERRUPT,
134 .config_aneg = &genphy_config_aneg,
135 .read_status = &genphy_read_status,
136 .driver = {
137 .owner = THIS_MODULE,
138 },
139};
140
141static int __init atheros_init(void)
142{
143 int ret;
144
145 ret = phy_driver_register(&at8035_driver);
146 if (ret)
147 goto err1;
148
149 ret = phy_driver_register(&at8030_driver);
150 if (ret)
151 goto err2;
152
153 return 0;
154
155err2:
156 phy_driver_unregister(&at8035_driver);
157err1:
158 return ret;
159}
160
161static void __exit atheros_exit(void)
162{
163 phy_driver_unregister(&at8035_driver);
164 phy_driver_unregister(&at8030_driver);
165}
166
167module_init(atheros_init);
168module_exit(atheros_exit);
169
170static struct mdio_device_id __maybe_unused atheros_tbl[] = {
171 { 0x004dd076, 0xffffffef },
172 { 0x004dd072, 0xffffffef },
173 { }
174};
175
176MODULE_DEVICE_TABLE(mdio, atheros_tbl);
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
index daec9b05d168..6428fcbbdd4b 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/phy/mdio-bitbang.c
@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
234 struct mdiobb_ctrl *ctrl = bus->priv; 234 struct mdiobb_ctrl *ctrl = bus->priv;
235 235
236 module_put(ctrl->ops->owner); 236 module_put(ctrl->ops->owner);
237 mdiobus_unregister(bus);
237 mdiobus_free(bus); 238 mdiobus_free(bus);
238} 239}
239EXPORT_SYMBOL(free_mdio_bitbang); 240EXPORT_SYMBOL(free_mdio_bitbang);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 170eb411ab5d..c1ef3000ea60 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -26,6 +26,7 @@
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/device.h> 27#include <linux/device.h>
28#include <linux/of_device.h> 28#include <linux/of_device.h>
29#include <linux/of_mdio.h>
29#include <linux/netdevice.h> 30#include <linux/netdevice.h>
30#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
31#include <linux/skbuff.h> 32#include <linux/skbuff.h>
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 434d5af8e6fb..c81e278629ff 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -244,8 +244,12 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
244 * - suspend: peripheral ready to suspend 244 * - suspend: peripheral ready to suspend
245 * - response: suggest N millisec polling 245 * - response: suggest N millisec polling
246 * - response complete: suggest N sec polling 246 * - response complete: suggest N sec polling
247 *
248 * Suspend is reported and maybe heeded.
247 */ 249 */
248 case 2: /* Suspend hint */ 250 case 2: /* Suspend hint */
251 usbnet_device_suggests_idle(dev);
252 continue;
249 case 3: /* Response hint */ 253 case 3: /* Response hint */
250 case 4: /* Response complete hint */ 254 case 4: /* Response complete hint */
251 continue; 255 continue;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index a03de7197049..d0129827602b 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -592,6 +592,32 @@ static const struct usb_device_id products [] = {
592 .driver_info = 0, 592 .driver_info = 0,
593}, 593},
594 594
595/* Novatel USB551L and MC551 - handled by qmi_wwan */
596{
597 .match_flags = USB_DEVICE_ID_MATCH_VENDOR
598 | USB_DEVICE_ID_MATCH_PRODUCT
599 | USB_DEVICE_ID_MATCH_INT_INFO,
600 .idVendor = NOVATEL_VENDOR_ID,
601 .idProduct = 0xB001,
602 .bInterfaceClass = USB_CLASS_COMM,
603 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
604 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
605 .driver_info = 0,
606},
607
608/* Novatel E362 - handled by qmi_wwan */
609{
610 .match_flags = USB_DEVICE_ID_MATCH_VENDOR
611 | USB_DEVICE_ID_MATCH_PRODUCT
612 | USB_DEVICE_ID_MATCH_INT_INFO,
613 .idVendor = NOVATEL_VENDOR_ID,
614 .idProduct = 0x9010,
615 .bInterfaceClass = USB_CLASS_COMM,
616 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
617 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
618 .driver_info = 0,
619},
620
595/* 621/*
596 * WHITELIST!!! 622 * WHITELIST!!!
597 * 623 *
@@ -604,21 +630,6 @@ static const struct usb_device_id products [] = {
604 * because of bugs/quirks in a given product (like Zaurus, above). 630 * because of bugs/quirks in a given product (like Zaurus, above).
605 */ 631 */
606{ 632{
607 /* Novatel USB551L */
608 /* This match must come *before* the generic CDC-ETHER match so that
609 * we get FLAG_WWAN set on the device, since it's descriptors are
610 * generic CDC-ETHER.
611 */
612 .match_flags = USB_DEVICE_ID_MATCH_VENDOR
613 | USB_DEVICE_ID_MATCH_PRODUCT
614 | USB_DEVICE_ID_MATCH_INT_INFO,
615 .idVendor = NOVATEL_VENDOR_ID,
616 .idProduct = 0xB001,
617 .bInterfaceClass = USB_CLASS_COMM,
618 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
619 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
620 .driver_info = (unsigned long)&wwan_info,
621}, {
622 /* ZTE (Vodafone) K3805-Z */ 633 /* ZTE (Vodafone) K3805-Z */
623 .match_flags = USB_DEVICE_ID_MATCH_VENDOR 634 .match_flags = USB_DEVICE_ID_MATCH_VENDOR
624 | USB_DEVICE_ID_MATCH_PRODUCT 635 | USB_DEVICE_ID_MATCH_PRODUCT
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index a28a983d465e..534d8becbbdc 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -62,6 +62,7 @@
62#define USB_PRODUCT_IPAD 0x129a 62#define USB_PRODUCT_IPAD 0x129a
63#define USB_PRODUCT_IPHONE_4_VZW 0x129c 63#define USB_PRODUCT_IPHONE_4_VZW 0x129c
64#define USB_PRODUCT_IPHONE_4S 0x12a0 64#define USB_PRODUCT_IPHONE_4S 0x12a0
65#define USB_PRODUCT_IPHONE_5 0x12a8
65 66
66#define IPHETH_USBINTF_CLASS 255 67#define IPHETH_USBINTF_CLASS 255
67#define IPHETH_USBINTF_SUBCLASS 253 68#define IPHETH_USBINTF_SUBCLASS 253
@@ -113,6 +114,10 @@ static struct usb_device_id ipheth_table[] = {
113 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S, 114 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S,
114 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, 115 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
115 IPHETH_USBINTF_PROTO) }, 116 IPHETH_USBINTF_PROTO) },
117 { USB_DEVICE_AND_INTERFACE_INFO(
118 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_5,
119 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
120 IPHETH_USBINTF_PROTO) },
116 { } 121 { }
117}; 122};
118MODULE_DEVICE_TABLE(usb, ipheth_table); 123MODULE_DEVICE_TABLE(usb, ipheth_table);
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index c75e11e1b385..afb117c16d2d 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -424,7 +424,7 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth,
424 424
425 netdev_dbg(kaweth->net, 425 netdev_dbg(kaweth->net,
426 "Downloading firmware at %p to kaweth device at %p\n", 426 "Downloading firmware at %p to kaweth device at %p\n",
427 fw->data, kaweth); 427 kaweth->firmware_buf, kaweth);
428 netdev_dbg(kaweth->net, "Firmware length: %d\n", data_len); 428 netdev_dbg(kaweth->net, "Firmware length: %d\n", data_len);
429 429
430 return kaweth_control(kaweth, 430 return kaweth_control(kaweth,
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 03c2d8d653df..cc7e72010ac3 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -117,6 +117,7 @@ enum {
117struct mcs7830_data { 117struct mcs7830_data {
118 u8 multi_filter[8]; 118 u8 multi_filter[8];
119 u8 config; 119 u8 config;
120 u8 link_counter;
120}; 121};
121 122
122static const char driver_name[] = "MOSCHIP usb-ethernet driver"; 123static const char driver_name[] = "MOSCHIP usb-ethernet driver";
@@ -632,20 +633,31 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
632static void mcs7830_status(struct usbnet *dev, struct urb *urb) 633static void mcs7830_status(struct usbnet *dev, struct urb *urb)
633{ 634{
634 u8 *buf = urb->transfer_buffer; 635 u8 *buf = urb->transfer_buffer;
635 bool link; 636 bool link, link_changed;
637 struct mcs7830_data *data = mcs7830_get_data(dev);
636 638
637 if (urb->actual_length < 16) 639 if (urb->actual_length < 16)
638 return; 640 return;
639 641
640 link = !(buf[1] & 0x20); 642 link = !(buf[1] & 0x20);
641 if (netif_carrier_ok(dev->net) != link) { 643 link_changed = netif_carrier_ok(dev->net) != link;
642 if (link) { 644 if (link_changed) {
643 netif_carrier_on(dev->net); 645 data->link_counter++;
644 usbnet_defer_kevent(dev, EVENT_LINK_RESET); 646 /*
645 } else 647 track link state 20 times to guard against erroneous
646 netif_carrier_off(dev->net); 648 link state changes reported sometimes by the chip
647 netdev_dbg(dev->net, "Link Status is: %d\n", link); 649 */
648 } 650 if (data->link_counter > 20) {
651 data->link_counter = 0;
652 if (link) {
653 netif_carrier_on(dev->net);
654 usbnet_defer_kevent(dev, EVENT_LINK_RESET);
655 } else
656 netif_carrier_off(dev->net);
657 netdev_dbg(dev->net, "Link Status is: %d\n", link);
658 }
659 } else
660 data->link_counter = 0;
649} 661}
650 662
651static const struct driver_info moschip_info = { 663static const struct driver_info moschip_info = {
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 6883c371c59f..3b566fa0f8e6 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -369,18 +369,73 @@ static const struct usb_device_id products[] = {
369 USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff), 369 USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff),
370 .driver_info = (unsigned long)&qmi_wwan_info, 370 .driver_info = (unsigned long)&qmi_wwan_info,
371 }, 371 },
372 { /* Novatel USB551L and MC551 */
373 USB_DEVICE_AND_INTERFACE_INFO(0x1410, 0xb001,
374 USB_CLASS_COMM,
375 USB_CDC_SUBCLASS_ETHERNET,
376 USB_CDC_PROTO_NONE),
377 .driver_info = (unsigned long)&qmi_wwan_info,
378 },
379 { /* Novatel E362 */
380 USB_DEVICE_AND_INTERFACE_INFO(0x1410, 0x9010,
381 USB_CLASS_COMM,
382 USB_CDC_SUBCLASS_ETHERNET,
383 USB_CDC_PROTO_NONE),
384 .driver_info = (unsigned long)&qmi_wwan_info,
385 },
372 386
373 /* 3. Combined interface devices matching on interface number */ 387 /* 3. Combined interface devices matching on interface number */
388 {QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
389 {QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
390 {QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
391 {QMI_FIXED_INTF(0x19d2, 0x0021, 4)},
392 {QMI_FIXED_INTF(0x19d2, 0x0025, 1)},
393 {QMI_FIXED_INTF(0x19d2, 0x0031, 4)},
394 {QMI_FIXED_INTF(0x19d2, 0x0042, 4)},
395 {QMI_FIXED_INTF(0x19d2, 0x0049, 5)},
396 {QMI_FIXED_INTF(0x19d2, 0x0052, 4)},
374 {QMI_FIXED_INTF(0x19d2, 0x0055, 1)}, /* ZTE (Vodafone) K3520-Z */ 397 {QMI_FIXED_INTF(0x19d2, 0x0055, 1)}, /* ZTE (Vodafone) K3520-Z */
398 {QMI_FIXED_INTF(0x19d2, 0x0058, 4)},
375 {QMI_FIXED_INTF(0x19d2, 0x0063, 4)}, /* ZTE (Vodafone) K3565-Z */ 399 {QMI_FIXED_INTF(0x19d2, 0x0063, 4)}, /* ZTE (Vodafone) K3565-Z */
376 {QMI_FIXED_INTF(0x19d2, 0x0104, 4)}, /* ZTE (Vodafone) K4505-Z */ 400 {QMI_FIXED_INTF(0x19d2, 0x0104, 4)}, /* ZTE (Vodafone) K4505-Z */
401 {QMI_FIXED_INTF(0x19d2, 0x0113, 5)},
402 {QMI_FIXED_INTF(0x19d2, 0x0118, 5)},
403 {QMI_FIXED_INTF(0x19d2, 0x0121, 5)},
404 {QMI_FIXED_INTF(0x19d2, 0x0123, 4)},
405 {QMI_FIXED_INTF(0x19d2, 0x0124, 5)},
406 {QMI_FIXED_INTF(0x19d2, 0x0125, 6)},
407 {QMI_FIXED_INTF(0x19d2, 0x0126, 5)},
408 {QMI_FIXED_INTF(0x19d2, 0x0130, 1)},
409 {QMI_FIXED_INTF(0x19d2, 0x0133, 3)},
410 {QMI_FIXED_INTF(0x19d2, 0x0141, 5)},
377 {QMI_FIXED_INTF(0x19d2, 0x0157, 5)}, /* ZTE MF683 */ 411 {QMI_FIXED_INTF(0x19d2, 0x0157, 5)}, /* ZTE MF683 */
412 {QMI_FIXED_INTF(0x19d2, 0x0158, 3)},
378 {QMI_FIXED_INTF(0x19d2, 0x0167, 4)}, /* ZTE MF820D */ 413 {QMI_FIXED_INTF(0x19d2, 0x0167, 4)}, /* ZTE MF820D */
414 {QMI_FIXED_INTF(0x19d2, 0x0168, 4)},
415 {QMI_FIXED_INTF(0x19d2, 0x0176, 3)},
416 {QMI_FIXED_INTF(0x19d2, 0x0178, 3)},
417 {QMI_FIXED_INTF(0x19d2, 0x0191, 4)}, /* ZTE EuFi890 */
418 {QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */
419 {QMI_FIXED_INTF(0x19d2, 0x0200, 1)},
420 {QMI_FIXED_INTF(0x19d2, 0x0257, 3)}, /* ZTE MF821 */
379 {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */ 421 {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */
380 {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */ 422 {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */
381 {QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */ 423 {QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */
424 {QMI_FIXED_INTF(0x19d2, 0x1012, 4)},
382 {QMI_FIXED_INTF(0x19d2, 0x1018, 3)}, /* ZTE (Vodafone) K5006-Z */ 425 {QMI_FIXED_INTF(0x19d2, 0x1018, 3)}, /* ZTE (Vodafone) K5006-Z */
426 {QMI_FIXED_INTF(0x19d2, 0x1021, 2)},
427 {QMI_FIXED_INTF(0x19d2, 0x1245, 4)},
428 {QMI_FIXED_INTF(0x19d2, 0x1247, 4)},
429 {QMI_FIXED_INTF(0x19d2, 0x1252, 4)},
430 {QMI_FIXED_INTF(0x19d2, 0x1254, 4)},
431 {QMI_FIXED_INTF(0x19d2, 0x1255, 3)},
432 {QMI_FIXED_INTF(0x19d2, 0x1255, 4)},
433 {QMI_FIXED_INTF(0x19d2, 0x1256, 4)},
434 {QMI_FIXED_INTF(0x19d2, 0x1401, 2)},
383 {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */ 435 {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */
436 {QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
437 {QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
438 {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
384 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ 439 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
385 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ 440 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
386 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 441 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index fc9f578a1e25..cb04f900cc46 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1158,6 +1158,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1158 usb_anchor_urb(urb, &dev->deferred); 1158 usb_anchor_urb(urb, &dev->deferred);
1159 /* no use to process more packets */ 1159 /* no use to process more packets */
1160 netif_stop_queue(net); 1160 netif_stop_queue(net);
1161 usb_put_urb(urb);
1161 spin_unlock_irqrestore(&dev->txq.lock, flags); 1162 spin_unlock_irqrestore(&dev->txq.lock, flags);
1162 netdev_dbg(dev->net, "Delaying transmission for resumption\n"); 1163 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
1163 goto deferred; 1164 goto deferred;
@@ -1310,6 +1311,8 @@ void usbnet_disconnect (struct usb_interface *intf)
1310 1311
1311 cancel_work_sync(&dev->kevent); 1312 cancel_work_sync(&dev->kevent);
1312 1313
1314 usb_scuttle_anchored_urbs(&dev->deferred);
1315
1313 if (dev->driver_info->unbind) 1316 if (dev->driver_info->unbind)
1314 dev->driver_info->unbind (dev, intf); 1317 dev->driver_info->unbind (dev, intf);
1315 1318
@@ -1588,10 +1591,27 @@ int usbnet_resume (struct usb_interface *intf)
1588 tasklet_schedule (&dev->bh); 1591 tasklet_schedule (&dev->bh);
1589 } 1592 }
1590 } 1593 }
1594
1595 if (test_and_clear_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags))
1596 usb_autopm_get_interface_no_resume(intf);
1597
1591 return 0; 1598 return 0;
1592} 1599}
1593EXPORT_SYMBOL_GPL(usbnet_resume); 1600EXPORT_SYMBOL_GPL(usbnet_resume);
1594 1601
1602/*
1603 * Either a subdriver implements manage_power, then it is assumed to always
1604 * be ready to be suspended or it reports the readiness to be suspended
1605 * explicitly
1606 */
1607void usbnet_device_suggests_idle(struct usbnet *dev)
1608{
1609 if (!test_and_set_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags)) {
1610 dev->intf->needs_remote_wakeup = 1;
1611 usb_autopm_put_interface_async(dev->intf);
1612 }
1613}
1614EXPORT_SYMBOL(usbnet_device_suggests_idle);
1595 1615
1596/*-------------------------------------------------------------------------*/ 1616/*-------------------------------------------------------------------------*/
1597 1617
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index ce9d4f2c9776..0ae1bcc6da73 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -744,28 +744,43 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
744 744
745 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 745 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
746 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 746 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
747 u32 buf_size;
747 748
748 tbi = tq->buf_info + tq->tx_ring.next2fill; 749 buf_offset = 0;
749 tbi->map_type = VMXNET3_MAP_PAGE; 750 len = skb_frag_size(frag);
750 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, 751 while (len) {
751 0, skb_frag_size(frag), 752 tbi = tq->buf_info + tq->tx_ring.next2fill;
752 DMA_TO_DEVICE); 753 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
754 buf_size = len;
755 dw2 |= len;
756 } else {
757 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
758 /* spec says that for TxDesc.len, 0 == 2^14 */
759 }
760 tbi->map_type = VMXNET3_MAP_PAGE;
761 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
762 buf_offset, buf_size,
763 DMA_TO_DEVICE);
753 764
754 tbi->len = skb_frag_size(frag); 765 tbi->len = buf_size;
755 766
756 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; 767 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
757 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); 768 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
758 769
759 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); 770 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
760 gdesc->dword[2] = cpu_to_le32(dw2 | skb_frag_size(frag)); 771 gdesc->dword[2] = cpu_to_le32(dw2);
761 gdesc->dword[3] = 0; 772 gdesc->dword[3] = 0;
762 773
763 dev_dbg(&adapter->netdev->dev, 774 dev_dbg(&adapter->netdev->dev,
764 "txd[%u]: 0x%llu %u %u\n", 775 "txd[%u]: 0x%llu %u %u\n",
765 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), 776 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
766 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); 777 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
767 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 778 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
768 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; 779 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
780
781 len -= buf_size;
782 buf_offset += buf_size;
783 }
769 } 784 }
770 785
771 ctx->eop_txd = gdesc; 786 ctx->eop_txd = gdesc;
@@ -886,6 +901,18 @@ vmxnet3_prepare_tso(struct sk_buff *skb,
886 } 901 }
887} 902}
888 903
904static int txd_estimate(const struct sk_buff *skb)
905{
906 int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
907 int i;
908
909 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
910 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
911
912 count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
913 }
914 return count;
915}
889 916
890/* 917/*
891 * Transmits a pkt thru a given tq 918 * Transmits a pkt thru a given tq
@@ -914,9 +941,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
914 union Vmxnet3_GenericDesc tempTxDesc; 941 union Vmxnet3_GenericDesc tempTxDesc;
915#endif 942#endif
916 943
917 /* conservatively estimate # of descriptors to use */ 944 count = txd_estimate(skb);
918 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
919 skb_shinfo(skb)->nr_frags + 1;
920 945
921 ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP)); 946 ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
922 947
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 51de9edb55f5..7b4adde93c01 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -28,7 +28,6 @@
28#include <linux/igmp.h> 28#include <linux/igmp.h>
29#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
30#include <linux/if_ether.h> 30#include <linux/if_ether.h>
31#include <linux/version.h>
32#include <linux/hash.h> 31#include <linux/hash.h>
33#include <net/ip.h> 32#include <net/ip.h>
34#include <net/icmp.h> 33#include <net/icmp.h>
@@ -107,6 +106,8 @@ struct vxlan_dev {
107 __be32 gaddr; /* multicast group */ 106 __be32 gaddr; /* multicast group */
108 __be32 saddr; /* source address */ 107 __be32 saddr; /* source address */
109 unsigned int link; /* link to multicast over */ 108 unsigned int link; /* link to multicast over */
109 __u16 port_min; /* source port range */
110 __u16 port_max;
110 __u8 tos; /* TOS override */ 111 __u8 tos; /* TOS override */
111 __u8 ttl; 112 __u8 ttl;
112 bool learn; 113 bool learn;
@@ -229,9 +230,9 @@ static u32 eth_hash(const unsigned char *addr)
229 230
230 /* only want 6 bytes */ 231 /* only want 6 bytes */
231#ifdef __BIG_ENDIAN 232#ifdef __BIG_ENDIAN
232 value <<= 16;
233#else
234 value >>= 16; 233 value >>= 16;
234#else
235 value <<= 16;
235#endif 236#endif
236 return hash_64(value, FDB_HASH_BITS); 237 return hash_64(value, FDB_HASH_BITS);
237} 238}
@@ -536,7 +537,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
536 } 537 }
537 538
538 __skb_pull(skb, sizeof(struct vxlanhdr)); 539 __skb_pull(skb, sizeof(struct vxlanhdr));
539 skb_postpull_rcsum(skb, eth_hdr(skb), sizeof(struct vxlanhdr));
540 540
541 /* Is this VNI defined? */ 541 /* Is this VNI defined? */
542 vni = ntohl(vxh->vx_vni) >> 8; 542 vni = ntohl(vxh->vx_vni) >> 8;
@@ -555,7 +555,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
555 /* Re-examine inner Ethernet packet */ 555 /* Re-examine inner Ethernet packet */
556 oip = ip_hdr(skb); 556 oip = ip_hdr(skb);
557 skb->protocol = eth_type_trans(skb, vxlan->dev); 557 skb->protocol = eth_type_trans(skb, vxlan->dev);
558 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
559 558
560 /* Ignore packet loops (and multicast echo) */ 559 /* Ignore packet loops (and multicast echo) */
561 if (compare_ether_addr(eth_hdr(skb)->h_source, 560 if (compare_ether_addr(eth_hdr(skb)->h_source,
@@ -567,6 +566,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
567 566
568 __skb_tunnel_rx(skb, vxlan->dev); 567 __skb_tunnel_rx(skb, vxlan->dev);
569 skb_reset_network_header(skb); 568 skb_reset_network_header(skb);
569 skb->ip_summed = CHECKSUM_NONE;
570 570
571 err = IP_ECN_decapsulate(oip, skb); 571 err = IP_ECN_decapsulate(oip, skb);
572 if (unlikely(err)) { 572 if (unlikely(err)) {
@@ -622,46 +622,89 @@ static inline u8 vxlan_ecn_encap(u8 tos,
622 return INET_ECN_encapsulate(tos, inner); 622 return INET_ECN_encapsulate(tos, inner);
623} 623}
624 624
625static __be32 vxlan_find_dst(struct vxlan_dev *vxlan, struct sk_buff *skb)
626{
627 const struct ethhdr *eth = (struct ethhdr *) skb->data;
628 const struct vxlan_fdb *f;
629
630 if (is_multicast_ether_addr(eth->h_dest))
631 return vxlan->gaddr;
632
633 f = vxlan_find_mac(vxlan, eth->h_dest);
634 if (f)
635 return f->remote_ip;
636 else
637 return vxlan->gaddr;
638
639}
640
641static void vxlan_sock_free(struct sk_buff *skb)
642{
643 sock_put(skb->sk);
644}
645
646/* On transmit, associate with the tunnel socket */
647static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
648{
649 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
650 struct sock *sk = vn->sock->sk;
651
652 skb_orphan(skb);
653 sock_hold(sk);
654 skb->sk = sk;
655 skb->destructor = vxlan_sock_free;
656}
657
658/* Compute source port for outgoing packet
659 * first choice to use L4 flow hash since it will spread
660 * better and maybe available from hardware
661 * secondary choice is to use jhash on the Ethernet header
662 */
663static u16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
664{
665 unsigned int range = (vxlan->port_max - vxlan->port_min) + 1;
666 u32 hash;
667
668 hash = skb_get_rxhash(skb);
669 if (!hash)
670 hash = jhash(skb->data, 2 * ETH_ALEN,
671 (__force u32) skb->protocol);
672
673 return (((u64) hash * range) >> 32) + vxlan->port_min;
674}
675
625/* Transmit local packets over Vxlan 676/* Transmit local packets over Vxlan
626 * 677 *
627 * Outer IP header inherits ECN and DF from inner header. 678 * Outer IP header inherits ECN and DF from inner header.
628 * Outer UDP destination is the VXLAN assigned port. 679 * Outer UDP destination is the VXLAN assigned port.
629 * source port is based on hash of flow if available 680 * source port is based on hash of flow
630 * otherwise use a random value
631 */ 681 */
632static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) 682static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
633{ 683{
634 struct vxlan_dev *vxlan = netdev_priv(dev); 684 struct vxlan_dev *vxlan = netdev_priv(dev);
635 struct rtable *rt; 685 struct rtable *rt;
636 const struct ethhdr *eth;
637 const struct iphdr *old_iph; 686 const struct iphdr *old_iph;
638 struct iphdr *iph; 687 struct iphdr *iph;
639 struct vxlanhdr *vxh; 688 struct vxlanhdr *vxh;
640 struct udphdr *uh; 689 struct udphdr *uh;
641 struct flowi4 fl4; 690 struct flowi4 fl4;
642 struct vxlan_fdb *f;
643 unsigned int pkt_len = skb->len; 691 unsigned int pkt_len = skb->len;
644 u32 hash;
645 __be32 dst; 692 __be32 dst;
693 __u16 src_port;
646 __be16 df = 0; 694 __be16 df = 0;
647 __u8 tos, ttl; 695 __u8 tos, ttl;
648 int err; 696 int err;
649 697
698 dst = vxlan_find_dst(vxlan, skb);
699 if (!dst)
700 goto drop;
701
650 /* Need space for new headers (invalidates iph ptr) */ 702 /* Need space for new headers (invalidates iph ptr) */
651 if (skb_cow_head(skb, VXLAN_HEADROOM)) 703 if (skb_cow_head(skb, VXLAN_HEADROOM))
652 goto drop; 704 goto drop;
653 705
654 eth = (void *)skb->data;
655 old_iph = ip_hdr(skb); 706 old_iph = ip_hdr(skb);
656 707
657 if (!is_multicast_ether_addr(eth->h_dest) &&
658 (f = vxlan_find_mac(vxlan, eth->h_dest)))
659 dst = f->remote_ip;
660 else if (vxlan->gaddr) {
661 dst = vxlan->gaddr;
662 } else
663 goto drop;
664
665 ttl = vxlan->ttl; 708 ttl = vxlan->ttl;
666 if (!ttl && IN_MULTICAST(ntohl(dst))) 709 if (!ttl && IN_MULTICAST(ntohl(dst)))
667 ttl = 1; 710 ttl = 1;
@@ -670,11 +713,15 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
670 if (tos == 1) 713 if (tos == 1)
671 tos = vxlan_get_dsfield(old_iph, skb); 714 tos = vxlan_get_dsfield(old_iph, skb);
672 715
673 hash = skb_get_rxhash(skb); 716 src_port = vxlan_src_port(vxlan, skb);
717
718 memset(&fl4, 0, sizeof(fl4));
719 fl4.flowi4_oif = vxlan->link;
720 fl4.flowi4_tos = RT_TOS(tos);
721 fl4.daddr = dst;
722 fl4.saddr = vxlan->saddr;
674 723
675 rt = ip_route_output_gre(dev_net(dev), &fl4, dst, 724 rt = ip_route_output_key(dev_net(dev), &fl4);
676 vxlan->saddr, vxlan->vni,
677 RT_TOS(tos), vxlan->link);
678 if (IS_ERR(rt)) { 725 if (IS_ERR(rt)) {
679 netdev_dbg(dev, "no route to %pI4\n", &dst); 726 netdev_dbg(dev, "no route to %pI4\n", &dst);
680 dev->stats.tx_carrier_errors++; 727 dev->stats.tx_carrier_errors++;
@@ -703,7 +750,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
703 uh = udp_hdr(skb); 750 uh = udp_hdr(skb);
704 751
705 uh->dest = htons(vxlan_port); 752 uh->dest = htons(vxlan_port);
706 uh->source = hash ? :random32(); 753 uh->source = htons(src_port);
707 754
708 uh->len = htons(skb->len); 755 uh->len = htons(skb->len);
709 uh->check = 0; 756 uh->check = 0;
@@ -716,10 +763,12 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
716 iph->frag_off = df; 763 iph->frag_off = df;
717 iph->protocol = IPPROTO_UDP; 764 iph->protocol = IPPROTO_UDP;
718 iph->tos = vxlan_ecn_encap(tos, old_iph, skb); 765 iph->tos = vxlan_ecn_encap(tos, old_iph, skb);
719 iph->daddr = fl4.daddr; 766 iph->daddr = dst;
720 iph->saddr = fl4.saddr; 767 iph->saddr = fl4.saddr;
721 iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); 768 iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
722 769
770 vxlan_set_owner(dev, skb);
771
723 /* See __IPTUNNEL_XMIT */ 772 /* See __IPTUNNEL_XMIT */
724 skb->ip_summed = CHECKSUM_NONE; 773 skb->ip_summed = CHECKSUM_NONE;
725 ip_select_ident(iph, &rt->dst, NULL); 774 ip_select_ident(iph, &rt->dst, NULL);
@@ -767,7 +816,7 @@ static void vxlan_cleanup(unsigned long arg)
767 = container_of(p, struct vxlan_fdb, hlist); 816 = container_of(p, struct vxlan_fdb, hlist);
768 unsigned long timeout; 817 unsigned long timeout;
769 818
770 if (f->state == NUD_PERMANENT) 819 if (f->state & NUD_PERMANENT)
771 continue; 820 continue;
772 821
773 timeout = f->used + vxlan->age_interval * HZ; 822 timeout = f->used + vxlan->age_interval * HZ;
@@ -929,9 +978,11 @@ static void vxlan_setup(struct net_device *dev)
929{ 978{
930 struct vxlan_dev *vxlan = netdev_priv(dev); 979 struct vxlan_dev *vxlan = netdev_priv(dev);
931 unsigned h; 980 unsigned h;
981 int low, high;
932 982
933 eth_hw_addr_random(dev); 983 eth_hw_addr_random(dev);
934 ether_setup(dev); 984 ether_setup(dev);
985 dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;
935 986
936 dev->netdev_ops = &vxlan_netdev_ops; 987 dev->netdev_ops = &vxlan_netdev_ops;
937 dev->destructor = vxlan_free; 988 dev->destructor = vxlan_free;
@@ -948,6 +999,10 @@ static void vxlan_setup(struct net_device *dev)
948 vxlan->age_timer.function = vxlan_cleanup; 999 vxlan->age_timer.function = vxlan_cleanup;
949 vxlan->age_timer.data = (unsigned long) vxlan; 1000 vxlan->age_timer.data = (unsigned long) vxlan;
950 1001
1002 inet_get_local_port_range(&low, &high);
1003 vxlan->port_min = low;
1004 vxlan->port_max = high;
1005
951 vxlan->dev = dev; 1006 vxlan->dev = dev;
952 1007
953 for (h = 0; h < FDB_HASH_SIZE; ++h) 1008 for (h = 0; h < FDB_HASH_SIZE; ++h)
@@ -964,6 +1019,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
964 [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 }, 1019 [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
965 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 }, 1020 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
966 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 }, 1021 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
1022 [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
967}; 1023};
968 1024
969static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) 1025static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -996,6 +1052,18 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
996 return -EADDRNOTAVAIL; 1052 return -EADDRNOTAVAIL;
997 } 1053 }
998 } 1054 }
1055
1056 if (data[IFLA_VXLAN_PORT_RANGE]) {
1057 const struct ifla_vxlan_port_range *p
1058 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
1059
1060 if (ntohs(p->high) < ntohs(p->low)) {
1061 pr_debug("port range %u .. %u not valid\n",
1062 ntohs(p->low), ntohs(p->high));
1063 return -EINVAL;
1064 }
1065 }
1066
999 return 0; 1067 return 0;
1000} 1068}
1001 1069
@@ -1022,14 +1090,18 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
1022 if (data[IFLA_VXLAN_LOCAL]) 1090 if (data[IFLA_VXLAN_LOCAL])
1023 vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]); 1091 vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
1024 1092
1025 if (data[IFLA_VXLAN_LINK]) { 1093 if (data[IFLA_VXLAN_LINK] &&
1026 vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]); 1094 (vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
1095 struct net_device *lowerdev
1096 = __dev_get_by_index(net, vxlan->link);
1027 1097
1028 if (!tb[IFLA_MTU]) { 1098 if (!lowerdev) {
1029 struct net_device *lowerdev; 1099 pr_info("ifindex %d does not exist\n", vxlan->link);
1030 lowerdev = __dev_get_by_index(net, vxlan->link); 1100 return -ENODEV;
1031 dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
1032 } 1101 }
1102
1103 if (!tb[IFLA_MTU])
1104 dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
1033 } 1105 }
1034 1106
1035 if (data[IFLA_VXLAN_TOS]) 1107 if (data[IFLA_VXLAN_TOS])
@@ -1046,6 +1118,13 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
1046 if (data[IFLA_VXLAN_LIMIT]) 1118 if (data[IFLA_VXLAN_LIMIT])
1047 vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); 1119 vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
1048 1120
1121 if (data[IFLA_VXLAN_PORT_RANGE]) {
1122 const struct ifla_vxlan_port_range *p
1123 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
1124 vxlan->port_min = ntohs(p->low);
1125 vxlan->port_max = ntohs(p->high);
1126 }
1127
1049 err = register_netdevice(dev); 1128 err = register_netdevice(dev);
1050 if (!err) 1129 if (!err)
1051 hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni)); 1130 hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni));
@@ -1074,23 +1153,28 @@ static size_t vxlan_get_size(const struct net_device *dev)
1074 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */ 1153 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
1075 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */ 1154 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
1076 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */ 1155 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
1156 nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
1077 0; 1157 0;
1078} 1158}
1079 1159
1080static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) 1160static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
1081{ 1161{
1082 const struct vxlan_dev *vxlan = netdev_priv(dev); 1162 const struct vxlan_dev *vxlan = netdev_priv(dev);
1163 struct ifla_vxlan_port_range ports = {
1164 .low = htons(vxlan->port_min),
1165 .high = htons(vxlan->port_max),
1166 };
1083 1167
1084 if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni)) 1168 if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni))
1085 goto nla_put_failure; 1169 goto nla_put_failure;
1086 1170
1087 if (vxlan->gaddr && nla_put_u32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr)) 1171 if (vxlan->gaddr && nla_put_be32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr))
1088 goto nla_put_failure; 1172 goto nla_put_failure;
1089 1173
1090 if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link)) 1174 if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link))
1091 goto nla_put_failure; 1175 goto nla_put_failure;
1092 1176
1093 if (vxlan->saddr && nla_put_u32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr)) 1177 if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
1094 goto nla_put_failure; 1178 goto nla_put_failure;
1095 1179
1096 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) || 1180 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
@@ -1100,6 +1184,9 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
1100 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax)) 1184 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax))
1101 goto nla_put_failure; 1185 goto nla_put_failure;
1102 1186
1187 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
1188 goto nla_put_failure;
1189
1103 return 0; 1190 return 0;
1104 1191
1105nla_put_failure: 1192nla_put_failure:
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 1a623183cbe5..b6271325f803 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -597,7 +597,7 @@ fst_q_work_item(u64 * queue, int card_index)
597 * bottom half for the card. Note the limitation of 64 cards. 597 * bottom half for the card. Note the limitation of 64 cards.
598 * That ought to be enough 598 * That ought to be enough
599 */ 599 */
600 mask = 1 << card_index; 600 mask = (u64)1 << card_index;
601 *queue |= mask; 601 *queue |= mask;
602 spin_unlock_irqrestore(&fst_work_q_lock, flags); 602 spin_unlock_irqrestore(&fst_work_q_lock, flags);
603} 603}
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 9fd6d9a9942e..9f31cfa56cc0 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1804,7 +1804,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1804{ 1804{
1805 int ret; 1805 int ret;
1806 struct ath5k_hw *ah = hw->priv; 1806 struct ath5k_hw *ah = hw->priv;
1807 struct ath5k_vif *avf = (void *)vif->drv_priv; 1807 struct ath5k_vif *avf;
1808 struct sk_buff *skb; 1808 struct sk_buff *skb;
1809 1809
1810 if (WARN_ON(!vif)) { 1810 if (WARN_ON(!vif)) {
@@ -1819,6 +1819,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1819 goto out; 1819 goto out;
1820 } 1820 }
1821 1821
1822 avf = (void *)vif->drv_priv;
1822 ath5k_txbuf_free_skb(ah, avf->bbuf); 1823 ath5k_txbuf_free_skb(ah, avf->bbuf);
1823 avf->bbuf->skb = skb; 1824 avf->bbuf->skb = skb;
1824 ret = ath5k_beacon_setup(ah, avf->bbuf); 1825 ret = ath5k_beacon_setup(ah, avf->bbuf);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 89bf94d4d8a1..6f7cf49eff4d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -534,107 +534,107 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
534 534
535static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = { 535static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
536 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 536 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
537 {0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352}, 537 {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
538 {0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584}, 538 {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
539 {0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800}, 539 {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
540 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, 540 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
541 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 541 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
542 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 542 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
543 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002}, 543 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
544 {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004}, 544 {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
545 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200}, 545 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
546 {0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202}, 546 {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
547 {0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400}, 547 {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
548 {0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402}, 548 {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
549 {0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404}, 549 {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
550 {0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603}, 550 {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
551 {0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02}, 551 {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
552 {0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04}, 552 {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
553 {0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20}, 553 {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
554 {0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20}, 554 {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
555 {0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22}, 555 {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
556 {0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24}, 556 {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
557 {0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640}, 557 {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
558 {0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660}, 558 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
559 {0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861}, 559 {0x0000a544, 0x52022470, 0x52022470, 0x3f001861, 0x3f001861},
560 {0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81}, 560 {0x0000a548, 0x55022490, 0x55022490, 0x43001a81, 0x43001a81},
561 {0x0000a54c, 0x5a08442e, 0x5a08442e, 0x47001a83, 0x47001a83}, 561 {0x0000a54c, 0x59022492, 0x59022492, 0x47001a83, 0x47001a83},
562 {0x0000a550, 0x5e0a4431, 0x5e0a4431, 0x4a001c84, 0x4a001c84}, 562 {0x0000a550, 0x5d022692, 0x5d022692, 0x4a001c84, 0x4a001c84},
563 {0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3}, 563 {0x0000a554, 0x61022892, 0x61022892, 0x4e001ce3, 0x4e001ce3},
564 {0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5}, 564 {0x0000a558, 0x65024890, 0x65024890, 0x52001ce5, 0x52001ce5},
565 {0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9}, 565 {0x0000a55c, 0x69024892, 0x69024892, 0x56001ce9, 0x56001ce9},
566 {0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb}, 566 {0x0000a560, 0x6e024c92, 0x6e024c92, 0x5a001ceb, 0x5a001ceb},
567 {0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, 567 {0x0000a564, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
568 {0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, 568 {0x0000a568, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
569 {0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, 569 {0x0000a56c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
570 {0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, 570 {0x0000a570, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
571 {0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, 571 {0x0000a574, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
572 {0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, 572 {0x0000a578, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
573 {0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, 573 {0x0000a57c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
574 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000}, 574 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
575 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002}, 575 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
576 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004}, 576 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
577 {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200}, 577 {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
578 {0x0000a590, 0x15800028, 0x15800028, 0x0f800202, 0x0f800202}, 578 {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
579 {0x0000a594, 0x1b80002b, 0x1b80002b, 0x12800400, 0x12800400}, 579 {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
580 {0x0000a598, 0x1f820028, 0x1f820028, 0x16800402, 0x16800402}, 580 {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
581 {0x0000a59c, 0x2582002b, 0x2582002b, 0x19800404, 0x19800404}, 581 {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
582 {0x0000a5a0, 0x2a84002a, 0x2a84002a, 0x1c800603, 0x1c800603}, 582 {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
583 {0x0000a5a4, 0x2e86002a, 0x2e86002a, 0x21800a02, 0x21800a02}, 583 {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
584 {0x0000a5a8, 0x3382202d, 0x3382202d, 0x25800a04, 0x25800a04}, 584 {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
585 {0x0000a5ac, 0x3884202c, 0x3884202c, 0x28800a20, 0x28800a20}, 585 {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
586 {0x0000a5b0, 0x3c86202c, 0x3c86202c, 0x2c800e20, 0x2c800e20}, 586 {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
587 {0x0000a5b4, 0x4188202d, 0x4188202d, 0x30800e22, 0x30800e22}, 587 {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
588 {0x0000a5b8, 0x4586402d, 0x4586402d, 0x34800e24, 0x34800e24}, 588 {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
589 {0x0000a5bc, 0x4986222d, 0x4986222d, 0x38801640, 0x38801640}, 589 {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
590 {0x0000a5c0, 0x4d862231, 0x4d862231, 0x3c801660, 0x3c801660}, 590 {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
591 {0x0000a5c4, 0x50882231, 0x50882231, 0x3f801861, 0x3f801861}, 591 {0x0000a5c4, 0x52822470, 0x52822470, 0x3f801861, 0x3f801861},
592 {0x0000a5c8, 0x5688422e, 0x5688422e, 0x43801a81, 0x43801a81}, 592 {0x0000a5c8, 0x55822490, 0x55822490, 0x43801a81, 0x43801a81},
593 {0x0000a5cc, 0x5a88442e, 0x5a88442e, 0x47801a83, 0x47801a83}, 593 {0x0000a5cc, 0x59822492, 0x59822492, 0x47801a83, 0x47801a83},
594 {0x0000a5d0, 0x5e8a4431, 0x5e8a4431, 0x4a801c84, 0x4a801c84}, 594 {0x0000a5d0, 0x5d822692, 0x5d822692, 0x4a801c84, 0x4a801c84},
595 {0x0000a5d4, 0x648a4432, 0x648a4432, 0x4e801ce3, 0x4e801ce3}, 595 {0x0000a5d4, 0x61822892, 0x61822892, 0x4e801ce3, 0x4e801ce3},
596 {0x0000a5d8, 0x688a4434, 0x688a4434, 0x52801ce5, 0x52801ce5}, 596 {0x0000a5d8, 0x65824890, 0x65824890, 0x52801ce5, 0x52801ce5},
597 {0x0000a5dc, 0x6c8a6434, 0x6c8a6434, 0x56801ce9, 0x56801ce9}, 597 {0x0000a5dc, 0x69824892, 0x69824892, 0x56801ce9, 0x56801ce9},
598 {0x0000a5e0, 0x6f8a6633, 0x6f8a6633, 0x5a801ceb, 0x5a801ceb}, 598 {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x5a801ceb, 0x5a801ceb},
599 {0x0000a5e4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec}, 599 {0x0000a5e4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
600 {0x0000a5e8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec}, 600 {0x0000a5e8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
601 {0x0000a5ec, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec}, 601 {0x0000a5ec, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
602 {0x0000a5f0, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec}, 602 {0x0000a5f0, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
603 {0x0000a5f4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec}, 603 {0x0000a5f4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
604 {0x0000a5f8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec}, 604 {0x0000a5f8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
605 {0x0000a5fc, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec}, 605 {0x0000a5fc, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
606 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 606 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
607 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 607 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
608 {0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000}, 608 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
609 {0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000}, 609 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
610 {0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000}, 610 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
611 {0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000}, 611 {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000},
612 {0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501}, 612 {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501},
613 {0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501}, 613 {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501},
614 {0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03}, 614 {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03},
615 {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04}, 615 {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04},
616 {0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04}, 616 {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04},
617 {0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, 617 {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
618 {0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, 618 {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
619 {0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, 619 {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
620 {0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, 620 {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
621 {0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, 621 {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
622 {0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352}, 622 {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
623 {0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584}, 623 {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
624 {0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800}, 624 {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
625 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, 625 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
626 {0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352}, 626 {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
627 {0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584}, 627 {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
628 {0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800}, 628 {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
629 {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, 629 {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
630 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, 630 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
631 {0x00016048, 0x61200001, 0x61200001, 0x66480001, 0x66480001}, 631 {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
632 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, 632 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
633 {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, 633 {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
634 {0x00016448, 0x61200001, 0x61200001, 0x66480001, 0x66480001}, 634 {0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
635 {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, 635 {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
636 {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, 636 {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
637 {0x00016848, 0x61200001, 0x61200001, 0x66480001, 0x66480001}, 637 {0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
638 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, 638 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
639}; 639};
640 640
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 76f07d8c272d..1b48414dca95 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -120,7 +120,7 @@ static void ath9k_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
120 120
121 if (ath_tx_start(hw, skb, &txctl) != 0) { 121 if (ath_tx_start(hw, skb, &txctl) != 0) {
122 ath_dbg(common, XMIT, "CABQ TX failed\n"); 122 ath_dbg(common, XMIT, "CABQ TX failed\n");
123 dev_kfree_skb_any(skb); 123 ieee80211_free_txskb(hw, skb);
124 } 124 }
125} 125}
126 126
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 924c4616c3d9..f5dda84176c3 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -38,6 +38,7 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
38 { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */ 38 { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
39 { USB_DEVICE(0x040D, 0x3801) }, /* VIA */ 39 { USB_DEVICE(0x040D, 0x3801) }, /* VIA */
40 { USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */ 40 { USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */
41 { USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */
41 { USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */ 42 { USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
42 43
43 { USB_DEVICE(0x0cf3, 0x7015), 44 { USB_DEVICE(0x0cf3, 0x7015),
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index f9a6ec5cf470..8e1559aba495 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1450,9 +1450,14 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
1450 REG_WRITE(ah, AR_RTC_FORCE_WAKE, 1450 REG_WRITE(ah, AR_RTC_FORCE_WAKE,
1451 AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT); 1451 AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
1452 1452
1453 if (!ah->reset_power_on)
1454 type = ATH9K_RESET_POWER_ON;
1455
1453 switch (type) { 1456 switch (type) {
1454 case ATH9K_RESET_POWER_ON: 1457 case ATH9K_RESET_POWER_ON:
1455 ret = ath9k_hw_set_reset_power_on(ah); 1458 ret = ath9k_hw_set_reset_power_on(ah);
1459 if (!ret)
1460 ah->reset_power_on = true;
1456 break; 1461 break;
1457 case ATH9K_RESET_WARM: 1462 case ATH9K_RESET_WARM:
1458 case ATH9K_RESET_COLD: 1463 case ATH9K_RESET_COLD:
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 566a4ce4f156..dbc1b7a4cbfd 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -741,6 +741,7 @@ struct ath_hw {
741 u32 rfkill_polarity; 741 u32 rfkill_polarity;
742 u32 ah_flags; 742 u32 ah_flags;
743 743
744 bool reset_power_on;
744 bool htc_reset_init; 745 bool htc_reset_init;
745 746
746 enum nl80211_iftype opmode; 747 enum nl80211_iftype opmode;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 31ab82e3ba85..dd45edfa6bae 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -639,8 +639,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
639 ath_err(common, 639 ath_err(common,
640 "Unable to reset hardware; reset status %d (freq %u MHz)\n", 640 "Unable to reset hardware; reset status %d (freq %u MHz)\n",
641 r, curchan->center_freq); 641 r, curchan->center_freq);
642 spin_unlock_bh(&sc->sc_pcu_lock); 642 ah->reset_power_on = false;
643 goto mutex_unlock;
644 } 643 }
645 644
646 /* Setup our intr mask. */ 645 /* Setup our intr mask. */
@@ -665,11 +664,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
665 clear_bit(SC_OP_INVALID, &sc->sc_flags); 664 clear_bit(SC_OP_INVALID, &sc->sc_flags);
666 sc->sc_ah->is_monitoring = false; 665 sc->sc_ah->is_monitoring = false;
667 666
668 if (!ath_complete_reset(sc, false)) { 667 if (!ath_complete_reset(sc, false))
669 r = -EIO; 668 ah->reset_power_on = false;
670 spin_unlock_bh(&sc->sc_pcu_lock);
671 goto mutex_unlock;
672 }
673 669
674 if (ah->led_pin >= 0) { 670 if (ah->led_pin >= 0) {
675 ath9k_hw_cfg_output(ah, ah->led_pin, 671 ath9k_hw_cfg_output(ah, ah->led_pin,
@@ -688,12 +684,11 @@ static int ath9k_start(struct ieee80211_hw *hw)
688 if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en) 684 if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
689 common->bus_ops->extn_synch_en(common); 685 common->bus_ops->extn_synch_en(common);
690 686
691mutex_unlock:
692 mutex_unlock(&sc->mutex); 687 mutex_unlock(&sc->mutex);
693 688
694 ath9k_ps_restore(sc); 689 ath9k_ps_restore(sc);
695 690
696 return r; 691 return 0;
697} 692}
698 693
699static void ath9k_tx(struct ieee80211_hw *hw, 694static void ath9k_tx(struct ieee80211_hw *hw,
@@ -770,7 +765,7 @@ static void ath9k_tx(struct ieee80211_hw *hw,
770 765
771 return; 766 return;
772exit: 767exit:
773 dev_kfree_skb_any(skb); 768 ieee80211_free_txskb(hw, skb);
774} 769}
775 770
776static void ath9k_stop(struct ieee80211_hw *hw) 771static void ath9k_stop(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 0e630a99b68b..f088f4bf9a26 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -324,6 +324,10 @@ static int ath_pci_suspend(struct device *device)
324static int ath_pci_resume(struct device *device) 324static int ath_pci_resume(struct device *device)
325{ 325{
326 struct pci_dev *pdev = to_pci_dev(device); 326 struct pci_dev *pdev = to_pci_dev(device);
327 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
328 struct ath_softc *sc = hw->priv;
329 struct ath_hw *ah = sc->sc_ah;
330 struct ath_common *common = ath9k_hw_common(ah);
327 u32 val; 331 u32 val;
328 332
329 /* 333 /*
@@ -335,6 +339,9 @@ static int ath_pci_resume(struct device *device)
335 if ((val & 0x0000ff00) != 0) 339 if ((val & 0x0000ff00) != 0)
336 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); 340 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
337 341
342 ath_pci_aspm_init(common);
343 ah->reset_power_on = false;
344
338 return 0; 345 return 0;
339} 346}
340 347
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 36618e3a5e60..741918a2027b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -66,8 +66,7 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
66static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, 66static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
67 struct ath_txq *txq, 67 struct ath_txq *txq,
68 struct ath_atx_tid *tid, 68 struct ath_atx_tid *tid,
69 struct sk_buff *skb, 69 struct sk_buff *skb);
70 bool dequeue);
71 70
72enum { 71enum {
73 MCS_HT20, 72 MCS_HT20,
@@ -176,7 +175,15 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
176 fi = get_frame_info(skb); 175 fi = get_frame_info(skb);
177 bf = fi->bf; 176 bf = fi->bf;
178 177
179 if (bf && fi->retries) { 178 if (!bf) {
179 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
180 if (!bf) {
181 ieee80211_free_txskb(sc->hw, skb);
182 continue;
183 }
184 }
185
186 if (fi->retries) {
180 list_add_tail(&bf->list, &bf_head); 187 list_add_tail(&bf->list, &bf_head);
181 ath_tx_update_baw(sc, tid, bf->bf_state.seqno); 188 ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
182 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); 189 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
@@ -305,6 +312,7 @@ static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
305 } 312 }
306 313
307 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list); 314 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
315 bf->bf_next = NULL;
308 list_del(&bf->list); 316 list_del(&bf->list);
309 317
310 spin_unlock_bh(&sc->tx.txbuflock); 318 spin_unlock_bh(&sc->tx.txbuflock);
@@ -386,7 +394,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
386 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first; 394 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
387 u32 ba[WME_BA_BMP_SIZE >> 5]; 395 u32 ba[WME_BA_BMP_SIZE >> 5];
388 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; 396 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
389 bool rc_update = true; 397 bool rc_update = true, isba;
390 struct ieee80211_tx_rate rates[4]; 398 struct ieee80211_tx_rate rates[4];
391 struct ath_frame_info *fi; 399 struct ath_frame_info *fi;
392 int nframes; 400 int nframes;
@@ -430,13 +438,17 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
430 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; 438 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
431 tid = ATH_AN_2_TID(an, tidno); 439 tid = ATH_AN_2_TID(an, tidno);
432 seq_first = tid->seq_start; 440 seq_first = tid->seq_start;
441 isba = ts->ts_flags & ATH9K_TX_BA;
433 442
434 /* 443 /*
435 * The hardware occasionally sends a tx status for the wrong TID. 444 * The hardware occasionally sends a tx status for the wrong TID.
436 * In this case, the BA status cannot be considered valid and all 445 * In this case, the BA status cannot be considered valid and all
437 * subframes need to be retransmitted 446 * subframes need to be retransmitted
447 *
448 * Only BlockAcks have a TID and therefore normal Acks cannot be
449 * checked
438 */ 450 */
439 if (tidno != ts->tid) 451 if (isba && tidno != ts->tid)
440 txok = false; 452 txok = false;
441 453
442 isaggr = bf_isaggr(bf); 454 isaggr = bf_isaggr(bf);
@@ -785,10 +797,13 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
785 fi = get_frame_info(skb); 797 fi = get_frame_info(skb);
786 bf = fi->bf; 798 bf = fi->bf;
787 if (!fi->bf) 799 if (!fi->bf)
788 bf = ath_tx_setup_buffer(sc, txq, tid, skb, true); 800 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
789 801
790 if (!bf) 802 if (!bf) {
803 __skb_unlink(skb, &tid->buf_q);
804 ieee80211_free_txskb(sc->hw, skb);
791 continue; 805 continue;
806 }
792 807
793 bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR; 808 bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
794 seqno = bf->bf_state.seqno; 809 seqno = bf->bf_state.seqno;
@@ -1731,9 +1746,11 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1731 return; 1746 return;
1732 } 1747 }
1733 1748
1734 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false); 1749 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1735 if (!bf) 1750 if (!bf) {
1751 ieee80211_free_txskb(sc->hw, skb);
1736 return; 1752 return;
1753 }
1737 1754
1738 bf->bf_state.bf_type = BUF_AMPDU; 1755 bf->bf_state.bf_type = BUF_AMPDU;
1739 INIT_LIST_HEAD(&bf_head); 1756 INIT_LIST_HEAD(&bf_head);
@@ -1757,16 +1774,12 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1757 struct ath_buf *bf; 1774 struct ath_buf *bf;
1758 1775
1759 bf = fi->bf; 1776 bf = fi->bf;
1760 if (!bf)
1761 bf = ath_tx_setup_buffer(sc, txq, tid, skb, false);
1762
1763 if (!bf)
1764 return;
1765 1777
1766 INIT_LIST_HEAD(&bf_head); 1778 INIT_LIST_HEAD(&bf_head);
1767 list_add_tail(&bf->list, &bf_head); 1779 list_add_tail(&bf->list, &bf_head);
1768 bf->bf_state.bf_type = 0; 1780 bf->bf_state.bf_type = 0;
1769 1781
1782 bf->bf_next = NULL;
1770 bf->bf_lastbf = bf; 1783 bf->bf_lastbf = bf;
1771 ath_tx_fill_desc(sc, bf, txq, fi->framelen); 1784 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
1772 ath_tx_txqaddbuf(sc, txq, &bf_head, false); 1785 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
@@ -1839,8 +1852,7 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1839static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, 1852static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
1840 struct ath_txq *txq, 1853 struct ath_txq *txq,
1841 struct ath_atx_tid *tid, 1854 struct ath_atx_tid *tid,
1842 struct sk_buff *skb, 1855 struct sk_buff *skb)
1843 bool dequeue)
1844{ 1856{
1845 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1857 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1846 struct ath_frame_info *fi = get_frame_info(skb); 1858 struct ath_frame_info *fi = get_frame_info(skb);
@@ -1852,7 +1864,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
1852 bf = ath_tx_get_buffer(sc); 1864 bf = ath_tx_get_buffer(sc);
1853 if (!bf) { 1865 if (!bf) {
1854 ath_dbg(common, XMIT, "TX buffers are full\n"); 1866 ath_dbg(common, XMIT, "TX buffers are full\n");
1855 goto error; 1867 return NULL;
1856 } 1868 }
1857 1869
1858 ATH_TXBUF_RESET(bf); 1870 ATH_TXBUF_RESET(bf);
@@ -1881,18 +1893,12 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
1881 ath_err(ath9k_hw_common(sc->sc_ah), 1893 ath_err(ath9k_hw_common(sc->sc_ah),
1882 "dma_mapping_error() on TX\n"); 1894 "dma_mapping_error() on TX\n");
1883 ath_tx_return_buffer(sc, bf); 1895 ath_tx_return_buffer(sc, bf);
1884 goto error; 1896 return NULL;
1885 } 1897 }
1886 1898
1887 fi->bf = bf; 1899 fi->bf = bf;
1888 1900
1889 return bf; 1901 return bf;
1890
1891error:
1892 if (dequeue)
1893 __skb_unlink(skb, &tid->buf_q);
1894 dev_kfree_skb_any(skb);
1895 return NULL;
1896} 1902}
1897 1903
1898/* FIXME: tx power */ 1904/* FIXME: tx power */
@@ -1921,9 +1927,14 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
1921 */ 1927 */
1922 ath_tx_send_ampdu(sc, tid, skb, txctl); 1928 ath_tx_send_ampdu(sc, tid, skb, txctl);
1923 } else { 1929 } else {
1924 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false); 1930 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1925 if (!bf) 1931 if (!bf) {
1932 if (txctl->paprd)
1933 dev_kfree_skb_any(skb);
1934 else
1935 ieee80211_free_txskb(sc->hw, skb);
1926 return; 1936 return;
1937 }
1927 1938
1928 bf->bf_state.bfs_paprd = txctl->paprd; 1939 bf->bf_state.bfs_paprd = txctl->paprd;
1929 1940
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 2aa4a59c72c8..2df17f1e49ef 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -303,6 +303,7 @@ struct ar9170 {
303 unsigned long queue_stop_timeout[__AR9170_NUM_TXQ]; 303 unsigned long queue_stop_timeout[__AR9170_NUM_TXQ];
304 unsigned long max_queue_stop_timeout[__AR9170_NUM_TXQ]; 304 unsigned long max_queue_stop_timeout[__AR9170_NUM_TXQ];
305 bool needs_full_reset; 305 bool needs_full_reset;
306 bool force_usb_reset;
306 atomic_t pending_restarts; 307 atomic_t pending_restarts;
307 308
308 /* interface mode settings */ 309 /* interface mode settings */
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 67997b39aba7..25a1e2f4f738 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -465,27 +465,26 @@ static void carl9170_restart_work(struct work_struct *work)
465{ 465{
466 struct ar9170 *ar = container_of(work, struct ar9170, 466 struct ar9170 *ar = container_of(work, struct ar9170,
467 restart_work); 467 restart_work);
468 int err; 468 int err = -EIO;
469 469
470 ar->usedkeys = 0; 470 ar->usedkeys = 0;
471 ar->filter_state = 0; 471 ar->filter_state = 0;
472 carl9170_cancel_worker(ar); 472 carl9170_cancel_worker(ar);
473 473
474 mutex_lock(&ar->mutex); 474 mutex_lock(&ar->mutex);
475 err = carl9170_usb_restart(ar); 475 if (!ar->force_usb_reset) {
476 if (net_ratelimit()) { 476 err = carl9170_usb_restart(ar);
477 if (err) { 477 if (net_ratelimit()) {
478 dev_err(&ar->udev->dev, "Failed to restart device " 478 if (err)
479 " (%d).\n", err); 479 dev_err(&ar->udev->dev, "Failed to restart device (%d).\n", err);
480 } else { 480 else
481 dev_info(&ar->udev->dev, "device restarted " 481 dev_info(&ar->udev->dev, "device restarted successfully.\n");
482 "successfully.\n");
483 } 482 }
484 } 483 }
485
486 carl9170_zap_queues(ar); 484 carl9170_zap_queues(ar);
487 mutex_unlock(&ar->mutex); 485 mutex_unlock(&ar->mutex);
488 if (!err) { 486
487 if (!err && !ar->force_usb_reset) {
489 ar->restart_counter++; 488 ar->restart_counter++;
490 atomic_set(&ar->pending_restarts, 0); 489 atomic_set(&ar->pending_restarts, 0);
491 490
@@ -526,10 +525,10 @@ void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
526 if (!ar->registered) 525 if (!ar->registered)
527 return; 526 return;
528 527
529 if (IS_ACCEPTING_CMD(ar) && !ar->needs_full_reset) 528 if (!IS_ACCEPTING_CMD(ar) || ar->needs_full_reset)
530 ieee80211_queue_work(ar->hw, &ar->restart_work); 529 ar->force_usb_reset = true;
531 else 530
532 carl9170_usb_reset(ar); 531 ieee80211_queue_work(ar->hw, &ar->restart_work);
533 532
534 /* 533 /*
535 * At this point, the device instance might have vanished/disabled. 534 * At this point, the device instance might have vanished/disabled.
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 73730e94e0ac..c5a99c8c8168 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -5404,6 +5404,8 @@ static void b43_bcma_remove(struct bcma_device *core)
5404 cancel_work_sync(&wldev->restart_work); 5404 cancel_work_sync(&wldev->restart_work);
5405 5405
5406 B43_WARN_ON(!wl); 5406 B43_WARN_ON(!wl);
5407 if (!wldev->fw.ucode.data)
5408 return; /* NULL if firmware never loaded */
5407 if (wl->current_dev == wldev && wl->hw_registred) { 5409 if (wl->current_dev == wldev && wl->hw_registred) {
5408 b43_leds_stop(wldev); 5410 b43_leds_stop(wldev);
5409 ieee80211_unregister_hw(wl->hw); 5411 ieee80211_unregister_hw(wl->hw);
@@ -5478,6 +5480,8 @@ static void b43_ssb_remove(struct ssb_device *sdev)
5478 cancel_work_sync(&wldev->restart_work); 5480 cancel_work_sync(&wldev->restart_work);
5479 5481
5480 B43_WARN_ON(!wl); 5482 B43_WARN_ON(!wl);
5483 if (!wldev->fw.ucode.data)
5484 return; /* NULL if firmware never loaded */
5481 if (wl->current_dev == wldev && wl->hw_registred) { 5485 if (wl->current_dev == wldev && wl->hw_registred) {
5482 b43_leds_stop(wldev); 5486 b43_leds_stop(wldev);
5483 ieee80211_unregister_hw(wl->hw); 5487 ieee80211_unregister_hw(wl->hw);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index a2b4b1e71017..7a6dfdc67b6c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -1339,7 +1339,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo,
1339 } 1339 }
1340 1340
1341 ret = brcmf_bus_start(dev); 1341 ret = brcmf_bus_start(dev);
1342 if (ret == -ENOLINK) { 1342 if (ret) {
1343 brcmf_dbg(ERROR, "dongle is not responding\n"); 1343 brcmf_dbg(ERROR, "dongle is not responding\n");
1344 brcmf_detach(dev); 1344 brcmf_detach(dev);
1345 goto fail; 1345 goto fail;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index c1abaa6db59e..a6f1e8166008 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -3569,7 +3569,7 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
3569 3569
3570 if (!request || !request->n_ssids || !request->n_match_sets) { 3570 if (!request || !request->n_ssids || !request->n_match_sets) {
3571 WL_ERR("Invalid sched scan req!! n_ssids:%d\n", 3571 WL_ERR("Invalid sched scan req!! n_ssids:%d\n",
3572 request->n_ssids); 3572 request ? request->n_ssids : 0);
3573 return -EINVAL; 3573 return -EINVAL;
3574 } 3574 }
3575 3575
@@ -3972,7 +3972,7 @@ brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
3972 u8 *iovar_ie_buf; 3972 u8 *iovar_ie_buf;
3973 u8 *curr_ie_buf; 3973 u8 *curr_ie_buf;
3974 u8 *mgmt_ie_buf = NULL; 3974 u8 *mgmt_ie_buf = NULL;
3975 u32 mgmt_ie_buf_len = 0; 3975 int mgmt_ie_buf_len;
3976 u32 *mgmt_ie_len = 0; 3976 u32 *mgmt_ie_len = 0;
3977 u32 del_add_ie_buf_len = 0; 3977 u32 del_add_ie_buf_len = 0;
3978 u32 total_ie_buf_len = 0; 3978 u32 total_ie_buf_len = 0;
@@ -3982,7 +3982,7 @@ brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
3982 struct parsed_vndr_ie_info *vndrie_info; 3982 struct parsed_vndr_ie_info *vndrie_info;
3983 s32 i; 3983 s32 i;
3984 u8 *ptr; 3984 u8 *ptr;
3985 u32 remained_buf_len; 3985 int remained_buf_len;
3986 3986
3987 WL_TRACE("bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag); 3987 WL_TRACE("bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag);
3988 iovar_ie_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL); 3988 iovar_ie_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
@@ -4606,12 +4606,13 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
4606 struct brcmf_cfg80211_profile *profile = cfg->profile; 4606 struct brcmf_cfg80211_profile *profile = cfg->profile;
4607 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg); 4607 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
4608 struct wiphy *wiphy = cfg_to_wiphy(cfg); 4608 struct wiphy *wiphy = cfg_to_wiphy(cfg);
4609 struct brcmf_channel_info_le channel_le; 4609 struct ieee80211_channel *notify_channel = NULL;
4610 struct ieee80211_channel *notify_channel;
4611 struct ieee80211_supported_band *band; 4610 struct ieee80211_supported_band *band;
4611 struct brcmf_bss_info_le *bi;
4612 u32 freq; 4612 u32 freq;
4613 s32 err = 0; 4613 s32 err = 0;
4614 u32 target_channel; 4614 u32 target_channel;
4615 u8 *buf;
4615 4616
4616 WL_TRACE("Enter\n"); 4617 WL_TRACE("Enter\n");
4617 4618
@@ -4619,11 +4620,22 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
4619 memcpy(profile->bssid, e->addr, ETH_ALEN); 4620 memcpy(profile->bssid, e->addr, ETH_ALEN);
4620 brcmf_update_bss_info(cfg); 4621 brcmf_update_bss_info(cfg);
4621 4622
4622 brcmf_exec_dcmd(ndev, BRCMF_C_GET_CHANNEL, &channel_le, 4623 buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
4623 sizeof(channel_le)); 4624 if (buf == NULL) {
4625 err = -ENOMEM;
4626 goto done;
4627 }
4628
4629 /* data sent to dongle has to be little endian */
4630 *(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX);
4631 err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_BSS_INFO, buf, WL_BSS_INFO_MAX);
4632
4633 if (err)
4634 goto done;
4624 4635
4625 target_channel = le32_to_cpu(channel_le.target_channel); 4636 bi = (struct brcmf_bss_info_le *)(buf + 4);
4626 WL_CONN("Roamed to channel %d\n", target_channel); 4637 target_channel = bi->ctl_ch ? bi->ctl_ch :
4638 CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec));
4627 4639
4628 if (target_channel <= CH_MAX_2G_CHANNEL) 4640 if (target_channel <= CH_MAX_2G_CHANNEL)
4629 band = wiphy->bands[IEEE80211_BAND_2GHZ]; 4641 band = wiphy->bands[IEEE80211_BAND_2GHZ];
@@ -4633,6 +4645,8 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
4633 freq = ieee80211_channel_to_frequency(target_channel, band->band); 4645 freq = ieee80211_channel_to_frequency(target_channel, band->band);
4634 notify_channel = ieee80211_get_channel(wiphy, freq); 4646 notify_channel = ieee80211_get_channel(wiphy, freq);
4635 4647
4648done:
4649 kfree(buf);
4636 cfg80211_roamed(ndev, notify_channel, (u8 *)profile->bssid, 4650 cfg80211_roamed(ndev, notify_channel, (u8 *)profile->bssid,
4637 conn_info->req_ie, conn_info->req_ie_len, 4651 conn_info->req_ie, conn_info->req_ie_len,
4638 conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL); 4652 conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
@@ -5186,41 +5200,6 @@ brcmf_cfg80211_event(struct net_device *ndev,
5186 schedule_work(&cfg->event_work); 5200 schedule_work(&cfg->event_work);
5187} 5201}
5188 5202
5189static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype)
5190{
5191 s32 infra = 0;
5192 s32 err = 0;
5193
5194 switch (iftype) {
5195 case NL80211_IFTYPE_MONITOR:
5196 case NL80211_IFTYPE_WDS:
5197 WL_ERR("type (%d) : currently we do not support this mode\n",
5198 iftype);
5199 err = -EINVAL;
5200 return err;
5201 case NL80211_IFTYPE_ADHOC:
5202 infra = 0;
5203 break;
5204 case NL80211_IFTYPE_STATION:
5205 infra = 1;
5206 break;
5207 case NL80211_IFTYPE_AP:
5208 infra = 1;
5209 break;
5210 default:
5211 err = -EINVAL;
5212 WL_ERR("invalid type (%d)\n", iftype);
5213 return err;
5214 }
5215 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra);
5216 if (err) {
5217 WL_ERR("WLC_SET_INFRA error (%d)\n", err);
5218 return err;
5219 }
5220
5221 return 0;
5222}
5223
5224static s32 brcmf_dongle_eventmsg(struct net_device *ndev) 5203static s32 brcmf_dongle_eventmsg(struct net_device *ndev)
5225{ 5204{
5226 /* Room for "event_msgs" + '\0' + bitvec */ 5205 /* Room for "event_msgs" + '\0' + bitvec */
@@ -5439,7 +5418,8 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
5439 WL_BEACON_TIMEOUT); 5418 WL_BEACON_TIMEOUT);
5440 if (err) 5419 if (err)
5441 goto default_conf_out; 5420 goto default_conf_out;
5442 err = brcmf_dongle_mode(ndev, wdev->iftype); 5421 err = brcmf_cfg80211_change_iface(wdev->wiphy, ndev, wdev->iftype,
5422 NULL, NULL);
5443 if (err && err != -EINPROGRESS) 5423 if (err && err != -EINPROGRESS)
5444 goto default_conf_out; 5424 goto default_conf_out;
5445 err = brcmf_dongle_probecap(cfg); 5425 err = brcmf_dongle_probecap(cfg);
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 935120fc8c93..768bf612533e 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -10472,7 +10472,7 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10472 } else 10472 } else
10473 len = src->len; 10473 len = src->len;
10474 10474
10475 dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC); 10475 dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC);
10476 if (!dst) 10476 if (!dst)
10477 continue; 10477 continue;
10478 10478
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index 349c205d5f62..da5862064195 100644
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -518,7 +518,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
518 * See iwlagn_mac_channel_switch. 518 * See iwlagn_mac_channel_switch.
519 */ 519 */
520 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 520 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
521 struct iwl6000_channel_switch_cmd cmd; 521 struct iwl6000_channel_switch_cmd *cmd;
522 u32 switch_time_in_usec, ucode_switch_time; 522 u32 switch_time_in_usec, ucode_switch_time;
523 u16 ch; 523 u16 ch;
524 u32 tsf_low; 524 u32 tsf_low;
@@ -527,18 +527,25 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
527 struct ieee80211_vif *vif = ctx->vif; 527 struct ieee80211_vif *vif = ctx->vif;
528 struct iwl_host_cmd hcmd = { 528 struct iwl_host_cmd hcmd = {
529 .id = REPLY_CHANNEL_SWITCH, 529 .id = REPLY_CHANNEL_SWITCH,
530 .len = { sizeof(cmd), }, 530 .len = { sizeof(*cmd), },
531 .flags = CMD_SYNC, 531 .flags = CMD_SYNC,
532 .data = { &cmd, }, 532 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
533 }; 533 };
534 int err;
534 535
535 cmd.band = priv->band == IEEE80211_BAND_2GHZ; 536 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
537 if (!cmd)
538 return -ENOMEM;
539
540 hcmd.data[0] = cmd;
541
542 cmd->band = priv->band == IEEE80211_BAND_2GHZ;
536 ch = ch_switch->channel->hw_value; 543 ch = ch_switch->channel->hw_value;
537 IWL_DEBUG_11H(priv, "channel switch from %u to %u\n", 544 IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
538 ctx->active.channel, ch); 545 ctx->active.channel, ch);
539 cmd.channel = cpu_to_le16(ch); 546 cmd->channel = cpu_to_le16(ch);
540 cmd.rxon_flags = ctx->staging.flags; 547 cmd->rxon_flags = ctx->staging.flags;
541 cmd.rxon_filter_flags = ctx->staging.filter_flags; 548 cmd->rxon_filter_flags = ctx->staging.filter_flags;
542 switch_count = ch_switch->count; 549 switch_count = ch_switch->count;
543 tsf_low = ch_switch->timestamp & 0x0ffffffff; 550 tsf_low = ch_switch->timestamp & 0x0ffffffff;
544 /* 551 /*
@@ -554,23 +561,25 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
554 switch_count = 0; 561 switch_count = 0;
555 } 562 }
556 if (switch_count <= 1) 563 if (switch_count <= 1)
557 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); 564 cmd->switch_time = cpu_to_le32(priv->ucode_beacon_time);
558 else { 565 else {
559 switch_time_in_usec = 566 switch_time_in_usec =
560 vif->bss_conf.beacon_int * switch_count * TIME_UNIT; 567 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
561 ucode_switch_time = iwl_usecs_to_beacons(priv, 568 ucode_switch_time = iwl_usecs_to_beacons(priv,
562 switch_time_in_usec, 569 switch_time_in_usec,
563 beacon_interval); 570 beacon_interval);
564 cmd.switch_time = iwl_add_beacon_time(priv, 571 cmd->switch_time = iwl_add_beacon_time(priv,
565 priv->ucode_beacon_time, 572 priv->ucode_beacon_time,
566 ucode_switch_time, 573 ucode_switch_time,
567 beacon_interval); 574 beacon_interval);
568 } 575 }
569 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", 576 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
570 cmd.switch_time); 577 cmd->switch_time);
571 cmd.expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR; 578 cmd->expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR;
572 579
573 return iwl_dvm_send_cmd(priv, &hcmd); 580 err = iwl_dvm_send_cmd(priv, &hcmd);
581 kfree(cmd);
582 return err;
574} 583}
575 584
576struct iwl_lib_ops iwl6000_lib = { 585struct iwl_lib_ops iwl6000_lib = {
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 2691620393ea..780d3e168297 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1596,8 +1596,9 @@ done:
1596 } 1596 }
1597 } 1597 }
1598 1598
1599 if (mwifiex_bss_start(priv, bss, &req_ssid)) 1599 ret = mwifiex_bss_start(priv, bss, &req_ssid);
1600 return -EFAULT; 1600 if (ret)
1601 return ret;
1601 1602
1602 if (mode == NL80211_IFTYPE_ADHOC) { 1603 if (mode == NL80211_IFTYPE_ADHOC) {
1603 /* Inform the BSS information to kernel, otherwise 1604 /* Inform the BSS information to kernel, otherwise
@@ -1652,9 +1653,19 @@ done:
1652 "info: association to bssid %pM failed\n", 1653 "info: association to bssid %pM failed\n",
1653 priv->cfg_bssid); 1654 priv->cfg_bssid);
1654 memset(priv->cfg_bssid, 0, ETH_ALEN); 1655 memset(priv->cfg_bssid, 0, ETH_ALEN);
1656
1657 if (ret > 0)
1658 cfg80211_connect_result(priv->netdev, priv->cfg_bssid,
1659 NULL, 0, NULL, 0, ret,
1660 GFP_KERNEL);
1661 else
1662 cfg80211_connect_result(priv->netdev, priv->cfg_bssid,
1663 NULL, 0, NULL, 0,
1664 WLAN_STATUS_UNSPECIFIED_FAILURE,
1665 GFP_KERNEL);
1655 } 1666 }
1656 1667
1657 return ret; 1668 return 0;
1658} 1669}
1659 1670
1660/* 1671/*
@@ -1802,7 +1813,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
1802{ 1813{
1803 struct net_device *dev = request->wdev->netdev; 1814 struct net_device *dev = request->wdev->netdev;
1804 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 1815 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
1805 int i, offset; 1816 int i, offset, ret;
1806 struct ieee80211_channel *chan; 1817 struct ieee80211_channel *chan;
1807 struct ieee_types_header *ie; 1818 struct ieee_types_header *ie;
1808 1819
@@ -1814,8 +1825,6 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
1814 return -EBUSY; 1825 return -EBUSY;
1815 } 1826 }
1816 1827
1817 priv->scan_request = request;
1818
1819 priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), 1828 priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
1820 GFP_KERNEL); 1829 GFP_KERNEL);
1821 if (!priv->user_scan_cfg) { 1830 if (!priv->user_scan_cfg) {
@@ -1823,6 +1832,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
1823 return -ENOMEM; 1832 return -ENOMEM;
1824 } 1833 }
1825 1834
1835 priv->scan_request = request;
1836
1826 priv->user_scan_cfg->num_ssids = request->n_ssids; 1837 priv->user_scan_cfg->num_ssids = request->n_ssids;
1827 priv->user_scan_cfg->ssid_list = request->ssids; 1838 priv->user_scan_cfg->ssid_list = request->ssids;
1828 1839
@@ -1855,8 +1866,15 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
1855 1866
1856 priv->user_scan_cfg->chan_list[i].scan_time = 0; 1867 priv->user_scan_cfg->chan_list[i].scan_time = 0;
1857 } 1868 }
1858 if (mwifiex_scan_networks(priv, priv->user_scan_cfg)) 1869
1859 return -EFAULT; 1870 ret = mwifiex_scan_networks(priv, priv->user_scan_cfg);
1871 if (ret) {
1872 dev_err(priv->adapter->dev, "scan failed: %d\n", ret);
1873 priv->scan_request = NULL;
1874 kfree(priv->user_scan_cfg);
1875 priv->user_scan_cfg = NULL;
1876 return ret;
1877 }
1860 1878
1861 if (request->ie && request->ie_len) { 1879 if (request->ie && request->ie_len) {
1862 for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) { 1880 for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) {
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 82e63cee1e97..7b0858af8f5d 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1180,16 +1180,18 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
1180 struct mwifiex_adapter *adapter = priv->adapter; 1180 struct mwifiex_adapter *adapter = priv->adapter;
1181 struct host_cmd_ds_802_11_ad_hoc_result *adhoc_result; 1181 struct host_cmd_ds_802_11_ad_hoc_result *adhoc_result;
1182 struct mwifiex_bssdescriptor *bss_desc; 1182 struct mwifiex_bssdescriptor *bss_desc;
1183 u16 reason_code;
1183 1184
1184 adhoc_result = &resp->params.adhoc_result; 1185 adhoc_result = &resp->params.adhoc_result;
1185 1186
1186 bss_desc = priv->attempted_bss_desc; 1187 bss_desc = priv->attempted_bss_desc;
1187 1188
1188 /* Join result code 0 --> SUCCESS */ 1189 /* Join result code 0 --> SUCCESS */
1189 if (le16_to_cpu(resp->result)) { 1190 reason_code = le16_to_cpu(resp->result);
1191 if (reason_code) {
1190 dev_err(priv->adapter->dev, "ADHOC_RESP: failed\n"); 1192 dev_err(priv->adapter->dev, "ADHOC_RESP: failed\n");
1191 if (priv->media_connected) 1193 if (priv->media_connected)
1192 mwifiex_reset_connect_state(priv); 1194 mwifiex_reset_connect_state(priv, reason_code);
1193 1195
1194 memset(&priv->curr_bss_params.bss_descriptor, 1196 memset(&priv->curr_bss_params.bss_descriptor,
1195 0x00, sizeof(struct mwifiex_bssdescriptor)); 1197 0x00, sizeof(struct mwifiex_bssdescriptor));
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index bfb3fa69805c..c2d0ab146af5 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -847,7 +847,7 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
847 struct mwifiex_bssdescriptor *bss_desc); 847 struct mwifiex_bssdescriptor *bss_desc);
848int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, 848int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
849 struct host_cmd_ds_command *resp); 849 struct host_cmd_ds_command *resp);
850void mwifiex_reset_connect_state(struct mwifiex_private *priv); 850void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason);
851u8 mwifiex_band_to_radio_type(u8 band); 851u8 mwifiex_band_to_radio_type(u8 band);
852int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac); 852int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac);
853int mwifiex_adhoc_start(struct mwifiex_private *priv, 853int mwifiex_adhoc_start(struct mwifiex_private *priv,
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index e36a75988f87..9171aaedbccd 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -1296,7 +1296,7 @@ mwifiex_radio_type_to_band(u8 radio_type)
1296int mwifiex_scan_networks(struct mwifiex_private *priv, 1296int mwifiex_scan_networks(struct mwifiex_private *priv,
1297 const struct mwifiex_user_scan_cfg *user_scan_in) 1297 const struct mwifiex_user_scan_cfg *user_scan_in)
1298{ 1298{
1299 int ret = 0; 1299 int ret;
1300 struct mwifiex_adapter *adapter = priv->adapter; 1300 struct mwifiex_adapter *adapter = priv->adapter;
1301 struct cmd_ctrl_node *cmd_node; 1301 struct cmd_ctrl_node *cmd_node;
1302 union mwifiex_scan_cmd_config_tlv *scan_cfg_out; 1302 union mwifiex_scan_cmd_config_tlv *scan_cfg_out;
@@ -1309,25 +1309,26 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
1309 unsigned long flags; 1309 unsigned long flags;
1310 1310
1311 if (adapter->scan_processing) { 1311 if (adapter->scan_processing) {
1312 dev_dbg(adapter->dev, "cmd: Scan already in process...\n"); 1312 dev_err(adapter->dev, "cmd: Scan already in process...\n");
1313 return ret; 1313 return -EBUSY;
1314 } 1314 }
1315 1315
1316 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
1317 adapter->scan_processing = true;
1318 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
1319
1320 if (priv->scan_block) { 1316 if (priv->scan_block) {
1321 dev_dbg(adapter->dev, 1317 dev_err(adapter->dev,
1322 "cmd: Scan is blocked during association...\n"); 1318 "cmd: Scan is blocked during association...\n");
1323 return ret; 1319 return -EBUSY;
1324 } 1320 }
1325 1321
1322 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
1323 adapter->scan_processing = true;
1324 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
1325
1326 scan_cfg_out = kzalloc(sizeof(union mwifiex_scan_cmd_config_tlv), 1326 scan_cfg_out = kzalloc(sizeof(union mwifiex_scan_cmd_config_tlv),
1327 GFP_KERNEL); 1327 GFP_KERNEL);
1328 if (!scan_cfg_out) { 1328 if (!scan_cfg_out) {
1329 dev_err(adapter->dev, "failed to alloc scan_cfg_out\n"); 1329 dev_err(adapter->dev, "failed to alloc scan_cfg_out\n");
1330 return -ENOMEM; 1330 ret = -ENOMEM;
1331 goto done;
1331 } 1332 }
1332 1333
1333 buf_size = sizeof(struct mwifiex_chan_scan_param_set) * 1334 buf_size = sizeof(struct mwifiex_chan_scan_param_set) *
@@ -1336,7 +1337,8 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
1336 if (!scan_chan_list) { 1337 if (!scan_chan_list) {
1337 dev_err(adapter->dev, "failed to alloc scan_chan_list\n"); 1338 dev_err(adapter->dev, "failed to alloc scan_chan_list\n");
1338 kfree(scan_cfg_out); 1339 kfree(scan_cfg_out);
1339 return -ENOMEM; 1340 ret = -ENOMEM;
1341 goto done;
1340 } 1342 }
1341 1343
1342 mwifiex_config_scan(priv, user_scan_in, &scan_cfg_out->config, 1344 mwifiex_config_scan(priv, user_scan_in, &scan_cfg_out->config,
@@ -1364,14 +1366,16 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
1364 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, 1366 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1365 flags); 1367 flags);
1366 } 1368 }
1367 } else {
1368 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
1369 adapter->scan_processing = true;
1370 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
1371 } 1369 }
1372 1370
1373 kfree(scan_cfg_out); 1371 kfree(scan_cfg_out);
1374 kfree(scan_chan_list); 1372 kfree(scan_chan_list);
1373done:
1374 if (ret) {
1375 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
1376 adapter->scan_processing = false;
1377 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
1378 }
1375 return ret; 1379 return ret;
1376} 1380}
1377 1381
@@ -1430,8 +1434,8 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
1430 ret = mwifiex_is_network_compatible(priv, bss_desc, 1434 ret = mwifiex_is_network_compatible(priv, bss_desc,
1431 priv->bss_mode); 1435 priv->bss_mode);
1432 if (ret) 1436 if (ret)
1433 dev_err(priv->adapter->dev, "cannot find ssid " 1437 dev_err(priv->adapter->dev,
1434 "%s\n", bss_desc->ssid.ssid); 1438 "Incompatible network settings\n");
1435 break; 1439 break;
1436 default: 1440 default:
1437 ret = 0; 1441 ret = 0;
@@ -1839,21 +1843,18 @@ static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv,
1839 struct cfg80211_ssid *req_ssid) 1843 struct cfg80211_ssid *req_ssid)
1840{ 1844{
1841 struct mwifiex_adapter *adapter = priv->adapter; 1845 struct mwifiex_adapter *adapter = priv->adapter;
1842 int ret = 0; 1846 int ret;
1843 struct mwifiex_user_scan_cfg *scan_cfg; 1847 struct mwifiex_user_scan_cfg *scan_cfg;
1844 1848
1845 if (!req_ssid)
1846 return -1;
1847
1848 if (adapter->scan_processing) { 1849 if (adapter->scan_processing) {
1849 dev_dbg(adapter->dev, "cmd: Scan already in process...\n"); 1850 dev_err(adapter->dev, "cmd: Scan already in process...\n");
1850 return ret; 1851 return -EBUSY;
1851 } 1852 }
1852 1853
1853 if (priv->scan_block) { 1854 if (priv->scan_block) {
1854 dev_dbg(adapter->dev, 1855 dev_err(adapter->dev,
1855 "cmd: Scan is blocked during association...\n"); 1856 "cmd: Scan is blocked during association...\n");
1856 return ret; 1857 return -EBUSY;
1857 } 1858 }
1858 1859
1859 scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), GFP_KERNEL); 1860 scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), GFP_KERNEL);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index e380171c4c5d..09e6a267f566 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -545,7 +545,7 @@ static int mwifiex_ret_802_11_deauthenticate(struct mwifiex_private *priv,
545 if (!memcmp(resp->params.deauth.mac_addr, 545 if (!memcmp(resp->params.deauth.mac_addr,
546 &priv->curr_bss_params.bss_descriptor.mac_address, 546 &priv->curr_bss_params.bss_descriptor.mac_address,
547 sizeof(resp->params.deauth.mac_addr))) 547 sizeof(resp->params.deauth.mac_addr)))
548 mwifiex_reset_connect_state(priv); 548 mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING);
549 549
550 return 0; 550 return 0;
551} 551}
@@ -558,7 +558,7 @@ static int mwifiex_ret_802_11_deauthenticate(struct mwifiex_private *priv,
558static int mwifiex_ret_802_11_ad_hoc_stop(struct mwifiex_private *priv, 558static int mwifiex_ret_802_11_ad_hoc_stop(struct mwifiex_private *priv,
559 struct host_cmd_ds_command *resp) 559 struct host_cmd_ds_command *resp)
560{ 560{
561 mwifiex_reset_connect_state(priv); 561 mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING);
562 return 0; 562 return 0;
563} 563}
564 564
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index aafde30e714a..8132119e1a21 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -41,7 +41,7 @@
41 * - Sends a disconnect event to upper layers/applications. 41 * - Sends a disconnect event to upper layers/applications.
42 */ 42 */
43void 43void
44mwifiex_reset_connect_state(struct mwifiex_private *priv) 44mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
45{ 45{
46 struct mwifiex_adapter *adapter = priv->adapter; 46 struct mwifiex_adapter *adapter = priv->adapter;
47 47
@@ -117,10 +117,10 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv)
117 priv->media_connected = false; 117 priv->media_connected = false;
118 dev_dbg(adapter->dev, 118 dev_dbg(adapter->dev,
119 "info: successfully disconnected from %pM: reason code %d\n", 119 "info: successfully disconnected from %pM: reason code %d\n",
120 priv->cfg_bssid, WLAN_REASON_DEAUTH_LEAVING); 120 priv->cfg_bssid, reason_code);
121 if (priv->bss_mode == NL80211_IFTYPE_STATION) { 121 if (priv->bss_mode == NL80211_IFTYPE_STATION) {
122 cfg80211_disconnected(priv->netdev, WLAN_REASON_DEAUTH_LEAVING, 122 cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
123 NULL, 0, GFP_KERNEL); 123 GFP_KERNEL);
124 } 124 }
125 memset(priv->cfg_bssid, 0, ETH_ALEN); 125 memset(priv->cfg_bssid, 0, ETH_ALEN);
126 126
@@ -186,7 +186,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
186 struct mwifiex_adapter *adapter = priv->adapter; 186 struct mwifiex_adapter *adapter = priv->adapter;
187 int ret = 0; 187 int ret = 0;
188 u32 eventcause = adapter->event_cause; 188 u32 eventcause = adapter->event_cause;
189 u16 ctrl; 189 u16 ctrl, reason_code;
190 190
191 switch (eventcause) { 191 switch (eventcause) {
192 case EVENT_DUMMY_HOST_WAKEUP_SIGNAL: 192 case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
@@ -204,22 +204,31 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
204 case EVENT_DEAUTHENTICATED: 204 case EVENT_DEAUTHENTICATED:
205 dev_dbg(adapter->dev, "event: Deauthenticated\n"); 205 dev_dbg(adapter->dev, "event: Deauthenticated\n");
206 adapter->dbg.num_event_deauth++; 206 adapter->dbg.num_event_deauth++;
207 if (priv->media_connected) 207 if (priv->media_connected) {
208 mwifiex_reset_connect_state(priv); 208 reason_code =
209 le16_to_cpu(*(__le16 *)adapter->event_body);
210 mwifiex_reset_connect_state(priv, reason_code);
211 }
209 break; 212 break;
210 213
211 case EVENT_DISASSOCIATED: 214 case EVENT_DISASSOCIATED:
212 dev_dbg(adapter->dev, "event: Disassociated\n"); 215 dev_dbg(adapter->dev, "event: Disassociated\n");
213 adapter->dbg.num_event_disassoc++; 216 adapter->dbg.num_event_disassoc++;
214 if (priv->media_connected) 217 if (priv->media_connected) {
215 mwifiex_reset_connect_state(priv); 218 reason_code =
219 le16_to_cpu(*(__le16 *)adapter->event_body);
220 mwifiex_reset_connect_state(priv, reason_code);
221 }
216 break; 222 break;
217 223
218 case EVENT_LINK_LOST: 224 case EVENT_LINK_LOST:
219 dev_dbg(adapter->dev, "event: Link lost\n"); 225 dev_dbg(adapter->dev, "event: Link lost\n");
220 adapter->dbg.num_event_link_lost++; 226 adapter->dbg.num_event_link_lost++;
221 if (priv->media_connected) 227 if (priv->media_connected) {
222 mwifiex_reset_connect_state(priv); 228 reason_code =
229 le16_to_cpu(*(__le16 *)adapter->event_body);
230 mwifiex_reset_connect_state(priv, reason_code);
231 }
223 break; 232 break;
224 233
225 case EVENT_PS_SLEEP: 234 case EVENT_PS_SLEEP:
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index a12e84f892be..6b2e1e431dd2 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1988,6 +1988,7 @@ static struct usb_driver rt2500usb_driver = {
1988 .disconnect = rt2x00usb_disconnect, 1988 .disconnect = rt2x00usb_disconnect,
1989 .suspend = rt2x00usb_suspend, 1989 .suspend = rt2x00usb_suspend,
1990 .resume = rt2x00usb_resume, 1990 .resume = rt2x00usb_resume,
1991 .reset_resume = rt2x00usb_resume,
1991 .disable_hub_initiated_lpm = 1, 1992 .disable_hub_initiated_lpm = 1,
1992}; 1993};
1993 1994
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 540c94f8505a..59474ae0aec0 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -2252,9 +2252,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2252 */ 2252 */
2253 if (rt2x00_rt(rt2x00dev, RT3352)) { 2253 if (rt2x00_rt(rt2x00dev, RT3352)) {
2254 rt2800_bbp_write(rt2x00dev, 27, 0x0); 2254 rt2800_bbp_write(rt2x00dev, 27, 0x0);
2255 rt2800_bbp_write(rt2x00dev, 62, 0x26 + rt2x00dev->lna_gain); 2255 rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain);
2256 rt2800_bbp_write(rt2x00dev, 27, 0x20); 2256 rt2800_bbp_write(rt2x00dev, 27, 0x20);
2257 rt2800_bbp_write(rt2x00dev, 62, 0x26 + rt2x00dev->lna_gain); 2257 rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain);
2258 } else { 2258 } else {
2259 rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain); 2259 rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
2260 rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain); 2260 rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
@@ -2449,7 +2449,7 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
2449 /* 2449 /*
2450 * Check if temperature compensation is supported. 2450 * Check if temperature compensation is supported.
2451 */ 2451 */
2452 if (tssi_bounds[4] == 0xff) 2452 if (tssi_bounds[4] == 0xff || step == 0xff)
2453 return 0; 2453 return 0;
2454 2454
2455 /* 2455 /*
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index c9e9370eb789..3b8fb5a603f2 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -1282,6 +1282,7 @@ static struct usb_driver rt2800usb_driver = {
1282 .disconnect = rt2x00usb_disconnect, 1282 .disconnect = rt2x00usb_disconnect,
1283 .suspend = rt2x00usb_suspend, 1283 .suspend = rt2x00usb_suspend,
1284 .resume = rt2x00usb_resume, 1284 .resume = rt2x00usb_resume,
1285 .reset_resume = rt2x00usb_resume,
1285 .disable_hub_initiated_lpm = 1, 1286 .disable_hub_initiated_lpm = 1,
1286}; 1287};
1287 1288
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index e5eb43b3eee7..24eec66e9fd2 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2535,6 +2535,7 @@ static struct usb_driver rt73usb_driver = {
2535 .disconnect = rt2x00usb_disconnect, 2535 .disconnect = rt2x00usb_disconnect,
2536 .suspend = rt2x00usb_suspend, 2536 .suspend = rt2x00usb_suspend,
2537 .resume = rt2x00usb_resume, 2537 .resume = rt2x00usb_resume,
2538 .reset_resume = rt2x00usb_resume,
2538 .disable_hub_initiated_lpm = 1, 2539 .disable_hub_initiated_lpm = 1,
2539}; 2540};
2540 2541
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 030beb45d8b0..e3ea4b346889 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -673,7 +673,7 @@ static int rtl_usb_start(struct ieee80211_hw *hw)
673 set_hal_start(rtlhal); 673 set_hal_start(rtlhal);
674 674
675 /* Start bulk IN */ 675 /* Start bulk IN */
676 _rtl_usb_receive(hw); 676 err = _rtl_usb_receive(hw);
677 } 677 }
678 678
679 return err; 679 return err;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 4ebfcf3d8a3b..f2d6b78d901d 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -335,21 +335,35 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
335 335
336 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 336 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
337 unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 337 unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
338 unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
338 unsigned long bytes; 339 unsigned long bytes;
340
341 offset &= ~PAGE_MASK;
342
339 while (size > 0) { 343 while (size > 0) {
344 BUG_ON(offset >= PAGE_SIZE);
340 BUG_ON(copy_off > MAX_BUFFER_OFFSET); 345 BUG_ON(copy_off > MAX_BUFFER_OFFSET);
341 346
342 if (start_new_rx_buffer(copy_off, size, 0)) { 347 bytes = PAGE_SIZE - offset;
348
349 if (bytes > size)
350 bytes = size;
351
352 if (start_new_rx_buffer(copy_off, bytes, 0)) {
343 count++; 353 count++;
344 copy_off = 0; 354 copy_off = 0;
345 } 355 }
346 356
347 bytes = size;
348 if (copy_off + bytes > MAX_BUFFER_OFFSET) 357 if (copy_off + bytes > MAX_BUFFER_OFFSET)
349 bytes = MAX_BUFFER_OFFSET - copy_off; 358 bytes = MAX_BUFFER_OFFSET - copy_off;
350 359
351 copy_off += bytes; 360 copy_off += bytes;
361
362 offset += bytes;
352 size -= bytes; 363 size -= bytes;
364
365 if (offset == PAGE_SIZE)
366 offset = 0;
353 } 367 }
354 } 368 }
355 return count; 369 return count;
@@ -403,14 +417,24 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
403 unsigned long bytes; 417 unsigned long bytes;
404 418
405 /* Data must not cross a page boundary. */ 419 /* Data must not cross a page boundary. */
406 BUG_ON(size + offset > PAGE_SIZE); 420 BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
407 421
408 meta = npo->meta + npo->meta_prod - 1; 422 meta = npo->meta + npo->meta_prod - 1;
409 423
424 /* Skip unused frames from start of page */
425 page += offset >> PAGE_SHIFT;
426 offset &= ~PAGE_MASK;
427
410 while (size > 0) { 428 while (size > 0) {
429 BUG_ON(offset >= PAGE_SIZE);
411 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET); 430 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
412 431
413 if (start_new_rx_buffer(npo->copy_off, size, *head)) { 432 bytes = PAGE_SIZE - offset;
433
434 if (bytes > size)
435 bytes = size;
436
437 if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
414 /* 438 /*
415 * Netfront requires there to be some data in the head 439 * Netfront requires there to be some data in the head
416 * buffer. 440 * buffer.
@@ -420,7 +444,6 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
420 meta = get_next_rx_buffer(vif, npo); 444 meta = get_next_rx_buffer(vif, npo);
421 } 445 }
422 446
423 bytes = size;
424 if (npo->copy_off + bytes > MAX_BUFFER_OFFSET) 447 if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
425 bytes = MAX_BUFFER_OFFSET - npo->copy_off; 448 bytes = MAX_BUFFER_OFFSET - npo->copy_off;
426 449
@@ -453,6 +476,13 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
453 offset += bytes; 476 offset += bytes;
454 size -= bytes; 477 size -= bytes;
455 478
479 /* Next frame */
480 if (offset == PAGE_SIZE && size) {
481 BUG_ON(!PageCompound(page));
482 page++;
483 offset = 0;
484 }
485
456 /* Leave a gap for the GSO descriptor. */ 486 /* Leave a gap for the GSO descriptor. */
457 if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix) 487 if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
458 vif->rx.req_cons++; 488 vif->rx.req_cons++;
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 72e496f1e9b0..0125524c08c4 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -37,9 +37,9 @@ struct of_bus {
37 int (*match)(struct device_node *parent); 37 int (*match)(struct device_node *parent);
38 void (*count_cells)(struct device_node *child, 38 void (*count_cells)(struct device_node *child,
39 int *addrc, int *sizec); 39 int *addrc, int *sizec);
40 u64 (*map)(u32 *addr, const __be32 *range, 40 u64 (*map)(__be32 *addr, const __be32 *range,
41 int na, int ns, int pna); 41 int na, int ns, int pna);
42 int (*translate)(u32 *addr, u64 offset, int na); 42 int (*translate)(__be32 *addr, u64 offset, int na);
43 unsigned int (*get_flags)(const __be32 *addr); 43 unsigned int (*get_flags)(const __be32 *addr);
44}; 44};
45 45
@@ -56,7 +56,7 @@ static void of_bus_default_count_cells(struct device_node *dev,
56 *sizec = of_n_size_cells(dev); 56 *sizec = of_n_size_cells(dev);
57} 57}
58 58
59static u64 of_bus_default_map(u32 *addr, const __be32 *range, 59static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
60 int na, int ns, int pna) 60 int na, int ns, int pna)
61{ 61{
62 u64 cp, s, da; 62 u64 cp, s, da;
@@ -82,7 +82,7 @@ static u64 of_bus_default_map(u32 *addr, const __be32 *range,
82 return da - cp; 82 return da - cp;
83} 83}
84 84
85static int of_bus_default_translate(u32 *addr, u64 offset, int na) 85static int of_bus_default_translate(__be32 *addr, u64 offset, int na)
86{ 86{
87 u64 a = of_read_number(addr, na); 87 u64 a = of_read_number(addr, na);
88 memset(addr, 0, na * 4); 88 memset(addr, 0, na * 4);
@@ -138,7 +138,7 @@ static unsigned int of_bus_pci_get_flags(const __be32 *addr)
138 return flags; 138 return flags;
139} 139}
140 140
141static u64 of_bus_pci_map(u32 *addr, const __be32 *range, int na, int ns, 141static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
142 int pna) 142 int pna)
143{ 143{
144 u64 cp, s, da; 144 u64 cp, s, da;
@@ -165,7 +165,7 @@ static u64 of_bus_pci_map(u32 *addr, const __be32 *range, int na, int ns,
165 return da - cp; 165 return da - cp;
166} 166}
167 167
168static int of_bus_pci_translate(u32 *addr, u64 offset, int na) 168static int of_bus_pci_translate(__be32 *addr, u64 offset, int na)
169{ 169{
170 return of_bus_default_translate(addr + 1, offset, na - 1); 170 return of_bus_default_translate(addr + 1, offset, na - 1);
171} 171}
@@ -247,7 +247,7 @@ static void of_bus_isa_count_cells(struct device_node *child,
247 *sizec = 1; 247 *sizec = 1;
248} 248}
249 249
250static u64 of_bus_isa_map(u32 *addr, const __be32 *range, int na, int ns, 250static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns,
251 int pna) 251 int pna)
252{ 252{
253 u64 cp, s, da; 253 u64 cp, s, da;
@@ -270,7 +270,7 @@ static u64 of_bus_isa_map(u32 *addr, const __be32 *range, int na, int ns,
270 return da - cp; 270 return da - cp;
271} 271}
272 272
273static int of_bus_isa_translate(u32 *addr, u64 offset, int na) 273static int of_bus_isa_translate(__be32 *addr, u64 offset, int na)
274{ 274{
275 return of_bus_default_translate(addr + 1, offset, na - 1); 275 return of_bus_default_translate(addr + 1, offset, na - 1);
276} 276}
@@ -338,7 +338,7 @@ static struct of_bus *of_match_bus(struct device_node *np)
338} 338}
339 339
340static int of_translate_one(struct device_node *parent, struct of_bus *bus, 340static int of_translate_one(struct device_node *parent, struct of_bus *bus,
341 struct of_bus *pbus, u32 *addr, 341 struct of_bus *pbus, __be32 *addr,
342 int na, int ns, int pna, const char *rprop) 342 int na, int ns, int pna, const char *rprop)
343{ 343{
344 const __be32 *ranges; 344 const __be32 *ranges;
@@ -409,12 +409,12 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
409 * that can be mapped to a cpu physical address). This is not really specified 409 * that can be mapped to a cpu physical address). This is not really specified
410 * that way, but this is traditionally the way IBM at least do things 410 * that way, but this is traditionally the way IBM at least do things
411 */ 411 */
412u64 __of_translate_address(struct device_node *dev, const __be32 *in_addr, 412static u64 __of_translate_address(struct device_node *dev,
413 const char *rprop) 413 const __be32 *in_addr, const char *rprop)
414{ 414{
415 struct device_node *parent = NULL; 415 struct device_node *parent = NULL;
416 struct of_bus *bus, *pbus; 416 struct of_bus *bus, *pbus;
417 u32 addr[OF_MAX_ADDR_CELLS]; 417 __be32 addr[OF_MAX_ADDR_CELLS];
418 int na, ns, pna, pns; 418 int na, ns, pna, pns;
419 u64 result = OF_BAD_ADDR; 419 u64 result = OF_BAD_ADDR;
420 420
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index a23ec7779997..a3c1c5aae6a9 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -192,11 +192,13 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
192 /* Compare specifiers */ 192 /* Compare specifiers */
193 match = 1; 193 match = 1;
194 for (i = 0; i < addrsize && match; ++i) { 194 for (i = 0; i < addrsize && match; ++i) {
195 u32 mask = imask ? imask[i] : 0xffffffffu; 195 __be32 mask = imask ? imask[i]
196 : cpu_to_be32(0xffffffffu);
196 match = ((addr[i] ^ imap[i]) & mask) == 0; 197 match = ((addr[i] ^ imap[i]) & mask) == 0;
197 } 198 }
198 for (; i < (addrsize + intsize) && match; ++i) { 199 for (; i < (addrsize + intsize) && match; ++i) {
199 u32 mask = imask ? imask[i] : 0xffffffffu; 200 __be32 mask = imask ? imask[i]
201 : cpu_to_be32(0xffffffffu);
200 match = 202 match =
201 ((intspec[i-addrsize] ^ imap[i]) & mask) == 0; 203 ((intspec[i-addrsize] ^ imap[i]) & mask) == 0;
202 } 204 }
@@ -465,7 +467,7 @@ void __init of_irq_init(const struct of_device_id *matches)
465 pr_debug("of_irq_init: init %s @ %p, parent %p\n", 467 pr_debug("of_irq_init: init %s @ %p, parent %p\n",
466 match->compatible, 468 match->compatible,
467 desc->dev, desc->interrupt_parent); 469 desc->dev, desc->interrupt_parent);
468 irq_init_cb = match->data; 470 irq_init_cb = (of_irq_init_cb_t)match->data;
469 ret = irq_init_cb(desc->dev, desc->interrupt_parent); 471 ret = irq_init_cb(desc->dev, desc->interrupt_parent);
470 if (ret) { 472 if (ret) {
471 kfree(desc); 473 kfree(desc);
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 9bdeaf30b17d..b80891b43816 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -76,7 +76,7 @@ void of_device_make_bus_id(struct device *dev)
76{ 76{
77 static atomic_t bus_no_reg_magic; 77 static atomic_t bus_no_reg_magic;
78 struct device_node *node = dev->of_node; 78 struct device_node *node = dev->of_node;
79 const u32 *reg; 79 const __be32 *reg;
80 u64 addr; 80 u64 addr;
81 const __be32 *addrp; 81 const __be32 *addrp;
82 int magic; 82 int magic;
diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c
index b066273b6b4f..7dd879ce514d 100644
--- a/drivers/pcmcia/pxa2xx_sharpsl.c
+++ b/drivers/pcmcia/pxa2xx_sharpsl.c
@@ -194,7 +194,7 @@ static void sharpsl_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
194 sharpsl_pcmcia_init_reset(skt); 194 sharpsl_pcmcia_init_reset(skt);
195} 195}
196 196
197static struct pcmcia_low_level sharpsl_pcmcia_ops __initdata = { 197static struct pcmcia_low_level sharpsl_pcmcia_ops = {
198 .owner = THIS_MODULE, 198 .owner = THIS_MODULE,
199 .hw_init = sharpsl_pcmcia_hw_init, 199 .hw_init = sharpsl_pcmcia_hw_init,
200 .socket_state = sharpsl_pcmcia_socket_state, 200 .socket_state = sharpsl_pcmcia_socket_state,
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 0f1ec9e8ff14..2e39c04fc16b 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1061,8 +1061,10 @@ static int pinctrl_groups_show(struct seq_file *s, void *what)
1061 seq_printf(s, "group: %s\n", gname); 1061 seq_printf(s, "group: %s\n", gname);
1062 for (i = 0; i < num_pins; i++) { 1062 for (i = 0; i < num_pins; i++) {
1063 pname = pin_get_name(pctldev, pins[i]); 1063 pname = pin_get_name(pctldev, pins[i]);
1064 if (WARN_ON(!pname)) 1064 if (WARN_ON(!pname)) {
1065 mutex_unlock(&pinctrl_mutex);
1065 return -EINVAL; 1066 return -EINVAL;
1067 }
1066 seq_printf(s, "pin %d (%s)\n", pins[i], pname); 1068 seq_printf(s, "pin %d (%s)\n", pins[i], pname);
1067 } 1069 }
1068 seq_puts(s, "\n"); 1070 seq_puts(s, "\n");
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index 43f474cdc110..baee2cc46a17 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -537,8 +537,6 @@ static int pinconf_groups_show(struct seq_file *s, void *what)
537 seq_puts(s, "Pin config settings per pin group\n"); 537 seq_puts(s, "Pin config settings per pin group\n");
538 seq_puts(s, "Format: group (name): configs\n"); 538 seq_puts(s, "Format: group (name): configs\n");
539 539
540 mutex_lock(&pinctrl_mutex);
541
542 while (selector < ngroups) { 540 while (selector < ngroups) {
543 const char *gname = pctlops->get_group_name(pctldev, selector); 541 const char *gname = pctlops->get_group_name(pctldev, selector);
544 542
@@ -549,8 +547,6 @@ static int pinconf_groups_show(struct seq_file *s, void *what)
549 selector++; 547 selector++;
550 } 548 }
551 549
552 mutex_unlock(&pinctrl_mutex);
553
554 return 0; 550 return 0;
555} 551}
556 552
diff --git a/drivers/pinctrl/pinctrl-bcm2835.c b/drivers/pinctrl/pinctrl-bcm2835.c
index a4adee633fa9..7e9be18ec2d2 100644
--- a/drivers/pinctrl/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/pinctrl-bcm2835.c
@@ -29,7 +29,6 @@
29#include <linux/irq.h> 29#include <linux/irq.h>
30#include <linux/irqdesc.h> 30#include <linux/irqdesc.h>
31#include <linux/irqdomain.h> 31#include <linux/irqdomain.h>
32#include <linux/irq.h>
33#include <linux/module.h> 32#include <linux/module.h>
34#include <linux/of_address.h> 33#include <linux/of_address.h>
35#include <linux/of.h> 34#include <linux/of.h>
@@ -960,7 +959,7 @@ static int __devinit bcm2835_pinctrl_probe(struct platform_device *pdev)
960 return err; 959 return err;
961 } 960 }
962 961
963 pc->base = devm_request_and_ioremap(&pdev->dev, &iomem); 962 pc->base = devm_request_and_ioremap(dev, &iomem);
964 if (!pc->base) 963 if (!pc->base)
965 return -EADDRNOTAVAIL; 964 return -EADDRNOTAVAIL;
966 965
@@ -1032,7 +1031,7 @@ static int __devinit bcm2835_pinctrl_probe(struct platform_device *pdev)
1032 pc->pctl_dev = pinctrl_register(&bcm2835_pinctrl_desc, dev, pc); 1031 pc->pctl_dev = pinctrl_register(&bcm2835_pinctrl_desc, dev, pc);
1033 if (!pc->pctl_dev) { 1032 if (!pc->pctl_dev) {
1034 gpiochip_remove(&pc->gpio_chip); 1033 gpiochip_remove(&pc->gpio_chip);
1035 return PTR_ERR(pc->pctl_dev); 1034 return -EINVAL;
1036 } 1035 }
1037 1036
1038 pc->gpio_range = bcm2835_pinctrl_gpio_range; 1037 pc->gpio_range = bcm2835_pinctrl_gpio_range;
diff --git a/drivers/pinctrl/pinctrl-nomadik-db8500.c b/drivers/pinctrl/pinctrl-nomadik-db8500.c
index ec6209dd7c39..debaa75b0552 100644
--- a/drivers/pinctrl/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/pinctrl-nomadik-db8500.c
@@ -725,10 +725,10 @@ static const struct nmk_pingroup nmk_db8500_groups[] = {
725 DB8500_PIN_GROUP(spi0_c_1, NMK_GPIO_ALT_C), 725 DB8500_PIN_GROUP(spi0_c_1, NMK_GPIO_ALT_C),
726 DB8500_PIN_GROUP(usbsim_c_2, NMK_GPIO_ALT_C), 726 DB8500_PIN_GROUP(usbsim_c_2, NMK_GPIO_ALT_C),
727 DB8500_PIN_GROUP(i2c3_c_2, NMK_GPIO_ALT_C), 727 DB8500_PIN_GROUP(i2c3_c_2, NMK_GPIO_ALT_C),
728 /* Other alt C1 column, these are still configured as alt C */ 728 /* Other alt C1 column */
729 DB8500_PIN_GROUP(kp_oc1_1, NMK_GPIO_ALT_C), 729 DB8500_PIN_GROUP(kp_oc1_1, NMK_GPIO_ALT_C1),
730 DB8500_PIN_GROUP(spi2_oc1_1, NMK_GPIO_ALT_C), 730 DB8500_PIN_GROUP(spi2_oc1_1, NMK_GPIO_ALT_C1),
731 DB8500_PIN_GROUP(spi2_oc1_2, NMK_GPIO_ALT_C), 731 DB8500_PIN_GROUP(spi2_oc1_2, NMK_GPIO_ALT_C1),
732}; 732};
733 733
734/* We use this macro to define the groups applicable to a function */ 734/* We use this macro to define the groups applicable to a function */
@@ -860,6 +860,284 @@ static const struct nmk_function nmk_db8500_functions[] = {
860 FUNCTION(spi2), 860 FUNCTION(spi2),
861}; 861};
862 862
863static const struct prcm_gpiocr_altcx_pin_desc db8500_altcx_pins[] = {
864 PRCM_GPIOCR_ALTCX(23, true, PRCM_IDX_GPIOCR1, 9, /* STMAPE_CLK_a */
865 true, PRCM_IDX_GPIOCR1, 7, /* SBAG_CLK_a */
866 false, 0, 0,
867 false, 0, 0
868 ),
869 PRCM_GPIOCR_ALTCX(24, true, PRCM_IDX_GPIOCR1, 9, /* STMAPE or U2_RXD ??? */
870 true, PRCM_IDX_GPIOCR1, 7, /* SBAG_VAL_a */
871 true, PRCM_IDX_GPIOCR1, 10, /* STM_MOD_CMD0 */
872 false, 0, 0
873 ),
874 PRCM_GPIOCR_ALTCX(25, true, PRCM_IDX_GPIOCR1, 9, /* STMAPE_DAT_a[0] */
875 true, PRCM_IDX_GPIOCR1, 7, /* SBAG_D_a[0] */
876 false, 0, 0,
877 false, 0, 0
878 ),
879 PRCM_GPIOCR_ALTCX(26, true, PRCM_IDX_GPIOCR1, 9, /* STMAPE_DAT_a[1] */
880 true, PRCM_IDX_GPIOCR1, 7, /* SBAG_D_a[1] */
881 false, 0, 0,
882 false, 0, 0
883 ),
884 PRCM_GPIOCR_ALTCX(27, true, PRCM_IDX_GPIOCR1, 9, /* STMAPE_DAT_a[2] */
885 true, PRCM_IDX_GPIOCR1, 7, /* SBAG_D_a[2] */
886 false, 0, 0,
887 false, 0, 0
888 ),
889 PRCM_GPIOCR_ALTCX(28, true, PRCM_IDX_GPIOCR1, 9, /* STMAPE_DAT_a[3] */
890 true, PRCM_IDX_GPIOCR1, 7, /* SBAG_D_a[3] */
891 false, 0, 0,
892 false, 0, 0
893 ),
894 PRCM_GPIOCR_ALTCX(29, false, 0, 0,
895 false, 0, 0,
896 true, PRCM_IDX_GPIOCR1, 10, /* STM_MOD_CMD0 */
897 false, 0, 0
898 ),
899 PRCM_GPIOCR_ALTCX(30, false, 0, 0,
900 false, 0, 0,
901 true, PRCM_IDX_GPIOCR1, 10, /* STM_MOD_CMD0 */
902 false, 0, 0
903 ),
904 PRCM_GPIOCR_ALTCX(31, false, 0, 0,
905 false, 0, 0,
906 true, PRCM_IDX_GPIOCR1, 10, /* STM_MOD_CMD0 */
907 false, 0, 0
908 ),
909 PRCM_GPIOCR_ALTCX(32, false, 0, 0,
910 false, 0, 0,
911 true, PRCM_IDX_GPIOCR1, 10, /* STM_MOD_CMD0 */
912 false, 0, 0
913 ),
914 PRCM_GPIOCR_ALTCX(68, true, PRCM_IDX_GPIOCR1, 18, /* REMAP_SELECT_ON */
915 false, 0, 0,
916 false, 0, 0,
917 false, 0, 0
918 ),
919 PRCM_GPIOCR_ALTCX(69, true, PRCM_IDX_GPIOCR1, 18, /* REMAP_SELECT_ON */
920 false, 0, 0,
921 false, 0, 0,
922 false, 0, 0
923 ),
924 PRCM_GPIOCR_ALTCX(70, true, PRCM_IDX_GPIOCR1, 5, /* PTM_A9_D23 */
925 true, PRCM_IDX_GPIOCR2, 2, /* DBG_ETM_R4_CMD0 */
926 true, PRCM_IDX_GPIOCR1, 11, /* STM_MOD_CMD1 */
927 true, PRCM_IDX_GPIOCR1, 8 /* SBAG_CLK */
928 ),
929 PRCM_GPIOCR_ALTCX(71, true, PRCM_IDX_GPIOCR1, 5, /* PTM_A9_D22 */
930 true, PRCM_IDX_GPIOCR2, 2, /* DBG_ETM_R4_CMD0 */
931 true, PRCM_IDX_GPIOCR1, 11, /* STM_MOD_CMD1 */
932 true, PRCM_IDX_GPIOCR1, 8 /* SBAG_D3 */
933 ),
934 PRCM_GPIOCR_ALTCX(72, true, PRCM_IDX_GPIOCR1, 5, /* PTM_A9_D21 */
935 true, PRCM_IDX_GPIOCR2, 2, /* DBG_ETM_R4_CMD0 */
936 true, PRCM_IDX_GPIOCR1, 11, /* STM_MOD_CMD1 */
937 true, PRCM_IDX_GPIOCR1, 8 /* SBAG_D2 */
938 ),
939 PRCM_GPIOCR_ALTCX(73, true, PRCM_IDX_GPIOCR1, 5, /* PTM_A9_D20 */
940 true, PRCM_IDX_GPIOCR2, 2, /* DBG_ETM_R4_CMD0 */
941 true, PRCM_IDX_GPIOCR1, 11, /* STM_MOD_CMD1 */
942 true, PRCM_IDX_GPIOCR1, 8 /* SBAG_D1 */
943 ),
944 PRCM_GPIOCR_ALTCX(74, true, PRCM_IDX_GPIOCR1, 5, /* PTM_A9_D19 */
945 true, PRCM_IDX_GPIOCR2, 2, /* DBG_ETM_R4_CMD0 */
946 true, PRCM_IDX_GPIOCR1, 11, /* STM_MOD_CMD1 */
947 true, PRCM_IDX_GPIOCR1, 8 /* SBAG_D0 */
948 ),
949 PRCM_GPIOCR_ALTCX(75, true, PRCM_IDX_GPIOCR1, 5, /* PTM_A9_D18 */
950 true, PRCM_IDX_GPIOCR2, 2, /* DBG_ETM_R4_CMD0 */
951 true, PRCM_IDX_GPIOCR1, 0, /* DBG_UARTMOD_CMD0 */
952 false, 0, 0
953 ),
954 PRCM_GPIOCR_ALTCX(76, true, PRCM_IDX_GPIOCR1, 5, /* PTM_A9_D17 */
955 true, PRCM_IDX_GPIOCR2, 2, /* DBG_ETM_R4_CMD0 */
956 true, PRCM_IDX_GPIOCR1, 0, /* DBG_UARTMOD_CMD0 */
957 false, 0, 0
958 ),
959 PRCM_GPIOCR_ALTCX(77, true, PRCM_IDX_GPIOCR1, 5, /* PTM_A9_D16 */
960 true, PRCM_IDX_GPIOCR2, 2, /* DBG_ETM_R4_CMD0 */
961 false, 0, 0,
962 true, PRCM_IDX_GPIOCR1, 8 /* SBAG_VAL */
963 ),
964 PRCM_GPIOCR_ALTCX(86, true, PRCM_IDX_GPIOCR1, 12, /* KP_O3 */
965 false, 0, 0,
966 false, 0, 0,
967 false, 0, 0
968 ),
969 PRCM_GPIOCR_ALTCX(87, true, PRCM_IDX_GPIOCR1, 12, /* KP_O2 */
970 false, 0, 0,
971 false, 0, 0,
972 false, 0, 0
973 ),
974 PRCM_GPIOCR_ALTCX(88, true, PRCM_IDX_GPIOCR1, 12, /* KP_I3 */
975 false, 0, 0,
976 false, 0, 0,
977 false, 0, 0
978 ),
979 PRCM_GPIOCR_ALTCX(89, true, PRCM_IDX_GPIOCR1, 12, /* KP_I2 */
980 false, 0, 0,
981 false, 0, 0,
982 false, 0, 0
983 ),
984 PRCM_GPIOCR_ALTCX(90, true, PRCM_IDX_GPIOCR1, 12, /* KP_O1 */
985 false, 0, 0,
986 false, 0, 0,
987 false, 0, 0
988 ),
989 PRCM_GPIOCR_ALTCX(91, true, PRCM_IDX_GPIOCR1, 12, /* KP_O0 */
990 false, 0, 0,
991 false, 0, 0,
992 false, 0, 0
993 ),
994 PRCM_GPIOCR_ALTCX(92, true, PRCM_IDX_GPIOCR1, 12, /* KP_I1 */
995 false, 0, 0,
996 false, 0, 0,
997 false, 0, 0
998 ),
999 PRCM_GPIOCR_ALTCX(93, true, PRCM_IDX_GPIOCR1, 12, /* KP_I0 */
1000 false, 0, 0,
1001 false, 0, 0,
1002 false, 0, 0
1003 ),
1004 PRCM_GPIOCR_ALTCX(96, true, PRCM_IDX_GPIOCR2, 3, /* RF_INT */
1005 false, 0, 0,
1006 false, 0, 0,
1007 false, 0, 0
1008 ),
1009 PRCM_GPIOCR_ALTCX(97, true, PRCM_IDX_GPIOCR2, 1, /* RF_CTRL */
1010 false, 0, 0,
1011 false, 0, 0,
1012 false, 0, 0
1013 ),
1014 PRCM_GPIOCR_ALTCX(151, false, 0, 0,
1015 true, PRCM_IDX_GPIOCR1, 6, /* PTM_A9_CTL */
1016 true, PRCM_IDX_GPIOCR1, 15, /* DBG_ETM_R4_CMD1*/
1017 true, PRCM_IDX_GPIOCR1, 25 /* HW_OBS17 */
1018 ),
1019 PRCM_GPIOCR_ALTCX(152, true, PRCM_IDX_GPIOCR1, 4, /* Hx_CLK */
1020 true, PRCM_IDX_GPIOCR1, 6, /* PTM_A9_CLK */
1021 true, PRCM_IDX_GPIOCR1, 15, /* DBG_ETM_R4_CMD1*/
1022 true, PRCM_IDX_GPIOCR1, 25 /* HW_OBS16 */
1023 ),
1024 PRCM_GPIOCR_ALTCX(153, true, PRCM_IDX_GPIOCR1, 1, /* UARTMOD_CMD1 */
1025 true, PRCM_IDX_GPIOCR1, 14, /* PTM_A9_D15 */
1026 true, PRCM_IDX_GPIOCR1, 19, /* DBG_ETM_R4_CMD2 */
1027 true, PRCM_IDX_GPIOCR1, 25 /* HW_OBS15 */
1028 ),
1029 PRCM_GPIOCR_ALTCX(154, true, PRCM_IDX_GPIOCR1, 1, /* UARTMOD_CMD1 */
1030 true, PRCM_IDX_GPIOCR1, 14, /* PTM_A9_D14 */
1031 true, PRCM_IDX_GPIOCR1, 19, /* DBG_ETM_R4_CMD2 */
1032 true, PRCM_IDX_GPIOCR1, 25 /* HW_OBS14 */
1033 ),
1034 PRCM_GPIOCR_ALTCX(155, true, PRCM_IDX_GPIOCR1, 13, /* STM_MOD_CMD2 */
1035 true, PRCM_IDX_GPIOCR1, 14, /* PTM_A9_D13 */
1036 true, PRCM_IDX_GPIOCR1, 19, /* DBG_ETM_R4_CMD2 */
1037 true, PRCM_IDX_GPIOCR1, 25 /* HW_OBS13 */
1038 ),
1039 PRCM_GPIOCR_ALTCX(156, true, PRCM_IDX_GPIOCR1, 13, /* STM_MOD_CMD2 */
1040 true, PRCM_IDX_GPIOCR1, 14, /* PTM_A9_D12 */
1041 true, PRCM_IDX_GPIOCR1, 19, /* DBG_ETM_R4_CMD2 */
1042 true, PRCM_IDX_GPIOCR1, 25 /* HW_OBS12 */
1043 ),
1044 PRCM_GPIOCR_ALTCX(157, true, PRCM_IDX_GPIOCR1, 13, /* STM_MOD_CMD2 */
1045 true, PRCM_IDX_GPIOCR1, 14, /* PTM_A9_D11 */
1046 true, PRCM_IDX_GPIOCR1, 19, /* DBG_ETM_R4_CMD2 */
1047 true, PRCM_IDX_GPIOCR1, 25 /* HW_OBS11 */
1048 ),
1049 PRCM_GPIOCR_ALTCX(158, true, PRCM_IDX_GPIOCR1, 13, /* STM_MOD_CMD2 */
1050 true, PRCM_IDX_GPIOCR1, 14, /* PTM_A9_D10 */
1051 true, PRCM_IDX_GPIOCR1, 19, /* DBG_ETM_R4_CMD2 */
1052 true, PRCM_IDX_GPIOCR1, 25 /* HW_OBS10 */
1053 ),
1054 PRCM_GPIOCR_ALTCX(159, true, PRCM_IDX_GPIOCR1, 13, /* STM_MOD_CMD2 */
1055 true, PRCM_IDX_GPIOCR1, 14, /* PTM_A9_D9 */
1056 true, PRCM_IDX_GPIOCR1, 19, /* DBG_ETM_R4_CMD2 */
1057 true, PRCM_IDX_GPIOCR1, 25 /* HW_OBS9 */
1058 ),
1059 PRCM_GPIOCR_ALTCX(160, false, 0, 0,
1060 true, PRCM_IDX_GPIOCR1, 14, /* PTM_A9_D8 */
1061 true, PRCM_IDX_GPIOCR1, 19, /* DBG_ETM_R4_CMD2 */
1062 true, PRCM_IDX_GPIOCR1, 25 /* HW_OBS8 */
1063 ),
1064 PRCM_GPIOCR_ALTCX(161, true, PRCM_IDX_GPIOCR1, 4, /* Hx_GPIO7 */
1065 true, PRCM_IDX_GPIOCR1, 6, /* PTM_A9_D7 */
1066 true, PRCM_IDX_GPIOCR1, 15, /* DBG_ETM_R4_CMD1*/
1067 true, PRCM_IDX_GPIOCR1, 24 /* HW_OBS7 */
1068 ),
1069 PRCM_GPIOCR_ALTCX(162, true, PRCM_IDX_GPIOCR1, 4, /* Hx_GPIO6 */
1070 true, PRCM_IDX_GPIOCR1, 6, /* PTM_A9_D6 */
1071 true, PRCM_IDX_GPIOCR1, 15, /* DBG_ETM_R4_CMD1*/
1072 true, PRCM_IDX_GPIOCR1, 24 /* HW_OBS6 */
1073 ),
1074 PRCM_GPIOCR_ALTCX(163, true, PRCM_IDX_GPIOCR1, 4, /* Hx_GPIO5 */
1075 true, PRCM_IDX_GPIOCR1, 6, /* PTM_A9_D5 */
1076 true, PRCM_IDX_GPIOCR1, 15, /* DBG_ETM_R4_CMD1*/
1077 true, PRCM_IDX_GPIOCR1, 24 /* HW_OBS5 */
1078 ),
1079 PRCM_GPIOCR_ALTCX(164, true, PRCM_IDX_GPIOCR1, 4, /* Hx_GPIO4 */
1080 true, PRCM_IDX_GPIOCR1, 6, /* PTM_A9_D4 */
1081 true, PRCM_IDX_GPIOCR1, 15, /* DBG_ETM_R4_CMD1*/
1082 true, PRCM_IDX_GPIOCR1, 24 /* HW_OBS4 */
1083 ),
1084 PRCM_GPIOCR_ALTCX(165, true, PRCM_IDX_GPIOCR1, 4, /* Hx_GPIO3 */
1085 true, PRCM_IDX_GPIOCR1, 6, /* PTM_A9_D3 */
1086 true, PRCM_IDX_GPIOCR1, 15, /* DBG_ETM_R4_CMD1*/
1087 true, PRCM_IDX_GPIOCR1, 24 /* HW_OBS3 */
1088 ),
1089 PRCM_GPIOCR_ALTCX(166, true, PRCM_IDX_GPIOCR1, 4, /* Hx_GPIO2 */
1090 true, PRCM_IDX_GPIOCR1, 6, /* PTM_A9_D2 */
1091 true, PRCM_IDX_GPIOCR1, 15, /* DBG_ETM_R4_CMD1*/
1092 true, PRCM_IDX_GPIOCR1, 24 /* HW_OBS2 */
1093 ),
1094 PRCM_GPIOCR_ALTCX(167, true, PRCM_IDX_GPIOCR1, 4, /* Hx_GPIO1 */
1095 true, PRCM_IDX_GPIOCR1, 6, /* PTM_A9_D1 */
1096 true, PRCM_IDX_GPIOCR1, 15, /* DBG_ETM_R4_CMD1*/
1097 true, PRCM_IDX_GPIOCR1, 24 /* HW_OBS1 */
1098 ),
1099 PRCM_GPIOCR_ALTCX(168, true, PRCM_IDX_GPIOCR1, 4, /* Hx_GPIO0 */
1100 true, PRCM_IDX_GPIOCR1, 6, /* PTM_A9_D0 */
1101 true, PRCM_IDX_GPIOCR1, 15, /* DBG_ETM_R4_CMD1*/
1102 true, PRCM_IDX_GPIOCR1, 24 /* HW_OBS0 */
1103 ),
1104 PRCM_GPIOCR_ALTCX(170, true, PRCM_IDX_GPIOCR2, 2, /* RF_INT */
1105 false, 0, 0,
1106 false, 0, 0,
1107 false, 0, 0
1108 ),
1109 PRCM_GPIOCR_ALTCX(171, true, PRCM_IDX_GPIOCR2, 0, /* RF_CTRL */
1110 false, 0, 0,
1111 false, 0, 0,
1112 false, 0, 0
1113 ),
1114 PRCM_GPIOCR_ALTCX(215, true, PRCM_IDX_GPIOCR1, 23, /* SPI2_TXD */
1115 false, 0, 0,
1116 false, 0, 0,
1117 false, 0, 0
1118 ),
1119 PRCM_GPIOCR_ALTCX(216, true, PRCM_IDX_GPIOCR1, 23, /* SPI2_FRM */
1120 false, 0, 0,
1121 false, 0, 0,
1122 false, 0, 0
1123 ),
1124 PRCM_GPIOCR_ALTCX(217, true, PRCM_IDX_GPIOCR1, 23, /* SPI2_CLK */
1125 false, 0, 0,
1126 false, 0, 0,
1127 false, 0, 0
1128 ),
1129 PRCM_GPIOCR_ALTCX(218, true, PRCM_IDX_GPIOCR1, 23, /* SPI2_RXD */
1130 false, 0, 0,
1131 false, 0, 0,
1132 false, 0, 0
1133 ),
1134};
1135
1136static const u16 db8500_prcm_gpiocr_regs[] = {
1137 [PRCM_IDX_GPIOCR1] = 0x138,
1138 [PRCM_IDX_GPIOCR2] = 0x574,
1139};
1140
863static const struct nmk_pinctrl_soc_data nmk_db8500_soc = { 1141static const struct nmk_pinctrl_soc_data nmk_db8500_soc = {
864 .gpio_ranges = nmk_db8500_ranges, 1142 .gpio_ranges = nmk_db8500_ranges,
865 .gpio_num_ranges = ARRAY_SIZE(nmk_db8500_ranges), 1143 .gpio_num_ranges = ARRAY_SIZE(nmk_db8500_ranges),
@@ -869,6 +1147,9 @@ static const struct nmk_pinctrl_soc_data nmk_db8500_soc = {
869 .nfunctions = ARRAY_SIZE(nmk_db8500_functions), 1147 .nfunctions = ARRAY_SIZE(nmk_db8500_functions),
870 .groups = nmk_db8500_groups, 1148 .groups = nmk_db8500_groups,
871 .ngroups = ARRAY_SIZE(nmk_db8500_groups), 1149 .ngroups = ARRAY_SIZE(nmk_db8500_groups),
1150 .altcx_pins = db8500_altcx_pins,
1151 .npins_altcx = ARRAY_SIZE(db8500_altcx_pins),
1152 .prcm_gpiocr_registers = db8500_prcm_gpiocr_regs,
872}; 1153};
873 1154
874void __devinit 1155void __devinit
diff --git a/drivers/pinctrl/pinctrl-nomadik-db8540.c b/drivers/pinctrl/pinctrl-nomadik-db8540.c
index 3daf665c84c3..52fc30181f7e 100644
--- a/drivers/pinctrl/pinctrl-nomadik-db8540.c
+++ b/drivers/pinctrl/pinctrl-nomadik-db8540.c
@@ -778,50 +778,50 @@ static const struct nmk_pingroup nmk_db8540_groups[] = {
778 DB8540_PIN_GROUP(spi0_c_1, NMK_GPIO_ALT_C), 778 DB8540_PIN_GROUP(spi0_c_1, NMK_GPIO_ALT_C),
779 DB8540_PIN_GROUP(i2c3_c_1, NMK_GPIO_ALT_C), 779 DB8540_PIN_GROUP(i2c3_c_1, NMK_GPIO_ALT_C),
780 780
781 /* Other alt C1 column, these are still configured as alt C */ 781 /* Other alt C1 column */
782 DB8540_PIN_GROUP(spi3_oc1_1, NMK_GPIO_ALT_C), 782 DB8540_PIN_GROUP(spi3_oc1_1, NMK_GPIO_ALT_C1),
783 DB8540_PIN_GROUP(stmape_oc1_1, NMK_GPIO_ALT_C), 783 DB8540_PIN_GROUP(stmape_oc1_1, NMK_GPIO_ALT_C1),
784 DB8540_PIN_GROUP(u2_oc1_1, NMK_GPIO_ALT_C), 784 DB8540_PIN_GROUP(u2_oc1_1, NMK_GPIO_ALT_C1),
785 DB8540_PIN_GROUP(remap0_oc1_1, NMK_GPIO_ALT_C), 785 DB8540_PIN_GROUP(remap0_oc1_1, NMK_GPIO_ALT_C1),
786 DB8540_PIN_GROUP(remap1_oc1_1, NMK_GPIO_ALT_C), 786 DB8540_PIN_GROUP(remap1_oc1_1, NMK_GPIO_ALT_C1),
787 DB8540_PIN_GROUP(modobsrefclk_oc1_1, NMK_GPIO_ALT_C), 787 DB8540_PIN_GROUP(modobsrefclk_oc1_1, NMK_GPIO_ALT_C1),
788 DB8540_PIN_GROUP(modobspwrctrl_oc1_1, NMK_GPIO_ALT_C), 788 DB8540_PIN_GROUP(modobspwrctrl_oc1_1, NMK_GPIO_ALT_C1),
789 DB8540_PIN_GROUP(modobsclkout_oc1_1, NMK_GPIO_ALT_C), 789 DB8540_PIN_GROUP(modobsclkout_oc1_1, NMK_GPIO_ALT_C1),
790 DB8540_PIN_GROUP(moduart1_oc1_1, NMK_GPIO_ALT_C), 790 DB8540_PIN_GROUP(moduart1_oc1_1, NMK_GPIO_ALT_C1),
791 DB8540_PIN_GROUP(modprcmudbg_oc1_1, NMK_GPIO_ALT_C), 791 DB8540_PIN_GROUP(modprcmudbg_oc1_1, NMK_GPIO_ALT_C1),
792 DB8540_PIN_GROUP(modobsresout_oc1_1, NMK_GPIO_ALT_C), 792 DB8540_PIN_GROUP(modobsresout_oc1_1, NMK_GPIO_ALT_C1),
793 DB8540_PIN_GROUP(modaccgpo_oc1_1, NMK_GPIO_ALT_C), 793 DB8540_PIN_GROUP(modaccgpo_oc1_1, NMK_GPIO_ALT_C1),
794 DB8540_PIN_GROUP(kp_oc1_1, NMK_GPIO_ALT_C), 794 DB8540_PIN_GROUP(kp_oc1_1, NMK_GPIO_ALT_C1),
795 DB8540_PIN_GROUP(modxmip_oc1_1, NMK_GPIO_ALT_C), 795 DB8540_PIN_GROUP(modxmip_oc1_1, NMK_GPIO_ALT_C1),
796 DB8540_PIN_GROUP(i2c6_oc1_1, NMK_GPIO_ALT_C), 796 DB8540_PIN_GROUP(i2c6_oc1_1, NMK_GPIO_ALT_C1),
797 DB8540_PIN_GROUP(u2txrx_oc1_1, NMK_GPIO_ALT_C), 797 DB8540_PIN_GROUP(u2txrx_oc1_1, NMK_GPIO_ALT_C1),
798 DB8540_PIN_GROUP(u2ctsrts_oc1_1, NMK_GPIO_ALT_C), 798 DB8540_PIN_GROUP(u2ctsrts_oc1_1, NMK_GPIO_ALT_C1),
799 799
800 /* Other alt C2 column, these are still configured as alt C */ 800 /* Other alt C2 column */
801 DB8540_PIN_GROUP(sbag_oc2_1, NMK_GPIO_ALT_C), 801 DB8540_PIN_GROUP(sbag_oc2_1, NMK_GPIO_ALT_C2),
802 DB8540_PIN_GROUP(hxclk_oc2_1, NMK_GPIO_ALT_C), 802 DB8540_PIN_GROUP(hxclk_oc2_1, NMK_GPIO_ALT_C2),
803 DB8540_PIN_GROUP(modaccuart_oc2_1, NMK_GPIO_ALT_C), 803 DB8540_PIN_GROUP(modaccuart_oc2_1, NMK_GPIO_ALT_C2),
804 DB8540_PIN_GROUP(stmmod_oc2_1, NMK_GPIO_ALT_C), 804 DB8540_PIN_GROUP(stmmod_oc2_1, NMK_GPIO_ALT_C2),
805 DB8540_PIN_GROUP(moduartstmmux_oc2_1, NMK_GPIO_ALT_C), 805 DB8540_PIN_GROUP(moduartstmmux_oc2_1, NMK_GPIO_ALT_C2),
806 DB8540_PIN_GROUP(hxgpio_oc2_1, NMK_GPIO_ALT_C), 806 DB8540_PIN_GROUP(hxgpio_oc2_1, NMK_GPIO_ALT_C2),
807 DB8540_PIN_GROUP(sbag_oc2_2, NMK_GPIO_ALT_C), 807 DB8540_PIN_GROUP(sbag_oc2_2, NMK_GPIO_ALT_C2),
808 DB8540_PIN_GROUP(modobsservice_oc2_1, NMK_GPIO_ALT_C), 808 DB8540_PIN_GROUP(modobsservice_oc2_1, NMK_GPIO_ALT_C2),
809 DB8540_PIN_GROUP(moduart0_oc2_1, NMK_GPIO_ALT_C), 809 DB8540_PIN_GROUP(moduart0_oc2_1, NMK_GPIO_ALT_C2),
810 DB8540_PIN_GROUP(stmape_oc2_1, NMK_GPIO_ALT_C), 810 DB8540_PIN_GROUP(stmape_oc2_1, NMK_GPIO_ALT_C2),
811 DB8540_PIN_GROUP(u2_oc2_1, NMK_GPIO_ALT_C), 811 DB8540_PIN_GROUP(u2_oc2_1, NMK_GPIO_ALT_C2),
812 DB8540_PIN_GROUP(modxmip_oc2_1, NMK_GPIO_ALT_C), 812 DB8540_PIN_GROUP(modxmip_oc2_1, NMK_GPIO_ALT_C2),
813 813
814 /* Other alt C3 column, these are still configured as alt C */ 814 /* Other alt C3 column */
815 DB8540_PIN_GROUP(modaccgpo_oc3_1, NMK_GPIO_ALT_C), 815 DB8540_PIN_GROUP(modaccgpo_oc3_1, NMK_GPIO_ALT_C3),
816 DB8540_PIN_GROUP(tpui_oc3_1, NMK_GPIO_ALT_C), 816 DB8540_PIN_GROUP(tpui_oc3_1, NMK_GPIO_ALT_C3),
817 817
818 /* Other alt C4 column, these are still configured as alt C */ 818 /* Other alt C4 column */
819 DB8540_PIN_GROUP(hwobs_oc4_1, NMK_GPIO_ALT_C), 819 DB8540_PIN_GROUP(hwobs_oc4_1, NMK_GPIO_ALT_C4),
820 DB8540_PIN_GROUP(moduart1txrx_oc4_1, NMK_GPIO_ALT_C), 820 DB8540_PIN_GROUP(moduart1txrx_oc4_1, NMK_GPIO_ALT_C4),
821 DB8540_PIN_GROUP(moduart1rtscts_oc4_1, NMK_GPIO_ALT_C), 821 DB8540_PIN_GROUP(moduart1rtscts_oc4_1, NMK_GPIO_ALT_C4),
822 DB8540_PIN_GROUP(modaccuarttxrx_oc4_1, NMK_GPIO_ALT_C), 822 DB8540_PIN_GROUP(modaccuarttxrx_oc4_1, NMK_GPIO_ALT_C4),
823 DB8540_PIN_GROUP(modaccuartrtscts_oc4_1, NMK_GPIO_ALT_C), 823 DB8540_PIN_GROUP(modaccuartrtscts_oc4_1, NMK_GPIO_ALT_C4),
824 DB8540_PIN_GROUP(stmmod_oc4_1, NMK_GPIO_ALT_C), 824 DB8540_PIN_GROUP(stmmod_oc4_1, NMK_GPIO_ALT_C4),
825 825
826}; 826};
827 827
@@ -981,6 +981,265 @@ static const struct nmk_function nmk_db8540_functions[] = {
981 FUNCTION(usb) 981 FUNCTION(usb)
982}; 982};
983 983
984static const struct prcm_gpiocr_altcx_pin_desc db8540_altcx_pins[] = {
985 PRCM_GPIOCR_ALTCX(8, true, PRCM_IDX_GPIOCR1, 20, /* SPI3_CLK */
986 false, 0, 0,
987 false, 0, 0,
988 false, 0, 0
989 ),
990 PRCM_GPIOCR_ALTCX(9, true, PRCM_IDX_GPIOCR1, 20, /* SPI3_RXD */
991 false, 0, 0,
992 false, 0, 0,
993 false, 0, 0
994 ),
995 PRCM_GPIOCR_ALTCX(10, true, PRCM_IDX_GPIOCR1, 20, /* SPI3_FRM */
996 false, 0, 0,
997 false, 0, 0,
998 false, 0, 0
999 ),
1000 PRCM_GPIOCR_ALTCX(11, true, PRCM_IDX_GPIOCR1, 20, /* SPI3_TXD */
1001 false, 0, 0,
1002 false, 0, 0,
1003 false, 0, 0
1004 ),
1005 PRCM_GPIOCR_ALTCX(23, true, PRCM_IDX_GPIOCR1, 9, /* STMAPE_CLK_a */
1006 true, PRCM_IDX_GPIOCR2, 10, /* SBAG_CLK_a */
1007 false, 0, 0,
1008 false, 0, 0
1009 ),
1010 PRCM_GPIOCR_ALTCX(24, true, PRCM_IDX_GPIOCR3, 30, /* U2_RXD_g */
1011 true, PRCM_IDX_GPIOCR2, 10, /* SBAG_VAL_a */
1012 false, 0, 0,
1013 false, 0, 0
1014 ),
1015 PRCM_GPIOCR_ALTCX(25, true, PRCM_IDX_GPIOCR1, 9, /* STMAPE_DAT_a[0] */
1016 true, PRCM_IDX_GPIOCR2, 10, /* SBAG_D_a[0] */
1017 false, 0, 0,
1018 false, 0, 0
1019 ),
1020 PRCM_GPIOCR_ALTCX(26, true, PRCM_IDX_GPIOCR1, 9, /* STMAPE_DAT_a[1] */
1021 true, PRCM_IDX_GPIOCR2, 10, /* SBAG_D_a[1] */
1022 false, 0, 0,
1023 false, 0, 0
1024 ),
1025 PRCM_GPIOCR_ALTCX(27, true, PRCM_IDX_GPIOCR1, 9, /* STMAPE_DAT_a[2] */
1026 true, PRCM_IDX_GPIOCR2, 10, /* SBAG_D_a[2] */
1027 false, 0, 0,
1028 false, 0, 0
1029 ),
1030 PRCM_GPIOCR_ALTCX(28, true, PRCM_IDX_GPIOCR1, 9, /* STMAPE_DAT_a[3] */
1031 true, PRCM_IDX_GPIOCR2, 10, /* SBAG_D_a[3] */
1032 false, 0, 0,
1033 false, 0, 0
1034 ),
1035 PRCM_GPIOCR_ALTCX(64, true, PRCM_IDX_GPIOCR1, 15, /* MODOBS_REFCLK_REQ */
1036 false, 0, 0,
1037 true, PRCM_IDX_GPIOCR1, 2, /* TPIU_CTL */
1038 true, PRCM_IDX_GPIOCR2, 23 /* HW_OBS_APE_PRCMU[17] */
1039 ),
1040 PRCM_GPIOCR_ALTCX(65, true, PRCM_IDX_GPIOCR1, 19, /* MODOBS_PWRCTRL0 */
1041 true, PRCM_IDX_GPIOCR1, 24, /* Hx_CLK */
1042 true, PRCM_IDX_GPIOCR1, 2, /* TPIU_CLK */
1043 true, PRCM_IDX_GPIOCR2, 24 /* HW_OBS_APE_PRCMU[16] */
1044 ),
1045 PRCM_GPIOCR_ALTCX(66, true, PRCM_IDX_GPIOCR1, 15, /* MODOBS_CLKOUT1 */
1046 false, 0, 0,
1047 true, PRCM_IDX_GPIOCR1, 2, /* TPIU_D[15] */
1048 true, PRCM_IDX_GPIOCR2, 25 /* HW_OBS_APE_PRCMU[15] */
1049 ),
1050 PRCM_GPIOCR_ALTCX(67, true, PRCM_IDX_GPIOCR1, 1, /* MODUART1_TXD_a */
1051 true, PRCM_IDX_GPIOCR1, 6, /* MODACCUART_TXD_a */
1052 true, PRCM_IDX_GPIOCR1, 2, /* TPIU_D[14] */
1053 true, PRCM_IDX_GPIOCR2, 26 /* HW_OBS_APE_PRCMU[14] */
1054 ),
1055 PRCM_GPIOCR_ALTCX(70, true, PRCM_IDX_GPIOCR3, 6, /* MOD_PRCMU_DEBUG[17] */
1056 true, PRCM_IDX_GPIOCR1, 10, /* STMMOD_CLK_b */
1057 true, PRCM_IDX_GPIOCR1, 2, /* TPIU_D[13] */
1058 true, PRCM_IDX_GPIOCR2, 27 /* HW_OBS_APE_PRCMU[13] */
1059 ),
1060 PRCM_GPIOCR_ALTCX(71, true, PRCM_IDX_GPIOCR3, 6, /* MOD_PRCMU_DEBUG[16] */
1061 true, PRCM_IDX_GPIOCR1, 10, /* STMMOD_DAT_b[3] */
1062 true, PRCM_IDX_GPIOCR1, 2, /* TPIU_D[12] */
1063 true, PRCM_IDX_GPIOCR2, 27 /* HW_OBS_APE_PRCMU[12] */
1064 ),
1065 PRCM_GPIOCR_ALTCX(72, true, PRCM_IDX_GPIOCR3, 6, /* MOD_PRCMU_DEBUG[15] */
1066 true, PRCM_IDX_GPIOCR1, 10, /* STMMOD_DAT_b[2] */
1067 true, PRCM_IDX_GPIOCR1, 2, /* TPIU_D[11] */
1068 true, PRCM_IDX_GPIOCR2, 27 /* HW_OBS_APE_PRCMU[11] */
1069 ),
1070 PRCM_GPIOCR_ALTCX(73, true, PRCM_IDX_GPIOCR3, 6, /* MOD_PRCMU_DEBUG[14] */
1071 true, PRCM_IDX_GPIOCR1, 10, /* STMMOD_DAT_b[1] */
1072 true, PRCM_IDX_GPIOCR1, 2, /* TPIU_D[10] */
1073 true, PRCM_IDX_GPIOCR2, 27 /* HW_OBS_APE_PRCMU[10] */
1074 ),
1075 PRCM_GPIOCR_ALTCX(74, true, PRCM_IDX_GPIOCR3, 6, /* MOD_PRCMU_DEBUG[13] */
1076 true, PRCM_IDX_GPIOCR1, 10, /* STMMOD_DAT_b[0] */
1077 true, PRCM_IDX_GPIOCR1, 2, /* TPIU_D[9] */
1078 true, PRCM_IDX_GPIOCR2, 27 /* HW_OBS_APE_PRCMU[9] */
1079 ),
1080 PRCM_GPIOCR_ALTCX(75, true, PRCM_IDX_GPIOCR1, 12, /* MODOBS_RESOUT0_N */
1081 true, PRCM_IDX_GPIOCR2, 1, /* MODUART_STMMUX_RXD_b */
1082 true, PRCM_IDX_GPIOCR1, 2, /* TPIU_D[8] */
1083 true, PRCM_IDX_GPIOCR2, 28 /* HW_OBS_APE_PRCMU[8] */
1084 ),
1085 PRCM_GPIOCR_ALTCX(76, true, PRCM_IDX_GPIOCR3, 7, /* MOD_PRCMU_DEBUG[12] */
1086 true, PRCM_IDX_GPIOCR1, 25, /* Hx_GPIO[7] */
1087 true, PRCM_IDX_GPIOCR1, 2, /* TPIU_D[7] */
1088 true, PRCM_IDX_GPIOCR2, 29 /* HW_OBS_APE_PRCMU[7] */
1089 ),
1090 PRCM_GPIOCR_ALTCX(77, true, PRCM_IDX_GPIOCR3, 7, /* MOD_PRCMU_DEBUG[11] */
1091 true, PRCM_IDX_GPIOCR1, 25, /* Hx_GPIO[6] */
1092 true, PRCM_IDX_GPIOCR1, 2, /* TPIU_D[6] */
1093 true, PRCM_IDX_GPIOCR2, 29 /* HW_OBS_APE_PRCMU[6] */
1094 ),
1095 PRCM_GPIOCR_ALTCX(78, true, PRCM_IDX_GPIOCR3, 7, /* MOD_PRCMU_DEBUG[10] */
1096 true, PRCM_IDX_GPIOCR1, 25, /* Hx_GPIO[5] */
1097 true, PRCM_IDX_GPIOCR1, 2, /* TPIU_D[5] */
1098 true, PRCM_IDX_GPIOCR2, 29 /* HW_OBS_APE_PRCMU[5] */
1099 ),
1100 PRCM_GPIOCR_ALTCX(79, true, PRCM_IDX_GPIOCR3, 7, /* MOD_PRCMU_DEBUG[9] */
1101 true, PRCM_IDX_GPIOCR1, 25, /* Hx_GPIO[4] */
1102 true, PRCM_IDX_GPIOCR1, 2, /* TPIU_D[4] */
1103 true, PRCM_IDX_GPIOCR2, 29 /* HW_OBS_APE_PRCMU[4] */
1104 ),
1105 PRCM_GPIOCR_ALTCX(80, true, PRCM_IDX_GPIOCR1, 26, /* MODACC_GPO[0] */
1106 true, PRCM_IDX_GPIOCR1, 25, /* Hx_GPIO[3] */
1107 true, PRCM_IDX_GPIOCR1, 2, /* TPIU_D[3] */
1108 true, PRCM_IDX_GPIOCR2, 30 /* HW_OBS_APE_PRCMU[3] */
1109 ),
1110 PRCM_GPIOCR_ALTCX(81, true, PRCM_IDX_GPIOCR2, 17, /* MODACC_GPO[1] */
1111 true, PRCM_IDX_GPIOCR1, 25, /* Hx_GPIO[2] */
1112 true, PRCM_IDX_GPIOCR1, 2, /* TPIU_D[2] */
1113 true, PRCM_IDX_GPIOCR2, 30 /* HW_OBS_APE_PRCMU[2] */
1114 ),
1115 PRCM_GPIOCR_ALTCX(82, true, PRCM_IDX_GPIOCR3, 8, /* MOD_PRCMU_DEBUG[8] */
1116 true, PRCM_IDX_GPIOCR1, 25, /* Hx_GPIO[1] */
1117 true, PRCM_IDX_GPIOCR1, 2, /* TPIU_D[1] */
1118 true, PRCM_IDX_GPIOCR2, 31 /* HW_OBS_APE_PRCMU[1] */
1119 ),
1120 PRCM_GPIOCR_ALTCX(83, true, PRCM_IDX_GPIOCR3, 8, /* MOD_PRCMU_DEBUG[7] */
1121 true, PRCM_IDX_GPIOCR1, 25, /* Hx_GPIO[0] */
1122 true, PRCM_IDX_GPIOCR1, 2, /* TPIU_D[0] */
1123 true, PRCM_IDX_GPIOCR2, 31 /* HW_OBS_APE_PRCMU[0] */
1124 ),
1125 PRCM_GPIOCR_ALTCX(84, true, PRCM_IDX_GPIOCR3, 9, /* MOD_PRCMU_DEBUG[6] */
1126 true, PRCM_IDX_GPIOCR1, 8, /* SBAG_CLK_b */
1127 true, PRCM_IDX_GPIOCR1, 3, /* TPIU_D[23] */
1128 true, PRCM_IDX_GPIOCR1, 16 /* MODUART1_RXD_b */
1129 ),
1130 PRCM_GPIOCR_ALTCX(85, true, PRCM_IDX_GPIOCR3, 9, /* MOD_PRCMU_DEBUG[5] */
1131 true, PRCM_IDX_GPIOCR1, 8, /* SBAG_D_b[3] */
1132 true, PRCM_IDX_GPIOCR1, 3, /* TPIU_D[22] */
1133 true, PRCM_IDX_GPIOCR1, 16 /* MODUART1_TXD_b */
1134 ),
1135 PRCM_GPIOCR_ALTCX(86, true, PRCM_IDX_GPIOCR3, 9, /* MOD_PRCMU_DEBUG[0] */
1136 true, PRCM_IDX_GPIOCR2, 18, /* STMAPE_DAT_b[0] */
1137 true, PRCM_IDX_GPIOCR1, 14, /* TPIU_D[25] */
1138 true, PRCM_IDX_GPIOCR1, 11 /* STMMOD_DAT_c[0] */
1139 ),
1140 PRCM_GPIOCR_ALTCX(87, true, PRCM_IDX_GPIOCR3, 0, /* MODACC_GPO_a[5] */
1141 true, PRCM_IDX_GPIOCR2, 3, /* U2_RXD_c */
1142 true, PRCM_IDX_GPIOCR1, 4, /* TPIU_D[24] */
1143 true, PRCM_IDX_GPIOCR1, 21 /* MODUART_STMMUX_RXD_c */
1144 ),
1145 PRCM_GPIOCR_ALTCX(151, true, PRCM_IDX_GPIOCR1, 18, /* REMAP0 */
1146 false, 0, 0,
1147 false, 0, 0,
1148 false, 0, 0
1149 ),
1150 PRCM_GPIOCR_ALTCX(152, true, PRCM_IDX_GPIOCR1, 18, /* REMAP1 */
1151 false, 0, 0,
1152 false, 0, 0,
1153 false, 0, 0
1154 ),
1155 PRCM_GPIOCR_ALTCX(153, true, PRCM_IDX_GPIOCR3, 2, /* KP_O_b[6] */
1156 true, PRCM_IDX_GPIOCR1, 8, /* SBAG_D_b[2] */
1157 true, PRCM_IDX_GPIOCR1, 3, /* TPIU_D[21] */
1158 true, PRCM_IDX_GPIOCR1, 0 /* MODUART1_RTS */
1159 ),
1160 PRCM_GPIOCR_ALTCX(154, true, PRCM_IDX_GPIOCR3, 2, /* KP_I_b[6] */
1161 true, PRCM_IDX_GPIOCR1, 8, /* SBAG_D_b[1] */
1162 true, PRCM_IDX_GPIOCR1, 3, /* TPIU_D[20] */
1163 true, PRCM_IDX_GPIOCR1, 0 /* MODUART1_CTS */
1164 ),
1165 PRCM_GPIOCR_ALTCX(155, true, PRCM_IDX_GPIOCR3, 3, /* KP_O_b[5] */
1166 true, PRCM_IDX_GPIOCR1, 8, /* SBAG_D_b[0] */
1167 true, PRCM_IDX_GPIOCR1, 3, /* TPIU_D[19] */
1168 true, PRCM_IDX_GPIOCR1, 5 /* MODACCUART_RXD_c */
1169 ),
1170 PRCM_GPIOCR_ALTCX(156, true, PRCM_IDX_GPIOCR3, 3, /* KP_O_b[4] */
1171 true, PRCM_IDX_GPIOCR1, 8, /* SBAG_VAL_b */
1172 true, PRCM_IDX_GPIOCR1, 3, /* TPIU_D[18] */
1173 true, PRCM_IDX_GPIOCR1, 5 /* MODACCUART_TXD_b */
1174 ),
1175 PRCM_GPIOCR_ALTCX(157, true, PRCM_IDX_GPIOCR3, 4, /* KP_I_b[5] */
1176 true, PRCM_IDX_GPIOCR1, 23, /* MODOBS_SERVICE_N */
1177 true, PRCM_IDX_GPIOCR1, 3, /* TPIU_D[17] */
1178 true, PRCM_IDX_GPIOCR1, 14 /* MODACCUART_RTS */
1179 ),
1180 PRCM_GPIOCR_ALTCX(158, true, PRCM_IDX_GPIOCR3, 4, /* KP_I_b[4] */
1181 true, PRCM_IDX_GPIOCR2, 0, /* U2_TXD_c */
1182 true, PRCM_IDX_GPIOCR1, 3, /* TPIU_D[16] */
1183 true, PRCM_IDX_GPIOCR1, 14 /* MODACCUART_CTS */
1184 ),
1185 PRCM_GPIOCR_ALTCX(159, true, PRCM_IDX_GPIOCR3, 5, /* KP_O_b[3] */
1186 true, PRCM_IDX_GPIOCR3, 10, /* MODUART0_RXD */
1187 true, PRCM_IDX_GPIOCR1, 4, /* TPIU_D[31] */
1188 false, 0, 0
1189 ),
1190 PRCM_GPIOCR_ALTCX(160, true, PRCM_IDX_GPIOCR3, 5, /* KP_I_b[3] */
1191 true, PRCM_IDX_GPIOCR3, 10, /* MODUART0_TXD */
1192 true, PRCM_IDX_GPIOCR1, 4, /* TPIU_D[30] */
1193 false, 0, 0
1194 ),
1195 PRCM_GPIOCR_ALTCX(161, true, PRCM_IDX_GPIOCR3, 9, /* MOD_PRCMU_DEBUG[4] */
1196 true, PRCM_IDX_GPIOCR2, 18, /* STMAPE_CLK_b */
1197 true, PRCM_IDX_GPIOCR1, 4, /* TPIU_D[29] */
1198 true, PRCM_IDX_GPIOCR1, 11 /* STMMOD_CLK_c */
1199 ),
1200 PRCM_GPIOCR_ALTCX(162, true, PRCM_IDX_GPIOCR3, 9, /* MOD_PRCMU_DEBUG[3] */
1201 true, PRCM_IDX_GPIOCR2, 18, /* STMAPE_DAT_b[3] */
1202 true, PRCM_IDX_GPIOCR1, 4, /* TPIU_D[28] */
1203 true, PRCM_IDX_GPIOCR1, 11 /* STMMOD_DAT_c[3] */
1204 ),
1205 PRCM_GPIOCR_ALTCX(163, true, PRCM_IDX_GPIOCR3, 9, /* MOD_PRCMU_DEBUG[2] */
1206 true, PRCM_IDX_GPIOCR2, 18, /* STMAPE_DAT_b[2] */
1207 true, PRCM_IDX_GPIOCR1, 4, /* TPIU_D[27] */
1208 true, PRCM_IDX_GPIOCR1, 11 /* STMMOD_DAT_c[2] */
1209 ),
1210 PRCM_GPIOCR_ALTCX(164, true, PRCM_IDX_GPIOCR3, 9, /* MOD_PRCMU_DEBUG[1] */
1211 true, PRCM_IDX_GPIOCR2, 18, /* STMAPE_DAT_b[1] */
1212 true, PRCM_IDX_GPIOCR1, 4, /* TPIU_D[26] */
1213 true, PRCM_IDX_GPIOCR1, 11 /* STMMOD_DAT_c[1] */
1214 ),
1215 PRCM_GPIOCR_ALTCX(204, true, PRCM_IDX_GPIOCR2, 2, /* U2_RXD_f */
1216 false, 0, 0,
1217 false, 0, 0,
1218 false, 0, 0
1219 ),
1220 PRCM_GPIOCR_ALTCX(205, true, PRCM_IDX_GPIOCR2, 2, /* U2_TXD_f */
1221 false, 0, 0,
1222 false, 0, 0,
1223 false, 0, 0
1224 ),
1225 PRCM_GPIOCR_ALTCX(206, true, PRCM_IDX_GPIOCR2, 2, /* U2_CTSn_b */
1226 false, 0, 0,
1227 false, 0, 0,
1228 false, 0, 0
1229 ),
1230 PRCM_GPIOCR_ALTCX(207, true, PRCM_IDX_GPIOCR2, 2, /* U2_RTSn_b */
1231 false, 0, 0,
1232 false, 0, 0,
1233 false, 0, 0
1234 ),
1235};
1236
1237static const u16 db8540_prcm_gpiocr_regs[] = {
1238 [PRCM_IDX_GPIOCR1] = 0x138,
1239 [PRCM_IDX_GPIOCR2] = 0x574,
1240 [PRCM_IDX_GPIOCR3] = 0x2bc,
1241};
1242
984static const struct nmk_pinctrl_soc_data nmk_db8540_soc = { 1243static const struct nmk_pinctrl_soc_data nmk_db8540_soc = {
985 .gpio_ranges = nmk_db8540_ranges, 1244 .gpio_ranges = nmk_db8540_ranges,
986 .gpio_num_ranges = ARRAY_SIZE(nmk_db8540_ranges), 1245 .gpio_num_ranges = ARRAY_SIZE(nmk_db8540_ranges),
@@ -990,6 +1249,9 @@ static const struct nmk_pinctrl_soc_data nmk_db8540_soc = {
990 .nfunctions = ARRAY_SIZE(nmk_db8540_functions), 1249 .nfunctions = ARRAY_SIZE(nmk_db8540_functions),
991 .groups = nmk_db8540_groups, 1250 .groups = nmk_db8540_groups,
992 .ngroups = ARRAY_SIZE(nmk_db8540_groups), 1251 .ngroups = ARRAY_SIZE(nmk_db8540_groups),
1252 .altcx_pins = db8540_altcx_pins,
1253 .npins_altcx = ARRAY_SIZE(db8540_altcx_pins),
1254 .prcm_gpiocr_registers = db8540_prcm_gpiocr_regs,
993}; 1255};
994 1256
995void __devinit 1257void __devinit
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index 6030a513f3c4..cf82d9ce4dee 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -30,6 +30,20 @@
30#include <linux/pinctrl/pinconf.h> 30#include <linux/pinctrl/pinconf.h>
31/* Since we request GPIOs from ourself */ 31/* Since we request GPIOs from ourself */
32#include <linux/pinctrl/consumer.h> 32#include <linux/pinctrl/consumer.h>
33/*
34 * For the U8500 archs, use the PRCMU register interface, for the older
35 * Nomadik, provide some stubs. The functions using these will only be
36 * called on the U8500 series.
37 */
38#ifdef CONFIG_ARCH_U8500
39#include <linux/mfd/dbx500-prcmu.h>
40#else
41static inline u32 prcmu_read(unsigned int reg) {
42 return 0;
43}
44static inline void prcmu_write(unsigned int reg, u32 value) {}
45static inline void prcmu_write_masked(unsigned int reg, u32 mask, u32 value) {}
46#endif
33 47
34#include <asm/mach/irq.h> 48#include <asm/mach/irq.h>
35 49
@@ -237,6 +251,89 @@ nmk_gpio_disable_lazy_irq(struct nmk_gpio_chip *nmk_chip, unsigned offset)
237 dev_dbg(nmk_chip->chip.dev, "%d: clearing interrupt mask\n", gpio); 251 dev_dbg(nmk_chip->chip.dev, "%d: clearing interrupt mask\n", gpio);
238} 252}
239 253
254static void nmk_prcm_altcx_set_mode(struct nmk_pinctrl *npct,
255 unsigned offset, unsigned alt_num)
256{
257 int i;
258 u16 reg;
259 u8 bit;
260 u8 alt_index;
261 const struct prcm_gpiocr_altcx_pin_desc *pin_desc;
262 const u16 *gpiocr_regs;
263
264 if (alt_num > PRCM_IDX_GPIOCR_ALTC_MAX) {
265 dev_err(npct->dev, "PRCM GPIOCR: alternate-C%i is invalid\n",
266 alt_num);
267 return;
268 }
269
270 for (i = 0 ; i < npct->soc->npins_altcx ; i++) {
271 if (npct->soc->altcx_pins[i].pin == offset)
272 break;
273 }
274 if (i == npct->soc->npins_altcx) {
275 dev_dbg(npct->dev, "PRCM GPIOCR: pin %i is not found\n",
276 offset);
277 return;
278 }
279
280 pin_desc = npct->soc->altcx_pins + i;
281 gpiocr_regs = npct->soc->prcm_gpiocr_registers;
282
283 /*
284 * If alt_num is NULL, just clear current ALTCx selection
285 * to make sure we come back to a pure ALTC selection
286 */
287 if (!alt_num) {
288 for (i = 0 ; i < PRCM_IDX_GPIOCR_ALTC_MAX ; i++) {
289 if (pin_desc->altcx[i].used == true) {
290 reg = gpiocr_regs[pin_desc->altcx[i].reg_index];
291 bit = pin_desc->altcx[i].control_bit;
292 if (prcmu_read(reg) & BIT(bit)) {
293 prcmu_write_masked(reg, BIT(bit), 0);
294 dev_dbg(npct->dev,
295 "PRCM GPIOCR: pin %i: alternate-C%i has been disabled\n",
296 offset, i+1);
297 }
298 }
299 }
300 return;
301 }
302
303 alt_index = alt_num - 1;
304 if (pin_desc->altcx[alt_index].used == false) {
305 dev_warn(npct->dev,
306 "PRCM GPIOCR: pin %i: alternate-C%i does not exist\n",
307 offset, alt_num);
308 return;
309 }
310
311 /*
312 * Check if any other ALTCx functions are activated on this pin
313 * and disable it first.
314 */
315 for (i = 0 ; i < PRCM_IDX_GPIOCR_ALTC_MAX ; i++) {
316 if (i == alt_index)
317 continue;
318 if (pin_desc->altcx[i].used == true) {
319 reg = gpiocr_regs[pin_desc->altcx[i].reg_index];
320 bit = pin_desc->altcx[i].control_bit;
321 if (prcmu_read(reg) & BIT(bit)) {
322 prcmu_write_masked(reg, BIT(bit), 0);
323 dev_dbg(npct->dev,
324 "PRCM GPIOCR: pin %i: alternate-C%i has been disabled\n",
325 offset, i+1);
326 }
327 }
328 }
329
330 reg = gpiocr_regs[pin_desc->altcx[alt_index].reg_index];
331 bit = pin_desc->altcx[alt_index].control_bit;
332 dev_dbg(npct->dev, "PRCM GPIOCR: pin %i: alternate-C%i has been selected\n",
333 offset, alt_index+1);
334 prcmu_write_masked(reg, BIT(bit), BIT(bit));
335}
336
240static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset, 337static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset,
241 pin_cfg_t cfg, bool sleep, unsigned int *slpmregs) 338 pin_cfg_t cfg, bool sleep, unsigned int *slpmregs)
242{ 339{
@@ -959,7 +1056,7 @@ static int nmk_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
959 struct nmk_gpio_chip *nmk_chip = 1056 struct nmk_gpio_chip *nmk_chip =
960 container_of(chip, struct nmk_gpio_chip, chip); 1057 container_of(chip, struct nmk_gpio_chip, chip);
961 1058
962 return irq_find_mapping(nmk_chip->domain, offset); 1059 return irq_create_mapping(nmk_chip->domain, offset);
963} 1060}
964 1061
965#ifdef CONFIG_DEBUG_FS 1062#ifdef CONFIG_DEBUG_FS
@@ -1184,6 +1281,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
1184 struct clk *clk; 1281 struct clk *clk;
1185 int secondary_irq; 1282 int secondary_irq;
1186 void __iomem *base; 1283 void __iomem *base;
1284 int irq_start = 0;
1187 int irq; 1285 int irq;
1188 int ret; 1286 int ret;
1189 1287
@@ -1287,9 +1385,11 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
1287 1385
1288 platform_set_drvdata(dev, nmk_chip); 1386 platform_set_drvdata(dev, nmk_chip);
1289 1387
1290 nmk_chip->domain = irq_domain_add_legacy(np, NMK_GPIO_PER_CHIP, 1388 if (!np)
1291 NOMADIK_GPIO_TO_IRQ(pdata->first_gpio), 1389 irq_start = NOMADIK_GPIO_TO_IRQ(pdata->first_gpio);
1292 0, &nmk_gpio_irq_simple_ops, nmk_chip); 1390 nmk_chip->domain = irq_domain_add_simple(np,
1391 NMK_GPIO_PER_CHIP, irq_start,
1392 &nmk_gpio_irq_simple_ops, nmk_chip);
1293 if (!nmk_chip->domain) { 1393 if (!nmk_chip->domain) {
1294 dev_err(&dev->dev, "failed to create irqdomain\n"); 1394 dev_err(&dev->dev, "failed to create irqdomain\n");
1295 ret = -ENOSYS; 1395 ret = -ENOSYS;
@@ -1441,7 +1541,7 @@ static int nmk_pmx_enable(struct pinctrl_dev *pctldev, unsigned function,
1441 * IOFORCE will switch *all* ports to their sleepmode setting to as 1541 * IOFORCE will switch *all* ports to their sleepmode setting to as
1442 * to avoid glitches. (Not just one port!) 1542 * to avoid glitches. (Not just one port!)
1443 */ 1543 */
1444 glitch = (g->altsetting == NMK_GPIO_ALT_C); 1544 glitch = ((g->altsetting & NMK_GPIO_ALT_C) == NMK_GPIO_ALT_C);
1445 1545
1446 if (glitch) { 1546 if (glitch) {
1447 spin_lock_irqsave(&nmk_gpio_slpm_lock, flags); 1547 spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
@@ -1491,8 +1591,21 @@ static int nmk_pmx_enable(struct pinctrl_dev *pctldev, unsigned function,
1491 */ 1591 */
1492 nmk_gpio_disable_lazy_irq(nmk_chip, bit); 1592 nmk_gpio_disable_lazy_irq(nmk_chip, bit);
1493 1593
1494 __nmk_gpio_set_mode_safe(nmk_chip, bit, g->altsetting, glitch); 1594 __nmk_gpio_set_mode_safe(nmk_chip, bit,
1595 (g->altsetting & NMK_GPIO_ALT_C), glitch);
1495 clk_disable(nmk_chip->clk); 1596 clk_disable(nmk_chip->clk);
1597
1598 /*
1599 * Call PRCM GPIOCR config function in case ALTC
1600 * has been selected:
1601 * - If selection is a ALTCx, some bits in PRCM GPIOCR registers
1602 * must be set.
1603 * - If selection is pure ALTC and previous selection was ALTCx,
1604 * then some bits in PRCM GPIOCR registers must be cleared.
1605 */
1606 if ((g->altsetting & NMK_GPIO_ALT_C) == NMK_GPIO_ALT_C)
1607 nmk_prcm_altcx_set_mode(npct, g->pins[i],
1608 g->altsetting >> NMK_GPIO_ALT_CX_SHIFT);
1496 } 1609 }
1497 1610
1498 /* When all pins are successfully reconfigured we get here */ 1611 /* When all pins are successfully reconfigured we get here */
diff --git a/drivers/pinctrl/pinctrl-nomadik.h b/drivers/pinctrl/pinctrl-nomadik.h
index 5c99f1c62dfd..eef316e979a0 100644
--- a/drivers/pinctrl/pinctrl-nomadik.h
+++ b/drivers/pinctrl/pinctrl-nomadik.h
@@ -8,6 +8,78 @@
8#define PINCTRL_NMK_DB8500 1 8#define PINCTRL_NMK_DB8500 1
9#define PINCTRL_NMK_DB8540 2 9#define PINCTRL_NMK_DB8540 2
10 10
11#define PRCM_GPIOCR_ALTCX(pin_num,\
12 altc1_used, altc1_ri, altc1_cb,\
13 altc2_used, altc2_ri, altc2_cb,\
14 altc3_used, altc3_ri, altc3_cb,\
15 altc4_used, altc4_ri, altc4_cb)\
16{\
17 .pin = pin_num,\
18 .altcx[PRCM_IDX_GPIOCR_ALTC1] = {\
19 .used = altc1_used,\
20 .reg_index = altc1_ri,\
21 .control_bit = altc1_cb\
22 },\
23 .altcx[PRCM_IDX_GPIOCR_ALTC2] = {\
24 .used = altc2_used,\
25 .reg_index = altc2_ri,\
26 .control_bit = altc2_cb\
27 },\
28 .altcx[PRCM_IDX_GPIOCR_ALTC3] = {\
29 .used = altc3_used,\
30 .reg_index = altc3_ri,\
31 .control_bit = altc3_cb\
32 },\
33 .altcx[PRCM_IDX_GPIOCR_ALTC4] = {\
34 .used = altc4_used,\
35 .reg_index = altc4_ri,\
36 .control_bit = altc4_cb\
37 },\
38}
39
40/**
41 * enum prcm_gpiocr_reg_index
42 * Used to reference an PRCM GPIOCR register address.
43 */
44enum prcm_gpiocr_reg_index {
45 PRCM_IDX_GPIOCR1,
46 PRCM_IDX_GPIOCR2,
47 PRCM_IDX_GPIOCR3
48};
49/**
50 * enum prcm_gpiocr_altcx_index
51 * Used to reference an Other alternate-C function.
52 */
53enum prcm_gpiocr_altcx_index {
54 PRCM_IDX_GPIOCR_ALTC1,
55 PRCM_IDX_GPIOCR_ALTC2,
56 PRCM_IDX_GPIOCR_ALTC3,
57 PRCM_IDX_GPIOCR_ALTC4,
58 PRCM_IDX_GPIOCR_ALTC_MAX,
59};
60
61/**
62 * struct prcm_gpio_altcx - Other alternate-C function
63 * @used: other alternate-C function availability
64 * @reg_index: PRCM GPIOCR register index used to control the function
65 * @control_bit: PRCM GPIOCR bit used to control the function
66 */
67struct prcm_gpiocr_altcx {
68 bool used:1;
69 u8 reg_index:2;
70 u8 control_bit:5;
71} __packed;
72
73/**
74 * struct prcm_gpio_altcx_pin_desc - Other alternate-C pin
75 * @pin: The pin number
76 * @altcx: array of other alternate-C[1-4] functions
77 */
78struct prcm_gpiocr_altcx_pin_desc {
79 unsigned short pin;
80 struct prcm_gpiocr_altcx altcx[PRCM_IDX_GPIOCR_ALTC_MAX];
81};
82
11/** 83/**
12 * struct nmk_function - Nomadik pinctrl mux function 84 * struct nmk_function - Nomadik pinctrl mux function
13 * @name: The name of the function, exported to pinctrl core. 85 * @name: The name of the function, exported to pinctrl core.
@@ -50,6 +122,9 @@ struct nmk_pingroup {
50 * @nfunction: The number of entries in @functions. 122 * @nfunction: The number of entries in @functions.
51 * @groups: An array describing all pin groups the pin SoC supports. 123 * @groups: An array describing all pin groups the pin SoC supports.
52 * @ngroups: The number of entries in @groups. 124 * @ngroups: The number of entries in @groups.
125 * @altcx_pins: The pins that support Other alternate-C function on this SoC
126 * @npins_altcx: The number of Other alternate-C pins
127 * @prcm_gpiocr_registers: The array of PRCM GPIOCR registers on this SoC
53 */ 128 */
54struct nmk_pinctrl_soc_data { 129struct nmk_pinctrl_soc_data {
55 struct pinctrl_gpio_range *gpio_ranges; 130 struct pinctrl_gpio_range *gpio_ranges;
@@ -60,6 +135,9 @@ struct nmk_pinctrl_soc_data {
60 unsigned nfunctions; 135 unsigned nfunctions;
61 const struct nmk_pingroup *groups; 136 const struct nmk_pingroup *groups;
62 unsigned ngroups; 137 unsigned ngroups;
138 const struct prcm_gpiocr_altcx_pin_desc *altcx_pins;
139 unsigned npins_altcx;
140 const u16 *prcm_gpiocr_registers;
63}; 141};
64 142
65#ifdef CONFIG_PINCTRL_STN8815 143#ifdef CONFIG_PINCTRL_STN8815
diff --git a/drivers/pinctrl/pinctrl-samsung.c b/drivers/pinctrl/pinctrl-samsung.c
index dd108a94acf9..861cd5f04d5e 100644
--- a/drivers/pinctrl/pinctrl-samsung.c
+++ b/drivers/pinctrl/pinctrl-samsung.c
@@ -513,7 +513,7 @@ static int samsung_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
513 * Parse the pin names listed in the 'samsung,pins' property and convert it 513 * Parse the pin names listed in the 'samsung,pins' property and convert it
514 * into a list of gpio numbers are create a pin group from it. 514 * into a list of gpio numbers are create a pin group from it.
515 */ 515 */
516static int __init samsung_pinctrl_parse_dt_pins(struct platform_device *pdev, 516static int __devinit samsung_pinctrl_parse_dt_pins(struct platform_device *pdev,
517 struct device_node *cfg_np, struct pinctrl_desc *pctl, 517 struct device_node *cfg_np, struct pinctrl_desc *pctl,
518 unsigned int **pin_list, unsigned int *npins) 518 unsigned int **pin_list, unsigned int *npins)
519{ 519{
@@ -560,7 +560,7 @@ static int __init samsung_pinctrl_parse_dt_pins(struct platform_device *pdev,
560 * from device node of the pin-controller. A pin group is formed with all 560 * from device node of the pin-controller. A pin group is formed with all
561 * the pins listed in the "samsung,pins" property. 561 * the pins listed in the "samsung,pins" property.
562 */ 562 */
563static int __init samsung_pinctrl_parse_dt(struct platform_device *pdev, 563static int __devinit samsung_pinctrl_parse_dt(struct platform_device *pdev,
564 struct samsung_pinctrl_drv_data *drvdata) 564 struct samsung_pinctrl_drv_data *drvdata)
565{ 565{
566 struct device *dev = &pdev->dev; 566 struct device *dev = &pdev->dev;
@@ -655,7 +655,7 @@ static int __init samsung_pinctrl_parse_dt(struct platform_device *pdev,
655} 655}
656 656
657/* register the pinctrl interface with the pinctrl subsystem */ 657/* register the pinctrl interface with the pinctrl subsystem */
658static int __init samsung_pinctrl_register(struct platform_device *pdev, 658static int __devinit samsung_pinctrl_register(struct platform_device *pdev,
659 struct samsung_pinctrl_drv_data *drvdata) 659 struct samsung_pinctrl_drv_data *drvdata)
660{ 660{
661 struct pinctrl_desc *ctrldesc = &drvdata->pctl; 661 struct pinctrl_desc *ctrldesc = &drvdata->pctl;
@@ -729,7 +729,7 @@ static int __init samsung_pinctrl_register(struct platform_device *pdev,
729} 729}
730 730
731/* register the gpiolib interface with the gpiolib subsystem */ 731/* register the gpiolib interface with the gpiolib subsystem */
732static int __init samsung_gpiolib_register(struct platform_device *pdev, 732static int __devinit samsung_gpiolib_register(struct platform_device *pdev,
733 struct samsung_pinctrl_drv_data *drvdata) 733 struct samsung_pinctrl_drv_data *drvdata)
734{ 734{
735 struct gpio_chip *gc; 735 struct gpio_chip *gc;
@@ -762,7 +762,7 @@ static int __init samsung_gpiolib_register(struct platform_device *pdev,
762} 762}
763 763
764/* unregister the gpiolib interface with the gpiolib subsystem */ 764/* unregister the gpiolib interface with the gpiolib subsystem */
765static int __init samsung_gpiolib_unregister(struct platform_device *pdev, 765static int __devinit samsung_gpiolib_unregister(struct platform_device *pdev,
766 struct samsung_pinctrl_drv_data *drvdata) 766 struct samsung_pinctrl_drv_data *drvdata)
767{ 767{
768 int ret = gpiochip_remove(drvdata->gc); 768 int ret = gpiochip_remove(drvdata->gc);
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index 675497c15149..9ecacf3d0a75 100644
--- a/drivers/pinctrl/pinctrl-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -1323,41 +1323,6 @@ static inline struct sirfsoc_gpio_bank *sirfsoc_gpio_to_bank(unsigned int gpio)
1323 return &sgpio_bank[gpio / SIRFSOC_GPIO_BANK_SIZE]; 1323 return &sgpio_bank[gpio / SIRFSOC_GPIO_BANK_SIZE];
1324} 1324}
1325 1325
1326void sirfsoc_gpio_set_pull(unsigned gpio, unsigned mode)
1327{
1328 struct sirfsoc_gpio_bank *bank = sirfsoc_gpio_to_bank(gpio);
1329 int idx = sirfsoc_gpio_to_offset(gpio);
1330 u32 val, offset;
1331 unsigned long flags;
1332
1333 offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
1334
1335 spin_lock_irqsave(&sgpio_lock, flags);
1336
1337 val = readl(bank->chip.regs + offset);
1338
1339 switch (mode) {
1340 case SIRFSOC_GPIO_PULL_NONE:
1341 val &= ~SIRFSOC_GPIO_CTL_PULL_MASK;
1342 break;
1343 case SIRFSOC_GPIO_PULL_UP:
1344 val |= SIRFSOC_GPIO_CTL_PULL_MASK;
1345 val |= SIRFSOC_GPIO_CTL_PULL_HIGH;
1346 break;
1347 case SIRFSOC_GPIO_PULL_DOWN:
1348 val |= SIRFSOC_GPIO_CTL_PULL_MASK;
1349 val &= ~SIRFSOC_GPIO_CTL_PULL_HIGH;
1350 break;
1351 default:
1352 break;
1353 }
1354
1355 writel(val, bank->chip.regs + offset);
1356
1357 spin_unlock_irqrestore(&sgpio_lock, flags);
1358}
1359EXPORT_SYMBOL(sirfsoc_gpio_set_pull);
1360
1361static inline struct sirfsoc_gpio_bank *sirfsoc_irqchip_to_bank(struct gpio_chip *chip) 1326static inline struct sirfsoc_gpio_bank *sirfsoc_irqchip_to_bank(struct gpio_chip *chip)
1362{ 1327{
1363 return container_of(to_of_mm_gpio_chip(chip), struct sirfsoc_gpio_bank, chip); 1328 return container_of(to_of_mm_gpio_chip(chip), struct sirfsoc_gpio_bank, chip);
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index 729b686c3ad2..7da0b371fd65 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -464,7 +464,7 @@ static int tegra_pinconf_reg(struct tegra_pmx *pmx,
464 *bank = g->drv_bank; 464 *bank = g->drv_bank;
465 *reg = g->drv_reg; 465 *reg = g->drv_reg;
466 *bit = g->lpmd_bit; 466 *bit = g->lpmd_bit;
467 *width = 1; 467 *width = 2;
468 break; 468 break;
469 case TEGRA_PINCONF_PARAM_DRIVE_DOWN_STRENGTH: 469 case TEGRA_PINCONF_PARAM_DRIVE_DOWN_STRENGTH:
470 *bank = g->drv_bank; 470 *bank = g->drv_bank;
diff --git a/drivers/pinctrl/pinctrl-tegra30.c b/drivers/pinctrl/pinctrl-tegra30.c
index 0386fdf0da16..7894f14c7059 100644
--- a/drivers/pinctrl/pinctrl-tegra30.c
+++ b/drivers/pinctrl/pinctrl-tegra30.c
@@ -3345,10 +3345,10 @@ static const struct tegra_function tegra30_functions[] = {
3345 FUNCTION(vi_alt3), 3345 FUNCTION(vi_alt3),
3346}; 3346};
3347 3347
3348#define MUXCTL_REG_A 0x3000 3348#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
3349#define PINGROUP_REG_A 0x868 3349#define PINGROUP_REG_A 0x3000 /* bank 1 */
3350 3350
3351#define PINGROUP_REG_Y(r) ((r) - MUXCTL_REG_A) 3351#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_A)
3352#define PINGROUP_REG_N(r) -1 3352#define PINGROUP_REG_N(r) -1
3353 3353
3354#define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior) \ 3354#define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior) \
@@ -3364,25 +3364,25 @@ static const struct tegra_function tegra30_functions[] = {
3364 }, \ 3364 }, \
3365 .func_safe = TEGRA_MUX_ ## f_safe, \ 3365 .func_safe = TEGRA_MUX_ ## f_safe, \
3366 .mux_reg = PINGROUP_REG_Y(r), \ 3366 .mux_reg = PINGROUP_REG_Y(r), \
3367 .mux_bank = 0, \ 3367 .mux_bank = 1, \
3368 .mux_bit = 0, \ 3368 .mux_bit = 0, \
3369 .pupd_reg = PINGROUP_REG_Y(r), \ 3369 .pupd_reg = PINGROUP_REG_Y(r), \
3370 .pupd_bank = 0, \ 3370 .pupd_bank = 1, \
3371 .pupd_bit = 2, \ 3371 .pupd_bit = 2, \
3372 .tri_reg = PINGROUP_REG_Y(r), \ 3372 .tri_reg = PINGROUP_REG_Y(r), \
3373 .tri_bank = 0, \ 3373 .tri_bank = 1, \
3374 .tri_bit = 4, \ 3374 .tri_bit = 4, \
3375 .einput_reg = PINGROUP_REG_Y(r), \ 3375 .einput_reg = PINGROUP_REG_Y(r), \
3376 .einput_bank = 0, \ 3376 .einput_bank = 1, \
3377 .einput_bit = 5, \ 3377 .einput_bit = 5, \
3378 .odrain_reg = PINGROUP_REG_##od(r), \ 3378 .odrain_reg = PINGROUP_REG_##od(r), \
3379 .odrain_bank = 0, \ 3379 .odrain_bank = 1, \
3380 .odrain_bit = 6, \ 3380 .odrain_bit = 6, \
3381 .lock_reg = PINGROUP_REG_Y(r), \ 3381 .lock_reg = PINGROUP_REG_Y(r), \
3382 .lock_bank = 0, \ 3382 .lock_bank = 1, \
3383 .lock_bit = 7, \ 3383 .lock_bit = 7, \
3384 .ioreset_reg = PINGROUP_REG_##ior(r), \ 3384 .ioreset_reg = PINGROUP_REG_##ior(r), \
3385 .ioreset_bank = 0, \ 3385 .ioreset_bank = 1, \
3386 .ioreset_bit = 8, \ 3386 .ioreset_bit = 8, \
3387 .drv_reg = -1, \ 3387 .drv_reg = -1, \
3388 } 3388 }
@@ -3401,8 +3401,8 @@ static const struct tegra_function tegra30_functions[] = {
3401 .odrain_reg = -1, \ 3401 .odrain_reg = -1, \
3402 .lock_reg = -1, \ 3402 .lock_reg = -1, \
3403 .ioreset_reg = -1, \ 3403 .ioreset_reg = -1, \
3404 .drv_reg = ((r) - PINGROUP_REG_A), \ 3404 .drv_reg = ((r) - DRV_PINGROUP_REG_A), \
3405 .drv_bank = 1, \ 3405 .drv_bank = 0, \
3406 .hsm_bit = hsm_b, \ 3406 .hsm_bit = hsm_b, \
3407 .schmitt_bit = schmitt_b, \ 3407 .schmitt_bit = schmitt_b, \
3408 .lpmd_bit = lpmd_b, \ 3408 .lpmd_bit = lpmd_b, \
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index f8d917d40c92..b9bcaec66223 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -17,8 +17,6 @@
17#include <linux/ioport.h> 17#include <linux/ioport.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/device.h> 19#include <linux/device.h>
20#include <linux/module.h>
21#include <linux/io.h>
22#include <linux/platform_device.h> 20#include <linux/platform_device.h>
23 21
24#include "pinctrl-lantiq.h" 22#include "pinctrl-lantiq.h"
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 39abb150bdd4..84c56881ba80 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -329,7 +329,8 @@ static int acerhdf_bind(struct thermal_zone_device *thermal,
329 if (cdev != cl_dev) 329 if (cdev != cl_dev)
330 return 0; 330 return 0;
331 331
332 if (thermal_zone_bind_cooling_device(thermal, 0, cdev)) { 332 if (thermal_zone_bind_cooling_device(thermal, 0, cdev,
333 THERMAL_NO_LIMIT, THERMAL_NO_LIMIT)) {
333 pr_err("error binding cooling dev\n"); 334 pr_err("error binding cooling dev\n");
334 return -EINVAL; 335 return -EINVAL;
335 } 336 }
@@ -661,7 +662,7 @@ static int acerhdf_register_thermal(void)
661 return -EINVAL; 662 return -EINVAL;
662 663
663 thz_dev = thermal_zone_device_register("acerhdf", 1, 0, NULL, 664 thz_dev = thermal_zone_device_register("acerhdf", 1, 0, NULL,
664 &acerhdf_dev_ops, 0, 0, 0, 665 &acerhdf_dev_ops, 0,
665 (kernelmode) ? interval*1000 : 0); 666 (kernelmode) ? interval*1000 : 0);
666 if (IS_ERR(thz_dev)) 667 if (IS_ERR(thz_dev))
667 return -EINVAL; 668 return -EINVAL;
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 3a27113deda9..c8097616dd62 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -502,7 +502,7 @@ static int mid_thermal_probe(struct platform_device *pdev)
502 goto err; 502 goto err;
503 } 503 }
504 pinfo->tzd[i] = thermal_zone_device_register(name[i], 504 pinfo->tzd[i] = thermal_zone_device_register(name[i],
505 0, 0, td_info, &tzd_ops, 0, 0, 0, 0); 505 0, 0, td_info, &tzd_ops, 0, 0);
506 if (IS_ERR(pinfo->tzd[i])) { 506 if (IS_ERR(pinfo->tzd[i])) {
507 kfree(td_info); 507 kfree(td_info);
508 ret = PTR_ERR(pinfo->tzd[i]); 508 ret = PTR_ERR(pinfo->tzd[i]);
diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c
index d4957b4edb62..24768a27e1d8 100644
--- a/drivers/power/avs/smartreflex.c
+++ b/drivers/power/avs/smartreflex.c
@@ -930,7 +930,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
930 if (!sr_info->base) { 930 if (!sr_info->base) {
931 dev_err(&pdev->dev, "%s: ioremap fail\n", __func__); 931 dev_err(&pdev->dev, "%s: ioremap fail\n", __func__);
932 ret = -ENOMEM; 932 ret = -ENOMEM;
933 goto err_release_region; 933 goto err_free_name;
934 } 934 }
935 935
936 if (irq) 936 if (irq)
@@ -969,7 +969,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
969 dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n", 969 dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n",
970 __func__); 970 __func__);
971 ret = PTR_ERR(sr_info->dbg_dir); 971 ret = PTR_ERR(sr_info->dbg_dir);
972 goto err_free_name; 972 goto err_debugfs;
973 } 973 }
974 974
975 (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, 975 (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR,
@@ -1013,11 +1013,11 @@ static int __init omap_sr_probe(struct platform_device *pdev)
1013 1013
1014err_debugfs: 1014err_debugfs:
1015 debugfs_remove_recursive(sr_info->dbg_dir); 1015 debugfs_remove_recursive(sr_info->dbg_dir);
1016err_free_name:
1017 kfree(sr_info->name);
1018err_iounmap: 1016err_iounmap:
1019 list_del(&sr_info->node); 1017 list_del(&sr_info->node);
1020 iounmap(sr_info->base); 1018 iounmap(sr_info->base);
1019err_free_name:
1020 kfree(sr_info->name);
1021err_release_region: 1021err_release_region:
1022 release_mem_region(mem->start, resource_size(mem)); 1022 release_mem_region(mem->start, resource_size(mem));
1023err_free_devinfo: 1023err_free_devinfo:
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 08cc8a3c15af..2436f1350013 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -201,7 +201,7 @@ static int psy_register_thermal(struct power_supply *psy)
201 for (i = 0; i < psy->num_properties; i++) { 201 for (i = 0; i < psy->num_properties; i++) {
202 if (psy->properties[i] == POWER_SUPPLY_PROP_TEMP) { 202 if (psy->properties[i] == POWER_SUPPLY_PROP_TEMP) {
203 psy->tzd = thermal_zone_device_register(psy->name, 0, 0, 203 psy->tzd = thermal_zone_device_register(psy->name, 0, 0,
204 psy, &psy_tzd_ops, 0, 0, 0, 0); 204 psy, &psy_tzd_ops, 0, 0);
205 if (IS_ERR(psy->tzd)) 205 if (IS_ERR(psy->tzd))
206 return PTR_ERR(psy->tzd); 206 return PTR_ERR(psy->tzd);
207 break; 207 break;
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index d7c6b83097c1..ed81720e7b2b 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -1,6 +1,5 @@
1menuconfig PWM 1menuconfig PWM
2 bool "Pulse-Width Modulation (PWM) Support" 2 bool "Pulse-Width Modulation (PWM) Support"
3 depends on !MACH_JZ4740 && !PUV3_PWM
4 help 3 help
5 Generic Pulse-Width Modulation (PWM) support. 4 Generic Pulse-Width Modulation (PWM) support.
6 5
@@ -29,6 +28,15 @@ menuconfig PWM
29 28
30if PWM 29if PWM
31 30
31config PWM_AB8500
32 tristate "AB8500 PWM support"
33 depends on AB8500_CORE && ARCH_U8500
34 help
35 Generic PWM framework driver for Analog Baseband AB8500.
36
37 To compile this driver as a module, choose M here: the module
38 will be called pwm-ab8500.
39
32config PWM_BFIN 40config PWM_BFIN
33 tristate "Blackfin PWM support" 41 tristate "Blackfin PWM support"
34 depends on BFIN_GPTIMERS 42 depends on BFIN_GPTIMERS
@@ -47,6 +55,16 @@ config PWM_IMX
47 To compile this driver as a module, choose M here: the module 55 To compile this driver as a module, choose M here: the module
48 will be called pwm-imx. 56 will be called pwm-imx.
49 57
58config PWM_JZ4740
59 tristate "Ingenic JZ4740 PWM support"
60 depends on MACH_JZ4740
61 help
62 Generic PWM framework driver for Ingenic JZ4740 based
63 machines.
64
65 To compile this driver as a module, choose M here: the module
66 will be called pwm-jz4740.
67
50config PWM_LPC32XX 68config PWM_LPC32XX
51 tristate "LPC32XX PWM support" 69 tristate "LPC32XX PWM support"
52 depends on ARCH_LPC32XX 70 depends on ARCH_LPC32XX
@@ -67,6 +85,15 @@ config PWM_MXS
67 To compile this driver as a module, choose M here: the module 85 To compile this driver as a module, choose M here: the module
68 will be called pwm-mxs. 86 will be called pwm-mxs.
69 87
88config PWM_PUV3
89 tristate "PKUnity NetBook-0916 PWM support"
90 depends on ARCH_PUV3
91 help
92 Generic PWM framework driver for PKUnity NetBook-0916.
93
94 To compile this driver as a module, choose M here: the module
95 will be called pwm-puv3.
96
70config PWM_PXA 97config PWM_PXA
71 tristate "PXA PWM support" 98 tristate "PXA PWM support"
72 depends on ARCH_PXA 99 depends on ARCH_PXA
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 78f123dca30d..acfe4821c58b 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -1,8 +1,11 @@
1obj-$(CONFIG_PWM) += core.o 1obj-$(CONFIG_PWM) += core.o
2obj-$(CONFIG_PWM_AB8500) += pwm-ab8500.o
2obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o 3obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o
3obj-$(CONFIG_PWM_IMX) += pwm-imx.o 4obj-$(CONFIG_PWM_IMX) += pwm-imx.o
5obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o
4obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o 6obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o
5obj-$(CONFIG_PWM_MXS) += pwm-mxs.o 7obj-$(CONFIG_PWM_MXS) += pwm-mxs.o
8obj-$(CONFIG_PWM_PUV3) += pwm-puv3.o
6obj-$(CONFIG_PWM_PXA) += pwm-pxa.o 9obj-$(CONFIG_PWM_PXA) += pwm-pxa.o
7obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o 10obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o
8obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o 11obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index c6e05078d3ad..f5acdaa52707 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -371,7 +371,7 @@ EXPORT_SYMBOL_GPL(pwm_free);
371 */ 371 */
372int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) 372int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
373{ 373{
374 if (!pwm || period_ns == 0 || duty_ns > period_ns) 374 if (!pwm || duty_ns < 0 || period_ns <= 0 || duty_ns > period_ns)
375 return -EINVAL; 375 return -EINVAL;
376 376
377 return pwm->chip->ops->config(pwm->chip, pwm, duty_ns, period_ns); 377 return pwm->chip->ops->config(pwm->chip, pwm, duty_ns, period_ns);
@@ -379,6 +379,28 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
379EXPORT_SYMBOL_GPL(pwm_config); 379EXPORT_SYMBOL_GPL(pwm_config);
380 380
381/** 381/**
382 * pwm_set_polarity() - configure the polarity of a PWM signal
383 * @pwm: PWM device
384 * @polarity: new polarity of the PWM signal
385 *
386 * Note that the polarity cannot be configured while the PWM device is enabled
387 */
388int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity)
389{
390 if (!pwm || !pwm->chip->ops)
391 return -EINVAL;
392
393 if (!pwm->chip->ops->set_polarity)
394 return -ENOSYS;
395
396 if (test_bit(PWMF_ENABLED, &pwm->flags))
397 return -EBUSY;
398
399 return pwm->chip->ops->set_polarity(pwm->chip, pwm, polarity);
400}
401EXPORT_SYMBOL_GPL(pwm_set_polarity);
402
403/**
382 * pwm_enable() - start a PWM output toggling 404 * pwm_enable() - start a PWM output toggling
383 * @pwm: PWM device 405 * @pwm: PWM device
384 */ 406 */
@@ -624,6 +646,64 @@ out:
624} 646}
625EXPORT_SYMBOL_GPL(pwm_put); 647EXPORT_SYMBOL_GPL(pwm_put);
626 648
649static void devm_pwm_release(struct device *dev, void *res)
650{
651 pwm_put(*(struct pwm_device **)res);
652}
653
654/**
655 * devm_pwm_get() - resource managed pwm_get()
656 * @dev: device for PWM consumer
657 * @con_id: consumer name
658 *
659 * This function performs like pwm_get() but the acquired PWM device will
660 * automatically be released on driver detach.
661 */
662struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id)
663{
664 struct pwm_device **ptr, *pwm;
665
666 ptr = devres_alloc(devm_pwm_release, sizeof(**ptr), GFP_KERNEL);
667 if (!ptr)
668 return ERR_PTR(-ENOMEM);
669
670 pwm = pwm_get(dev, con_id);
671 if (!IS_ERR(pwm)) {
672 *ptr = pwm;
673 devres_add(dev, ptr);
674 } else {
675 devres_free(ptr);
676 }
677
678 return pwm;
679}
680EXPORT_SYMBOL_GPL(devm_pwm_get);
681
682static int devm_pwm_match(struct device *dev, void *res, void *data)
683{
684 struct pwm_device **p = res;
685
686 if (WARN_ON(!p || !*p))
687 return 0;
688
689 return *p == data;
690}
691
692/**
693 * devm_pwm_put() - resource managed pwm_put()
694 * @dev: device for PWM consumer
695 * @pwm: PWM device
696 *
697 * Release a PWM previously allocated using devm_pwm_get(). Calling this
698 * function is usually not needed because devm-allocated resources are
699 * automatically released on driver detach.
700 */
701void devm_pwm_put(struct device *dev, struct pwm_device *pwm)
702{
703 WARN_ON(devres_release(dev, devm_pwm_release, devm_pwm_match, pwm));
704}
705EXPORT_SYMBOL_GPL(devm_pwm_put);
706
627#ifdef CONFIG_DEBUG_FS 707#ifdef CONFIG_DEBUG_FS
628static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s) 708static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s)
629{ 709{
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/pwm/pwm-ab8500.c
index d7a9aa14e5d5..cfb72ca873d1 100644
--- a/drivers/misc/ab8500-pwm.c
+++ b/drivers/pwm/pwm-ab8500.c
@@ -24,16 +24,12 @@
24#define ENABLE_PWM 1 24#define ENABLE_PWM 1
25#define DISABLE_PWM 0 25#define DISABLE_PWM 0
26 26
27struct pwm_device { 27struct ab8500_pwm_chip {
28 struct device *dev; 28 struct pwm_chip chip;
29 struct list_head node;
30 const char *label;
31 unsigned int pwm_id;
32}; 29};
33 30
34static LIST_HEAD(pwm_list); 31static int ab8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
35 32 int duty_ns, int period_ns)
36int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
37{ 33{
38 int ret = 0; 34 int ret = 0;
39 unsigned int higher_val, lower_val; 35 unsigned int higher_val, lower_val;
@@ -50,95 +46,94 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
50 */ 46 */
51 higher_val = ((duty_ns & 0x0300) >> 8); 47 higher_val = ((duty_ns & 0x0300) >> 8);
52 48
53 reg = AB8500_PWM_OUT_CTRL1_REG + ((pwm->pwm_id - 1) * 2); 49 reg = AB8500_PWM_OUT_CTRL1_REG + ((chip->base - 1) * 2);
54 50
55 ret = abx500_set_register_interruptible(pwm->dev, AB8500_MISC, 51 ret = abx500_set_register_interruptible(chip->dev, AB8500_MISC,
56 reg, (u8)lower_val); 52 reg, (u8)lower_val);
57 if (ret < 0) 53 if (ret < 0)
58 return ret; 54 return ret;
59 ret = abx500_set_register_interruptible(pwm->dev, AB8500_MISC, 55 ret = abx500_set_register_interruptible(chip->dev, AB8500_MISC,
60 (reg + 1), (u8)higher_val); 56 (reg + 1), (u8)higher_val);
61 57
62 return ret; 58 return ret;
63} 59}
64EXPORT_SYMBOL(pwm_config);
65 60
66int pwm_enable(struct pwm_device *pwm) 61static int ab8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
67{ 62{
68 int ret; 63 int ret;
69 64
70 ret = abx500_mask_and_set_register_interruptible(pwm->dev, 65 ret = abx500_mask_and_set_register_interruptible(chip->dev,
71 AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG, 66 AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
72 1 << (pwm->pwm_id-1), ENABLE_PWM); 67 1 << (chip->base - 1), ENABLE_PWM);
73 if (ret < 0) 68 if (ret < 0)
74 dev_err(pwm->dev, "%s: Failed to disable PWM, Error %d\n", 69 dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n",
75 pwm->label, ret); 70 pwm->label, ret);
76 return ret; 71 return ret;
77} 72}
78EXPORT_SYMBOL(pwm_enable);
79 73
80void pwm_disable(struct pwm_device *pwm) 74static void ab8500_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
81{ 75{
82 int ret; 76 int ret;
83 77
84 ret = abx500_mask_and_set_register_interruptible(pwm->dev, 78 ret = abx500_mask_and_set_register_interruptible(chip->dev,
85 AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG, 79 AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
86 1 << (pwm->pwm_id-1), DISABLE_PWM); 80 1 << (chip->base - 1), DISABLE_PWM);
87 if (ret < 0) 81 if (ret < 0)
88 dev_err(pwm->dev, "%s: Failed to disable PWM, Error %d\n", 82 dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n",
89 pwm->label, ret); 83 pwm->label, ret);
90 return; 84 return;
91} 85}
92EXPORT_SYMBOL(pwm_disable);
93
94struct pwm_device *pwm_request(int pwm_id, const char *label)
95{
96 struct pwm_device *pwm;
97
98 list_for_each_entry(pwm, &pwm_list, node) {
99 if (pwm->pwm_id == pwm_id) {
100 pwm->label = label;
101 pwm->pwm_id = pwm_id;
102 return pwm;
103 }
104 }
105
106 return ERR_PTR(-ENOENT);
107}
108EXPORT_SYMBOL(pwm_request);
109 86
110void pwm_free(struct pwm_device *pwm) 87static const struct pwm_ops ab8500_pwm_ops = {
111{ 88 .config = ab8500_pwm_config,
112 pwm_disable(pwm); 89 .enable = ab8500_pwm_enable,
113} 90 .disable = ab8500_pwm_disable,
114EXPORT_SYMBOL(pwm_free); 91};
115 92
116static int __devinit ab8500_pwm_probe(struct platform_device *pdev) 93static int __devinit ab8500_pwm_probe(struct platform_device *pdev)
117{ 94{
118 struct pwm_device *pwm; 95 struct ab8500_pwm_chip *ab8500;
96 int err;
97
119 /* 98 /*
120 * Nothing to be done in probe, this is required to get the 99 * Nothing to be done in probe, this is required to get the
121 * device which is required for ab8500 read and write 100 * device which is required for ab8500 read and write
122 */ 101 */
123 pwm = kzalloc(sizeof(struct pwm_device), GFP_KERNEL); 102 ab8500 = kzalloc(sizeof(*ab8500), GFP_KERNEL);
124 if (pwm == NULL) { 103 if (ab8500 == NULL) {
125 dev_err(&pdev->dev, "failed to allocate memory\n"); 104 dev_err(&pdev->dev, "failed to allocate memory\n");
126 return -ENOMEM; 105 return -ENOMEM;
127 } 106 }
128 pwm->dev = &pdev->dev; 107
129 pwm->pwm_id = pdev->id; 108 ab8500->chip.dev = &pdev->dev;
130 list_add_tail(&pwm->node, &pwm_list); 109 ab8500->chip.ops = &ab8500_pwm_ops;
131 platform_set_drvdata(pdev, pwm); 110 ab8500->chip.base = pdev->id;
132 dev_dbg(pwm->dev, "pwm probe successful\n"); 111 ab8500->chip.npwm = 1;
112
113 err = pwmchip_add(&ab8500->chip);
114 if (err < 0) {
115 kfree(ab8500);
116 return err;
117 }
118
119 dev_dbg(&pdev->dev, "pwm probe successful\n");
120 platform_set_drvdata(pdev, ab8500);
121
133 return 0; 122 return 0;
134} 123}
135 124
136static int __devexit ab8500_pwm_remove(struct platform_device *pdev) 125static int __devexit ab8500_pwm_remove(struct platform_device *pdev)
137{ 126{
138 struct pwm_device *pwm = platform_get_drvdata(pdev); 127 struct ab8500_pwm_chip *ab8500 = platform_get_drvdata(pdev);
139 list_del(&pwm->node); 128 int err;
129
130 err = pwmchip_remove(&ab8500->chip);
131 if (err < 0)
132 return err;
133
140 dev_dbg(&pdev->dev, "pwm driver removed\n"); 134 dev_dbg(&pdev->dev, "pwm driver removed\n");
141 kfree(pwm); 135 kfree(ab8500);
136
142 return 0; 137 return 0;
143} 138}
144 139
@@ -150,19 +145,8 @@ static struct platform_driver ab8500_pwm_driver = {
150 .probe = ab8500_pwm_probe, 145 .probe = ab8500_pwm_probe,
151 .remove = __devexit_p(ab8500_pwm_remove), 146 .remove = __devexit_p(ab8500_pwm_remove),
152}; 147};
148module_platform_driver(ab8500_pwm_driver);
153 149
154static int __init ab8500_pwm_init(void)
155{
156 return platform_driver_register(&ab8500_pwm_driver);
157}
158
159static void __exit ab8500_pwm_exit(void)
160{
161 platform_driver_unregister(&ab8500_pwm_driver);
162}
163
164subsys_initcall(ab8500_pwm_init);
165module_exit(ab8500_pwm_exit);
166MODULE_AUTHOR("Arun MURTHY <arun.murthy@stericsson.com>"); 150MODULE_AUTHOR("Arun MURTHY <arun.murthy@stericsson.com>");
167MODULE_DESCRIPTION("AB8500 Pulse Width Modulation Driver"); 151MODULE_DESCRIPTION("AB8500 Pulse Width Modulation Driver");
168MODULE_ALIAS("platform:ab8500-pwm"); 152MODULE_ALIAS("platform:ab8500-pwm");
diff --git a/drivers/pwm/pwm-bfin.c b/drivers/pwm/pwm-bfin.c
index d53c4e7941ef..5da8e185e838 100644
--- a/drivers/pwm/pwm-bfin.c
+++ b/drivers/pwm/pwm-bfin.c
@@ -69,9 +69,6 @@ static int bfin_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
69 unsigned long period, duty; 69 unsigned long period, duty;
70 unsigned long long val; 70 unsigned long long val;
71 71
72 if (duty_ns < 0 || duty_ns > period_ns)
73 return -EINVAL;
74
75 val = (unsigned long long)get_sclk() * period_ns; 72 val = (unsigned long long)get_sclk() * period_ns;
76 do_div(val, NSEC_PER_SEC); 73 do_div(val, NSEC_PER_SEC);
77 period = val; 74 period = val;
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c
index 2a0b35333972..8a5d3ae2946a 100644
--- a/drivers/pwm/pwm-imx.c
+++ b/drivers/pwm/pwm-imx.c
@@ -16,8 +16,7 @@
16#include <linux/clk.h> 16#include <linux/clk.h>
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/pwm.h> 18#include <linux/pwm.h>
19#include <mach/hardware.h> 19#include <linux/of_device.h>
20
21 20
22/* i.MX1 and i.MX21 share the same PWM function block: */ 21/* i.MX1 and i.MX21 share the same PWM function block: */
23 22
@@ -25,6 +24,7 @@
25#define MX1_PWMS 0x04 /* PWM Sample Register */ 24#define MX1_PWMS 0x04 /* PWM Sample Register */
26#define MX1_PWMP 0x08 /* PWM Period Register */ 25#define MX1_PWMP 0x08 /* PWM Period Register */
27 26
27#define MX1_PWMC_EN (1 << 4)
28 28
29/* i.MX27, i.MX31, i.MX35 share the same PWM function block: */ 29/* i.MX27, i.MX31, i.MX35 share the same PWM function block: */
30 30
@@ -40,110 +40,165 @@
40#define MX3_PWMCR_EN (1 << 0) 40#define MX3_PWMCR_EN (1 << 0)
41 41
42struct imx_chip { 42struct imx_chip {
43 struct clk *clk; 43 struct clk *clk_per;
44 struct clk *clk_ipg;
44 45
45 int clk_enabled; 46 int enabled;
46 void __iomem *mmio_base; 47 void __iomem *mmio_base;
47 48
48 struct pwm_chip chip; 49 struct pwm_chip chip;
50
51 int (*config)(struct pwm_chip *chip,
52 struct pwm_device *pwm, int duty_ns, int period_ns);
53 void (*set_enable)(struct pwm_chip *chip, bool enable);
49}; 54};
50 55
51#define to_imx_chip(chip) container_of(chip, struct imx_chip, chip) 56#define to_imx_chip(chip) container_of(chip, struct imx_chip, chip)
52 57
53static int imx_pwm_config(struct pwm_chip *chip, 58static int imx_pwm_config_v1(struct pwm_chip *chip,
54 struct pwm_device *pwm, int duty_ns, int period_ns) 59 struct pwm_device *pwm, int duty_ns, int period_ns)
55{ 60{
56 struct imx_chip *imx = to_imx_chip(chip); 61 struct imx_chip *imx = to_imx_chip(chip);
57 62
58 if (!(cpu_is_mx1() || cpu_is_mx21())) { 63 /*
59 unsigned long long c; 64 * The PWM subsystem allows for exact frequencies. However,
60 unsigned long period_cycles, duty_cycles, prescale; 65 * I cannot connect a scope on my device to the PWM line and
61 u32 cr; 66 * thus cannot provide the program the PWM controller
62 67 * exactly. Instead, I'm relying on the fact that the
63 c = clk_get_rate(imx->clk); 68 * Bootloader (u-boot or WinCE+haret) has programmed the PWM
64 c = c * period_ns; 69 * function group already. So I'll just modify the PWM sample
65 do_div(c, 1000000000); 70 * register to follow the ratio of duty_ns vs. period_ns
66 period_cycles = c; 71 * accordingly.
67 72 *
68 prescale = period_cycles / 0x10000 + 1; 73 * This is good enough for programming the brightness of
69 74 * the LCD backlight.
70 period_cycles /= prescale; 75 *
71 c = (unsigned long long)period_cycles * duty_ns; 76 * The real implementation would divide PERCLK[0] first by
72 do_div(c, period_ns); 77 * both the prescaler (/1 .. /128) and then by CLKSEL
73 duty_cycles = c; 78 * (/2 .. /16).
74 79 */
75 /* 80 u32 max = readl(imx->mmio_base + MX1_PWMP);
76 * according to imx pwm RM, the real period value should be 81 u32 p = max * duty_ns / period_ns;
77 * PERIOD value in PWMPR plus 2. 82 writel(max - p, imx->mmio_base + MX1_PWMS);
78 */
79 if (period_cycles > 2)
80 period_cycles -= 2;
81 else
82 period_cycles = 0;
83
84 writel(duty_cycles, imx->mmio_base + MX3_PWMSAR);
85 writel(period_cycles, imx->mmio_base + MX3_PWMPR);
86
87 cr = MX3_PWMCR_PRESCALER(prescale) |
88 MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
89 MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
90
91 if (cpu_is_mx25())
92 cr |= MX3_PWMCR_CLKSRC_IPG;
93 else
94 cr |= MX3_PWMCR_CLKSRC_IPG_HIGH;
95
96 writel(cr, imx->mmio_base + MX3_PWMCR);
97 } else if (cpu_is_mx1() || cpu_is_mx21()) {
98 /* The PWM subsystem allows for exact frequencies. However,
99 * I cannot connect a scope on my device to the PWM line and
100 * thus cannot provide the program the PWM controller
101 * exactly. Instead, I'm relying on the fact that the
102 * Bootloader (u-boot or WinCE+haret) has programmed the PWM
103 * function group already. So I'll just modify the PWM sample
104 * register to follow the ratio of duty_ns vs. period_ns
105 * accordingly.
106 *
107 * This is good enough for programming the brightness of
108 * the LCD backlight.
109 *
110 * The real implementation would divide PERCLK[0] first by
111 * both the prescaler (/1 .. /128) and then by CLKSEL
112 * (/2 .. /16).
113 */
114 u32 max = readl(imx->mmio_base + MX1_PWMP);
115 u32 p = max * duty_ns / period_ns;
116 writel(max - p, imx->mmio_base + MX1_PWMS);
117 } else {
118 BUG();
119 }
120 83
121 return 0; 84 return 0;
122} 85}
123 86
87static void imx_pwm_set_enable_v1(struct pwm_chip *chip, bool enable)
88{
89 struct imx_chip *imx = to_imx_chip(chip);
90 u32 val;
91
92 val = readl(imx->mmio_base + MX1_PWMC);
93
94 if (enable)
95 val |= MX1_PWMC_EN;
96 else
97 val &= ~MX1_PWMC_EN;
98
99 writel(val, imx->mmio_base + MX1_PWMC);
100}
101
102static int imx_pwm_config_v2(struct pwm_chip *chip,
103 struct pwm_device *pwm, int duty_ns, int period_ns)
104{
105 struct imx_chip *imx = to_imx_chip(chip);
106 unsigned long long c;
107 unsigned long period_cycles, duty_cycles, prescale;
108 u32 cr;
109
110 c = clk_get_rate(imx->clk_per);
111 c = c * period_ns;
112 do_div(c, 1000000000);
113 period_cycles = c;
114
115 prescale = period_cycles / 0x10000 + 1;
116
117 period_cycles /= prescale;
118 c = (unsigned long long)period_cycles * duty_ns;
119 do_div(c, period_ns);
120 duty_cycles = c;
121
122 /*
123 * according to imx pwm RM, the real period value should be
124 * PERIOD value in PWMPR plus 2.
125 */
126 if (period_cycles > 2)
127 period_cycles -= 2;
128 else
129 period_cycles = 0;
130
131 writel(duty_cycles, imx->mmio_base + MX3_PWMSAR);
132 writel(period_cycles, imx->mmio_base + MX3_PWMPR);
133
134 cr = MX3_PWMCR_PRESCALER(prescale) |
135 MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
136 MX3_PWMCR_DBGEN | MX3_PWMCR_CLKSRC_IPG_HIGH;
137
138 if (imx->enabled)
139 cr |= MX3_PWMCR_EN;
140
141 writel(cr, imx->mmio_base + MX3_PWMCR);
142
143 return 0;
144}
145
146static void imx_pwm_set_enable_v2(struct pwm_chip *chip, bool enable)
147{
148 struct imx_chip *imx = to_imx_chip(chip);
149 u32 val;
150
151 val = readl(imx->mmio_base + MX3_PWMCR);
152
153 if (enable)
154 val |= MX3_PWMCR_EN;
155 else
156 val &= ~MX3_PWMCR_EN;
157
158 writel(val, imx->mmio_base + MX3_PWMCR);
159}
160
161static int imx_pwm_config(struct pwm_chip *chip,
162 struct pwm_device *pwm, int duty_ns, int period_ns)
163{
164 struct imx_chip *imx = to_imx_chip(chip);
165 int ret;
166
167 ret = clk_prepare_enable(imx->clk_ipg);
168 if (ret)
169 return ret;
170
171 ret = imx->config(chip, pwm, duty_ns, period_ns);
172
173 clk_disable_unprepare(imx->clk_ipg);
174
175 return ret;
176}
177
124static int imx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) 178static int imx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
125{ 179{
126 struct imx_chip *imx = to_imx_chip(chip); 180 struct imx_chip *imx = to_imx_chip(chip);
127 int rc = 0; 181 int ret;
128 182
129 if (!imx->clk_enabled) { 183 ret = clk_prepare_enable(imx->clk_per);
130 rc = clk_prepare_enable(imx->clk); 184 if (ret)
131 if (!rc) 185 return ret;
132 imx->clk_enabled = 1; 186
133 } 187 imx->set_enable(chip, true);
134 return rc; 188
189 imx->enabled = 1;
190
191 return 0;
135} 192}
136 193
137static void imx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) 194static void imx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
138{ 195{
139 struct imx_chip *imx = to_imx_chip(chip); 196 struct imx_chip *imx = to_imx_chip(chip);
140 197
141 writel(0, imx->mmio_base + MX3_PWMCR); 198 imx->set_enable(chip, false);
142 199
143 if (imx->clk_enabled) { 200 clk_disable_unprepare(imx->clk_per);
144 clk_disable_unprepare(imx->clk); 201 imx->enabled = 0;
145 imx->clk_enabled = 0;
146 }
147} 202}
148 203
149static struct pwm_ops imx_pwm_ops = { 204static struct pwm_ops imx_pwm_ops = {
@@ -153,30 +208,66 @@ static struct pwm_ops imx_pwm_ops = {
153 .owner = THIS_MODULE, 208 .owner = THIS_MODULE,
154}; 209};
155 210
211struct imx_pwm_data {
212 int (*config)(struct pwm_chip *chip,
213 struct pwm_device *pwm, int duty_ns, int period_ns);
214 void (*set_enable)(struct pwm_chip *chip, bool enable);
215};
216
217static struct imx_pwm_data imx_pwm_data_v1 = {
218 .config = imx_pwm_config_v1,
219 .set_enable = imx_pwm_set_enable_v1,
220};
221
222static struct imx_pwm_data imx_pwm_data_v2 = {
223 .config = imx_pwm_config_v2,
224 .set_enable = imx_pwm_set_enable_v2,
225};
226
227static const struct of_device_id imx_pwm_dt_ids[] = {
228 { .compatible = "fsl,imx1-pwm", .data = &imx_pwm_data_v1, },
229 { .compatible = "fsl,imx27-pwm", .data = &imx_pwm_data_v2, },
230 { /* sentinel */ }
231};
232MODULE_DEVICE_TABLE(of, imx_pwm_dt_ids);
233
156static int __devinit imx_pwm_probe(struct platform_device *pdev) 234static int __devinit imx_pwm_probe(struct platform_device *pdev)
157{ 235{
236 const struct of_device_id *of_id =
237 of_match_device(imx_pwm_dt_ids, &pdev->dev);
238 struct imx_pwm_data *data;
158 struct imx_chip *imx; 239 struct imx_chip *imx;
159 struct resource *r; 240 struct resource *r;
160 int ret = 0; 241 int ret = 0;
161 242
243 if (!of_id)
244 return -ENODEV;
245
162 imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL); 246 imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL);
163 if (imx == NULL) { 247 if (imx == NULL) {
164 dev_err(&pdev->dev, "failed to allocate memory\n"); 248 dev_err(&pdev->dev, "failed to allocate memory\n");
165 return -ENOMEM; 249 return -ENOMEM;
166 } 250 }
167 251
168 imx->clk = devm_clk_get(&pdev->dev, "pwm"); 252 imx->clk_per = devm_clk_get(&pdev->dev, "per");
253 if (IS_ERR(imx->clk_per)) {
254 dev_err(&pdev->dev, "getting per clock failed with %ld\n",
255 PTR_ERR(imx->clk_per));
256 return PTR_ERR(imx->clk_per);
257 }
169 258
170 if (IS_ERR(imx->clk)) 259 imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
171 return PTR_ERR(imx->clk); 260 if (IS_ERR(imx->clk_ipg)) {
261 dev_err(&pdev->dev, "getting ipg clock failed with %ld\n",
262 PTR_ERR(imx->clk_ipg));
263 return PTR_ERR(imx->clk_ipg);
264 }
172 265
173 imx->chip.ops = &imx_pwm_ops; 266 imx->chip.ops = &imx_pwm_ops;
174 imx->chip.dev = &pdev->dev; 267 imx->chip.dev = &pdev->dev;
175 imx->chip.base = -1; 268 imx->chip.base = -1;
176 imx->chip.npwm = 1; 269 imx->chip.npwm = 1;
177 270
178 imx->clk_enabled = 0;
179
180 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 271 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
181 if (r == NULL) { 272 if (r == NULL) {
182 dev_err(&pdev->dev, "no memory resource defined\n"); 273 dev_err(&pdev->dev, "no memory resource defined\n");
@@ -187,6 +278,10 @@ static int __devinit imx_pwm_probe(struct platform_device *pdev)
187 if (imx->mmio_base == NULL) 278 if (imx->mmio_base == NULL)
188 return -EADDRNOTAVAIL; 279 return -EADDRNOTAVAIL;
189 280
281 data = of_id->data;
282 imx->config = data->config;
283 imx->set_enable = data->set_enable;
284
190 ret = pwmchip_add(&imx->chip); 285 ret = pwmchip_add(&imx->chip);
191 if (ret < 0) 286 if (ret < 0)
192 return ret; 287 return ret;
@@ -208,23 +303,14 @@ static int __devexit imx_pwm_remove(struct platform_device *pdev)
208 303
209static struct platform_driver imx_pwm_driver = { 304static struct platform_driver imx_pwm_driver = {
210 .driver = { 305 .driver = {
211 .name = "mxc_pwm", 306 .name = "imx-pwm",
307 .of_match_table = of_match_ptr(imx_pwm_dt_ids),
212 }, 308 },
213 .probe = imx_pwm_probe, 309 .probe = imx_pwm_probe,
214 .remove = __devexit_p(imx_pwm_remove), 310 .remove = __devexit_p(imx_pwm_remove),
215}; 311};
216 312
217static int __init imx_pwm_init(void) 313module_platform_driver(imx_pwm_driver);
218{
219 return platform_driver_register(&imx_pwm_driver);
220}
221arch_initcall(imx_pwm_init);
222
223static void __exit imx_pwm_exit(void)
224{
225 platform_driver_unregister(&imx_pwm_driver);
226}
227module_exit(imx_pwm_exit);
228 314
229MODULE_LICENSE("GPL v2"); 315MODULE_LICENSE("GPL v2");
230MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); 316MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
new file mode 100644
index 000000000000..10250fcefb98
--- /dev/null
+++ b/drivers/pwm/pwm-jz4740.c
@@ -0,0 +1,221 @@
1/*
2 * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
3 * JZ4740 platform PWM support
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * You should have received a copy of the GNU General Public License along
11 * with this program; if not, write to the Free Software Foundation, Inc.,
12 * 675 Mass Ave, Cambridge, MA 02139, USA.
13 *
14 */
15
16#include <linux/clk.h>
17#include <linux/err.h>
18#include <linux/gpio.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/platform_device.h>
22#include <linux/pwm.h>
23
24#include <asm/mach-jz4740/gpio.h>
25#include <asm/mach-jz4740/timer.h>
26
27#define NUM_PWM 8
28
29static const unsigned int jz4740_pwm_gpio_list[NUM_PWM] = {
30 JZ_GPIO_PWM0,
31 JZ_GPIO_PWM1,
32 JZ_GPIO_PWM2,
33 JZ_GPIO_PWM3,
34 JZ_GPIO_PWM4,
35 JZ_GPIO_PWM5,
36 JZ_GPIO_PWM6,
37 JZ_GPIO_PWM7,
38};
39
40struct jz4740_pwm_chip {
41 struct pwm_chip chip;
42 struct clk *clk;
43};
44
45static inline struct jz4740_pwm_chip *to_jz4740(struct pwm_chip *chip)
46{
47 return container_of(chip, struct jz4740_pwm_chip, chip);
48}
49
50static int jz4740_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
51{
52 unsigned int gpio = jz4740_pwm_gpio_list[pwm->hwpwm];
53 int ret;
54
55 /*
56 * Timers 0 and 1 are used for system tasks, so they are unavailable
57 * for use as PWMs.
58 */
59 if (pwm->hwpwm < 2)
60 return -EBUSY;
61
62 ret = gpio_request(gpio, pwm->label);
63 if (ret) {
64 dev_err(chip->dev, "Failed to request GPIO#%u for PWM: %d\n",
65 gpio, ret);
66 return ret;
67 }
68
69 jz_gpio_set_function(gpio, JZ_GPIO_FUNC_PWM);
70
71 jz4740_timer_start(pwm->hwpwm);
72
73 return 0;
74}
75
76static void jz4740_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
77{
78 unsigned int gpio = jz4740_pwm_gpio_list[pwm->hwpwm];
79
80 jz4740_timer_set_ctrl(pwm->hwpwm, 0);
81
82 jz_gpio_set_function(gpio, JZ_GPIO_FUNC_NONE);
83 gpio_free(gpio);
84
85 jz4740_timer_stop(pwm->hwpwm);
86}
87
88static int jz4740_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
89{
90 uint32_t ctrl = jz4740_timer_get_ctrl(pwm->pwm);
91
92 ctrl |= JZ_TIMER_CTRL_PWM_ENABLE;
93 jz4740_timer_set_ctrl(pwm->hwpwm, ctrl);
94 jz4740_timer_enable(pwm->hwpwm);
95
96 return 0;
97}
98
99static void jz4740_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
100{
101 uint32_t ctrl = jz4740_timer_get_ctrl(pwm->hwpwm);
102
103 ctrl &= ~JZ_TIMER_CTRL_PWM_ENABLE;
104 jz4740_timer_disable(pwm->hwpwm);
105 jz4740_timer_set_ctrl(pwm->hwpwm, ctrl);
106}
107
108static int jz4740_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
109 int duty_ns, int period_ns)
110{
111 struct jz4740_pwm_chip *jz4740 = to_jz4740(pwm->chip);
112 unsigned long long tmp;
113 unsigned long period, duty;
114 unsigned int prescaler = 0;
115 uint16_t ctrl;
116 bool is_enabled;
117
118 tmp = (unsigned long long)clk_get_rate(jz4740->clk) * period_ns;
119 do_div(tmp, 1000000000);
120 period = tmp;
121
122 while (period > 0xffff && prescaler < 6) {
123 period >>= 2;
124 ++prescaler;
125 }
126
127 if (prescaler == 6)
128 return -EINVAL;
129
130 tmp = (unsigned long long)period * duty_ns;
131 do_div(tmp, period_ns);
132 duty = period - tmp;
133
134 if (duty >= period)
135 duty = period - 1;
136
137 is_enabled = jz4740_timer_is_enabled(pwm->hwpwm);
138 if (is_enabled)
139 jz4740_pwm_disable(chip, pwm);
140
141 jz4740_timer_set_count(pwm->hwpwm, 0);
142 jz4740_timer_set_duty(pwm->hwpwm, duty);
143 jz4740_timer_set_period(pwm->hwpwm, period);
144
145 ctrl = JZ_TIMER_CTRL_PRESCALER(prescaler) | JZ_TIMER_CTRL_SRC_EXT |
146 JZ_TIMER_CTRL_PWM_ABBRUPT_SHUTDOWN;
147
148 jz4740_timer_set_ctrl(pwm->hwpwm, ctrl);
149
150 if (is_enabled)
151 jz4740_pwm_enable(chip, pwm);
152
153 return 0;
154}
155
156static const struct pwm_ops jz4740_pwm_ops = {
157 .request = jz4740_pwm_request,
158 .free = jz4740_pwm_free,
159 .config = jz4740_pwm_config,
160 .enable = jz4740_pwm_enable,
161 .disable = jz4740_pwm_disable,
162 .owner = THIS_MODULE,
163};
164
165static int __devinit jz4740_pwm_probe(struct platform_device *pdev)
166{
167 struct jz4740_pwm_chip *jz4740;
168 int ret;
169
170 jz4740 = devm_kzalloc(&pdev->dev, sizeof(*jz4740), GFP_KERNEL);
171 if (!jz4740)
172 return -ENOMEM;
173
174 jz4740->clk = clk_get(NULL, "ext");
175 if (IS_ERR(jz4740->clk))
176 return PTR_ERR(jz4740->clk);
177
178 jz4740->chip.dev = &pdev->dev;
179 jz4740->chip.ops = &jz4740_pwm_ops;
180 jz4740->chip.npwm = NUM_PWM;
181 jz4740->chip.base = -1;
182
183 ret = pwmchip_add(&jz4740->chip);
184 if (ret < 0) {
185 clk_put(jz4740->clk);
186 return ret;
187 }
188
189 platform_set_drvdata(pdev, jz4740);
190
191 return 0;
192}
193
194static int __devexit jz4740_pwm_remove(struct platform_device *pdev)
195{
196 struct jz4740_pwm_chip *jz4740 = platform_get_drvdata(pdev);
197 int ret;
198
199 ret = pwmchip_remove(&jz4740->chip);
200 if (ret < 0)
201 return ret;
202
203 clk_put(jz4740->clk);
204
205 return 0;
206}
207
208static struct platform_driver jz4740_pwm_driver = {
209 .driver = {
210 .name = "jz4740-pwm",
211 .owner = THIS_MODULE,
212 },
213 .probe = jz4740_pwm_probe,
214 .remove = __devexit_p(jz4740_pwm_remove),
215};
216module_platform_driver(jz4740_pwm_driver);
217
218MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
219MODULE_DESCRIPTION("Ingenic JZ4740 PWM driver");
220MODULE_ALIAS("platform:jz4740-pwm");
221MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-puv3.c b/drivers/pwm/pwm-puv3.c
new file mode 100644
index 000000000000..2a93f37c46ad
--- /dev/null
+++ b/drivers/pwm/pwm-puv3.c
@@ -0,0 +1,161 @@
1/*
2 * linux/arch/unicore32/kernel/pwm.c
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 *
6 * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
7 * Copyright (C) 2001-2010 Guan Xuetao
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/platform_device.h>
17#include <linux/slab.h>
18#include <linux/err.h>
19#include <linux/clk.h>
20#include <linux/io.h>
21#include <linux/pwm.h>
22
23#include <asm/div64.h>
24#include <mach/hardware.h>
25
26struct puv3_pwm_chip {
27 struct pwm_chip chip;
28 void __iomem *base;
29 struct clk *clk;
30 bool enabled;
31};
32
33static inline struct puv3_pwm_chip *to_puv3(struct pwm_chip *chip)
34{
35 return container_of(chip, struct puv3_pwm_chip, chip);
36}
37
38/*
39 * period_ns = 10^9 * (PRESCALE + 1) * (PV + 1) / PWM_CLK_RATE
40 * duty_ns = 10^9 * (PRESCALE + 1) * DC / PWM_CLK_RATE
41 */
42static int puv3_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
43 int duty_ns, int period_ns)
44{
45 unsigned long period_cycles, prescale, pv, dc;
46 struct puv3_pwm_chip *puv3 = to_puv3(chip);
47 unsigned long long c;
48
49 c = clk_get_rate(puv3->clk);
50 c = c * period_ns;
51 do_div(c, 1000000000);
52 period_cycles = c;
53
54 if (period_cycles < 1)
55 period_cycles = 1;
56
57 prescale = (period_cycles - 1) / 1024;
58 pv = period_cycles / (prescale + 1) - 1;
59
60 if (prescale > 63)
61 return -EINVAL;
62
63 if (duty_ns == period_ns)
64 dc = OST_PWMDCCR_FDCYCLE;
65 else
66 dc = (pv + 1) * duty_ns / period_ns;
67
68 /*
69 * NOTE: the clock to PWM has to be enabled first
70 * before writing to the registers
71 */
72 clk_prepare_enable(puv3->clk);
73
74 writel(prescale, puv3->base + OST_PWM_PWCR);
75 writel(pv - dc, puv3->base + OST_PWM_DCCR);
76 writel(pv, puv3->base + OST_PWM_PCR);
77
78 clk_disable_unprepare(puv3->clk);
79
80 return 0;
81}
82
83static int puv3_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
84{
85 struct puv3_pwm_chip *puv3 = to_puv3(chip);
86
87 return clk_prepare_enable(puv3->clk);
88}
89
90static void puv3_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
91{
92 struct puv3_pwm_chip *puv3 = to_puv3(chip);
93
94 clk_disable_unprepare(puv3->clk);
95}
96
97static const struct pwm_ops puv3_pwm_ops = {
98 .config = puv3_pwm_config,
99 .enable = puv3_pwm_enable,
100 .disable = puv3_pwm_disable,
101 .owner = THIS_MODULE,
102};
103
104static int __devinit pwm_probe(struct platform_device *pdev)
105{
106 struct puv3_pwm_chip *puv3;
107 struct resource *r;
108 int ret;
109
110 puv3 = devm_kzalloc(&pdev->dev, sizeof(*puv3), GFP_KERNEL);
111 if (puv3 == NULL) {
112 dev_err(&pdev->dev, "failed to allocate memory\n");
113 return -ENOMEM;
114 }
115
116 puv3->clk = devm_clk_get(&pdev->dev, "OST_CLK");
117 if (IS_ERR(puv3->clk))
118 return PTR_ERR(puv3->clk);
119
120 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
121 if (r == NULL) {
122 dev_err(&pdev->dev, "no memory resource defined\n");
123 return -ENODEV;
124 }
125
126 puv3->base = devm_request_and_ioremap(&pdev->dev, r);
127 if (puv3->base == NULL)
128 return -EADDRNOTAVAIL;
129
130 puv3->chip.dev = &pdev->dev;
131 puv3->chip.ops = &puv3_pwm_ops;
132 puv3->chip.base = -1;
133 puv3->chip.npwm = 1;
134
135 ret = pwmchip_add(&puv3->chip);
136 if (ret < 0) {
137 dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
138 return ret;
139 }
140
141 platform_set_drvdata(pdev, puv3);
142 return 0;
143}
144
145static int __devexit pwm_remove(struct platform_device *pdev)
146{
147 struct puv3_pwm_chip *puv3 = platform_get_drvdata(pdev);
148
149 return pwmchip_remove(&puv3->chip);
150}
151
152static struct platform_driver puv3_pwm_driver = {
153 .driver = {
154 .name = "PKUnity-v3-PWM",
155 },
156 .probe = pwm_probe,
157 .remove = __devexit_p(pwm_remove),
158};
159module_platform_driver(puv3_pwm_driver);
160
161MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index bd5867a1c700..260c3a88564d 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -70,9 +70,6 @@ static int pxa_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
70 unsigned long offset; 70 unsigned long offset;
71 int rc; 71 int rc;
72 72
73 if (period_ns == 0 || duty_ns > period_ns)
74 return -EINVAL;
75
76 offset = pwm->hwpwm ? 0x10 : 0; 73 offset = pwm->hwpwm ? 0x10 : 0;
77 74
78 c = clk_get_rate(pc->clk); 75 c = clk_get_rate(pc->clk);
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index e5187c0ade9f..023a3bee76e7 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -126,9 +126,6 @@ static int s3c_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
126 if (period_ns > NS_IN_HZ || duty_ns > NS_IN_HZ) 126 if (period_ns > NS_IN_HZ || duty_ns > NS_IN_HZ)
127 return -ERANGE; 127 return -ERANGE;
128 128
129 if (duty_ns > period_ns)
130 return -EINVAL;
131
132 if (period_ns == s3c->period_ns && 129 if (period_ns == s3c->period_ns &&
133 duty_ns == s3c->duty_ns) 130 duty_ns == s3c->duty_ns)
134 return 0; 131 return 0;
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 4b6688909fee..d6d4cf05565e 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -32,6 +32,7 @@
32#define CAP3 0x10 32#define CAP3 0x10
33#define CAP4 0x14 33#define CAP4 0x14
34#define ECCTL2 0x2A 34#define ECCTL2 0x2A
35#define ECCTL2_APWM_POL_LOW BIT(10)
35#define ECCTL2_APWM_MODE BIT(9) 36#define ECCTL2_APWM_MODE BIT(9)
36#define ECCTL2_SYNC_SEL_DISA (BIT(7) | BIT(6)) 37#define ECCTL2_SYNC_SEL_DISA (BIT(7) | BIT(6))
37#define ECCTL2_TSCTR_FREERUN BIT(4) 38#define ECCTL2_TSCTR_FREERUN BIT(4)
@@ -59,7 +60,7 @@ static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
59 unsigned long period_cycles, duty_cycles; 60 unsigned long period_cycles, duty_cycles;
60 unsigned int reg_val; 61 unsigned int reg_val;
61 62
62 if (period_ns < 0 || duty_ns < 0 || period_ns > NSEC_PER_SEC) 63 if (period_ns > NSEC_PER_SEC)
63 return -ERANGE; 64 return -ERANGE;
64 65
65 c = pc->clk_rate; 66 c = pc->clk_rate;
@@ -111,6 +112,26 @@ static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
111 return 0; 112 return 0;
112} 113}
113 114
115static int ecap_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
116 enum pwm_polarity polarity)
117{
118 struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip);
119 unsigned short reg_val;
120
121 pm_runtime_get_sync(pc->chip.dev);
122 reg_val = readw(pc->mmio_base + ECCTL2);
123 if (polarity == PWM_POLARITY_INVERSED)
124 /* Duty cycle defines LOW period of PWM */
125 reg_val |= ECCTL2_APWM_POL_LOW;
126 else
127 /* Duty cycle defines HIGH period of PWM */
128 reg_val &= ~ECCTL2_APWM_POL_LOW;
129
130 writew(reg_val, pc->mmio_base + ECCTL2);
131 pm_runtime_put_sync(pc->chip.dev);
132 return 0;
133}
134
114static int ecap_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) 135static int ecap_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
115{ 136{
116 struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip); 137 struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip);
@@ -157,6 +178,7 @@ static void ecap_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
157static const struct pwm_ops ecap_pwm_ops = { 178static const struct pwm_ops ecap_pwm_ops = {
158 .free = ecap_pwm_free, 179 .free = ecap_pwm_free,
159 .config = ecap_pwm_config, 180 .config = ecap_pwm_config,
181 .set_polarity = ecap_pwm_set_polarity,
160 .enable = ecap_pwm_enable, 182 .enable = ecap_pwm_enable,
161 .disable = ecap_pwm_disable, 183 .disable = ecap_pwm_disable,
162 .owner = THIS_MODULE, 184 .owner = THIS_MODULE,
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index b1996bcd5b78..d3c1dff0a0dc 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -81,6 +81,15 @@
81#define AQCTL_ZRO_FRCHIGH BIT(1) 81#define AQCTL_ZRO_FRCHIGH BIT(1)
82#define AQCTL_ZRO_FRCTOGGLE (BIT(1) | BIT(0)) 82#define AQCTL_ZRO_FRCTOGGLE (BIT(1) | BIT(0))
83 83
84#define AQCTL_CHANA_POLNORMAL (AQCTL_CAU_FRCLOW | AQCTL_PRD_FRCHIGH | \
85 AQCTL_ZRO_FRCHIGH)
86#define AQCTL_CHANA_POLINVERSED (AQCTL_CAU_FRCHIGH | AQCTL_PRD_FRCLOW | \
87 AQCTL_ZRO_FRCLOW)
88#define AQCTL_CHANB_POLNORMAL (AQCTL_CBU_FRCLOW | AQCTL_PRD_FRCHIGH | \
89 AQCTL_ZRO_FRCHIGH)
90#define AQCTL_CHANB_POLINVERSED (AQCTL_CBU_FRCHIGH | AQCTL_PRD_FRCLOW | \
91 AQCTL_ZRO_FRCLOW)
92
84#define AQSFRC_RLDCSF_MASK (BIT(7) | BIT(6)) 93#define AQSFRC_RLDCSF_MASK (BIT(7) | BIT(6))
85#define AQSFRC_RLDCSF_ZRO 0 94#define AQSFRC_RLDCSF_ZRO 0
86#define AQSFRC_RLDCSF_PRD BIT(6) 95#define AQSFRC_RLDCSF_PRD BIT(6)
@@ -105,6 +114,7 @@ struct ehrpwm_pwm_chip {
105 unsigned int clk_rate; 114 unsigned int clk_rate;
106 void __iomem *mmio_base; 115 void __iomem *mmio_base;
107 unsigned long period_cycles[NUM_PWM_CHANNEL]; 116 unsigned long period_cycles[NUM_PWM_CHANNEL];
117 enum pwm_polarity polarity[NUM_PWM_CHANNEL];
108}; 118};
109 119
110static inline struct ehrpwm_pwm_chip *to_ehrpwm_pwm_chip(struct pwm_chip *chip) 120static inline struct ehrpwm_pwm_chip *to_ehrpwm_pwm_chip(struct pwm_chip *chip)
@@ -165,39 +175,37 @@ static int set_prescale_div(unsigned long rqst_prescaler,
165 return 1; 175 return 1;
166} 176}
167 177
168static void configure_chans(struct ehrpwm_pwm_chip *pc, int chan, 178static void configure_polarity(struct ehrpwm_pwm_chip *pc, int chan)
169 unsigned long duty_cycles)
170{ 179{
171 int cmp_reg, aqctl_reg; 180 int aqctl_reg;
172 unsigned short aqctl_val, aqctl_mask; 181 unsigned short aqctl_val, aqctl_mask;
173 182
174 /* 183 /*
175 * Channels can be configured from action qualifier module. 184 * Configure PWM output to HIGH/LOW level on counter
176 * Channel 0 configured with compare A register and for 185 * reaches compare register value and LOW/HIGH level
177 * up-counter mode. 186 * on counter value reaches period register value and
178 * Channel 1 configured with compare B register and for 187 * zero value on counter
179 * up-counter mode.
180 */ 188 */
181 if (chan == 1) { 189 if (chan == 1) {
182 aqctl_reg = AQCTLB; 190 aqctl_reg = AQCTLB;
183 cmp_reg = CMPB;
184 /* Configure PWM Low from compare B value */
185 aqctl_val = AQCTL_CBU_FRCLOW;
186 aqctl_mask = AQCTL_CBU_MASK; 191 aqctl_mask = AQCTL_CBU_MASK;
192
193 if (pc->polarity[chan] == PWM_POLARITY_INVERSED)
194 aqctl_val = AQCTL_CHANB_POLINVERSED;
195 else
196 aqctl_val = AQCTL_CHANB_POLNORMAL;
187 } else { 197 } else {
188 cmp_reg = CMPA;
189 aqctl_reg = AQCTLA; 198 aqctl_reg = AQCTLA;
190 /* Configure PWM Low from compare A value*/
191 aqctl_val = AQCTL_CAU_FRCLOW;
192 aqctl_mask = AQCTL_CAU_MASK; 199 aqctl_mask = AQCTL_CAU_MASK;
200
201 if (pc->polarity[chan] == PWM_POLARITY_INVERSED)
202 aqctl_val = AQCTL_CHANA_POLINVERSED;
203 else
204 aqctl_val = AQCTL_CHANA_POLNORMAL;
193 } 205 }
194 206
195 /* Configure PWM High from period value and zero value */
196 aqctl_val |= AQCTL_PRD_FRCHIGH | AQCTL_ZRO_FRCHIGH;
197 aqctl_mask |= AQCTL_PRD_MASK | AQCTL_ZRO_MASK; 207 aqctl_mask |= AQCTL_PRD_MASK | AQCTL_ZRO_MASK;
198 ehrpwm_modify(pc->mmio_base, aqctl_reg, aqctl_mask, aqctl_val); 208 ehrpwm_modify(pc->mmio_base, aqctl_reg, aqctl_mask, aqctl_val);
199
200 ehrpwm_write(pc->mmio_base, cmp_reg, duty_cycles);
201} 209}
202 210
203/* 211/*
@@ -211,9 +219,9 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
211 unsigned long long c; 219 unsigned long long c;
212 unsigned long period_cycles, duty_cycles; 220 unsigned long period_cycles, duty_cycles;
213 unsigned short ps_divval, tb_divval; 221 unsigned short ps_divval, tb_divval;
214 int i; 222 int i, cmp_reg;
215 223
216 if (period_ns < 0 || duty_ns < 0 || period_ns > NSEC_PER_SEC) 224 if (period_ns > NSEC_PER_SEC)
217 return -ERANGE; 225 return -ERANGE;
218 226
219 c = pc->clk_rate; 227 c = pc->clk_rate;
@@ -278,12 +286,29 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
278 ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CTRMODE_MASK, 286 ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CTRMODE_MASK,
279 TBCTL_CTRMODE_UP); 287 TBCTL_CTRMODE_UP);
280 288
281 /* Configure the channel for duty cycle */ 289 if (pwm->hwpwm == 1)
282 configure_chans(pc, pwm->hwpwm, duty_cycles); 290 /* Channel 1 configured with compare B register */
291 cmp_reg = CMPB;
292 else
293 /* Channel 0 configured with compare A register */
294 cmp_reg = CMPA;
295
296 ehrpwm_write(pc->mmio_base, cmp_reg, duty_cycles);
297
283 pm_runtime_put_sync(chip->dev); 298 pm_runtime_put_sync(chip->dev);
284 return 0; 299 return 0;
285} 300}
286 301
302static int ehrpwm_pwm_set_polarity(struct pwm_chip *chip,
303 struct pwm_device *pwm, enum pwm_polarity polarity)
304{
305 struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
306
307 /* Configuration of polarity in hardware delayed, do at enable */
308 pc->polarity[pwm->hwpwm] = polarity;
309 return 0;
310}
311
287static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) 312static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
288{ 313{
289 struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip); 314 struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
@@ -307,6 +332,9 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
307 332
308 ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val); 333 ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
309 334
335 /* Channels polarity can be configured from action qualifier module */
336 configure_polarity(pc, pwm->hwpwm);
337
310 /* Enable time counter for free_run */ 338 /* Enable time counter for free_run */
311 ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_FREE_RUN); 339 ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_FREE_RUN);
312 return 0; 340 return 0;
@@ -358,6 +386,7 @@ static void ehrpwm_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
358static const struct pwm_ops ehrpwm_pwm_ops = { 386static const struct pwm_ops ehrpwm_pwm_ops = {
359 .free = ehrpwm_pwm_free, 387 .free = ehrpwm_pwm_free,
360 .config = ehrpwm_pwm_config, 388 .config = ehrpwm_pwm_config,
389 .set_polarity = ehrpwm_pwm_set_polarity,
361 .enable = ehrpwm_pwm_enable, 390 .enable = ehrpwm_pwm_enable,
362 .disable = ehrpwm_pwm_disable, 391 .disable = ehrpwm_pwm_disable,
363 .owner = THIS_MODULE, 392 .owner = THIS_MODULE,
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 48e9041dd1e2..07da58bb495c 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -55,9 +55,9 @@ static int rio_mport_phys_table[] = {
55}; 55};
56 56
57 57
58/* 58/**
59 * rio_destid_alloc - Allocate next available destID for given network 59 * rio_destid_alloc - Allocate next available destID for given network
60 * net: RIO network 60 * @net: RIO network
61 * 61 *
62 * Returns next available device destination ID for the specified RIO network. 62 * Returns next available device destination ID for the specified RIO network.
63 * Marks allocated ID as one in use. 63 * Marks allocated ID as one in use.
@@ -69,14 +69,9 @@ static u16 rio_destid_alloc(struct rio_net *net)
69 struct rio_id_table *idtab = &net->destid_table; 69 struct rio_id_table *idtab = &net->destid_table;
70 70
71 spin_lock(&idtab->lock); 71 spin_lock(&idtab->lock);
72 destid = find_next_zero_bit(idtab->table, idtab->max, idtab->next); 72 destid = find_first_zero_bit(idtab->table, idtab->max);
73 if (destid >= idtab->max)
74 destid = find_first_zero_bit(idtab->table, idtab->max);
75 73
76 if (destid < idtab->max) { 74 if (destid < idtab->max) {
77 idtab->next = destid + 1;
78 if (idtab->next >= idtab->max)
79 idtab->next = 0;
80 set_bit(destid, idtab->table); 75 set_bit(destid, idtab->table);
81 destid += idtab->start; 76 destid += idtab->start;
82 } else 77 } else
@@ -86,10 +81,10 @@ static u16 rio_destid_alloc(struct rio_net *net)
86 return (u16)destid; 81 return (u16)destid;
87} 82}
88 83
89/* 84/**
90 * rio_destid_reserve - Reserve the specified destID 85 * rio_destid_reserve - Reserve the specified destID
91 * net: RIO network 86 * @net: RIO network
92 * destid: destID to reserve 87 * @destid: destID to reserve
93 * 88 *
94 * Tries to reserve the specified destID. 89 * Tries to reserve the specified destID.
95 * Returns 0 if successful. 90 * Returns 0 if successful.
@@ -106,10 +101,10 @@ static int rio_destid_reserve(struct rio_net *net, u16 destid)
106 return oldbit; 101 return oldbit;
107} 102}
108 103
109/* 104/**
110 * rio_destid_free - free a previously allocated destID 105 * rio_destid_free - free a previously allocated destID
111 * net: RIO network 106 * @net: RIO network
112 * destid: destID to free 107 * @destid: destID to free
113 * 108 *
114 * Makes the specified destID available for use. 109 * Makes the specified destID available for use.
115 */ 110 */
@@ -123,9 +118,9 @@ static void rio_destid_free(struct rio_net *net, u16 destid)
123 spin_unlock(&idtab->lock); 118 spin_unlock(&idtab->lock);
124} 119}
125 120
126/* 121/**
127 * rio_destid_first - return first destID in use 122 * rio_destid_first - return first destID in use
128 * net: RIO network 123 * @net: RIO network
129 */ 124 */
130static u16 rio_destid_first(struct rio_net *net) 125static u16 rio_destid_first(struct rio_net *net)
131{ 126{
@@ -142,10 +137,10 @@ static u16 rio_destid_first(struct rio_net *net)
142 return (u16)destid; 137 return (u16)destid;
143} 138}
144 139
145/* 140/**
146 * rio_destid_next - return next destID in use 141 * rio_destid_next - return next destID in use
147 * net: RIO network 142 * @net: RIO network
148 * from: destination ID from which search shall continue 143 * @from: destination ID from which search shall continue
149 */ 144 */
150static u16 rio_destid_next(struct rio_net *net, u16 from) 145static u16 rio_destid_next(struct rio_net *net, u16 from)
151{ 146{
@@ -1163,8 +1158,8 @@ static struct rio_net __devinit *rio_alloc_net(struct rio_mport *port,
1163 1158
1164 net = kzalloc(sizeof(struct rio_net), GFP_KERNEL); 1159 net = kzalloc(sizeof(struct rio_net), GFP_KERNEL);
1165 if (net && do_enum) { 1160 if (net && do_enum) {
1166 net->destid_table.table = kzalloc( 1161 net->destid_table.table = kcalloc(
1167 BITS_TO_LONGS(RIO_MAX_ROUTE_ENTRIES(port->sys_size)) * 1162 BITS_TO_LONGS(RIO_MAX_ROUTE_ENTRIES(port->sys_size)),
1168 sizeof(long), 1163 sizeof(long),
1169 GFP_KERNEL); 1164 GFP_KERNEL);
1170 1165
@@ -1174,7 +1169,6 @@ static struct rio_net __devinit *rio_alloc_net(struct rio_mport *port,
1174 net = NULL; 1169 net = NULL;
1175 } else { 1170 } else {
1176 net->destid_table.start = start; 1171 net->destid_table.start = start;
1177 net->destid_table.next = 0;
1178 net->destid_table.max = 1172 net->destid_table.max =
1179 RIO_MAX_ROUTE_ENTRIES(port->sys_size); 1173 RIO_MAX_ROUTE_ENTRIES(port->sys_size);
1180 spin_lock_init(&net->destid_table.lock); 1174 spin_lock_init(&net->destid_table.lock);
@@ -1391,7 +1385,7 @@ int __devinit rio_disc_mport(struct rio_mport *mport)
1391 while (time_before(jiffies, to_end)) { 1385 while (time_before(jiffies, to_end)) {
1392 if (rio_enum_complete(mport)) 1386 if (rio_enum_complete(mport))
1393 goto enum_done; 1387 goto enum_done;
1394 schedule_timeout_uninterruptible(msecs_to_jiffies(10)); 1388 msleep(10);
1395 } 1389 }
1396 1390
1397 pr_debug("RIO: discovery timeout on mport %d %s\n", 1391 pr_debug("RIO: discovery timeout on mport %d %s\n",
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index d4bd69013c50..c17ae22567e0 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1275,49 +1275,68 @@ static void __devinit disc_work_handler(struct work_struct *_work)
1275 pr_debug("RIO: discovery work for mport %d %s\n", 1275 pr_debug("RIO: discovery work for mport %d %s\n",
1276 work->mport->id, work->mport->name); 1276 work->mport->id, work->mport->name);
1277 rio_disc_mport(work->mport); 1277 rio_disc_mport(work->mport);
1278
1279 kfree(work);
1280} 1278}
1281 1279
1282int __devinit rio_init_mports(void) 1280int __devinit rio_init_mports(void)
1283{ 1281{
1284 struct rio_mport *port; 1282 struct rio_mport *port;
1285 struct rio_disc_work *work; 1283 struct rio_disc_work *work;
1286 int no_disc = 0; 1284 int n = 0;
1285
1286 if (!next_portid)
1287 return -ENODEV;
1287 1288
1289 /*
1290 * First, run enumerations and check if we need to perform discovery
1291 * on any of the registered mports.
1292 */
1288 list_for_each_entry(port, &rio_mports, node) { 1293 list_for_each_entry(port, &rio_mports, node) {
1289 if (port->host_deviceid >= 0) 1294 if (port->host_deviceid >= 0)
1290 rio_enum_mport(port); 1295 rio_enum_mport(port);
1291 else if (!no_disc) { 1296 else
1292 if (!rio_wq) { 1297 n++;
1293 rio_wq = alloc_workqueue("riodisc", 0, 0); 1298 }
1294 if (!rio_wq) { 1299
1295 pr_err("RIO: unable allocate rio_wq\n"); 1300 if (!n)
1296 no_disc = 1; 1301 goto no_disc;
1297 continue; 1302
1298 } 1303 /*
1299 } 1304 * If we have mports that require discovery schedule a discovery work
1300 1305 * for each of them. If the code below fails to allocate needed
1301 work = kzalloc(sizeof *work, GFP_KERNEL); 1306 * resources, exit without error to keep results of enumeration
1302 if (!work) { 1307 * process (if any).
1303 pr_err("RIO: no memory for work struct\n"); 1308 * TODO: Implement restart of discovery process for all or
1304 no_disc = 1; 1309 * individual discovering mports.
1305 continue; 1310 */
1306 } 1311 rio_wq = alloc_workqueue("riodisc", 0, 0);
1307 1312 if (!rio_wq) {
1308 work->mport = port; 1313 pr_err("RIO: unable allocate rio_wq\n");
1309 INIT_WORK(&work->work, disc_work_handler); 1314 goto no_disc;
1310 queue_work(rio_wq, &work->work);
1311 }
1312 } 1315 }
1313 1316
1314 if (rio_wq) { 1317 work = kcalloc(n, sizeof *work, GFP_KERNEL);
1315 pr_debug("RIO: flush discovery workqueue\n"); 1318 if (!work) {
1316 flush_workqueue(rio_wq); 1319 pr_err("RIO: no memory for work struct\n");
1317 pr_debug("RIO: flush discovery workqueue finished\n");
1318 destroy_workqueue(rio_wq); 1320 destroy_workqueue(rio_wq);
1321 goto no_disc;
1319 } 1322 }
1320 1323
1324 n = 0;
1325 list_for_each_entry(port, &rio_mports, node) {
1326 if (port->host_deviceid < 0) {
1327 work[n].mport = port;
1328 INIT_WORK(&work[n].work, disc_work_handler);
1329 queue_work(rio_wq, &work[n].work);
1330 n++;
1331 }
1332 }
1333
1334 flush_workqueue(rio_wq);
1335 pr_debug("RIO: destroy discovery workqueue\n");
1336 destroy_workqueue(rio_wq);
1337 kfree(work);
1338
1339no_disc:
1321 rio_init(); 1340 rio_init();
1322 1341
1323 return 0; 1342 return 0;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index e069f176a82d..19c03ab2bdcb 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -59,6 +59,7 @@ comment "RTC interfaces"
59config RTC_INTF_SYSFS 59config RTC_INTF_SYSFS
60 boolean "/sys/class/rtc/rtcN (sysfs)" 60 boolean "/sys/class/rtc/rtcN (sysfs)"
61 depends on SYSFS 61 depends on SYSFS
62 default RTC_CLASS
62 help 63 help
63 Say yes here if you want to use your RTCs using sysfs interfaces, 64 Say yes here if you want to use your RTCs using sysfs interfaces,
64 /sys/class/rtc/rtc0 through /sys/.../rtcN. 65 /sys/class/rtc/rtc0 through /sys/.../rtcN.
@@ -68,6 +69,7 @@ config RTC_INTF_SYSFS
68config RTC_INTF_PROC 69config RTC_INTF_PROC
69 boolean "/proc/driver/rtc (procfs for rtcN)" 70 boolean "/proc/driver/rtc (procfs for rtcN)"
70 depends on PROC_FS 71 depends on PROC_FS
72 default RTC_CLASS
71 help 73 help
72 Say yes here if you want to use your system clock RTC through 74 Say yes here if you want to use your system clock RTC through
73 the proc interface, /proc/driver/rtc. 75 the proc interface, /proc/driver/rtc.
@@ -79,6 +81,7 @@ config RTC_INTF_PROC
79 81
80config RTC_INTF_DEV 82config RTC_INTF_DEV
81 boolean "/dev/rtcN (character devices)" 83 boolean "/dev/rtcN (character devices)"
84 default RTC_CLASS
82 help 85 help
83 Say yes here if you want to use your RTCs using the /dev 86 Say yes here if you want to use your RTCs using the /dev
84 interfaces, which "udev" sets up as /dev/rtc0 through 87 interfaces, which "udev" sets up as /dev/rtc0 through
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index 891cd6c61d0a..4eed51044c5d 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -392,6 +392,8 @@ static int dryice_rtc_probe(struct platform_device *pdev)
392 if (imxdi->ioaddr == NULL) 392 if (imxdi->ioaddr == NULL)
393 return -ENOMEM; 393 return -ENOMEM;
394 394
395 spin_lock_init(&imxdi->irq_lock);
396
395 imxdi->irq = platform_get_irq(pdev, 0); 397 imxdi->irq = platform_get_irq(pdev, 0);
396 if (imxdi->irq < 0) 398 if (imxdi->irq < 0)
397 return imxdi->irq; 399 return imxdi->irq;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index a5a55da2a1ac..b6ad0de07930 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -69,23 +69,9 @@ static ssize_t dcssblk_add_store(struct device * dev, struct device_attribute *a
69 size_t count); 69 size_t count);
70static ssize_t dcssblk_remove_store(struct device * dev, struct device_attribute *attr, const char * buf, 70static ssize_t dcssblk_remove_store(struct device * dev, struct device_attribute *attr, const char * buf,
71 size_t count); 71 size_t count);
72static ssize_t dcssblk_save_store(struct device * dev, struct device_attribute *attr, const char * buf,
73 size_t count);
74static ssize_t dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf);
75static ssize_t dcssblk_shared_store(struct device * dev, struct device_attribute *attr, const char * buf,
76 size_t count);
77static ssize_t dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf);
78static ssize_t dcssblk_seglist_show(struct device *dev,
79 struct device_attribute *attr,
80 char *buf);
81 72
82static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store); 73static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store);
83static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store); 74static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store);
84static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show,
85 dcssblk_save_store);
86static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show,
87 dcssblk_shared_store);
88static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL);
89 75
90static struct device *dcssblk_root_dev; 76static struct device *dcssblk_root_dev;
91 77
@@ -416,6 +402,8 @@ out:
416 up_write(&dcssblk_devices_sem); 402 up_write(&dcssblk_devices_sem);
417 return rc; 403 return rc;
418} 404}
405static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show,
406 dcssblk_shared_store);
419 407
420/* 408/*
421 * device attribute for save operation on current copy 409 * device attribute for save operation on current copy
@@ -476,6 +464,8 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
476 up_write(&dcssblk_devices_sem); 464 up_write(&dcssblk_devices_sem);
477 return count; 465 return count;
478} 466}
467static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show,
468 dcssblk_save_store);
479 469
480/* 470/*
481 * device attribute for showing all segments in a device 471 * device attribute for showing all segments in a device
@@ -502,6 +492,21 @@ dcssblk_seglist_show(struct device *dev, struct device_attribute *attr,
502 up_read(&dcssblk_devices_sem); 492 up_read(&dcssblk_devices_sem);
503 return i; 493 return i;
504} 494}
495static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL);
496
497static struct attribute *dcssblk_dev_attrs[] = {
498 &dev_attr_shared.attr,
499 &dev_attr_save.attr,
500 &dev_attr_seglist.attr,
501 NULL,
502};
503static struct attribute_group dcssblk_dev_attr_group = {
504 .attrs = dcssblk_dev_attrs,
505};
506static const struct attribute_group *dcssblk_dev_attr_groups[] = {
507 &dcssblk_dev_attr_group,
508 NULL,
509};
505 510
506/* 511/*
507 * device attribute for adding devices 512 * device attribute for adding devices
@@ -590,6 +595,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
590 595
591 dev_set_name(&dev_info->dev, dev_info->segment_name); 596 dev_set_name(&dev_info->dev, dev_info->segment_name);
592 dev_info->dev.release = dcssblk_release_segment; 597 dev_info->dev.release = dcssblk_release_segment;
598 dev_info->dev.groups = dcssblk_dev_attr_groups;
593 INIT_LIST_HEAD(&dev_info->lh); 599 INIT_LIST_HEAD(&dev_info->lh);
594 dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK); 600 dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK);
595 if (dev_info->gd == NULL) { 601 if (dev_info->gd == NULL) {
@@ -637,21 +643,10 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
637 * register the device 643 * register the device
638 */ 644 */
639 rc = device_register(&dev_info->dev); 645 rc = device_register(&dev_info->dev);
640 if (rc) {
641 module_put(THIS_MODULE);
642 goto dev_list_del;
643 }
644 get_device(&dev_info->dev);
645 rc = device_create_file(&dev_info->dev, &dev_attr_shared);
646 if (rc)
647 goto unregister_dev;
648 rc = device_create_file(&dev_info->dev, &dev_attr_save);
649 if (rc)
650 goto unregister_dev;
651 rc = device_create_file(&dev_info->dev, &dev_attr_seglist);
652 if (rc) 646 if (rc)
653 goto unregister_dev; 647 goto put_dev;
654 648
649 get_device(&dev_info->dev);
655 add_disk(dev_info->gd); 650 add_disk(dev_info->gd);
656 651
657 switch (dev_info->segment_type) { 652 switch (dev_info->segment_type) {
@@ -668,12 +663,11 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
668 rc = count; 663 rc = count;
669 goto out; 664 goto out;
670 665
671unregister_dev: 666put_dev:
672 list_del(&dev_info->lh); 667 list_del(&dev_info->lh);
673 blk_cleanup_queue(dev_info->dcssblk_queue); 668 blk_cleanup_queue(dev_info->dcssblk_queue);
674 dev_info->gd->queue = NULL; 669 dev_info->gd->queue = NULL;
675 put_disk(dev_info->gd); 670 put_disk(dev_info->gd);
676 device_unregister(&dev_info->dev);
677 list_for_each_entry(seg_info, &dev_info->seg_list, lh) { 671 list_for_each_entry(seg_info, &dev_info->seg_list, lh) {
678 segment_unload(seg_info->segment_name); 672 segment_unload(seg_info->segment_name);
679 } 673 }
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index b4d572f65f07..fd00afd8b850 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -377,7 +377,11 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
377 /* Will be done on the slow path. */ 377 /* Will be done on the slow path. */
378 return -EAGAIN; 378 return -EAGAIN;
379 } 379 }
380 if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) { 380 if (stsch_err(schid, &schib)) {
381 /* Subchannel is not provided. */
382 return -ENXIO;
383 }
384 if (!css_sch_is_valid(&schib)) {
381 /* Unusable - ignore. */ 385 /* Unusable - ignore. */
382 return 0; 386 return 0;
383 } 387 }
@@ -536,6 +540,7 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
536 case -ENOMEM: 540 case -ENOMEM:
537 case -EIO: 541 case -EIO:
538 /* These should abort looping */ 542 /* These should abort looping */
543 idset_sch_del_subseq(slow_subchannel_set, schid);
539 break; 544 break;
540 default: 545 default:
541 rc = 0; 546 rc = 0;
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index e6d5f8c49524..199bc6791177 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -1,9 +1,10 @@
1/* 1/*
2 * Copyright IBM Corp. 2007 2 * Copyright IBM Corp. 2007, 2012
3 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 3 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
4 */ 4 */
5 5
6#include <linux/vmalloc.h> 6#include <linux/vmalloc.h>
7#include <linux/bitmap.h>
7#include <linux/bitops.h> 8#include <linux/bitops.h>
8#include "idset.h" 9#include "idset.h"
9#include "css.h" 10#include "css.h"
@@ -89,6 +90,14 @@ void idset_sch_del(struct idset *set, struct subchannel_id schid)
89 idset_del(set, schid.ssid, schid.sch_no); 90 idset_del(set, schid.ssid, schid.sch_no);
90} 91}
91 92
93/* Clear ids starting from @schid up to end of subchannel set. */
94void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid)
95{
96 int pos = schid.ssid * set->num_id + schid.sch_no;
97
98 bitmap_clear(set->bitmap, pos, set->num_id - schid.sch_no);
99}
100
92int idset_sch_contains(struct idset *set, struct subchannel_id schid) 101int idset_sch_contains(struct idset *set, struct subchannel_id schid)
93{ 102{
94 return idset_contains(set, schid.ssid, schid.sch_no); 103 return idset_contains(set, schid.ssid, schid.sch_no);
@@ -111,20 +120,13 @@ int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
111 120
112int idset_is_empty(struct idset *set) 121int idset_is_empty(struct idset *set)
113{ 122{
114 int bitnum; 123 return bitmap_empty(set->bitmap, set->num_ssid * set->num_id);
115
116 bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
117 if (bitnum >= set->num_ssid * set->num_id)
118 return 1;
119 return 0;
120} 124}
121 125
122void idset_add_set(struct idset *to, struct idset *from) 126void idset_add_set(struct idset *to, struct idset *from)
123{ 127{
124 unsigned long i, len; 128 int len = min(__BITOPS_WORDS(to->num_ssid * to->num_id),
129 __BITOPS_WORDS(from->num_ssid * from->num_id));
125 130
126 len = min(__BITOPS_WORDS(to->num_ssid * to->num_id), 131 bitmap_or(to->bitmap, to->bitmap, from->bitmap, len);
127 __BITOPS_WORDS(from->num_ssid * from->num_id));
128 for (i = 0; i < len ; i++)
129 to->bitmap[i] |= from->bitmap[i];
130} 132}
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
index 3d943f03591e..06d3bc01bb09 100644
--- a/drivers/s390/cio/idset.h
+++ b/drivers/s390/cio/idset.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright IBM Corp. 2007 2 * Copyright IBM Corp. 2007, 2012
3 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 3 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
4 */ 4 */
5 5
@@ -17,6 +17,7 @@ void idset_fill(struct idset *set);
17struct idset *idset_sch_new(void); 17struct idset *idset_sch_new(void);
18void idset_sch_add(struct idset *set, struct subchannel_id id); 18void idset_sch_add(struct idset *set, struct subchannel_id id);
19void idset_sch_del(struct idset *set, struct subchannel_id id); 19void idset_sch_del(struct idset *set, struct subchannel_id id);
20void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid);
20int idset_sch_contains(struct idset *set, struct subchannel_id id); 21int idset_sch_contains(struct idset *set, struct subchannel_id id);
21int idset_sch_get_first(struct idset *set, struct subchannel_id *id); 22int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
22int idset_is_empty(struct idset *set); 23int idset_is_empty(struct idset *set);
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index c7275e303a0d..899ffa19f5ec 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -39,7 +39,6 @@
39#include "zcrypt_msgtype6.h" 39#include "zcrypt_msgtype6.h"
40#include "zcrypt_pcixcc.h" 40#include "zcrypt_pcixcc.h"
41#include "zcrypt_cca_key.h" 41#include "zcrypt_cca_key.h"
42#include "zcrypt_msgtype6.h"
43 42
44#define PCIXCC_MIN_MOD_SIZE 16 /* 128 bits */ 43#define PCIXCC_MIN_MOD_SIZE 16 /* 128 bits */
45#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */ 44#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 2db409330c21..e67e0258aec5 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1141,11 +1141,12 @@ static int qeth_l2_recover(void *ptr)
1141 dev_info(&card->gdev->dev, 1141 dev_info(&card->gdev->dev,
1142 "Device successfully recovered!\n"); 1142 "Device successfully recovered!\n");
1143 else { 1143 else {
1144 rtnl_lock(); 1144 if (rtnl_trylock()) {
1145 dev_close(card->dev); 1145 dev_close(card->dev);
1146 rtnl_unlock(); 1146 rtnl_unlock();
1147 dev_warn(&card->gdev->dev, "The qeth device driver " 1147 dev_warn(&card->gdev->dev, "The qeth device driver "
1148 "failed to recover an error on the device\n"); 1148 "failed to recover an error on the device\n");
1149 }
1149 } 1150 }
1150 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 1151 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
1151 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); 1152 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 4cd310cb5bdf..5ba390658498 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3510,11 +3510,12 @@ static int qeth_l3_recover(void *ptr)
3510 dev_info(&card->gdev->dev, 3510 dev_info(&card->gdev->dev,
3511 "Device successfully recovered!\n"); 3511 "Device successfully recovered!\n");
3512 else { 3512 else {
3513 rtnl_lock(); 3513 if (rtnl_trylock()) {
3514 dev_close(card->dev); 3514 dev_close(card->dev);
3515 rtnl_unlock(); 3515 rtnl_unlock();
3516 dev_warn(&card->gdev->dev, "The qeth device driver " 3516 dev_warn(&card->gdev->dev, "The qeth device driver "
3517 "failed to recover an error on the device\n"); 3517 "failed to recover an error on the device\n");
3518 }
3518 } 3519 }
3519 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 3520 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
3520 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); 3521 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 207b7d742443..d8f990b6b332 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -157,7 +157,7 @@ static int smsg_pm_restore_thaw(struct device *dev)
157#ifdef CONFIG_PM_DEBUG 157#ifdef CONFIG_PM_DEBUG
158 printk(KERN_WARNING "smsg_pm_restore_thaw\n"); 158 printk(KERN_WARNING "smsg_pm_restore_thaw\n");
159#endif 159#endif
160 if (smsg_path && iucv_path_connected) { 160 if (smsg_path && !iucv_path_connected) {
161 memset(smsg_path, 0, sizeof(*smsg_path)); 161 memset(smsg_path, 0, sizeof(*smsg_path));
162 smsg_path->msglim = 255; 162 smsg_path->msglim = 255;
163 smsg_path->flags = 0; 163 smsg_path->flags = 0;
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 6206a666a8ec..737554c37d9e 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -179,6 +179,7 @@ static void print_SCp(struct scsi_pointer *SCp, const char *prefix, const char *
179 SCp->buffers_residual, suffix); 179 SCp->buffers_residual, suffix);
180} 180}
181 181
182#ifdef CHECK_STRUCTURE
182static void fas216_dumpinfo(FAS216_Info *info) 183static void fas216_dumpinfo(FAS216_Info *info)
183{ 184{
184 static int used = 0; 185 static int used = 0;
@@ -223,7 +224,6 @@ static void fas216_dumpinfo(FAS216_Info *info)
223 info->internal_done, info->magic_end); 224 info->internal_done, info->magic_end);
224} 225}
225 226
226#ifdef CHECK_STRUCTURE
227static void __fas216_checkmagic(FAS216_Info *info, const char *func) 227static void __fas216_checkmagic(FAS216_Info *info, const char *func)
228{ 228{
229 int corruption = 0; 229 int corruption = 0;
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index d25f944b59c2..fc6a5aabf66e 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -21,6 +21,7 @@
21/*#define PSEUDO_DMA*/ 21/*#define PSEUDO_DMA*/
22 22
23#define OAKSCSI_PUBLIC_RELEASE 1 23#define OAKSCSI_PUBLIC_RELEASE 1
24#define DONT_USE_INTR
24 25
25#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) 26#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
26#define NCR5380_local_declare() void __iomem *_base 27#define NCR5380_local_declare() void __iomem *_base
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index b7c326f7a6d0..342d7d9c0997 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -165,6 +165,16 @@ bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
165 bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg); 165 bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg);
166} 166}
167 167
168static void
169bfa_com_fru_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
170{
171 struct bfa_fru_s *fru = BFA_FRU(bfa);
172 struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa);
173
174 bfa_fru_attach(fru, &bfa->ioc, bfa, bfa->trcmod, mincfg);
175 bfa_fru_memclaim(fru, fru_dma->kva_curp, fru_dma->dma_curp, mincfg);
176}
177
168/* 178/*
169 * BFA IOC FC related definitions 179 * BFA IOC FC related definitions
170 */ 180 */
@@ -274,6 +284,15 @@ bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
274 case IOCFC_E_IOC_ENABLED: 284 case IOCFC_E_IOC_ENABLED:
275 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read); 285 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
276 break; 286 break;
287
288 case IOCFC_E_DISABLE:
289 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
290 break;
291
292 case IOCFC_E_STOP:
293 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
294 break;
295
277 case IOCFC_E_IOC_FAILED: 296 case IOCFC_E_IOC_FAILED:
278 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); 297 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
279 break; 298 break;
@@ -298,6 +317,15 @@ bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
298 case IOCFC_E_DCONF_DONE: 317 case IOCFC_E_DCONF_DONE:
299 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait); 318 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait);
300 break; 319 break;
320
321 case IOCFC_E_DISABLE:
322 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
323 break;
324
325 case IOCFC_E_STOP:
326 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
327 break;
328
301 case IOCFC_E_IOC_FAILED: 329 case IOCFC_E_IOC_FAILED:
302 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); 330 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
303 break; 331 break;
@@ -322,6 +350,15 @@ bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
322 case IOCFC_E_CFG_DONE: 350 case IOCFC_E_CFG_DONE:
323 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done); 351 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done);
324 break; 352 break;
353
354 case IOCFC_E_DISABLE:
355 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
356 break;
357
358 case IOCFC_E_STOP:
359 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
360 break;
361
325 case IOCFC_E_IOC_FAILED: 362 case IOCFC_E_IOC_FAILED:
326 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); 363 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
327 break; 364 break;
@@ -433,6 +470,12 @@ bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
433 bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe, 470 bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe,
434 bfa_iocfc_stop_cb, iocfc->bfa); 471 bfa_iocfc_stop_cb, iocfc->bfa);
435 break; 472 break;
473
474 case IOCFC_E_IOC_ENABLED:
475 case IOCFC_E_DCONF_DONE:
476 case IOCFC_E_CFG_DONE:
477 break;
478
436 default: 479 default:
437 bfa_sm_fault(iocfc->bfa, event); 480 bfa_sm_fault(iocfc->bfa, event);
438 break; 481 break;
@@ -454,6 +497,15 @@ bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
454 case IOCFC_E_IOC_ENABLED: 497 case IOCFC_E_IOC_ENABLED:
455 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait); 498 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
456 break; 499 break;
500
501 case IOCFC_E_DISABLE:
502 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
503 break;
504
505 case IOCFC_E_STOP:
506 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
507 break;
508
457 case IOCFC_E_IOC_FAILED: 509 case IOCFC_E_IOC_FAILED:
458 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); 510 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
459 511
@@ -493,6 +545,13 @@ bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
493 bfa_iocfc_enable_cb, iocfc->bfa); 545 bfa_iocfc_enable_cb, iocfc->bfa);
494 iocfc->bfa->iocfc.cb_reqd = BFA_FALSE; 546 iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
495 break; 547 break;
548 case IOCFC_E_DISABLE:
549 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
550 break;
551
552 case IOCFC_E_STOP:
553 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
554 break;
496 case IOCFC_E_IOC_FAILED: 555 case IOCFC_E_IOC_FAILED:
497 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); 556 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
498 if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE) 557 if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
@@ -524,6 +583,10 @@ bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
524 case IOCFC_E_IOC_DISABLED: 583 case IOCFC_E_IOC_DISABLED:
525 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled); 584 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled);
526 break; 585 break;
586 case IOCFC_E_IOC_ENABLED:
587 case IOCFC_E_DCONF_DONE:
588 case IOCFC_E_CFG_DONE:
589 break;
527 default: 590 default:
528 bfa_sm_fault(iocfc->bfa, event); 591 bfa_sm_fault(iocfc->bfa, event);
529 break; 592 break;
@@ -785,19 +848,20 @@ void
785bfa_isr_enable(struct bfa_s *bfa) 848bfa_isr_enable(struct bfa_s *bfa)
786{ 849{
787 u32 umsk; 850 u32 umsk;
788 int pci_func = bfa_ioc_pcifn(&bfa->ioc); 851 int port_id = bfa_ioc_portid(&bfa->ioc);
789 852
790 bfa_trc(bfa, pci_func); 853 bfa_trc(bfa, bfa_ioc_pcifn(&bfa->ioc));
854 bfa_trc(bfa, port_id);
791 855
792 bfa_msix_ctrl_install(bfa); 856 bfa_msix_ctrl_install(bfa);
793 857
794 if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) { 858 if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
795 umsk = __HFN_INT_ERR_MASK_CT2; 859 umsk = __HFN_INT_ERR_MASK_CT2;
796 umsk |= pci_func == 0 ? 860 umsk |= port_id == 0 ?
797 __HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2; 861 __HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
798 } else { 862 } else {
799 umsk = __HFN_INT_ERR_MASK; 863 umsk = __HFN_INT_ERR_MASK;
800 umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK; 864 umsk |= port_id == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
801 } 865 }
802 866
803 writel(umsk, bfa->iocfc.bfa_regs.intr_status); 867 writel(umsk, bfa->iocfc.bfa_regs.intr_status);
@@ -930,7 +994,8 @@ bfa_iocfc_send_cfg(void *bfa_arg)
930 cfg_info->single_msix_vec = 1; 994 cfg_info->single_msix_vec = 1;
931 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; 995 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
932 cfg_info->num_cqs = cfg->fwcfg.num_cqs; 996 cfg_info->num_cqs = cfg->fwcfg.num_cqs;
933 cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs); 997 cfg_info->num_ioim_reqs = cpu_to_be16(bfa_fcpim_get_throttle_cfg(bfa,
998 cfg->fwcfg.num_ioim_reqs));
934 cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs); 999 cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);
935 1000
936 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); 1001 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
@@ -1192,10 +1257,14 @@ bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
1192static void 1257static void
1193bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg) 1258bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
1194{ 1259{
1260 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1261 struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
1262
1195 bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs); 1263 bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
1196 bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs); 1264 bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
1197 bfa_rport_res_recfg(bfa, fwcfg->num_rports); 1265 bfa_rport_res_recfg(bfa, fwcfg->num_rports);
1198 bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs); 1266 bfa_fcp_res_recfg(bfa, cpu_to_be16(cfg_info->num_ioim_reqs),
1267 fwcfg->num_ioim_reqs);
1199 bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs); 1268 bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
1200} 1269}
1201 1270
@@ -1693,6 +1762,7 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
1693 struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa); 1762 struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
1694 struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa); 1763 struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
1695 struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa); 1764 struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);
1765 struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa);
1696 1766
1697 WARN_ON((cfg == NULL) || (meminfo == NULL)); 1767 WARN_ON((cfg == NULL) || (meminfo == NULL));
1698 1768
@@ -1717,6 +1787,8 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
1717 bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo()); 1787 bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
1718 bfa_mem_dma_setup(meminfo, phy_dma, 1788 bfa_mem_dma_setup(meminfo, phy_dma,
1719 bfa_phy_meminfo(cfg->drvcfg.min_cfg)); 1789 bfa_phy_meminfo(cfg->drvcfg.min_cfg));
1790 bfa_mem_dma_setup(meminfo, fru_dma,
1791 bfa_fru_meminfo(cfg->drvcfg.min_cfg));
1720} 1792}
1721 1793
1722/* 1794/*
@@ -1789,6 +1861,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1789 bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg); 1861 bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
1790 bfa_com_diag_attach(bfa); 1862 bfa_com_diag_attach(bfa);
1791 bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg); 1863 bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg);
1864 bfa_com_fru_attach(bfa, cfg->drvcfg.min_cfg);
1792} 1865}
1793 1866
1794/* 1867/*
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
index b5a1595cc0a5..0efdf312b42c 100644
--- a/drivers/scsi/bfa/bfa_defs.h
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -159,10 +159,13 @@ enum bfa_status {
159 BFA_STATUS_BEACON_ON = 72, /* Port Beacon already on */ 159 BFA_STATUS_BEACON_ON = 72, /* Port Beacon already on */
160 BFA_STATUS_ENOFSAVE = 78, /* No saved firmware trace */ 160 BFA_STATUS_ENOFSAVE = 78, /* No saved firmware trace */
161 BFA_STATUS_IOC_DISABLED = 82, /* IOC is already disabled */ 161 BFA_STATUS_IOC_DISABLED = 82, /* IOC is already disabled */
162 BFA_STATUS_ERROR_TRL_ENABLED = 87, /* TRL is enabled */
163 BFA_STATUS_ERROR_QOS_ENABLED = 88, /* QoS is enabled */
162 BFA_STATUS_NO_SFP_DEV = 89, /* No SFP device check or replace SFP */ 164 BFA_STATUS_NO_SFP_DEV = 89, /* No SFP device check or replace SFP */
163 BFA_STATUS_MEMTEST_FAILED = 90, /* Memory test failed contact support */ 165 BFA_STATUS_MEMTEST_FAILED = 90, /* Memory test failed contact support */
164 BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */ 166 BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */
165 BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */ 167 BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */
168 BFA_STATUS_CMD_NOTSUPP_CNA = 146, /* Command not supported for CNA */
166 BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot 169 BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot
167 * configuration */ 170 * configuration */
168 BFA_STATUS_BAD_FWCFG = 156, /* Bad firmware configuration */ 171 BFA_STATUS_BAD_FWCFG = 156, /* Bad firmware configuration */
@@ -184,6 +187,17 @@ enum bfa_status {
184 BFA_STATUS_FAA_ACQ_ADDR = 200, /* Acquiring addr */ 187 BFA_STATUS_FAA_ACQ_ADDR = 200, /* Acquiring addr */
185 BFA_STATUS_ERROR_TRUNK_ENABLED = 203, /* Trunk enabled on adapter */ 188 BFA_STATUS_ERROR_TRUNK_ENABLED = 203, /* Trunk enabled on adapter */
186 BFA_STATUS_MAX_ENTRY_REACHED = 212, /* MAX entry reached */ 189 BFA_STATUS_MAX_ENTRY_REACHED = 212, /* MAX entry reached */
190 BFA_STATUS_TOPOLOGY_LOOP = 230, /* Topology is set to Loop */
191 BFA_STATUS_LOOP_UNSUPP_MEZZ = 231, /* Loop topology is not supported
192 * on mezz cards */
193 BFA_STATUS_INVALID_BW = 233, /* Invalid bandwidth value */
194 BFA_STATUS_QOS_BW_INVALID = 234, /* Invalid QOS bandwidth
195 * configuration */
196 BFA_STATUS_DPORT_ENABLED = 235, /* D-port mode is already enabled */
197 BFA_STATUS_DPORT_DISABLED = 236, /* D-port mode is already disabled */
198 BFA_STATUS_CMD_NOTSUPP_MEZZ = 239, /* Cmd not supported for MEZZ card */
199 BFA_STATUS_FRU_NOT_PRESENT = 240, /* fru module not present */
200 BFA_STATUS_DPORT_ERR = 245, /* D-port mode is enabled */
187 BFA_STATUS_MAX_VAL /* Unknown error code */ 201 BFA_STATUS_MAX_VAL /* Unknown error code */
188}; 202};
189#define bfa_status_t enum bfa_status 203#define bfa_status_t enum bfa_status
@@ -249,6 +263,10 @@ struct bfa_adapter_attr_s {
249 263
250 u8 is_mezz; 264 u8 is_mezz;
251 u8 trunk_capable; 265 u8 trunk_capable;
266 u8 mfg_day; /* manufacturing day */
267 u8 mfg_month; /* manufacturing month */
268 u16 mfg_year; /* manufacturing year */
269 u16 rsvd;
252}; 270};
253 271
254/* 272/*
@@ -499,6 +517,17 @@ struct bfa_ioc_aen_data_s {
499}; 517};
500 518
501/* 519/*
520 * D-port states
521 *
522*/
523enum bfa_dport_state {
524 BFA_DPORT_ST_DISABLED = 0, /* D-port is Disabled */
525 BFA_DPORT_ST_DISABLING = 1, /* D-port is Disabling */
526 BFA_DPORT_ST_ENABLING = 2, /* D-port is Enabling */
527 BFA_DPORT_ST_ENABLED = 3, /* D-port is Enabled */
528};
529
530/*
502 * ---------------------- mfg definitions ------------ 531 * ---------------------- mfg definitions ------------
503 */ 532 */
504 533
@@ -722,7 +751,8 @@ struct bfa_ablk_cfg_pf_s {
722 u8 rsvd[1]; 751 u8 rsvd[1];
723 u16 num_qpairs; 752 u16 num_qpairs;
724 u16 num_vectors; 753 u16 num_vectors;
725 u32 bw; 754 u16 bw_min;
755 u16 bw_max;
726}; 756};
727 757
728struct bfa_ablk_cfg_port_s { 758struct bfa_ablk_cfg_port_s {
@@ -889,11 +919,40 @@ struct sfp_diag_ext_s {
889 u8 ext_status_ctl[2]; 919 u8 ext_status_ctl[2];
890}; 920};
891 921
922/*
923 * Diagnostic: Data Fields -- Address A2h
924 * General Use Fields: User Writable Table - Features's Control Registers
925 * Total 32 bytes
926 */
927struct sfp_usr_eeprom_s {
928 u8 rsvd1[2]; /* 128-129 */
929 u8 ewrap; /* 130 */
930 u8 rsvd2[2]; /* */
931 u8 owrap; /* 133 */
932 u8 rsvd3[2]; /* */
933 u8 prbs; /* 136: PRBS 7 generator */
934 u8 rsvd4[2]; /* */
935 u8 tx_eqz_16; /* 139: TX Equalizer (16xFC) */
936 u8 tx_eqz_8; /* 140: TX Equalizer (8xFC) */
937 u8 rsvd5[2]; /* */
938 u8 rx_emp_16; /* 143: RX Emphasis (16xFC) */
939 u8 rx_emp_8; /* 144: RX Emphasis (8xFC) */
940 u8 rsvd6[2]; /* */
941 u8 tx_eye_adj; /* 147: TX eye Threshold Adjust */
942 u8 rsvd7[3]; /* */
943 u8 tx_eye_qctl; /* 151: TX eye Quality Control */
944 u8 tx_eye_qres; /* 152: TX eye Quality Result */
945 u8 rsvd8[2]; /* */
946 u8 poh[3]; /* 155-157: Power On Hours */
947 u8 rsvd9[2]; /* */
948};
949
892struct sfp_mem_s { 950struct sfp_mem_s {
893 struct sfp_srlid_base_s srlid_base; 951 struct sfp_srlid_base_s srlid_base;
894 struct sfp_srlid_ext_s srlid_ext; 952 struct sfp_srlid_ext_s srlid_ext;
895 struct sfp_diag_base_s diag_base; 953 struct sfp_diag_base_s diag_base;
896 struct sfp_diag_ext_s diag_ext; 954 struct sfp_diag_ext_s diag_ext;
955 struct sfp_usr_eeprom_s usr_eeprom;
897}; 956};
898 957
899/* 958/*
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 36756ce0e58f..ec03c8cd8dac 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -258,6 +258,7 @@ struct bfa_fw_port_lksm_stats_s {
258 u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */ 258 u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */
259 u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */ 259 u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */
260 u32 bbsc_lr; /* LKSM LR tx for credit recovery */ 260 u32 bbsc_lr; /* LKSM LR tx for credit recovery */
261 u32 rsvd;
261}; 262};
262 263
263struct bfa_fw_port_snsm_stats_s { 264struct bfa_fw_port_snsm_stats_s {
@@ -270,6 +271,9 @@ struct bfa_fw_port_snsm_stats_s {
270 u32 sync_lost; /* Sync loss count */ 271 u32 sync_lost; /* Sync loss count */
271 u32 sig_lost; /* Signal loss count */ 272 u32 sig_lost; /* Signal loss count */
272 u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */ 273 u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */
274 u32 adapt_success; /* SNSM adaptation success */
275 u32 adapt_fails; /* SNSM adaptation failures */
276 u32 adapt_ign_fails; /* SNSM adaptation failures ignored */
273}; 277};
274 278
275struct bfa_fw_port_physm_stats_s { 279struct bfa_fw_port_physm_stats_s {
@@ -324,12 +328,46 @@ struct bfa_fw_fcoe_port_stats_s {
324 struct bfa_fw_fip_stats_s fip_stats; 328 struct bfa_fw_fip_stats_s fip_stats;
325}; 329};
326 330
331/**
332 * @brief LPSM statistics
333 */
334struct bfa_fw_lpsm_stats_s {
335 u32 cls_rx; /* LPSM cls_rx */
336 u32 cls_tx; /* LPSM cls_tx */
337 u32 arbf0_rx; /* LPSM abrf0 rcvd */
338 u32 arbf0_tx; /* LPSM abrf0 xmit */
339 u32 init_rx; /* LPSM loop init start */
340 u32 unexp_hwst; /* LPSM unknown hw state */
341 u32 unexp_frame; /* LPSM unknown_frame */
342 u32 unexp_prim; /* LPSM unexpected primitive */
343 u32 prev_alpa_unavail; /* LPSM prev alpa unavailable */
344 u32 alpa_unavail; /* LPSM alpa not available */
345 u32 lip_rx; /* LPSM lip rcvd */
346 u32 lip_f7f7_rx; /* LPSM lip f7f7 rcvd */
347 u32 lip_f8_rx; /* LPSM lip f8 rcvd */
348 u32 lip_f8f7_rx; /* LPSM lip f8f7 rcvd */
349 u32 lip_other_rx; /* LPSM lip other rcvd */
350 u32 lip_tx; /* LPSM lip xmit */
351 u32 retry_tov; /* LPSM retry TOV */
352 u32 lip_tov; /* LPSM LIP wait TOV */
353 u32 idle_tov; /* LPSM idle wait TOV */
354 u32 arbf0_tov; /* LPSM arbfo wait TOV */
355 u32 stop_loop_tov; /* LPSM stop loop wait TOV */
356 u32 lixa_tov; /* LPSM lisa wait TOV */
357 u32 lixx_tov; /* LPSM lilp/lirp wait TOV */
358 u32 cls_tov; /* LPSM cls wait TOV */
359 u32 sler; /* LPSM SLER recvd */
360 u32 failed; /* LPSM failed */
361 u32 success; /* LPSM online */
362};
363
327/* 364/*
328 * IOC firmware FC uport stats 365 * IOC firmware FC uport stats
329 */ 366 */
330struct bfa_fw_fc_uport_stats_s { 367struct bfa_fw_fc_uport_stats_s {
331 struct bfa_fw_port_snsm_stats_s snsm_stats; 368 struct bfa_fw_port_snsm_stats_s snsm_stats;
332 struct bfa_fw_port_lksm_stats_s lksm_stats; 369 struct bfa_fw_port_lksm_stats_s lksm_stats;
370 struct bfa_fw_lpsm_stats_s lpsm_stats;
333}; 371};
334 372
335/* 373/*
@@ -357,11 +395,6 @@ struct bfa_fw_fcxchg_stats_s {
357 u32 ua_state_inv; 395 u32 ua_state_inv;
358}; 396};
359 397
360struct bfa_fw_lpsm_stats_s {
361 u32 cls_rx;
362 u32 cls_tx;
363};
364
365/* 398/*
366 * Trunk statistics 399 * Trunk statistics
367 */ 400 */
@@ -454,7 +487,6 @@ struct bfa_fw_stats_s {
454 struct bfa_fw_io_stats_s io_stats; 487 struct bfa_fw_io_stats_s io_stats;
455 struct bfa_fw_port_stats_s port_stats; 488 struct bfa_fw_port_stats_s port_stats;
456 struct bfa_fw_fcxchg_stats_s fcxchg_stats; 489 struct bfa_fw_fcxchg_stats_s fcxchg_stats;
457 struct bfa_fw_lpsm_stats_s lpsm_stats;
458 struct bfa_fw_lps_stats_s lps_stats; 490 struct bfa_fw_lps_stats_s lps_stats;
459 struct bfa_fw_trunk_stats_s trunk_stats; 491 struct bfa_fw_trunk_stats_s trunk_stats;
460 struct bfa_fw_advsm_stats_s advsm_stats; 492 struct bfa_fw_advsm_stats_s advsm_stats;
@@ -494,13 +526,23 @@ enum bfa_qos_bw_alloc {
494 BFA_QOS_BW_LOW = 10, /* bandwidth allocation for Low */ 526 BFA_QOS_BW_LOW = 10, /* bandwidth allocation for Low */
495}; 527};
496#pragma pack(1) 528#pragma pack(1)
529
530struct bfa_qos_bw_s {
531 u8 qos_bw_set;
532 u8 high;
533 u8 med;
534 u8 low;
535};
536
497/* 537/*
498 * QoS attribute returned in QoS Query 538 * QoS attribute returned in QoS Query
499 */ 539 */
500struct bfa_qos_attr_s { 540struct bfa_qos_attr_s {
501 u8 state; /* QoS current state */ 541 u8 state; /* QoS current state */
502 u8 rsvd[3]; 542 u8 rsvd1[3];
503 u32 total_bb_cr; /* Total BB Credits */ 543 u32 total_bb_cr; /* Total BB Credits */
544 struct bfa_qos_bw_s qos_bw; /* QOS bw cfg */
545 struct bfa_qos_bw_s qos_bw_op; /* QOS bw operational */
504}; 546};
505 547
506/* 548/*
@@ -692,7 +734,8 @@ enum bfa_port_states {
692 BFA_PORT_ST_FWMISMATCH = 12, 734 BFA_PORT_ST_FWMISMATCH = 12,
693 BFA_PORT_ST_PREBOOT_DISABLED = 13, 735 BFA_PORT_ST_PREBOOT_DISABLED = 13,
694 BFA_PORT_ST_TOGGLING_QWAIT = 14, 736 BFA_PORT_ST_TOGGLING_QWAIT = 14,
695 BFA_PORT_ST_ACQ_ADDR = 15, 737 BFA_PORT_ST_FAA_MISCONFIG = 15,
738 BFA_PORT_ST_DPORT = 16,
696 BFA_PORT_ST_MAX_STATE, 739 BFA_PORT_ST_MAX_STATE,
697}; 740};
698 741
@@ -714,9 +757,11 @@ enum bfa_port_type {
714 */ 757 */
715enum bfa_port_topology { 758enum bfa_port_topology {
716 BFA_PORT_TOPOLOGY_NONE = 0, /* No valid topology */ 759 BFA_PORT_TOPOLOGY_NONE = 0, /* No valid topology */
717 BFA_PORT_TOPOLOGY_P2P = 1, /* P2P only */ 760 BFA_PORT_TOPOLOGY_P2P_OLD_VER = 1, /* P2P def for older ver */
718 BFA_PORT_TOPOLOGY_LOOP = 2, /* LOOP topology */ 761 BFA_PORT_TOPOLOGY_LOOP = 2, /* LOOP topology */
719 BFA_PORT_TOPOLOGY_AUTO = 3, /* auto topology selection */ 762 BFA_PORT_TOPOLOGY_AUTO_OLD_VER = 3, /* auto def for older ver */
763 BFA_PORT_TOPOLOGY_AUTO = 4, /* auto topology selection */
764 BFA_PORT_TOPOLOGY_P2P = 5, /* P2P only */
720}; 765};
721 766
722/* 767/*
@@ -760,6 +805,7 @@ enum bfa_port_linkstate_rsn {
760 BFA_PORT_LINKSTATE_RSN_LOCAL_FAULT = 9, 805 BFA_PORT_LINKSTATE_RSN_LOCAL_FAULT = 9,
761 BFA_PORT_LINKSTATE_RSN_REMOTE_FAULT = 10, 806 BFA_PORT_LINKSTATE_RSN_REMOTE_FAULT = 10,
762 BFA_PORT_LINKSTATE_RSN_TIMEOUT = 11, 807 BFA_PORT_LINKSTATE_RSN_TIMEOUT = 11,
808 BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG = 12,
763 809
764 810
765 811
@@ -833,6 +879,19 @@ struct bfa_lunmask_cfg_s {
833 struct bfa_lun_mask_s lun_list[MAX_LUN_MASK_CFG]; 879 struct bfa_lun_mask_s lun_list[MAX_LUN_MASK_CFG];
834}; 880};
835 881
882struct bfa_throttle_cfg_s {
883 u16 is_valid;
884 u16 value;
885 u32 rsvd;
886};
887
888struct bfa_defs_fcpim_throttle_s {
889 u16 max_value;
890 u16 cur_value;
891 u16 cfg_value;
892 u16 rsvd;
893};
894
836/* 895/*
837 * Physical port configuration 896 * Physical port configuration
838 */ 897 */
@@ -851,9 +910,10 @@ struct bfa_port_cfg_s {
851 u8 bb_scn; /* BB_SCN value from FLOGI Exchg */ 910 u8 bb_scn; /* BB_SCN value from FLOGI Exchg */
852 u8 bb_scn_state; /* Config state of BB_SCN */ 911 u8 bb_scn_state; /* Config state of BB_SCN */
853 u8 faa_state; /* FAA enabled/disabled */ 912 u8 faa_state; /* FAA enabled/disabled */
854 u8 rsvd[1]; 913 u8 rsvd1;
855 u16 path_tov; /* device path timeout */ 914 u16 path_tov; /* device path timeout */
856 u16 q_depth; /* SCSI Queue depth */ 915 u16 q_depth; /* SCSI Queue depth */
916 struct bfa_qos_bw_s qos_bw; /* QOS bandwidth */
857}; 917};
858#pragma pack() 918#pragma pack()
859 919
@@ -901,7 +961,7 @@ struct bfa_port_attr_s {
901 961
902 /* FCoE specific */ 962 /* FCoE specific */
903 u16 fcoe_vlan; 963 u16 fcoe_vlan;
904 u8 rsvd1[2]; 964 u8 rsvd1[6];
905}; 965};
906 966
907/* 967/*
@@ -971,6 +1031,13 @@ struct bfa_trunk_vc_attr_s {
971 u16 vc_credits[8]; 1031 u16 vc_credits[8];
972}; 1032};
973 1033
1034struct bfa_fcport_loop_info_s {
1035 u8 myalpa; /* alpa claimed */
1036 u8 alpabm_val; /* alpa bitmap valid or not (1 or 0) */
1037 u8 resvd[6];
1038 struct fc_alpabm_s alpabm; /* alpa bitmap */
1039};
1040
974/* 1041/*
975 * Link state information 1042 * Link state information
976 */ 1043 */
@@ -981,13 +1048,18 @@ struct bfa_port_link_s {
981 u8 speed; /* Link speed (1/2/4/8 G) */ 1048 u8 speed; /* Link speed (1/2/4/8 G) */
982 u32 linkstate_opt; /* Linkstate optional data (debug) */ 1049 u32 linkstate_opt; /* Linkstate optional data (debug) */
983 u8 trunked; /* Trunked or not (1 or 0) */ 1050 u8 trunked; /* Trunked or not (1 or 0) */
984 u8 resvd[3]; 1051 u8 resvd[7];
985 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ 1052 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
986 union { 1053 union {
987 struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */ 1054 struct bfa_fcport_loop_info_s loop_info;
988 struct bfa_trunk_vc_attr_s trunk_vc_attr; 1055 union {
989 struct bfa_fcport_fcf_s fcf; /* FCF information (for FCoE) */ 1056 struct bfa_qos_vc_attr_s qos_vc_attr;
990 } vc_fcf; 1057 /* VC info from ELP */
1058 struct bfa_trunk_vc_attr_s trunk_vc_attr;
1059 struct bfa_fcport_fcf_s fcf;
1060 /* FCF information (for FCoE) */
1061 } vc_fcf;
1062 } attr;
991}; 1063};
992#pragma pack() 1064#pragma pack()
993 1065
@@ -1112,6 +1184,9 @@ struct bfa_port_fc_stats_s {
1112 u64 tx_frames; /* Tx frames */ 1184 u64 tx_frames; /* Tx frames */
1113 u64 tx_words; /* Tx words */ 1185 u64 tx_words; /* Tx words */
1114 u64 tx_lip; /* Tx LIP */ 1186 u64 tx_lip; /* Tx LIP */
1187 u64 tx_lip_f7f7; /* Tx LIP_F7F7 */
1188 u64 tx_lip_f8f7; /* Tx LIP_F8F7 */
1189 u64 tx_arbf0; /* Tx ARB F0 */
1115 u64 tx_nos; /* Tx NOS */ 1190 u64 tx_nos; /* Tx NOS */
1116 u64 tx_ols; /* Tx OLS */ 1191 u64 tx_ols; /* Tx OLS */
1117 u64 tx_lr; /* Tx LR */ 1192 u64 tx_lr; /* Tx LR */
@@ -1119,6 +1194,9 @@ struct bfa_port_fc_stats_s {
1119 u64 rx_frames; /* Rx frames */ 1194 u64 rx_frames; /* Rx frames */
1120 u64 rx_words; /* Rx words */ 1195 u64 rx_words; /* Rx words */
1121 u64 lip_count; /* Rx LIP */ 1196 u64 lip_count; /* Rx LIP */
1197 u64 rx_lip_f7f7; /* Rx LIP_F7F7 */
1198 u64 rx_lip_f8f7; /* Rx LIP_F8F7 */
1199 u64 rx_arbf0; /* Rx ARB F0 */
1122 u64 nos_count; /* Rx NOS */ 1200 u64 nos_count; /* Rx NOS */
1123 u64 ols_count; /* Rx OLS */ 1201 u64 ols_count; /* Rx OLS */
1124 u64 lr_count; /* Rx LR */ 1202 u64 lr_count; /* Rx LR */
@@ -1140,6 +1218,7 @@ struct bfa_port_fc_stats_s {
1140 u64 bbsc_frames_lost; /* Credit Recovery-Frames Lost */ 1218 u64 bbsc_frames_lost; /* Credit Recovery-Frames Lost */
1141 u64 bbsc_credits_lost; /* Credit Recovery-Credits Lost */ 1219 u64 bbsc_credits_lost; /* Credit Recovery-Credits Lost */
1142 u64 bbsc_link_resets; /* Credit Recovery-Link Resets */ 1220 u64 bbsc_link_resets; /* Credit Recovery-Link Resets */
1221 u64 loop_timeouts; /* Loop timeouts */
1143}; 1222};
1144 1223
1145/* 1224/*
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index e0beb4d7e264..bea821b98030 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -24,6 +24,7 @@ typedef u64 wwn_t;
24 24
25#define WWN_NULL (0) 25#define WWN_NULL (0)
26#define FC_SYMNAME_MAX 256 /* max name server symbolic name size */ 26#define FC_SYMNAME_MAX 256 /* max name server symbolic name size */
27#define FC_ALPA_MAX 128
27 28
28#pragma pack(1) 29#pragma pack(1)
29 30
@@ -1015,6 +1016,10 @@ struct fc_symname_s {
1015 u8 symname[FC_SYMNAME_MAX]; 1016 u8 symname[FC_SYMNAME_MAX];
1016}; 1017};
1017 1018
1019struct fc_alpabm_s {
1020 u8 alpa_bm[FC_ALPA_MAX / 8];
1021};
1022
1018/* 1023/*
1019 * protocol default timeout values 1024 * protocol default timeout values
1020 */ 1025 */
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index 273cee90b3b4..dce787f6cca2 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -228,6 +228,10 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
228 228
229 memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s)); 229 memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s));
230 230
231 /* For FC AL bb_cr is 0 and altbbcred is 1 */
232 if (!bb_cr)
233 plogi->csp.altbbcred = 1;
234
231 plogi->els_cmd.els_code = els_code; 235 plogi->els_cmd.els_code = els_code;
232 if (els_code == FC_ELS_PLOGI) 236 if (els_code == FC_ELS_PLOGI)
233 fc_els_req_build(fchs, d_id, s_id, ox_id); 237 fc_els_req_build(fchs, d_id, s_id, ox_id);
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 1633963c66ca..27b560962357 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -158,6 +158,7 @@ enum bfa_tskim_event {
158 BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */ 158 BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */
159 BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */ 159 BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */
160 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */ 160 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
161 BFA_TSKIM_SM_UTAG = 10, /* TM completion unknown tag */
161}; 162};
162 163
163/* 164/*
@@ -3036,7 +3037,7 @@ bfa_ioim_abort(struct bfa_ioim_s *ioim)
3036static void 3037static void
3037bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 3038bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3038{ 3039{
3039 bfa_trc(tskim->bfa, event); 3040 bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3040 3041
3041 switch (event) { 3042 switch (event) {
3042 case BFA_TSKIM_SM_START: 3043 case BFA_TSKIM_SM_START:
@@ -3074,7 +3075,7 @@ bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3074static void 3075static void
3075bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 3076bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3076{ 3077{
3077 bfa_trc(tskim->bfa, event); 3078 bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3078 3079
3079 switch (event) { 3080 switch (event) {
3080 case BFA_TSKIM_SM_DONE: 3081 case BFA_TSKIM_SM_DONE:
@@ -3110,7 +3111,7 @@ bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3110static void 3111static void
3111bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 3112bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3112{ 3113{
3113 bfa_trc(tskim->bfa, event); 3114 bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3114 3115
3115 switch (event) { 3116 switch (event) {
3116 case BFA_TSKIM_SM_DONE: 3117 case BFA_TSKIM_SM_DONE:
@@ -3119,6 +3120,7 @@ bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3119 */ 3120 */
3120 break; 3121 break;
3121 3122
3123 case BFA_TSKIM_SM_UTAG:
3122 case BFA_TSKIM_SM_CLEANUP_DONE: 3124 case BFA_TSKIM_SM_CLEANUP_DONE:
3123 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); 3125 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3124 bfa_tskim_cleanup_ios(tskim); 3126 bfa_tskim_cleanup_ios(tskim);
@@ -3138,7 +3140,7 @@ bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3138static void 3140static void
3139bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 3141bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3140{ 3142{
3141 bfa_trc(tskim->bfa, event); 3143 bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3142 3144
3143 switch (event) { 3145 switch (event) {
3144 case BFA_TSKIM_SM_IOS_DONE: 3146 case BFA_TSKIM_SM_IOS_DONE:
@@ -3170,7 +3172,7 @@ bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3170static void 3172static void
3171bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 3173bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3172{ 3174{
3173 bfa_trc(tskim->bfa, event); 3175 bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3174 3176
3175 switch (event) { 3177 switch (event) {
3176 case BFA_TSKIM_SM_QRESUME: 3178 case BFA_TSKIM_SM_QRESUME:
@@ -3207,7 +3209,7 @@ static void
3207bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, 3209bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
3208 enum bfa_tskim_event event) 3210 enum bfa_tskim_event event)
3209{ 3211{
3210 bfa_trc(tskim->bfa, event); 3212 bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3211 3213
3212 switch (event) { 3214 switch (event) {
3213 case BFA_TSKIM_SM_DONE: 3215 case BFA_TSKIM_SM_DONE:
@@ -3238,7 +3240,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
3238static void 3240static void
3239bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 3241bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3240{ 3242{
3241 bfa_trc(tskim->bfa, event); 3243 bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3242 3244
3243 switch (event) { 3245 switch (event) {
3244 case BFA_TSKIM_SM_HCB: 3246 case BFA_TSKIM_SM_HCB:
@@ -3560,6 +3562,8 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3560 if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) { 3562 if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
3561 bfa_stats(tskim->itnim, tm_cleanup_comps); 3563 bfa_stats(tskim->itnim, tm_cleanup_comps);
3562 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE); 3564 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
3565 } else if (rsp->tsk_status == BFI_TSKIM_STS_UTAG) {
3566 bfa_sm_send_event(tskim, BFA_TSKIM_SM_UTAG);
3563 } else { 3567 } else {
3564 bfa_stats(tskim->itnim, tm_fw_rsps); 3568 bfa_stats(tskim->itnim, tm_fw_rsps);
3565 bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE); 3569 bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
@@ -3699,6 +3703,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
3699 struct bfa_mem_dma_s *seg_ptr; 3703 struct bfa_mem_dma_s *seg_ptr;
3700 u16 idx, nsegs, num_io_req; 3704 u16 idx, nsegs, num_io_req;
3701 3705
3706 fcp->max_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
3702 fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs; 3707 fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
3703 fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs; 3708 fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs;
3704 fcp->num_itns = cfg->fwcfg.num_rports; 3709 fcp->num_itns = cfg->fwcfg.num_rports;
@@ -3721,6 +3726,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
3721 bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa); 3726 bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
3722 } 3727 }
3723 3728
3729 fcp->throttle_update_required = 1;
3724 bfa_fcpim_attach(fcp, bfad, cfg, pcidev); 3730 bfa_fcpim_attach(fcp, bfad, cfg, pcidev);
3725 3731
3726 bfa_iotag_attach(fcp); 3732 bfa_iotag_attach(fcp);
@@ -3759,23 +3765,33 @@ bfa_fcp_iocdisable(struct bfa_s *bfa)
3759{ 3765{
3760 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); 3766 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3761 3767
3762 /* Enqueue unused ioim resources to free_q */
3763 list_splice_tail_init(&fcp->iotag_unused_q, &fcp->iotag_ioim_free_q);
3764
3765 bfa_fcpim_iocdisable(fcp); 3768 bfa_fcpim_iocdisable(fcp);
3766} 3769}
3767 3770
3768void 3771void
3769bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw) 3772bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw)
3770{ 3773{
3771 struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa); 3774 struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa);
3772 struct list_head *qe; 3775 struct list_head *qe;
3773 int i; 3776 int i;
3774 3777
3778 /* Update io throttle value only once during driver load time */
3779 if (!mod->throttle_update_required)
3780 return;
3781
3775 for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) { 3782 for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
3776 bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe); 3783 bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
3777 list_add_tail(qe, &mod->iotag_unused_q); 3784 list_add_tail(qe, &mod->iotag_unused_q);
3778 } 3785 }
3786
3787 if (mod->num_ioim_reqs != num_ioim_fw) {
3788 bfa_trc(bfa, mod->num_ioim_reqs);
3789 bfa_trc(bfa, num_ioim_fw);
3790 }
3791
3792 mod->max_ioim_reqs = max_ioim_fw;
3793 mod->num_ioim_reqs = num_ioim_fw;
3794 mod->throttle_update_required = 0;
3779} 3795}
3780 3796
3781void 3797void
@@ -3833,3 +3849,88 @@ bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
3833 3849
3834 bfa_mem_kva_curp(fcp) = (u8 *) iotag; 3850 bfa_mem_kva_curp(fcp) = (u8 *) iotag;
3835} 3851}
3852
3853
3854/**
3855 * To send config req, first try to use throttle value from flash
3856 * If 0, then use driver parameter
3857 * We need to use min(flash_val, drv_val) because
3858 * memory allocation was done based on this cfg'd value
3859 */
3860u16
3861bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param)
3862{
3863 u16 tmp;
3864 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3865
3866 /*
3867 * If throttle value from flash is already in effect after driver is
3868 * loaded then until next load, always return current value instead
3869 * of actual flash value
3870 */
3871 if (!fcp->throttle_update_required)
3872 return (u16)fcp->num_ioim_reqs;
3873
3874 tmp = bfa_dconf_read_data_valid(bfa) ? bfa_fcpim_read_throttle(bfa) : 0;
3875 if (!tmp || (tmp > drv_cfg_param))
3876 tmp = drv_cfg_param;
3877
3878 return tmp;
3879}
3880
3881bfa_status_t
3882bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value)
3883{
3884 if (!bfa_dconf_get_min_cfg(bfa)) {
3885 BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.value = value;
3886 BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.is_valid = 1;
3887 return BFA_STATUS_OK;
3888 }
3889
3890 return BFA_STATUS_FAILED;
3891}
3892
3893u16
3894bfa_fcpim_read_throttle(struct bfa_s *bfa)
3895{
3896 struct bfa_throttle_cfg_s *throttle_cfg =
3897 &(BFA_DCONF_MOD(bfa)->dconf->throttle_cfg);
3898
3899 return ((!bfa_dconf_get_min_cfg(bfa)) ?
3900 ((throttle_cfg->is_valid == 1) ? (throttle_cfg->value) : 0) : 0);
3901}
3902
3903bfa_status_t
3904bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value)
3905{
3906 /* in min cfg no commands should run. */
3907 if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
3908 (!bfa_dconf_read_data_valid(bfa)))
3909 return BFA_STATUS_FAILED;
3910
3911 bfa_fcpim_write_throttle(bfa, value);
3912
3913 return bfa_dconf_update(bfa);
3914}
3915
3916bfa_status_t
3917bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf)
3918{
3919 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3920 struct bfa_defs_fcpim_throttle_s throttle;
3921
3922 if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
3923 (!bfa_dconf_read_data_valid(bfa)))
3924 return BFA_STATUS_FAILED;
3925
3926 memset(&throttle, 0, sizeof(struct bfa_defs_fcpim_throttle_s));
3927
3928 throttle.cur_value = (u16)(fcpim->fcp->num_ioim_reqs);
3929 throttle.cfg_value = bfa_fcpim_read_throttle(bfa);
3930 if (!throttle.cfg_value)
3931 throttle.cfg_value = throttle.cur_value;
3932 throttle.max_value = (u16)(fcpim->fcp->max_ioim_reqs);
3933 memcpy(buf, &throttle, sizeof(struct bfa_defs_fcpim_throttle_s));
3934
3935 return BFA_STATUS_OK;
3936}
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 36f26da80f76..e693af6e5930 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -42,7 +42,7 @@ void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
42 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m)); 42 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
43void bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m); 43void bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m);
44void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp); 44void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp);
45void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw); 45void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw);
46 46
47#define BFA_FCP_MOD(_hal) (&(_hal)->modules.fcp_mod) 47#define BFA_FCP_MOD(_hal) (&(_hal)->modules.fcp_mod)
48#define BFA_MEM_FCP_KVA(__bfa) (&(BFA_FCP_MOD(__bfa)->kva_seg)) 48#define BFA_MEM_FCP_KVA(__bfa) (&(BFA_FCP_MOD(__bfa)->kva_seg))
@@ -51,7 +51,9 @@ void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw);
51#define BFA_ITN_FROM_TAG(_fcp, _tag) \ 51#define BFA_ITN_FROM_TAG(_fcp, _tag) \
52 ((_fcp)->itn_arr + ((_tag) & ((_fcp)->num_itns - 1))) 52 ((_fcp)->itn_arr + ((_tag) & ((_fcp)->num_itns - 1)))
53#define BFA_SNSINFO_FROM_TAG(_fcp, _tag) \ 53#define BFA_SNSINFO_FROM_TAG(_fcp, _tag) \
54 bfa_mem_get_dmabuf_kva(_fcp, _tag, BFI_IOIM_SNSLEN) 54 bfa_mem_get_dmabuf_kva(_fcp, (_tag & BFA_IOIM_IOTAG_MASK), \
55 BFI_IOIM_SNSLEN)
56
55 57
56#define BFA_ITNIM_MIN 32 58#define BFA_ITNIM_MIN 32
57#define BFA_ITNIM_MAX 1024 59#define BFA_ITNIM_MAX 1024
@@ -148,6 +150,7 @@ struct bfa_fcp_mod_s {
148 struct list_head iotag_unused_q; /* unused IO resources*/ 150 struct list_head iotag_unused_q; /* unused IO resources*/
149 struct bfa_iotag_s *iotag_arr; 151 struct bfa_iotag_s *iotag_arr;
150 struct bfa_itn_s *itn_arr; 152 struct bfa_itn_s *itn_arr;
153 int max_ioim_reqs;
151 int num_ioim_reqs; 154 int num_ioim_reqs;
152 int num_fwtio_reqs; 155 int num_fwtio_reqs;
153 int num_itns; 156 int num_itns;
@@ -155,6 +158,7 @@ struct bfa_fcp_mod_s {
155 struct bfa_fcpim_s fcpim; 158 struct bfa_fcpim_s fcpim;
156 struct bfa_mem_dma_s dma_seg[BFA_FCP_DMA_SEGS]; 159 struct bfa_mem_dma_s dma_seg[BFA_FCP_DMA_SEGS];
157 struct bfa_mem_kva_s kva_seg; 160 struct bfa_mem_kva_s kva_seg;
161 int throttle_update_required;
158}; 162};
159 163
160/* 164/*
@@ -416,5 +420,10 @@ bfa_status_t bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id,
416bfa_status_t bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, 420bfa_status_t bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id,
417 wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun); 421 wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
418bfa_status_t bfa_fcpim_lunmask_clear(struct bfa_s *bfa); 422bfa_status_t bfa_fcpim_lunmask_clear(struct bfa_s *bfa);
423u16 bfa_fcpim_read_throttle(struct bfa_s *bfa);
424bfa_status_t bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value);
425bfa_status_t bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value);
426bfa_status_t bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf);
427u16 bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param);
419 428
420#endif /* __BFA_FCPIM_H__ */ 429#endif /* __BFA_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index fd3e84d32bd2..d428808fb37e 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -303,16 +303,30 @@ static void
303bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, 303bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
304 enum bfa_fcs_fabric_event event) 304 enum bfa_fcs_fabric_event event)
305{ 305{
306 struct bfa_s *bfa = fabric->fcs->bfa;
307
306 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); 308 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
307 bfa_trc(fabric->fcs, event); 309 bfa_trc(fabric->fcs, event);
308 310
309 switch (event) { 311 switch (event) {
310 case BFA_FCS_FABRIC_SM_START: 312 case BFA_FCS_FABRIC_SM_START:
311 if (bfa_fcport_is_linkup(fabric->fcs->bfa)) { 313 if (!bfa_fcport_is_linkup(fabric->fcs->bfa)) {
314 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
315 break;
316 }
317 if (bfa_fcport_get_topology(bfa) ==
318 BFA_PORT_TOPOLOGY_LOOP) {
319 fabric->fab_type = BFA_FCS_FABRIC_LOOP;
320 fabric->bport.pid = bfa_fcport_get_myalpa(bfa);
321 fabric->bport.pid = bfa_hton3b(fabric->bport.pid);
322 bfa_sm_set_state(fabric,
323 bfa_fcs_fabric_sm_online);
324 bfa_fcs_fabric_set_opertype(fabric);
325 bfa_fcs_lport_online(&fabric->bport);
326 } else {
312 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); 327 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
313 bfa_fcs_fabric_login(fabric); 328 bfa_fcs_fabric_login(fabric);
314 } else 329 }
315 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
316 break; 330 break;
317 331
318 case BFA_FCS_FABRIC_SM_LINK_UP: 332 case BFA_FCS_FABRIC_SM_LINK_UP:
@@ -337,16 +351,28 @@ static void
337bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, 351bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
338 enum bfa_fcs_fabric_event event) 352 enum bfa_fcs_fabric_event event)
339{ 353{
354 struct bfa_s *bfa = fabric->fcs->bfa;
355
340 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); 356 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
341 bfa_trc(fabric->fcs, event); 357 bfa_trc(fabric->fcs, event);
342 358
343 switch (event) { 359 switch (event) {
344 case BFA_FCS_FABRIC_SM_LINK_UP: 360 case BFA_FCS_FABRIC_SM_LINK_UP:
345 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); 361 if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP) {
346 bfa_fcs_fabric_login(fabric); 362 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
363 bfa_fcs_fabric_login(fabric);
364 break;
365 }
366 fabric->fab_type = BFA_FCS_FABRIC_LOOP;
367 fabric->bport.pid = bfa_fcport_get_myalpa(bfa);
368 fabric->bport.pid = bfa_hton3b(fabric->bport.pid);
369 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online);
370 bfa_fcs_fabric_set_opertype(fabric);
371 bfa_fcs_lport_online(&fabric->bport);
347 break; 372 break;
348 373
349 case BFA_FCS_FABRIC_SM_RETRY_OP: 374 case BFA_FCS_FABRIC_SM_RETRY_OP:
375 case BFA_FCS_FABRIC_SM_LOOPBACK:
350 break; 376 break;
351 377
352 case BFA_FCS_FABRIC_SM_DELETE: 378 case BFA_FCS_FABRIC_SM_DELETE:
@@ -595,14 +621,20 @@ void
595bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, 621bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
596 enum bfa_fcs_fabric_event event) 622 enum bfa_fcs_fabric_event event)
597{ 623{
624 struct bfa_s *bfa = fabric->fcs->bfa;
625
598 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); 626 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
599 bfa_trc(fabric->fcs, event); 627 bfa_trc(fabric->fcs, event);
600 628
601 switch (event) { 629 switch (event) {
602 case BFA_FCS_FABRIC_SM_LINK_DOWN: 630 case BFA_FCS_FABRIC_SM_LINK_DOWN:
603 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); 631 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
604 bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); 632 if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) {
605 bfa_fcs_fabric_notify_offline(fabric); 633 bfa_fcs_lport_offline(&fabric->bport);
634 } else {
635 bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
636 bfa_fcs_fabric_notify_offline(fabric);
637 }
606 break; 638 break;
607 639
608 case BFA_FCS_FABRIC_SM_DELETE: 640 case BFA_FCS_FABRIC_SM_DELETE:
@@ -719,20 +751,29 @@ static void
719bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric, 751bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric,
720 enum bfa_fcs_fabric_event event) 752 enum bfa_fcs_fabric_event event)
721{ 753{
754 struct bfa_s *bfa = fabric->fcs->bfa;
755
722 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); 756 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
723 bfa_trc(fabric->fcs, event); 757 bfa_trc(fabric->fcs, event);
724 758
725 switch (event) { 759 switch (event) {
726 case BFA_FCS_FABRIC_SM_STOPCOMP: 760 case BFA_FCS_FABRIC_SM_STOPCOMP:
727 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup); 761 if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) {
728 bfa_sm_send_event(fabric->lps, BFA_LPS_SM_LOGOUT); 762 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
763 } else {
764 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
765 bfa_sm_send_event(fabric->lps, BFA_LPS_SM_LOGOUT);
766 }
729 break; 767 break;
730 768
731 case BFA_FCS_FABRIC_SM_LINK_UP: 769 case BFA_FCS_FABRIC_SM_LINK_UP:
732 break; 770 break;
733 771
734 case BFA_FCS_FABRIC_SM_LINK_DOWN: 772 case BFA_FCS_FABRIC_SM_LINK_DOWN:
735 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup); 773 if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)
774 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
775 else
776 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
736 break; 777 break;
737 778
738 default: 779 default:
@@ -975,9 +1016,6 @@ bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric)
975 struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg; 1016 struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg;
976 u8 alpa = 0, bb_scn = 0; 1017 u8 alpa = 0, bb_scn = 0;
977 1018
978 if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)
979 alpa = bfa_fcport_get_myalpa(bfa);
980
981 if (bfa_fcs_fabric_is_bbscn_enabled(fabric) && 1019 if (bfa_fcs_fabric_is_bbscn_enabled(fabric) &&
982 (!fabric->fcs->bbscn_flogi_rjt)) 1020 (!fabric->fcs->bbscn_flogi_rjt))
983 bb_scn = BFA_FCS_PORT_DEF_BB_SCN; 1021 bb_scn = BFA_FCS_PORT_DEF_BB_SCN;
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index 6c4377cb287f..a449706c6bc0 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -118,9 +118,9 @@ struct bfa_fcs_lport_fab_s {
118#define MAX_ALPA_COUNT 127 118#define MAX_ALPA_COUNT 127
119 119
120struct bfa_fcs_lport_loop_s { 120struct bfa_fcs_lport_loop_s {
121 u8 num_alpa; /* Num of ALPA entries in the map */ 121 u8 num_alpa; /* Num of ALPA entries in the map */
122 u8 alpa_pos_map[MAX_ALPA_COUNT]; /* ALPA Positional 122 u8 alpabm_valid; /* alpa bitmap valid or not (1 or 0) */
123 *Map */ 123 u8 alpa_pos_map[MAX_ALPA_COUNT]; /* ALPA Positional Map */
124 struct bfa_fcs_lport_s *port; /* parent port */ 124 struct bfa_fcs_lport_s *port; /* parent port */
125}; 125};
126 126
@@ -175,6 +175,7 @@ enum bfa_fcs_fabric_type {
175 BFA_FCS_FABRIC_UNKNOWN = 0, 175 BFA_FCS_FABRIC_UNKNOWN = 0,
176 BFA_FCS_FABRIC_SWITCHED = 1, 176 BFA_FCS_FABRIC_SWITCHED = 1,
177 BFA_FCS_FABRIC_N2N = 2, 177 BFA_FCS_FABRIC_N2N = 2,
178 BFA_FCS_FABRIC_LOOP = 3,
178}; 179};
179 180
180 181
@@ -350,9 +351,10 @@ void bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg,
350 struct bfa_fcxp_s *fcxp_alloced); 351 struct bfa_fcxp_s *fcxp_alloced);
351void bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *vport); 352void bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *vport);
352void bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *vport); 353void bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *vport);
353void bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *vport); 354void bfa_fcs_lport_fab_scn_online(struct bfa_fcs_lport_s *vport);
354void bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port, 355void bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
355 struct fchs_s *rx_frame, u32 len); 356 struct fchs_s *rx_frame, u32 len);
357void bfa_fcs_lport_lip_scn_online(bfa_fcs_lport_t *port);
356 358
357struct bfa_fcs_vport_s { 359struct bfa_fcs_vport_s {
358 struct list_head qe; /* queue elem */ 360 struct list_head qe; /* queue elem */
@@ -453,6 +455,7 @@ struct bfa_fcs_rport_s {
453 struct bfa_rport_stats_s stats; /* rport stats */ 455 struct bfa_rport_stats_s stats; /* rport stats */
454 enum bfa_rport_function scsi_function; /* Initiator/Target */ 456 enum bfa_rport_function scsi_function; /* Initiator/Target */
455 struct bfa_fcs_rpf_s rpf; /* Rport features module */ 457 struct bfa_fcs_rpf_s rpf; /* Rport features module */
458 bfa_boolean_t scn_online; /* SCN online flag */
456}; 459};
457 460
458static inline struct bfa_rport_s * 461static inline struct bfa_rport_s *
@@ -639,9 +642,9 @@ struct bfa_fcs_fdmi_hba_attr_s {
639 u8 model[16]; 642 u8 model[16];
640 u8 model_desc[256]; 643 u8 model_desc[256];
641 u8 hw_version[8]; 644 u8 hw_version[8];
642 u8 driver_version[8]; 645 u8 driver_version[BFA_VERSION_LEN];
643 u8 option_rom_ver[BFA_VERSION_LEN]; 646 u8 option_rom_ver[BFA_VERSION_LEN];
644 u8 fw_version[8]; 647 u8 fw_version[BFA_VERSION_LEN];
645 u8 os_name[256]; 648 u8 os_name[256];
646 __be32 max_ct_pyld; 649 __be32 max_ct_pyld;
647}; 650};
@@ -733,7 +736,7 @@ enum rport_event {
733 RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */ 736 RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */
734 RPSM_EVENT_FCXP_SENT = 6, /* Frame from has been sent */ 737 RPSM_EVENT_FCXP_SENT = 6, /* Frame from has been sent */
735 RPSM_EVENT_DELETE = 7, /* RPORT delete request */ 738 RPSM_EVENT_DELETE = 7, /* RPORT delete request */
736 RPSM_EVENT_SCN = 8, /* state change notification */ 739 RPSM_EVENT_FAB_SCN = 8, /* state change notification */
737 RPSM_EVENT_ACCEPTED = 9, /* Good response from remote device */ 740 RPSM_EVENT_ACCEPTED = 9, /* Good response from remote device */
738 RPSM_EVENT_FAILED = 10, /* Request to rport failed. */ 741 RPSM_EVENT_FAILED = 10, /* Request to rport failed. */
739 RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */ 742 RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */
@@ -744,7 +747,9 @@ enum rport_event {
744 RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */ 747 RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */
745 RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */ 748 RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */
746 RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continuously */ 749 RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continuously */
747 RPSM_EVENT_FC4_FCS_ONLINE = 19, /*!< FC-4 FCS online complete */ 750 RPSM_EVENT_SCN_OFFLINE = 19, /* loop scn offline */
751 RPSM_EVENT_SCN_ONLINE = 20, /* loop scn online */
752 RPSM_EVENT_FC4_FCS_ONLINE = 21, /* FC-4 FCS online complete */
748}; 753};
749 754
750/* 755/*
@@ -763,7 +768,7 @@ enum bfa_fcs_itnim_event {
763 BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */ 768 BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */
764 BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */ 769 BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */
765 BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */ 770 BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */
766 BFA_FCS_ITNIM_SM_HAL_ONLINE = 13, /*!< bfa rport online event */ 771 BFA_FCS_ITNIM_SM_HAL_ONLINE = 13, /* bfa rport online event */
767}; 772};
768 773
769/* 774/*
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 3b75f6fb2de1..1224d0462a49 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -23,6 +23,34 @@
23 23
24BFA_TRC_FILE(FCS, PORT); 24BFA_TRC_FILE(FCS, PORT);
25 25
26/*
27 * ALPA to LIXA bitmap mapping
28 *
29 * ALPA 0x00 (Word 0, Bit 30) is invalid for N_Ports. Also Word 0 Bit 31
30 * is for L_bit (login required) and is filled as ALPA 0x00 here.
31 */
32static const u8 loop_alpa_map[] = {
33 0x00, 0x00, 0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, /* Word 0 Bits 31..24 */
34 0x17, 0x18, 0x1B, 0x1D, 0x1E, 0x1F, 0x23, 0x25, /* Word 0 Bits 23..16 */
35 0x26, 0x27, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, /* Word 0 Bits 15..08 */
36 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x39, 0x3A, /* Word 0 Bits 07..00 */
37
38 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49, 0x4A, 0x4B, /* Word 1 Bits 31..24 */
39 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54, 0x55, /* Word 1 Bits 23..16 */
40 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67, /* Word 1 Bits 15..08 */
41 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, /* Word 1 Bits 07..00 */
42
43 0x73, 0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, /* Word 2 Bits 31..24 */
44 0x81, 0x82, 0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, /* Word 2 Bits 23..16 */
45 0x9B, 0x9D, 0x9E, 0x9F, 0xA3, 0xA5, 0xA6, 0xA7, /* Word 2 Bits 15..08 */
46 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xB1, 0xB2, /* Word 2 Bits 07..00 */
47
48 0xB3, 0xB4, 0xB5, 0xB6, 0xB9, 0xBA, 0xBC, 0xC3, /* Word 3 Bits 31..24 */
49 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, /* Word 3 Bits 23..16 */
50 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD9, /* Word 3 Bits 15..08 */
51 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF, /* Word 3 Bits 07..00 */
52};
53
26static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, 54static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port,
27 struct fchs_s *rx_fchs, u8 reason_code, 55 struct fchs_s *rx_fchs, u8 reason_code,
28 u8 reason_code_expl); 56 u8 reason_code_expl);
@@ -51,6 +79,10 @@ static void bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port);
51static void bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port); 79static void bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port);
52static void bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port); 80static void bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port);
53 81
82static void bfa_fcs_lport_loop_init(struct bfa_fcs_lport_s *port);
83static void bfa_fcs_lport_loop_online(struct bfa_fcs_lport_s *port);
84static void bfa_fcs_lport_loop_offline(struct bfa_fcs_lport_s *port);
85
54static struct { 86static struct {
55 void (*init) (struct bfa_fcs_lport_s *port); 87 void (*init) (struct bfa_fcs_lport_s *port);
56 void (*online) (struct bfa_fcs_lport_s *port); 88 void (*online) (struct bfa_fcs_lport_s *port);
@@ -62,7 +94,9 @@ static struct {
62 bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online, 94 bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
63 bfa_fcs_lport_fab_offline}, { 95 bfa_fcs_lport_fab_offline}, {
64 bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online, 96 bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
65 bfa_fcs_lport_n2n_offline}, 97 bfa_fcs_lport_n2n_offline}, {
98 bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
99 bfa_fcs_lport_loop_offline},
66 }; 100 };
67 101
68/* 102/*
@@ -1127,7 +1161,7 @@ static void
1127bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port) 1161bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port)
1128{ 1162{
1129 bfa_fcs_lport_ns_online(port); 1163 bfa_fcs_lport_ns_online(port);
1130 bfa_fcs_lport_scn_online(port); 1164 bfa_fcs_lport_fab_scn_online(port);
1131} 1165}
1132 1166
1133/* 1167/*
@@ -1221,6 +1255,98 @@ bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port)
1221 n2n_port->reply_oxid = 0; 1255 n2n_port->reply_oxid = 0;
1222} 1256}
1223 1257
1258void
1259bfa_fcport_get_loop_attr(struct bfa_fcs_lport_s *port)
1260{
1261 int i = 0, j = 0, bit = 0, alpa_bit = 0;
1262 u8 k = 0;
1263 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(port->fcs->bfa);
1264
1265 port->port_topo.ploop.alpabm_valid = fcport->alpabm_valid;
1266 port->pid = fcport->myalpa;
1267 port->pid = bfa_hton3b(port->pid);
1268
1269 for (i = 0; i < (FC_ALPA_MAX / 8); i++) {
1270 for (j = 0, alpa_bit = 0; j < 8; j++, alpa_bit++) {
1271 bfa_trc(port->fcs->bfa, fcport->alpabm.alpa_bm[i]);
1272 bit = (fcport->alpabm.alpa_bm[i] & (1 << (7 - j)));
1273 if (bit) {
1274 port->port_topo.ploop.alpa_pos_map[k] =
1275 loop_alpa_map[(i * 8) + alpa_bit];
1276 k++;
1277 bfa_trc(port->fcs->bfa, k);
1278 bfa_trc(port->fcs->bfa,
1279 port->port_topo.ploop.alpa_pos_map[k]);
1280 }
1281 }
1282 }
1283 port->port_topo.ploop.num_alpa = k;
1284}
1285
1286/*
1287 * Called by fcs/port to initialize Loop topology.
1288 */
1289static void
1290bfa_fcs_lport_loop_init(struct bfa_fcs_lport_s *port)
1291{
1292}
1293
1294/*
1295 * Called by fcs/port to notify transition to online state.
1296 */
1297static void
1298bfa_fcs_lport_loop_online(struct bfa_fcs_lport_s *port)
1299{
1300 u8 num_alpa = 0, alpabm_valid = 0;
1301 struct bfa_fcs_rport_s *rport;
1302 u8 *alpa_map = NULL;
1303 int i = 0;
1304 u32 pid;
1305
1306 bfa_fcport_get_loop_attr(port);
1307
1308 num_alpa = port->port_topo.ploop.num_alpa;
1309 alpabm_valid = port->port_topo.ploop.alpabm_valid;
1310 alpa_map = port->port_topo.ploop.alpa_pos_map;
1311
1312 bfa_trc(port->fcs->bfa, port->pid);
1313 bfa_trc(port->fcs->bfa, num_alpa);
1314 if (alpabm_valid == 1) {
1315 for (i = 0; i < num_alpa; i++) {
1316 bfa_trc(port->fcs->bfa, alpa_map[i]);
1317 if (alpa_map[i] != bfa_hton3b(port->pid)) {
1318 pid = alpa_map[i];
1319 bfa_trc(port->fcs->bfa, pid);
1320 rport = bfa_fcs_lport_get_rport_by_pid(port,
1321 bfa_hton3b(pid));
1322 if (!rport)
1323 rport = bfa_fcs_rport_create(port,
1324 bfa_hton3b(pid));
1325 }
1326 }
1327 } else {
1328 for (i = 0; i < MAX_ALPA_COUNT; i++) {
1329 if (alpa_map[i] != port->pid) {
1330 pid = loop_alpa_map[i];
1331 bfa_trc(port->fcs->bfa, pid);
1332 rport = bfa_fcs_lport_get_rport_by_pid(port,
1333 bfa_hton3b(pid));
1334 if (!rport)
1335 rport = bfa_fcs_rport_create(port,
1336 bfa_hton3b(pid));
1337 }
1338 }
1339 }
1340}
1341
1342/*
1343 * Called by fcs/port to notify transition to offline state.
1344 */
1345static void
1346bfa_fcs_lport_loop_offline(struct bfa_fcs_lport_s *port)
1347{
1348}
1349
1224#define BFA_FCS_FDMI_CMD_MAX_RETRIES 2 1350#define BFA_FCS_FDMI_CMD_MAX_RETRIES 2
1225 1351
1226/* 1352/*
@@ -1888,13 +2014,10 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1888 sizeof(templen)); 2014 sizeof(templen));
1889 } 2015 }
1890 2016
1891 /*
1892 * f/w Version = driver version
1893 */
1894 attr = (struct fdmi_attr_s *) curr_ptr; 2017 attr = (struct fdmi_attr_s *) curr_ptr;
1895 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION); 2018 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION);
1896 templen = (u16) strlen(fcs_hba_attr->driver_version); 2019 templen = (u16) strlen(fcs_hba_attr->fw_version);
1897 memcpy(attr->value, fcs_hba_attr->driver_version, templen); 2020 memcpy(attr->value, fcs_hba_attr->fw_version, templen);
1898 templen = fc_roundup(templen, sizeof(u32)); 2021 templen = fc_roundup(templen, sizeof(u32));
1899 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; 2022 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
1900 len += templen; 2023 len += templen;
@@ -2296,6 +2419,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
2296{ 2419{
2297 struct bfa_fcs_lport_s *port = fdmi->ms->port; 2420 struct bfa_fcs_lport_s *port = fdmi->ms->port;
2298 struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; 2421 struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
2422 struct bfa_fcs_fdmi_port_attr_s fcs_port_attr;
2299 2423
2300 memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s)); 2424 memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));
2301 2425
@@ -2331,7 +2455,9 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
2331 sizeof(driver_info->host_os_patch)); 2455 sizeof(driver_info->host_os_patch));
2332 } 2456 }
2333 2457
2334 hba_attr->max_ct_pyld = cpu_to_be32(FC_MAX_PDUSZ); 2458 /* Retrieve the max frame size from the port attr */
2459 bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
2460 hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size;
2335} 2461}
2336 2462
2337static void 2463static void
@@ -2391,7 +2517,7 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
2391 /* 2517 /*
2392 * Max PDU Size. 2518 * Max PDU Size.
2393 */ 2519 */
2394 port_attr->max_frm_size = cpu_to_be32(FC_MAX_PDUSZ); 2520 port_attr->max_frm_size = cpu_to_be32(pport_attr.pport_cfg.maxfrsize);
2395 2521
2396 /* 2522 /*
2397 * OS device Name 2523 * OS device Name
@@ -5199,7 +5325,7 @@ bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *port)
5199} 5325}
5200 5326
5201void 5327void
5202bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *port) 5328bfa_fcs_lport_fab_scn_online(struct bfa_fcs_lport_s *port)
5203{ 5329{
5204 struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port); 5330 struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);
5205 5331
@@ -5621,6 +5747,15 @@ bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port)
5621} 5747}
5622 5748
5623/* 5749/*
5750 * Let new loop map create missing rports
5751 */
5752void
5753bfa_fcs_lport_lip_scn_online(struct bfa_fcs_lport_s *port)
5754{
5755 bfa_fcs_lport_loop_online(port);
5756}
5757
5758/*
5624 * FCS virtual port state machine 5759 * FCS virtual port state machine
5625 */ 5760 */
5626 5761
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index cc43b2a58ce3..58ac643ba9f3 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -106,9 +106,13 @@ static void bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
106 enum rport_event event); 106 enum rport_event event);
107static void bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, 107static void bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport,
108 enum rport_event event); 108 enum rport_event event);
109static void bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, 109static void bfa_fcs_rport_sm_adisc_online_sending(
110 enum rport_event event); 110 struct bfa_fcs_rport_s *rport, enum rport_event event);
111static void bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, 111static void bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport,
112 enum rport_event event);
113static void bfa_fcs_rport_sm_adisc_offline_sending(struct bfa_fcs_rport_s
114 *rport, enum rport_event event);
115static void bfa_fcs_rport_sm_adisc_offline(struct bfa_fcs_rport_s *rport,
112 enum rport_event event); 116 enum rport_event event);
113static void bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, 117static void bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
114 enum rport_event event); 118 enum rport_event event);
@@ -150,8 +154,10 @@ static struct bfa_sm_table_s rport_sm_table[] = {
150 {BFA_SM(bfa_fcs_rport_sm_online), BFA_RPORT_ONLINE}, 154 {BFA_SM(bfa_fcs_rport_sm_online), BFA_RPORT_ONLINE},
151 {BFA_SM(bfa_fcs_rport_sm_nsquery_sending), BFA_RPORT_NSQUERY}, 155 {BFA_SM(bfa_fcs_rport_sm_nsquery_sending), BFA_RPORT_NSQUERY},
152 {BFA_SM(bfa_fcs_rport_sm_nsquery), BFA_RPORT_NSQUERY}, 156 {BFA_SM(bfa_fcs_rport_sm_nsquery), BFA_RPORT_NSQUERY},
153 {BFA_SM(bfa_fcs_rport_sm_adisc_sending), BFA_RPORT_ADISC}, 157 {BFA_SM(bfa_fcs_rport_sm_adisc_online_sending), BFA_RPORT_ADISC},
154 {BFA_SM(bfa_fcs_rport_sm_adisc), BFA_RPORT_ADISC}, 158 {BFA_SM(bfa_fcs_rport_sm_adisc_online), BFA_RPORT_ADISC},
159 {BFA_SM(bfa_fcs_rport_sm_adisc_offline_sending), BFA_RPORT_ADISC},
160 {BFA_SM(bfa_fcs_rport_sm_adisc_offline), BFA_RPORT_ADISC},
155 {BFA_SM(bfa_fcs_rport_sm_fc4_logorcv), BFA_RPORT_LOGORCV}, 161 {BFA_SM(bfa_fcs_rport_sm_fc4_logorcv), BFA_RPORT_LOGORCV},
156 {BFA_SM(bfa_fcs_rport_sm_fc4_logosend), BFA_RPORT_LOGO}, 162 {BFA_SM(bfa_fcs_rport_sm_fc4_logosend), BFA_RPORT_LOGO},
157 {BFA_SM(bfa_fcs_rport_sm_fc4_offline), BFA_RPORT_OFFLINE}, 163 {BFA_SM(bfa_fcs_rport_sm_fc4_offline), BFA_RPORT_OFFLINE},
@@ -231,10 +237,19 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
231 bfa_fcs_rport_send_plogiacc(rport, NULL); 237 bfa_fcs_rport_send_plogiacc(rport, NULL);
232 break; 238 break;
233 239
240 case RPSM_EVENT_SCN_OFFLINE:
241 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
242 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
243 bfa_timer_start(rport->fcs->bfa, &rport->timer,
244 bfa_fcs_rport_timeout, rport,
245 bfa_fcs_rport_del_timeout);
246 break;
234 case RPSM_EVENT_ADDRESS_CHANGE: 247 case RPSM_EVENT_ADDRESS_CHANGE:
235 case RPSM_EVENT_SCN: 248 case RPSM_EVENT_FAB_SCN:
236 /* query the NS */ 249 /* query the NS */
237 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); 250 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
251 WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) !=
252 BFA_PORT_TOPOLOGY_LOOP));
238 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 253 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
239 rport->ns_retries = 0; 254 rport->ns_retries = 0;
240 bfa_fcs_rport_send_nsdisc(rport, NULL); 255 bfa_fcs_rport_send_nsdisc(rport, NULL);
@@ -280,12 +295,20 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
280 295
281 case RPSM_EVENT_PLOGI_RCVD: 296 case RPSM_EVENT_PLOGI_RCVD:
282 case RPSM_EVENT_PLOGI_COMP: 297 case RPSM_EVENT_PLOGI_COMP:
283 case RPSM_EVENT_SCN: 298 case RPSM_EVENT_FAB_SCN:
284 /* 299 /*
285 * Ignore, SCN is possibly online notification. 300 * Ignore, SCN is possibly online notification.
286 */ 301 */
287 break; 302 break;
288 303
304 case RPSM_EVENT_SCN_OFFLINE:
305 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
306 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
307 bfa_timer_start(rport->fcs->bfa, &rport->timer,
308 bfa_fcs_rport_timeout, rport,
309 bfa_fcs_rport_del_timeout);
310 break;
311
289 case RPSM_EVENT_ADDRESS_CHANGE: 312 case RPSM_EVENT_ADDRESS_CHANGE:
290 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); 313 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
291 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 314 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
@@ -346,9 +369,19 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
346 bfa_fcs_rport_send_plogiacc(rport, NULL); 369 bfa_fcs_rport_send_plogiacc(rport, NULL);
347 break; 370 break;
348 371
372 case RPSM_EVENT_SCN_OFFLINE:
373 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
374 bfa_timer_stop(&rport->timer);
375 bfa_timer_start(rport->fcs->bfa, &rport->timer,
376 bfa_fcs_rport_timeout, rport,
377 bfa_fcs_rport_del_timeout);
378 break;
379
349 case RPSM_EVENT_ADDRESS_CHANGE: 380 case RPSM_EVENT_ADDRESS_CHANGE:
350 case RPSM_EVENT_SCN: 381 case RPSM_EVENT_FAB_SCN:
351 bfa_timer_stop(&rport->timer); 382 bfa_timer_stop(&rport->timer);
383 WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) !=
384 BFA_PORT_TOPOLOGY_LOOP));
352 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 385 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
353 rport->ns_retries = 0; 386 rport->ns_retries = 0;
354 bfa_fcs_rport_send_nsdisc(rport, NULL); 387 bfa_fcs_rport_send_nsdisc(rport, NULL);
@@ -422,7 +455,18 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
422 } 455 }
423 break; 456 break;
424 457
425 case RPSM_EVENT_PLOGI_RETRY: 458 case RPSM_EVENT_SCN_ONLINE:
459 break;
460
461 case RPSM_EVENT_SCN_OFFLINE:
462 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
463 bfa_fcxp_discard(rport->fcxp);
464 bfa_timer_start(rport->fcs->bfa, &rport->timer,
465 bfa_fcs_rport_timeout, rport,
466 bfa_fcs_rport_del_timeout);
467 break;
468
469 case RPSM_EVENT_PLOGI_RETRY:
426 rport->plogi_retries = 0; 470 rport->plogi_retries = 0;
427 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry); 471 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry);
428 bfa_timer_start(rport->fcs->bfa, &rport->timer, 472 bfa_timer_start(rport->fcs->bfa, &rport->timer,
@@ -440,8 +484,10 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
440 break; 484 break;
441 485
442 case RPSM_EVENT_ADDRESS_CHANGE: 486 case RPSM_EVENT_ADDRESS_CHANGE:
443 case RPSM_EVENT_SCN: 487 case RPSM_EVENT_FAB_SCN:
444 bfa_fcxp_discard(rport->fcxp); 488 bfa_fcxp_discard(rport->fcxp);
489 WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) !=
490 BFA_PORT_TOPOLOGY_LOOP));
445 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 491 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
446 rport->ns_retries = 0; 492 rport->ns_retries = 0;
447 bfa_fcs_rport_send_nsdisc(rport, NULL); 493 bfa_fcs_rport_send_nsdisc(rport, NULL);
@@ -512,7 +558,8 @@ bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport,
512 case RPSM_EVENT_PLOGI_COMP: 558 case RPSM_EVENT_PLOGI_COMP:
513 case RPSM_EVENT_LOGO_IMP: 559 case RPSM_EVENT_LOGO_IMP:
514 case RPSM_EVENT_ADDRESS_CHANGE: 560 case RPSM_EVENT_ADDRESS_CHANGE:
515 case RPSM_EVENT_SCN: 561 case RPSM_EVENT_FAB_SCN:
562 case RPSM_EVENT_SCN_OFFLINE:
516 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); 563 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
517 bfa_fcs_rport_fcs_offline_action(rport); 564 bfa_fcs_rport_fcs_offline_action(rport);
518 break; 565 break;
@@ -561,9 +608,10 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
561 bfa_fcs_rport_fcs_offline_action(rport); 608 bfa_fcs_rport_fcs_offline_action(rport);
562 break; 609 break;
563 610
564 case RPSM_EVENT_SCN: 611 case RPSM_EVENT_FAB_SCN:
565 case RPSM_EVENT_LOGO_IMP: 612 case RPSM_EVENT_LOGO_IMP:
566 case RPSM_EVENT_ADDRESS_CHANGE: 613 case RPSM_EVENT_ADDRESS_CHANGE:
614 case RPSM_EVENT_SCN_OFFLINE:
567 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); 615 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
568 bfa_fcs_rport_fcs_offline_action(rport); 616 bfa_fcs_rport_fcs_offline_action(rport);
569 break; 617 break;
@@ -595,14 +643,15 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
595 bfa_trc(rport->fcs, event); 643 bfa_trc(rport->fcs, event);
596 644
597 switch (event) { 645 switch (event) {
598 case RPSM_EVENT_SCN: 646 case RPSM_EVENT_FAB_SCN:
599 if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { 647 if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
600 bfa_sm_set_state(rport, 648 bfa_sm_set_state(rport,
601 bfa_fcs_rport_sm_nsquery_sending); 649 bfa_fcs_rport_sm_nsquery_sending);
602 rport->ns_retries = 0; 650 rport->ns_retries = 0;
603 bfa_fcs_rport_send_nsdisc(rport, NULL); 651 bfa_fcs_rport_send_nsdisc(rport, NULL);
604 } else { 652 } else {
605 bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending); 653 bfa_sm_set_state(rport,
654 bfa_fcs_rport_sm_adisc_online_sending);
606 bfa_fcs_rport_send_adisc(rport, NULL); 655 bfa_fcs_rport_send_adisc(rport, NULL);
607 } 656 }
608 break; 657 break;
@@ -610,6 +659,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
610 case RPSM_EVENT_PLOGI_RCVD: 659 case RPSM_EVENT_PLOGI_RCVD:
611 case RPSM_EVENT_LOGO_IMP: 660 case RPSM_EVENT_LOGO_IMP:
612 case RPSM_EVENT_ADDRESS_CHANGE: 661 case RPSM_EVENT_ADDRESS_CHANGE:
662 case RPSM_EVENT_SCN_OFFLINE:
613 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); 663 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
614 bfa_fcs_rport_hal_offline_action(rport); 664 bfa_fcs_rport_hal_offline_action(rport);
615 break; 665 break;
@@ -625,6 +675,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
625 bfa_fcs_rport_hal_offline_action(rport); 675 bfa_fcs_rport_hal_offline_action(rport);
626 break; 676 break;
627 677
678 case RPSM_EVENT_SCN_ONLINE:
628 case RPSM_EVENT_PLOGI_COMP: 679 case RPSM_EVENT_PLOGI_COMP:
629 break; 680 break;
630 681
@@ -656,7 +707,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
656 bfa_fcs_rport_hal_offline_action(rport); 707 bfa_fcs_rport_hal_offline_action(rport);
657 break; 708 break;
658 709
659 case RPSM_EVENT_SCN: 710 case RPSM_EVENT_FAB_SCN:
660 /* 711 /*
661 * ignore SCN, wait for response to query itself 712 * ignore SCN, wait for response to query itself
662 */ 713 */
@@ -696,7 +747,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
696 747
697 switch (event) { 748 switch (event) {
698 case RPSM_EVENT_ACCEPTED: 749 case RPSM_EVENT_ACCEPTED:
699 bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending); 750 bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online_sending);
700 bfa_fcs_rport_send_adisc(rport, NULL); 751 bfa_fcs_rport_send_adisc(rport, NULL);
701 break; 752 break;
702 753
@@ -718,7 +769,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
718 bfa_fcs_rport_hal_offline_action(rport); 769 bfa_fcs_rport_hal_offline_action(rport);
719 break; 770 break;
720 771
721 case RPSM_EVENT_SCN: 772 case RPSM_EVENT_FAB_SCN:
722 break; 773 break;
723 774
724 case RPSM_EVENT_LOGO_RCVD: 775 case RPSM_EVENT_LOGO_RCVD:
@@ -747,7 +798,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
747 * authenticating with rport. FC-4s are paused. 798 * authenticating with rport. FC-4s are paused.
748 */ 799 */
749static void 800static void
750bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, 801bfa_fcs_rport_sm_adisc_online_sending(struct bfa_fcs_rport_s *rport,
751 enum rport_event event) 802 enum rport_event event)
752{ 803{
753 bfa_trc(rport->fcs, rport->pwwn); 804 bfa_trc(rport->fcs, rport->pwwn);
@@ -756,7 +807,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
756 807
757 switch (event) { 808 switch (event) {
758 case RPSM_EVENT_FCXP_SENT: 809 case RPSM_EVENT_FCXP_SENT:
759 bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc); 810 bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online);
760 break; 811 break;
761 812
762 case RPSM_EVENT_DELETE: 813 case RPSM_EVENT_DELETE:
@@ -779,7 +830,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
779 bfa_fcs_rport_hal_offline_action(rport); 830 bfa_fcs_rport_hal_offline_action(rport);
780 break; 831 break;
781 832
782 case RPSM_EVENT_SCN: 833 case RPSM_EVENT_FAB_SCN:
783 break; 834 break;
784 835
785 case RPSM_EVENT_PLOGI_RCVD: 836 case RPSM_EVENT_PLOGI_RCVD:
@@ -798,7 +849,8 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
798 * FC-4s are paused. 849 * FC-4s are paused.
799 */ 850 */
800static void 851static void
801bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event) 852bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport,
853 enum rport_event event)
802{ 854{
803 bfa_trc(rport->fcs, rport->pwwn); 855 bfa_trc(rport->fcs, rport->pwwn);
804 bfa_trc(rport->fcs, rport->pid); 856 bfa_trc(rport->fcs, rport->pid);
@@ -831,7 +883,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
831 bfa_fcs_rport_hal_offline_action(rport); 883 bfa_fcs_rport_hal_offline_action(rport);
832 break; 884 break;
833 885
834 case RPSM_EVENT_SCN: 886 case RPSM_EVENT_FAB_SCN:
835 /* 887 /*
836 * already processing RSCN 888 * already processing RSCN
837 */ 889 */
@@ -856,7 +908,96 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
856} 908}
857 909
858/* 910/*
859 * Rport has sent LOGO. Awaiting FC-4 offline completion callback. 911 * ADISC is being sent for authenticating with rport
912 * Already did offline actions.
913 */
914static void
915bfa_fcs_rport_sm_adisc_offline_sending(struct bfa_fcs_rport_s *rport,
916 enum rport_event event)
917{
918 bfa_trc(rport->fcs, rport->pwwn);
919 bfa_trc(rport->fcs, rport->pid);
920 bfa_trc(rport->fcs, event);
921
922 switch (event) {
923 case RPSM_EVENT_FCXP_SENT:
924 bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_offline);
925 break;
926
927 case RPSM_EVENT_DELETE:
928 case RPSM_EVENT_SCN_OFFLINE:
929 case RPSM_EVENT_LOGO_IMP:
930 case RPSM_EVENT_LOGO_RCVD:
931 case RPSM_EVENT_PRLO_RCVD:
932 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
933 bfa_fcxp_walloc_cancel(rport->fcs->bfa,
934 &rport->fcxp_wqe);
935 bfa_timer_start(rport->fcs->bfa, &rport->timer,
936 bfa_fcs_rport_timeout, rport,
937 bfa_fcs_rport_del_timeout);
938 break;
939
940 case RPSM_EVENT_PLOGI_RCVD:
941 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
942 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
943 bfa_fcs_rport_send_plogiacc(rport, NULL);
944 break;
945
946 default:
947 bfa_sm_fault(rport->fcs, event);
948 }
949}
950
951/*
952 * ADISC to rport
953 * Already did offline actions
954 */
955static void
956bfa_fcs_rport_sm_adisc_offline(struct bfa_fcs_rport_s *rport,
957 enum rport_event event)
958{
959 bfa_trc(rport->fcs, rport->pwwn);
960 bfa_trc(rport->fcs, rport->pid);
961 bfa_trc(rport->fcs, event);
962
963 switch (event) {
964 case RPSM_EVENT_ACCEPTED:
965 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
966 bfa_fcs_rport_hal_online(rport);
967 break;
968
969 case RPSM_EVENT_PLOGI_RCVD:
970 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
971 bfa_fcxp_discard(rport->fcxp);
972 bfa_fcs_rport_send_plogiacc(rport, NULL);
973 break;
974
975 case RPSM_EVENT_FAILED:
976 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
977 bfa_timer_start(rport->fcs->bfa, &rport->timer,
978 bfa_fcs_rport_timeout, rport,
979 bfa_fcs_rport_del_timeout);
980 break;
981
982 case RPSM_EVENT_DELETE:
983 case RPSM_EVENT_SCN_OFFLINE:
984 case RPSM_EVENT_LOGO_IMP:
985 case RPSM_EVENT_LOGO_RCVD:
986 case RPSM_EVENT_PRLO_RCVD:
987 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
988 bfa_fcxp_discard(rport->fcxp);
989 bfa_timer_start(rport->fcs->bfa, &rport->timer,
990 bfa_fcs_rport_timeout, rport,
991 bfa_fcs_rport_del_timeout);
992 break;
993
994 default:
995 bfa_sm_fault(rport->fcs, event);
996 }
997}
998
999/*
1000 * Rport has sent LOGO. Awaiting FC-4 offline completion callback.
860 */ 1001 */
861static void 1002static void
862bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, 1003bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
@@ -881,6 +1022,8 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
881 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete); 1022 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete);
882 break; 1023 break;
883 1024
1025 case RPSM_EVENT_SCN_ONLINE:
1026 case RPSM_EVENT_SCN_OFFLINE:
884 case RPSM_EVENT_HCB_ONLINE: 1027 case RPSM_EVENT_HCB_ONLINE:
885 case RPSM_EVENT_LOGO_RCVD: 1028 case RPSM_EVENT_LOGO_RCVD:
886 case RPSM_EVENT_PRLO_RCVD: 1029 case RPSM_EVENT_PRLO_RCVD:
@@ -945,6 +1088,8 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
945 bfa_fcs_rport_hal_offline(rport); 1088 bfa_fcs_rport_hal_offline(rport);
946 break; 1089 break;
947 1090
1091 case RPSM_EVENT_SCN_ONLINE:
1092 break;
948 case RPSM_EVENT_LOGO_RCVD: 1093 case RPSM_EVENT_LOGO_RCVD:
949 /* 1094 /*
950 * Rport is going offline. Just ack the logo 1095 * Rport is going offline. Just ack the logo
@@ -956,8 +1101,9 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
956 bfa_fcs_rport_send_prlo_acc(rport); 1101 bfa_fcs_rport_send_prlo_acc(rport);
957 break; 1102 break;
958 1103
1104 case RPSM_EVENT_SCN_OFFLINE:
959 case RPSM_EVENT_HCB_ONLINE: 1105 case RPSM_EVENT_HCB_ONLINE:
960 case RPSM_EVENT_SCN: 1106 case RPSM_EVENT_FAB_SCN:
961 case RPSM_EVENT_LOGO_IMP: 1107 case RPSM_EVENT_LOGO_IMP:
962 case RPSM_EVENT_ADDRESS_CHANGE: 1108 case RPSM_EVENT_ADDRESS_CHANGE:
963 /* 1109 /*
@@ -1015,6 +1161,19 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
1015 bfa_fcs_rport_sm_nsdisc_sending); 1161 bfa_fcs_rport_sm_nsdisc_sending);
1016 rport->ns_retries = 0; 1162 rport->ns_retries = 0;
1017 bfa_fcs_rport_send_nsdisc(rport, NULL); 1163 bfa_fcs_rport_send_nsdisc(rport, NULL);
1164 } else if (bfa_fcport_get_topology(rport->port->fcs->bfa) ==
1165 BFA_PORT_TOPOLOGY_LOOP) {
1166 if (rport->scn_online) {
1167 bfa_sm_set_state(rport,
1168 bfa_fcs_rport_sm_adisc_offline_sending);
1169 bfa_fcs_rport_send_adisc(rport, NULL);
1170 } else {
1171 bfa_sm_set_state(rport,
1172 bfa_fcs_rport_sm_offline);
1173 bfa_timer_start(rport->fcs->bfa, &rport->timer,
1174 bfa_fcs_rport_timeout, rport,
1175 bfa_fcs_rport_del_timeout);
1176 }
1018 } else { 1177 } else {
1019 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); 1178 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
1020 rport->plogi_retries = 0; 1179 rport->plogi_retries = 0;
@@ -1027,7 +1186,9 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
1027 bfa_fcs_rport_free(rport); 1186 bfa_fcs_rport_free(rport);
1028 break; 1187 break;
1029 1188
1030 case RPSM_EVENT_SCN: 1189 case RPSM_EVENT_SCN_ONLINE:
1190 case RPSM_EVENT_SCN_OFFLINE:
1191 case RPSM_EVENT_FAB_SCN:
1031 case RPSM_EVENT_LOGO_RCVD: 1192 case RPSM_EVENT_LOGO_RCVD:
1032 case RPSM_EVENT_PRLO_RCVD: 1193 case RPSM_EVENT_PRLO_RCVD:
1033 case RPSM_EVENT_PLOGI_RCVD: 1194 case RPSM_EVENT_PLOGI_RCVD:
@@ -1106,6 +1267,8 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
1106 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); 1267 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
1107 break; 1268 break;
1108 1269
1270 case RPSM_EVENT_SCN_ONLINE:
1271 case RPSM_EVENT_SCN_OFFLINE:
1109 case RPSM_EVENT_LOGO_RCVD: 1272 case RPSM_EVENT_LOGO_RCVD:
1110 case RPSM_EVENT_PRLO_RCVD: 1273 case RPSM_EVENT_PRLO_RCVD:
1111 /* 1274 /*
@@ -1146,6 +1309,8 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
1146 bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending); 1309 bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
1147 break; 1310 break;
1148 1311
1312 case RPSM_EVENT_SCN_ONLINE:
1313 case RPSM_EVENT_SCN_OFFLINE:
1149 case RPSM_EVENT_ADDRESS_CHANGE: 1314 case RPSM_EVENT_ADDRESS_CHANGE:
1150 break; 1315 break;
1151 1316
@@ -1172,7 +1337,9 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
1172 bfa_fcs_rport_free(rport); 1337 bfa_fcs_rport_free(rport);
1173 break; 1338 break;
1174 1339
1175 case RPSM_EVENT_SCN: 1340 case RPSM_EVENT_SCN_ONLINE:
1341 case RPSM_EVENT_SCN_OFFLINE:
1342 case RPSM_EVENT_FAB_SCN:
1176 case RPSM_EVENT_ADDRESS_CHANGE: 1343 case RPSM_EVENT_ADDRESS_CHANGE:
1177 break; 1344 break;
1178 1345
@@ -1209,10 +1376,12 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
1209 bfa_fcs_rport_free(rport); 1376 bfa_fcs_rport_free(rport);
1210 break; 1377 break;
1211 1378
1212 case RPSM_EVENT_SCN: 1379 case RPSM_EVENT_FAB_SCN:
1213 case RPSM_EVENT_ADDRESS_CHANGE: 1380 case RPSM_EVENT_ADDRESS_CHANGE:
1214 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
1215 bfa_timer_stop(&rport->timer); 1381 bfa_timer_stop(&rport->timer);
1382 WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) !=
1383 BFA_PORT_TOPOLOGY_LOOP));
1384 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
1216 rport->ns_retries = 0; 1385 rport->ns_retries = 0;
1217 bfa_fcs_rport_send_nsdisc(rport, NULL); 1386 bfa_fcs_rport_send_nsdisc(rport, NULL);
1218 break; 1387 break;
@@ -1232,6 +1401,7 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
1232 case RPSM_EVENT_LOGO_RCVD: 1401 case RPSM_EVENT_LOGO_RCVD:
1233 case RPSM_EVENT_PRLO_RCVD: 1402 case RPSM_EVENT_PRLO_RCVD:
1234 case RPSM_EVENT_LOGO_IMP: 1403 case RPSM_EVENT_LOGO_IMP:
1404 case RPSM_EVENT_SCN_OFFLINE:
1235 break; 1405 break;
1236 1406
1237 case RPSM_EVENT_PLOGI_COMP: 1407 case RPSM_EVENT_PLOGI_COMP:
@@ -1240,6 +1410,12 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
1240 bfa_fcs_rport_fcs_online_action(rport); 1410 bfa_fcs_rport_fcs_online_action(rport);
1241 break; 1411 break;
1242 1412
1413 case RPSM_EVENT_SCN_ONLINE:
1414 bfa_timer_stop(&rport->timer);
1415 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
1416 bfa_fcs_rport_send_plogi(rport, NULL);
1417 break;
1418
1243 case RPSM_EVENT_PLOGI_SEND: 1419 case RPSM_EVENT_PLOGI_SEND:
1244 bfa_timer_stop(&rport->timer); 1420 bfa_timer_stop(&rport->timer);
1245 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); 1421 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
@@ -1280,7 +1456,7 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
1280 bfa_fcs_rport_send_plogiacc(rport, NULL); 1456 bfa_fcs_rport_send_plogiacc(rport, NULL);
1281 break; 1457 break;
1282 1458
1283 case RPSM_EVENT_SCN: 1459 case RPSM_EVENT_FAB_SCN:
1284 case RPSM_EVENT_LOGO_RCVD: 1460 case RPSM_EVENT_LOGO_RCVD:
1285 case RPSM_EVENT_PRLO_RCVD: 1461 case RPSM_EVENT_PRLO_RCVD:
1286 case RPSM_EVENT_PLOGI_SEND: 1462 case RPSM_EVENT_PLOGI_SEND:
@@ -1326,7 +1502,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
1326 bfa_fcs_rport_send_nsdisc(rport, NULL); 1502 bfa_fcs_rport_send_nsdisc(rport, NULL);
1327 break; 1503 break;
1328 1504
1329 case RPSM_EVENT_SCN: 1505 case RPSM_EVENT_FAB_SCN:
1330 case RPSM_EVENT_ADDRESS_CHANGE: 1506 case RPSM_EVENT_ADDRESS_CHANGE:
1331 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 1507 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
1332 bfa_timer_stop(&rport->timer); 1508 bfa_timer_stop(&rport->timer);
@@ -1439,7 +1615,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
1439 case RPSM_EVENT_PRLO_RCVD: 1615 case RPSM_EVENT_PRLO_RCVD:
1440 bfa_fcs_rport_send_prlo_acc(rport); 1616 bfa_fcs_rport_send_prlo_acc(rport);
1441 break; 1617 break;
1442 case RPSM_EVENT_SCN: 1618 case RPSM_EVENT_FAB_SCN:
1443 /* 1619 /*
1444 * ignore, wait for NS query response 1620 * ignore, wait for NS query response
1445 */ 1621 */
@@ -2546,7 +2722,7 @@ void
2546bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport) 2722bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
2547{ 2723{
2548 rport->stats.rscns++; 2724 rport->stats.rscns++;
2549 bfa_sm_send_event(rport, RPSM_EVENT_SCN); 2725 bfa_sm_send_event(rport, RPSM_EVENT_FAB_SCN);
2550} 2726}
2551 2727
2552/* 2728/*
@@ -2621,6 +2797,48 @@ bfa_cb_rport_qos_scn_flowid(void *cbarg,
2621 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data); 2797 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data);
2622} 2798}
2623 2799
2800void
2801bfa_cb_rport_scn_online(struct bfa_s *bfa)
2802{
2803 struct bfa_fcs_s *fcs = &((struct bfad_s *)bfa->bfad)->bfa_fcs;
2804 struct bfa_fcs_lport_s *port = bfa_fcs_get_base_port(fcs);
2805 struct bfa_fcs_rport_s *rp;
2806 struct list_head *qe;
2807
2808 list_for_each(qe, &port->rport_q) {
2809 rp = (struct bfa_fcs_rport_s *) qe;
2810 bfa_sm_send_event(rp, RPSM_EVENT_SCN_ONLINE);
2811 rp->scn_online = BFA_TRUE;
2812 }
2813
2814 if (bfa_fcs_lport_is_online(port))
2815 bfa_fcs_lport_lip_scn_online(port);
2816}
2817
2818void
2819bfa_cb_rport_scn_no_dev(void *rport)
2820{
2821 struct bfa_fcs_rport_s *rp = rport;
2822
2823 bfa_sm_send_event(rp, RPSM_EVENT_SCN_OFFLINE);
2824 rp->scn_online = BFA_FALSE;
2825}
2826
2827void
2828bfa_cb_rport_scn_offline(struct bfa_s *bfa)
2829{
2830 struct bfa_fcs_s *fcs = &((struct bfad_s *)bfa->bfad)->bfa_fcs;
2831 struct bfa_fcs_lport_s *port = bfa_fcs_get_base_port(fcs);
2832 struct bfa_fcs_rport_s *rp;
2833 struct list_head *qe;
2834
2835 list_for_each(qe, &port->rport_q) {
2836 rp = (struct bfa_fcs_rport_s *) qe;
2837 bfa_sm_send_event(rp, RPSM_EVENT_SCN_OFFLINE);
2838 rp->scn_online = BFA_FALSE;
2839 }
2840}
2841
2624/* 2842/*
2625 * brief 2843 * brief
2626 * This routine is a static BFA callback when there is a QoS priority 2844 * This routine is a static BFA callback when there is a QoS priority
@@ -2808,6 +3026,9 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
2808 struct bfa_rport_qos_attr_s qos_attr; 3026 struct bfa_rport_qos_attr_s qos_attr;
2809 struct bfa_fcs_lport_s *port = rport->port; 3027 struct bfa_fcs_lport_s *port = rport->port;
2810 bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed; 3028 bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed;
3029 struct bfa_port_attr_s port_attr;
3030
3031 bfa_fcport_get_attr(rport->fcs->bfa, &port_attr);
2811 3032
2812 memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s)); 3033 memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
2813 memset(&qos_attr, 0, sizeof(struct bfa_rport_qos_attr_s)); 3034 memset(&qos_attr, 0, sizeof(struct bfa_rport_qos_attr_s));
@@ -2838,7 +3059,8 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
2838 rport_speed = 3059 rport_speed =
2839 bfa_fcport_get_ratelim_speed(rport->fcs->bfa); 3060 bfa_fcport_get_ratelim_speed(rport->fcs->bfa);
2840 3061
2841 if (rport_speed < bfa_fcs_lport_get_rport_max_speed(port)) 3062 if ((bfa_fcs_lport_get_rport_max_speed(port) !=
3063 BFA_PORT_SPEED_UNKNOWN) && (rport_speed < port_attr.speed))
2842 rport_attr->trl_enforced = BFA_TRUE; 3064 rport_attr->trl_enforced = BFA_TRUE;
2843 } 3065 }
2844} 3066}
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 75ca8752b9f4..0116c1032e25 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -731,8 +731,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
731 /* 731 /*
732 * Unlock the hw semaphore. Should be here only once per boot. 732 * Unlock the hw semaphore. Should be here only once per boot.
733 */ 733 */
734 readl(iocpf->ioc->ioc_regs.ioc_sem_reg); 734 bfa_ioc_ownership_reset(iocpf->ioc);
735 writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
736 735
737 /* 736 /*
738 * unlock init semaphore. 737 * unlock init semaphore.
@@ -1751,6 +1750,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1751 attr->card_type = be32_to_cpu(attr->card_type); 1750 attr->card_type = be32_to_cpu(attr->card_type);
1752 attr->maxfrsize = be16_to_cpu(attr->maxfrsize); 1751 attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
1753 ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC); 1752 ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC);
1753 attr->mfg_year = be16_to_cpu(attr->mfg_year);
1754 1754
1755 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); 1755 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1756} 1756}
@@ -2497,6 +2497,9 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2497 ad_attr->cna_capable = bfa_ioc_is_cna(ioc); 2497 ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2498 ad_attr->trunk_capable = (ad_attr->nports > 1) && 2498 ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2499 !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz; 2499 !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
2500 ad_attr->mfg_day = ioc_attr->mfg_day;
2501 ad_attr->mfg_month = ioc_attr->mfg_month;
2502 ad_attr->mfg_year = ioc_attr->mfg_year;
2500} 2503}
2501 2504
2502enum bfa_ioc_type_e 2505enum bfa_ioc_type_e
@@ -2923,7 +2926,7 @@ bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
2923 return; 2926 return;
2924 } 2927 }
2925 2928
2926 if (ioc->iocpf.poll_time >= BFA_IOC_TOV) 2929 if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV))
2927 bfa_iocpf_timeout(ioc); 2930 bfa_iocpf_timeout(ioc);
2928 else { 2931 else {
2929 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; 2932 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
@@ -3016,7 +3019,6 @@ bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3016 struct bfa_ablk_cfg_inst_s *cfg_inst; 3019 struct bfa_ablk_cfg_inst_s *cfg_inst;
3017 int i, j; 3020 int i, j;
3018 u16 be16; 3021 u16 be16;
3019 u32 be32;
3020 3022
3021 for (i = 0; i < BFA_ABLK_MAX; i++) { 3023 for (i = 0; i < BFA_ABLK_MAX; i++) {
3022 cfg_inst = &cfg->inst[i]; 3024 cfg_inst = &cfg->inst[i];
@@ -3027,8 +3029,10 @@ bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3027 cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16); 3029 cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3028 be16 = cfg_inst->pf_cfg[j].num_vectors; 3030 be16 = cfg_inst->pf_cfg[j].num_vectors;
3029 cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16); 3031 cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3030 be32 = cfg_inst->pf_cfg[j].bw; 3032 be16 = cfg_inst->pf_cfg[j].bw_min;
3031 cfg_inst->pf_cfg[j].bw = be16_to_cpu(be32); 3033 cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16);
3034 be16 = cfg_inst->pf_cfg[j].bw_max;
3035 cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16);
3032 } 3036 }
3033 } 3037 }
3034} 3038}
@@ -3170,7 +3174,8 @@ bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3170 3174
3171bfa_status_t 3175bfa_status_t
3172bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn, 3176bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3173 u8 port, enum bfi_pcifn_class personality, int bw, 3177 u8 port, enum bfi_pcifn_class personality,
3178 u16 bw_min, u16 bw_max,
3174 bfa_ablk_cbfn_t cbfn, void *cbarg) 3179 bfa_ablk_cbfn_t cbfn, void *cbarg)
3175{ 3180{
3176 struct bfi_ablk_h2i_pf_req_s *m; 3181 struct bfi_ablk_h2i_pf_req_s *m;
@@ -3194,7 +3199,8 @@ bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3194 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE, 3199 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3195 bfa_ioc_portid(ablk->ioc)); 3200 bfa_ioc_portid(ablk->ioc));
3196 m->pers = cpu_to_be16((u16)personality); 3201 m->pers = cpu_to_be16((u16)personality);
3197 m->bw = cpu_to_be32(bw); 3202 m->bw_min = cpu_to_be16(bw_min);
3203 m->bw_max = cpu_to_be16(bw_max);
3198 m->port = port; 3204 m->port = port;
3199 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); 3205 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3200 3206
@@ -3294,8 +3300,8 @@ bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3294} 3300}
3295 3301
3296bfa_status_t 3302bfa_status_t
3297bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw, 3303bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min,
3298 bfa_ablk_cbfn_t cbfn, void *cbarg) 3304 u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg)
3299{ 3305{
3300 struct bfi_ablk_h2i_pf_req_s *m; 3306 struct bfi_ablk_h2i_pf_req_s *m;
3301 3307
@@ -3317,7 +3323,8 @@ bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
3317 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE, 3323 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3318 bfa_ioc_portid(ablk->ioc)); 3324 bfa_ioc_portid(ablk->ioc));
3319 m->pcifn = (u8)pcifn; 3325 m->pcifn = (u8)pcifn;
3320 m->bw = cpu_to_be32(bw); 3326 m->bw_min = cpu_to_be16(bw_min);
3327 m->bw_max = cpu_to_be16(bw_max);
3321 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); 3328 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3322 3329
3323 return BFA_STATUS_OK; 3330 return BFA_STATUS_OK;
@@ -4680,22 +4687,25 @@ diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4680 diag->tsensor.temp->temp = be16_to_cpu(rsp->temp); 4687 diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4681 diag->tsensor.temp->ts_junc = rsp->ts_junc; 4688 diag->tsensor.temp->ts_junc = rsp->ts_junc;
4682 diag->tsensor.temp->ts_brd = rsp->ts_brd; 4689 diag->tsensor.temp->ts_brd = rsp->ts_brd;
4683 diag->tsensor.temp->status = BFA_STATUS_OK;
4684 4690
4685 if (rsp->ts_brd) { 4691 if (rsp->ts_brd) {
4692 /* tsensor.temp->status is brd_temp status */
4693 diag->tsensor.temp->status = rsp->status;
4686 if (rsp->status == BFA_STATUS_OK) { 4694 if (rsp->status == BFA_STATUS_OK) {
4687 diag->tsensor.temp->brd_temp = 4695 diag->tsensor.temp->brd_temp =
4688 be16_to_cpu(rsp->brd_temp); 4696 be16_to_cpu(rsp->brd_temp);
4689 } else { 4697 } else
4690 bfa_trc(diag, rsp->status);
4691 diag->tsensor.temp->brd_temp = 0; 4698 diag->tsensor.temp->brd_temp = 0;
4692 diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
4693 }
4694 } 4699 }
4700
4701 bfa_trc(diag, rsp->status);
4695 bfa_trc(diag, rsp->ts_junc); 4702 bfa_trc(diag, rsp->ts_junc);
4696 bfa_trc(diag, rsp->temp); 4703 bfa_trc(diag, rsp->temp);
4697 bfa_trc(diag, rsp->ts_brd); 4704 bfa_trc(diag, rsp->ts_brd);
4698 bfa_trc(diag, rsp->brd_temp); 4705 bfa_trc(diag, rsp->brd_temp);
4706
4707 /* tsensor status is always good bcos we always have junction temp */
4708 diag->tsensor.status = BFA_STATUS_OK;
4699 diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status); 4709 diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4700 diag->tsensor.lock = 0; 4710 diag->tsensor.lock = 0;
4701} 4711}
@@ -4924,6 +4934,7 @@ bfa_diag_tsensor_query(struct bfa_diag_s *diag,
4924 diag->tsensor.temp = result; 4934 diag->tsensor.temp = result;
4925 diag->tsensor.cbfn = cbfn; 4935 diag->tsensor.cbfn = cbfn;
4926 diag->tsensor.cbarg = cbarg; 4936 diag->tsensor.cbarg = cbarg;
4937 diag->tsensor.status = BFA_STATUS_OK;
4927 4938
4928 /* Send msg to fw */ 4939 /* Send msg to fw */
4929 diag_tempsensor_send(diag); 4940 diag_tempsensor_send(diag);
@@ -5615,7 +5626,7 @@ bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5615 } 5626 }
5616 bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read); 5627 bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5617 bfa_timer_start(dconf->bfa, &dconf->timer, 5628 bfa_timer_start(dconf->bfa, &dconf->timer,
5618 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); 5629 bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV);
5619 bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa), 5630 bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5620 BFA_FLASH_PART_DRV, dconf->instance, 5631 BFA_FLASH_PART_DRV, dconf->instance,
5621 dconf->dconf, 5632 dconf->dconf,
@@ -5655,7 +5666,7 @@ bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5655 break; 5666 break;
5656 case BFA_DCONF_SM_TIMEOUT: 5667 case BFA_DCONF_SM_TIMEOUT:
5657 bfa_sm_set_state(dconf, bfa_dconf_sm_ready); 5668 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5658 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_IOC_FAILED); 5669 bfa_ioc_suspend(&dconf->bfa->ioc);
5659 break; 5670 break;
5660 case BFA_DCONF_SM_EXIT: 5671 case BFA_DCONF_SM_EXIT:
5661 bfa_timer_stop(&dconf->timer); 5672 bfa_timer_stop(&dconf->timer);
@@ -5853,7 +5864,6 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status)
5853 struct bfa_s *bfa = arg; 5864 struct bfa_s *bfa = arg;
5854 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); 5865 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5855 5866
5856 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5857 if (status == BFA_STATUS_OK) { 5867 if (status == BFA_STATUS_OK) {
5858 bfa_dconf_read_data_valid(bfa) = BFA_TRUE; 5868 bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
5859 if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE) 5869 if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
@@ -5861,6 +5871,7 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status)
5861 if (dconf->dconf->hdr.version != BFI_DCONF_VERSION) 5871 if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
5862 dconf->dconf->hdr.version = BFI_DCONF_VERSION; 5872 dconf->dconf->hdr.version = BFI_DCONF_VERSION;
5863 } 5873 }
5874 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5864 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE); 5875 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
5865} 5876}
5866 5877
@@ -5945,3 +5956,448 @@ bfa_dconf_modexit(struct bfa_s *bfa)
5945 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); 5956 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5946 bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT); 5957 bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
5947} 5958}
5959
5960/*
5961 * FRU specific functions
5962 */
5963
5964#define BFA_FRU_DMA_BUF_SZ 0x02000 /* 8k dma buffer */
5965#define BFA_FRU_CHINOOK_MAX_SIZE 0x10000
5966#define BFA_FRU_LIGHTNING_MAX_SIZE 0x200
5967
5968static void
5969bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event)
5970{
5971 struct bfa_fru_s *fru = cbarg;
5972
5973 bfa_trc(fru, event);
5974
5975 switch (event) {
5976 case BFA_IOC_E_DISABLED:
5977 case BFA_IOC_E_FAILED:
5978 if (fru->op_busy) {
5979 fru->status = BFA_STATUS_IOC_FAILURE;
5980 fru->cbfn(fru->cbarg, fru->status);
5981 fru->op_busy = 0;
5982 }
5983 break;
5984
5985 default:
5986 break;
5987 }
5988}
5989
5990/*
5991 * Send fru write request.
5992 *
5993 * @param[in] cbarg - callback argument
5994 */
5995static void
5996bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
5997{
5998 struct bfa_fru_s *fru = cbarg;
5999 struct bfi_fru_write_req_s *msg =
6000 (struct bfi_fru_write_req_s *) fru->mb.msg;
6001 u32 len;
6002
6003 msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6004 len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6005 fru->residue : BFA_FRU_DMA_BUF_SZ;
6006 msg->length = cpu_to_be32(len);
6007
6008 /*
6009 * indicate if it's the last msg of the whole write operation
6010 */
6011 msg->last = (len == fru->residue) ? 1 : 0;
6012
6013 bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6014 bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6015
6016 memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len);
6017 bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6018
6019 fru->residue -= len;
6020 fru->offset += len;
6021}
6022
6023/*
6024 * Send fru read request.
6025 *
6026 * @param[in] cbarg - callback argument
6027 */
6028static void
6029bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
6030{
6031 struct bfa_fru_s *fru = cbarg;
6032 struct bfi_fru_read_req_s *msg =
6033 (struct bfi_fru_read_req_s *) fru->mb.msg;
6034 u32 len;
6035
6036 msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6037 len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6038 fru->residue : BFA_FRU_DMA_BUF_SZ;
6039 msg->length = cpu_to_be32(len);
6040 bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6041 bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6042 bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6043}
6044
6045/*
6046 * Flash memory info API.
6047 *
6048 * @param[in] mincfg - minimal cfg variable
6049 */
6050u32
6051bfa_fru_meminfo(bfa_boolean_t mincfg)
6052{
6053 /* min driver doesn't need fru */
6054 if (mincfg)
6055 return 0;
6056
6057 return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6058}
6059
6060/*
6061 * Flash attach API.
6062 *
6063 * @param[in] fru - fru structure
6064 * @param[in] ioc - ioc structure
6065 * @param[in] dev - device structure
6066 * @param[in] trcmod - trace module
6067 * @param[in] logmod - log module
6068 */
6069void
6070bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev,
6071 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
6072{
6073 fru->ioc = ioc;
6074 fru->trcmod = trcmod;
6075 fru->cbfn = NULL;
6076 fru->cbarg = NULL;
6077 fru->op_busy = 0;
6078
6079 bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru);
6080 bfa_q_qe_init(&fru->ioc_notify);
6081 bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru);
6082 list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q);
6083
6084 /* min driver doesn't need fru */
6085 if (mincfg) {
6086 fru->dbuf_kva = NULL;
6087 fru->dbuf_pa = 0;
6088 }
6089}
6090
6091/*
6092 * Claim memory for fru
6093 *
6094 * @param[in] fru - fru structure
6095 * @param[in] dm_kva - pointer to virtual memory address
6096 * @param[in] dm_pa - frusical memory address
6097 * @param[in] mincfg - minimal cfg variable
6098 */
6099void
6100bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa,
6101 bfa_boolean_t mincfg)
6102{
6103 if (mincfg)
6104 return;
6105
6106 fru->dbuf_kva = dm_kva;
6107 fru->dbuf_pa = dm_pa;
6108 memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ);
6109 dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6110 dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6111}
6112
6113/*
6114 * Update fru vpd image.
6115 *
6116 * @param[in] fru - fru structure
6117 * @param[in] buf - update data buffer
6118 * @param[in] len - data buffer length
6119 * @param[in] offset - offset relative to starting address
6120 * @param[in] cbfn - callback function
6121 * @param[in] cbarg - callback argument
6122 *
6123 * Return status.
6124 */
6125bfa_status_t
6126bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6127 bfa_cb_fru_t cbfn, void *cbarg)
6128{
6129 bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6130 bfa_trc(fru, len);
6131 bfa_trc(fru, offset);
6132
6133 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6134 return BFA_STATUS_FRU_NOT_PRESENT;
6135
6136 if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
6137 return BFA_STATUS_CMD_NOTSUPP;
6138
6139 if (!bfa_ioc_is_operational(fru->ioc))
6140 return BFA_STATUS_IOC_NON_OP;
6141
6142 if (fru->op_busy) {
6143 bfa_trc(fru, fru->op_busy);
6144 return BFA_STATUS_DEVBUSY;
6145 }
6146
6147 fru->op_busy = 1;
6148
6149 fru->cbfn = cbfn;
6150 fru->cbarg = cbarg;
6151 fru->residue = len;
6152 fru->offset = 0;
6153 fru->addr_off = offset;
6154 fru->ubuf = buf;
6155
6156 bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6157
6158 return BFA_STATUS_OK;
6159}
6160
6161/*
6162 * Read fru vpd image.
6163 *
6164 * @param[in] fru - fru structure
6165 * @param[in] buf - read data buffer
6166 * @param[in] len - data buffer length
6167 * @param[in] offset - offset relative to starting address
6168 * @param[in] cbfn - callback function
6169 * @param[in] cbarg - callback argument
6170 *
6171 * Return status.
6172 */
6173bfa_status_t
6174bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6175 bfa_cb_fru_t cbfn, void *cbarg)
6176{
6177 bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ);
6178 bfa_trc(fru, len);
6179 bfa_trc(fru, offset);
6180
6181 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6182 return BFA_STATUS_FRU_NOT_PRESENT;
6183
6184 if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
6185 return BFA_STATUS_CMD_NOTSUPP;
6186
6187 if (!bfa_ioc_is_operational(fru->ioc))
6188 return BFA_STATUS_IOC_NON_OP;
6189
6190 if (fru->op_busy) {
6191 bfa_trc(fru, fru->op_busy);
6192 return BFA_STATUS_DEVBUSY;
6193 }
6194
6195 fru->op_busy = 1;
6196
6197 fru->cbfn = cbfn;
6198 fru->cbarg = cbarg;
6199 fru->residue = len;
6200 fru->offset = 0;
6201 fru->addr_off = offset;
6202 fru->ubuf = buf;
6203 bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ);
6204
6205 return BFA_STATUS_OK;
6206}
6207
6208/*
6209 * Get maximum size fru vpd image.
6210 *
6211 * @param[in] fru - fru structure
6212 * @param[out] size - maximum size of fru vpd data
6213 *
6214 * Return status.
6215 */
6216bfa_status_t
6217bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size)
6218{
6219 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6220 return BFA_STATUS_FRU_NOT_PRESENT;
6221
6222 if (!bfa_ioc_is_operational(fru->ioc))
6223 return BFA_STATUS_IOC_NON_OP;
6224
6225 if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK)
6226 *max_size = BFA_FRU_CHINOOK_MAX_SIZE;
6227 else
6228 return BFA_STATUS_CMD_NOTSUPP;
6229 return BFA_STATUS_OK;
6230}
6231/*
6232 * tfru write.
6233 *
6234 * @param[in] fru - fru structure
6235 * @param[in] buf - update data buffer
6236 * @param[in] len - data buffer length
6237 * @param[in] offset - offset relative to starting address
6238 * @param[in] cbfn - callback function
6239 * @param[in] cbarg - callback argument
6240 *
6241 * Return status.
6242 */
6243bfa_status_t
6244bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6245 bfa_cb_fru_t cbfn, void *cbarg)
6246{
6247 bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ);
6248 bfa_trc(fru, len);
6249 bfa_trc(fru, offset);
6250 bfa_trc(fru, *((u8 *) buf));
6251
6252 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6253 return BFA_STATUS_FRU_NOT_PRESENT;
6254
6255 if (!bfa_ioc_is_operational(fru->ioc))
6256 return BFA_STATUS_IOC_NON_OP;
6257
6258 if (fru->op_busy) {
6259 bfa_trc(fru, fru->op_busy);
6260 return BFA_STATUS_DEVBUSY;
6261 }
6262
6263 fru->op_busy = 1;
6264
6265 fru->cbfn = cbfn;
6266 fru->cbarg = cbarg;
6267 fru->residue = len;
6268 fru->offset = 0;
6269 fru->addr_off = offset;
6270 fru->ubuf = buf;
6271
6272 bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ);
6273
6274 return BFA_STATUS_OK;
6275}
6276
6277/*
6278 * tfru read.
6279 *
6280 * @param[in] fru - fru structure
6281 * @param[in] buf - read data buffer
6282 * @param[in] len - data buffer length
6283 * @param[in] offset - offset relative to starting address
6284 * @param[in] cbfn - callback function
6285 * @param[in] cbarg - callback argument
6286 *
6287 * Return status.
6288 */
6289bfa_status_t
6290bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6291 bfa_cb_fru_t cbfn, void *cbarg)
6292{
6293 bfa_trc(fru, BFI_TFRU_H2I_READ_REQ);
6294 bfa_trc(fru, len);
6295 bfa_trc(fru, offset);
6296
6297 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6298 return BFA_STATUS_FRU_NOT_PRESENT;
6299
6300 if (!bfa_ioc_is_operational(fru->ioc))
6301 return BFA_STATUS_IOC_NON_OP;
6302
6303 if (fru->op_busy) {
6304 bfa_trc(fru, fru->op_busy);
6305 return BFA_STATUS_DEVBUSY;
6306 }
6307
6308 fru->op_busy = 1;
6309
6310 fru->cbfn = cbfn;
6311 fru->cbarg = cbarg;
6312 fru->residue = len;
6313 fru->offset = 0;
6314 fru->addr_off = offset;
6315 fru->ubuf = buf;
6316 bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ);
6317
6318 return BFA_STATUS_OK;
6319}
6320
6321/*
6322 * Process fru response messages upon receiving interrupts.
6323 *
6324 * @param[in] fruarg - fru structure
6325 * @param[in] msg - message structure
6326 */
6327void
6328bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg)
6329{
6330 struct bfa_fru_s *fru = fruarg;
6331 struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg;
6332 u32 status;
6333
6334 bfa_trc(fru, msg->mh.msg_id);
6335
6336 if (!fru->op_busy) {
6337 /*
6338 * receiving response after ioc failure
6339 */
6340 bfa_trc(fru, 0x9999);
6341 return;
6342 }
6343
6344 switch (msg->mh.msg_id) {
6345 case BFI_FRUVPD_I2H_WRITE_RSP:
6346 case BFI_TFRU_I2H_WRITE_RSP:
6347 status = be32_to_cpu(rsp->status);
6348 bfa_trc(fru, status);
6349
6350 if (status != BFA_STATUS_OK || fru->residue == 0) {
6351 fru->status = status;
6352 fru->op_busy = 0;
6353 if (fru->cbfn)
6354 fru->cbfn(fru->cbarg, fru->status);
6355 } else {
6356 bfa_trc(fru, fru->offset);
6357 if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP)
6358 bfa_fru_write_send(fru,
6359 BFI_FRUVPD_H2I_WRITE_REQ);
6360 else
6361 bfa_fru_write_send(fru,
6362 BFI_TFRU_H2I_WRITE_REQ);
6363 }
6364 break;
6365 case BFI_FRUVPD_I2H_READ_RSP:
6366 case BFI_TFRU_I2H_READ_RSP:
6367 status = be32_to_cpu(rsp->status);
6368 bfa_trc(fru, status);
6369
6370 if (status != BFA_STATUS_OK) {
6371 fru->status = status;
6372 fru->op_busy = 0;
6373 if (fru->cbfn)
6374 fru->cbfn(fru->cbarg, fru->status);
6375 } else {
6376 u32 len = be32_to_cpu(rsp->length);
6377
6378 bfa_trc(fru, fru->offset);
6379 bfa_trc(fru, len);
6380
6381 memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len);
6382 fru->residue -= len;
6383 fru->offset += len;
6384
6385 if (fru->residue == 0) {
6386 fru->status = status;
6387 fru->op_busy = 0;
6388 if (fru->cbfn)
6389 fru->cbfn(fru->cbarg, fru->status);
6390 } else {
6391 if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP)
6392 bfa_fru_read_send(fru,
6393 BFI_FRUVPD_H2I_READ_REQ);
6394 else
6395 bfa_fru_read_send(fru,
6396 BFI_TFRU_H2I_READ_REQ);
6397 }
6398 }
6399 break;
6400 default:
6401 WARN_ON(1);
6402 }
6403}
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index b2856f96567c..23a90e7b7107 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -702,6 +702,55 @@ void bfa_phy_memclaim(struct bfa_phy_s *phy,
702void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg); 702void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg);
703 703
704/* 704/*
705 * FRU module specific
706 */
707typedef void (*bfa_cb_fru_t) (void *cbarg, bfa_status_t status);
708
709struct bfa_fru_s {
710 struct bfa_ioc_s *ioc; /* back pointer to ioc */
711 struct bfa_trc_mod_s *trcmod; /* trace module */
712 u8 op_busy; /* operation busy flag */
713 u8 rsv[3];
714 u32 residue; /* residual length */
715 u32 offset; /* offset */
716 bfa_status_t status; /* status */
717 u8 *dbuf_kva; /* dma buf virtual address */
718 u64 dbuf_pa; /* dma buf physical address */
719 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
720 bfa_cb_fru_t cbfn; /* user callback function */
721 void *cbarg; /* user callback arg */
722 u8 *ubuf; /* user supplied buffer */
723 struct bfa_cb_qe_s hcb_qe; /* comp: BFA callback qelem */
724 u32 addr_off; /* fru address offset */
725 struct bfa_mbox_cmd_s mb; /* mailbox */
726 struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */
727 struct bfa_mem_dma_s fru_dma;
728};
729
730#define BFA_FRU(__bfa) (&(__bfa)->modules.fru)
731#define BFA_MEM_FRU_DMA(__bfa) (&(BFA_FRU(__bfa)->fru_dma))
732
733bfa_status_t bfa_fruvpd_update(struct bfa_fru_s *fru,
734 void *buf, u32 len, u32 offset,
735 bfa_cb_fru_t cbfn, void *cbarg);
736bfa_status_t bfa_fruvpd_read(struct bfa_fru_s *fru,
737 void *buf, u32 len, u32 offset,
738 bfa_cb_fru_t cbfn, void *cbarg);
739bfa_status_t bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size);
740bfa_status_t bfa_tfru_write(struct bfa_fru_s *fru,
741 void *buf, u32 len, u32 offset,
742 bfa_cb_fru_t cbfn, void *cbarg);
743bfa_status_t bfa_tfru_read(struct bfa_fru_s *fru,
744 void *buf, u32 len, u32 offset,
745 bfa_cb_fru_t cbfn, void *cbarg);
746u32 bfa_fru_meminfo(bfa_boolean_t mincfg);
747void bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc,
748 void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
749void bfa_fru_memclaim(struct bfa_fru_s *fru,
750 u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
751void bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg);
752
753/*
705 * Driver Config( dconf) specific 754 * Driver Config( dconf) specific
706 */ 755 */
707#define BFI_DCONF_SIGNATURE 0xabcdabcd 756#define BFI_DCONF_SIGNATURE 0xabcdabcd
@@ -716,6 +765,7 @@ struct bfa_dconf_hdr_s {
716struct bfa_dconf_s { 765struct bfa_dconf_s {
717 struct bfa_dconf_hdr_s hdr; 766 struct bfa_dconf_hdr_s hdr;
718 struct bfa_lunmask_cfg_s lun_mask; 767 struct bfa_lunmask_cfg_s lun_mask;
768 struct bfa_throttle_cfg_s throttle_cfg;
719}; 769};
720#pragma pack() 770#pragma pack()
721 771
@@ -738,6 +788,8 @@ struct bfa_dconf_mod_s {
738#define bfa_dconf_read_data_valid(__bfa) \ 788#define bfa_dconf_read_data_valid(__bfa) \
739 (BFA_DCONF_MOD(__bfa)->read_data_valid) 789 (BFA_DCONF_MOD(__bfa)->read_data_valid)
740#define BFA_DCONF_UPDATE_TOV 5000 /* memtest timeout in msec */ 790#define BFA_DCONF_UPDATE_TOV 5000 /* memtest timeout in msec */
791#define bfa_dconf_get_min_cfg(__bfa) \
792 (BFA_DCONF_MOD(__bfa)->min_cfg)
741 793
742void bfa_dconf_modinit(struct bfa_s *bfa); 794void bfa_dconf_modinit(struct bfa_s *bfa);
743void bfa_dconf_modexit(struct bfa_s *bfa); 795void bfa_dconf_modexit(struct bfa_s *bfa);
@@ -761,7 +813,8 @@ bfa_status_t bfa_dconf_update(struct bfa_s *bfa);
761#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize) 813#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize)
762#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit) 814#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
763#define bfa_ioc_speed_sup(__ioc) \ 815#define bfa_ioc_speed_sup(__ioc) \
764 BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop) 816 ((bfa_ioc_is_cna(__ioc)) ? BFA_PORT_SPEED_10GBPS : \
817 BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop))
765#define bfa_ioc_get_nports(__ioc) \ 818#define bfa_ioc_get_nports(__ioc) \
766 BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop) 819 BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
767 820
@@ -885,12 +938,12 @@ bfa_status_t bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port,
885 enum bfa_mode_s mode, int max_pf, int max_vf, 938 enum bfa_mode_s mode, int max_pf, int max_vf,
886 bfa_ablk_cbfn_t cbfn, void *cbarg); 939 bfa_ablk_cbfn_t cbfn, void *cbarg);
887bfa_status_t bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn, 940bfa_status_t bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
888 u8 port, enum bfi_pcifn_class personality, int bw, 941 u8 port, enum bfi_pcifn_class personality,
889 bfa_ablk_cbfn_t cbfn, void *cbarg); 942 u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg);
890bfa_status_t bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn, 943bfa_status_t bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
891 bfa_ablk_cbfn_t cbfn, void *cbarg); 944 bfa_ablk_cbfn_t cbfn, void *cbarg);
892bfa_status_t bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw, 945bfa_status_t bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn,
893 bfa_ablk_cbfn_t cbfn, void *cbarg); 946 u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg);
894bfa_status_t bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, 947bfa_status_t bfa_ablk_optrom_en(struct bfa_ablk_s *ablk,
895 bfa_ablk_cbfn_t cbfn, void *cbarg); 948 bfa_ablk_cbfn_t cbfn, void *cbarg);
896bfa_status_t bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, 949bfa_status_t bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk,
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 2eb0c6a2938d..de4e726a1263 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -57,13 +57,6 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
57 u32 usecnt; 57 u32 usecnt;
58 struct bfi_ioc_image_hdr_s fwhdr; 58 struct bfi_ioc_image_hdr_s fwhdr;
59 59
60 /*
61 * If bios boot (flash based) -- do not increment usage count
62 */
63 if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
64 BFA_IOC_FWIMG_MINSZ)
65 return BFA_TRUE;
66
67 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 60 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
68 usecnt = readl(ioc->ioc_regs.ioc_usage_reg); 61 usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
69 62
@@ -115,13 +108,6 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
115 u32 usecnt; 108 u32 usecnt;
116 109
117 /* 110 /*
118 * If bios boot (flash based) -- do not decrement usage count
119 */
120 if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
121 BFA_IOC_FWIMG_MINSZ)
122 return;
123
124 /*
125 * decrement usage count 111 * decrement usage count
126 */ 112 */
127 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 113 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
@@ -400,13 +386,12 @@ static void
400bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc) 386bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
401{ 387{
402 388
403 if (bfa_ioc_is_cna(ioc)) { 389 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
404 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 390 writel(0, ioc->ioc_regs.ioc_usage_reg);
405 writel(0, ioc->ioc_regs.ioc_usage_reg); 391 readl(ioc->ioc_regs.ioc_usage_sem_reg);
406 readl(ioc->ioc_regs.ioc_usage_sem_reg); 392 writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
407 writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
408 }
409 393
394 writel(0, ioc->ioc_regs.ioc_fail_sync);
410 /* 395 /*
411 * Read the hw sem reg to make sure that it is locked 396 * Read the hw sem reg to make sure that it is locked
412 * before we clear it. If it is not locked, writing 1 397 * before we clear it. If it is not locked, writing 1
@@ -759,25 +744,6 @@ bfa_ioc_ct2_mem_init(void __iomem *rb)
759void 744void
760bfa_ioc_ct2_mac_reset(void __iomem *rb) 745bfa_ioc_ct2_mac_reset(void __iomem *rb)
761{ 746{
762 u32 r32;
763
764 bfa_ioc_ct2_sclk_init(rb);
765 bfa_ioc_ct2_lclk_init(rb);
766
767 /*
768 * release soft reset on s_clk & l_clk
769 */
770 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
771 writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
772 (rb + CT2_APP_PLL_SCLK_CTL_REG));
773
774 /*
775 * release soft reset on s_clk & l_clk
776 */
777 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
778 writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
779 (rb + CT2_APP_PLL_LCLK_CTL_REG));
780
781 /* put port0, port1 MAC & AHB in reset */ 747 /* put port0, port1 MAC & AHB in reset */
782 writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET), 748 writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
783 rb + CT2_CSI_MAC_CONTROL_REG(0)); 749 rb + CT2_CSI_MAC_CONTROL_REG(0));
@@ -785,8 +751,21 @@ bfa_ioc_ct2_mac_reset(void __iomem *rb)
785 rb + CT2_CSI_MAC_CONTROL_REG(1)); 751 rb + CT2_CSI_MAC_CONTROL_REG(1));
786} 752}
787 753
754static void
755bfa_ioc_ct2_enable_flash(void __iomem *rb)
756{
757 u32 r32;
758
759 r32 = readl((rb + PSS_GPIO_OUT_REG));
760 writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
761 r32 = readl((rb + PSS_GPIO_OE_REG));
762 writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
763}
764
788#define CT2_NFC_MAX_DELAY 1000 765#define CT2_NFC_MAX_DELAY 1000
789#define CT2_NFC_VER_VALID 0x143 766#define CT2_NFC_PAUSE_MAX_DELAY 4000
767#define CT2_NFC_VER_VALID 0x147
768#define CT2_NFC_STATE_RUNNING 0x20000001
790#define BFA_IOC_PLL_POLL 1000000 769#define BFA_IOC_PLL_POLL 1000000
791 770
792static bfa_boolean_t 771static bfa_boolean_t
@@ -802,6 +781,20 @@ bfa_ioc_ct2_nfc_halted(void __iomem *rb)
802} 781}
803 782
804static void 783static void
784bfa_ioc_ct2_nfc_halt(void __iomem *rb)
785{
786 int i;
787
788 writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
789 for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
790 if (bfa_ioc_ct2_nfc_halted(rb))
791 break;
792 udelay(1000);
793 }
794 WARN_ON(!bfa_ioc_ct2_nfc_halted(rb));
795}
796
797static void
805bfa_ioc_ct2_nfc_resume(void __iomem *rb) 798bfa_ioc_ct2_nfc_resume(void __iomem *rb)
806{ 799{
807 u32 r32; 800 u32 r32;
@@ -817,105 +810,142 @@ bfa_ioc_ct2_nfc_resume(void __iomem *rb)
817 WARN_ON(1); 810 WARN_ON(1);
818} 811}
819 812
820bfa_status_t 813static void
821bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode) 814bfa_ioc_ct2_clk_reset(void __iomem *rb)
822{ 815{
823 u32 wgn, r32, nfc_ver, i; 816 u32 r32;
824 817
825 wgn = readl(rb + CT2_WGN_STATUS); 818 bfa_ioc_ct2_sclk_init(rb);
826 nfc_ver = readl(rb + CT2_RSC_GPR15_REG); 819 bfa_ioc_ct2_lclk_init(rb);
827 820
828 if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) && 821 /*
829 (nfc_ver >= CT2_NFC_VER_VALID)) { 822 * release soft reset on s_clk & l_clk
830 if (bfa_ioc_ct2_nfc_halted(rb)) 823 */
831 bfa_ioc_ct2_nfc_resume(rb); 824 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
825 writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
826 (rb + CT2_APP_PLL_SCLK_CTL_REG));
832 827
833 writel(__RESET_AND_START_SCLK_LCLK_PLLS, 828 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
834 rb + CT2_CSI_FW_CTL_SET_REG); 829 writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
830 (rb + CT2_APP_PLL_LCLK_CTL_REG));
835 831
836 for (i = 0; i < BFA_IOC_PLL_POLL; i++) { 832}
837 r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
838 if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
839 break;
840 }
841 833
842 WARN_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS)); 834static void
835bfa_ioc_ct2_nfc_clk_reset(void __iomem *rb)
836{
837 u32 r32, i;
843 838
844 for (i = 0; i < BFA_IOC_PLL_POLL; i++) { 839 r32 = readl((rb + PSS_CTL_REG));
845 r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); 840 r32 |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
846 if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS)) 841 writel(r32, (rb + PSS_CTL_REG));
847 break; 842
848 } 843 writel(__RESET_AND_START_SCLK_LCLK_PLLS, rb + CT2_CSI_FW_CTL_SET_REG);
849 844
850 WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS); 845 for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
846 r32 = readl(rb + CT2_NFC_FLASH_STS_REG);
847
848 if ((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
849 break;
850 }
851 WARN_ON(!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));
852
853 for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
854 r32 = readl(rb + CT2_NFC_FLASH_STS_REG);
855
856 if (!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
857 break;
858 }
859 WARN_ON((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));
860
861 r32 = readl(rb + CT2_CSI_FW_CTL_REG);
862 WARN_ON((r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
863}
864
865static void
866bfa_ioc_ct2_wait_till_nfc_running(void __iomem *rb)
867{
868 u32 r32;
869 int i;
870
871 if (bfa_ioc_ct2_nfc_halted(rb))
872 bfa_ioc_ct2_nfc_resume(rb);
873 for (i = 0; i < CT2_NFC_PAUSE_MAX_DELAY; i++) {
874 r32 = readl(rb + CT2_NFC_STS_REG);
875 if (r32 == CT2_NFC_STATE_RUNNING)
876 return;
851 udelay(1000); 877 udelay(1000);
878 }
852 879
853 r32 = readl(rb + CT2_CSI_FW_CTL_REG); 880 r32 = readl(rb + CT2_NFC_STS_REG);
854 WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS); 881 WARN_ON(!(r32 == CT2_NFC_STATE_RUNNING));
855 } else { 882}
856 writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
857 for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
858 r32 = readl(rb + CT2_NFC_CSR_SET_REG);
859 if (r32 & __NFC_CONTROLLER_HALTED)
860 break;
861 udelay(1000);
862 }
863 883
864 bfa_ioc_ct2_mac_reset(rb); 884bfa_status_t
865 bfa_ioc_ct2_sclk_init(rb); 885bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
866 bfa_ioc_ct2_lclk_init(rb); 886{
887 u32 wgn, r32, nfc_ver;
867 888
868 /* 889 wgn = readl(rb + CT2_WGN_STATUS);
869 * release soft reset on s_clk & l_clk
870 */
871 r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
872 writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
873 (rb + CT2_APP_PLL_SCLK_CTL_REG));
874 890
891 if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
875 /* 892 /*
876 * release soft reset on s_clk & l_clk 893 * If flash is corrupted, enable flash explicitly
877 */ 894 */
878 r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); 895 bfa_ioc_ct2_clk_reset(rb);
879 writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, 896 bfa_ioc_ct2_enable_flash(rb);
880 (rb + CT2_APP_PLL_LCLK_CTL_REG));
881 }
882 897
883 /* 898 bfa_ioc_ct2_mac_reset(rb);
884 * Announce flash device presence, if flash was corrupted. 899
885 */ 900 bfa_ioc_ct2_clk_reset(rb);
886 if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) { 901 bfa_ioc_ct2_enable_flash(rb);
887 r32 = readl(rb + PSS_GPIO_OUT_REG); 902
888 writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG)); 903 } else {
889 r32 = readl(rb + PSS_GPIO_OE_REG); 904 nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
890 writel(r32 | 1, (rb + PSS_GPIO_OE_REG)); 905
906 if ((nfc_ver >= CT2_NFC_VER_VALID) &&
907 (wgn == (__A2T_AHB_LOAD | __WGN_READY))) {
908
909 bfa_ioc_ct2_wait_till_nfc_running(rb);
910
911 bfa_ioc_ct2_nfc_clk_reset(rb);
912 } else {
913 bfa_ioc_ct2_nfc_halt(rb);
914
915 bfa_ioc_ct2_clk_reset(rb);
916 bfa_ioc_ct2_mac_reset(rb);
917 bfa_ioc_ct2_clk_reset(rb);
918
919 }
891 } 920 }
892 921
893 /* 922 /*
894 * Mask the interrupts and clear any 923 * Mask the interrupts and clear any
895 * pending interrupts. 924 * pending interrupts left by BIOS/EFI
896 */ 925 */
926
897 writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); 927 writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
898 writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); 928 writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
899 929
900 /* For first time initialization, no need to clear interrupts */ 930 /* For first time initialization, no need to clear interrupts */
901 r32 = readl(rb + HOST_SEM5_REG); 931 r32 = readl(rb + HOST_SEM5_REG);
902 if (r32 & 0x1) { 932 if (r32 & 0x1) {
903 r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT); 933 r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
904 if (r32 == 1) { 934 if (r32 == 1) {
905 writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT); 935 writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
906 readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); 936 readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
907 } 937 }
908 r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT); 938 r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
909 if (r32 == 1) { 939 if (r32 == 1) {
910 writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT); 940 writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
911 readl(rb + CT2_LPU1_HOSTFN_CMD_STAT); 941 readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
912 } 942 }
913 } 943 }
914 944
915 bfa_ioc_ct2_mem_init(rb); 945 bfa_ioc_ct2_mem_init(rb);
916 946
917 writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG); 947 writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
918 writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG); 948 writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
919 949
920 return BFA_STATUS_OK; 950 return BFA_STATUS_OK;
921} 951}
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
index 189fff71e3c2..a14c784ff3fc 100644
--- a/drivers/scsi/bfa/bfa_modules.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -45,6 +45,7 @@ struct bfa_modules_s {
45 struct bfa_diag_s diag_mod; /* diagnostics module */ 45 struct bfa_diag_s diag_mod; /* diagnostics module */
46 struct bfa_phy_s phy; /* phy module */ 46 struct bfa_phy_s phy; /* phy module */
47 struct bfa_dconf_mod_s dconf_mod; /* DCONF common module */ 47 struct bfa_dconf_mod_s dconf_mod; /* DCONF common module */
48 struct bfa_fru_s fru; /* fru module */
48}; 49};
49 50
50/* 51/*
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index 95e4ad8759ac..8ea7697deb9b 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -250,6 +250,12 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
250 return BFA_STATUS_IOC_FAILURE; 250 return BFA_STATUS_IOC_FAILURE;
251 } 251 }
252 252
253 /* if port is d-port enabled, return error */
254 if (port->dport_enabled) {
255 bfa_trc(port, BFA_STATUS_DPORT_ERR);
256 return BFA_STATUS_DPORT_ERR;
257 }
258
253 if (port->endis_pending) { 259 if (port->endis_pending) {
254 bfa_trc(port, BFA_STATUS_DEVBUSY); 260 bfa_trc(port, BFA_STATUS_DEVBUSY);
255 return BFA_STATUS_DEVBUSY; 261 return BFA_STATUS_DEVBUSY;
@@ -300,6 +306,12 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
300 return BFA_STATUS_IOC_FAILURE; 306 return BFA_STATUS_IOC_FAILURE;
301 } 307 }
302 308
309 /* if port is d-port enabled, return error */
310 if (port->dport_enabled) {
311 bfa_trc(port, BFA_STATUS_DPORT_ERR);
312 return BFA_STATUS_DPORT_ERR;
313 }
314
303 if (port->endis_pending) { 315 if (port->endis_pending) {
304 bfa_trc(port, BFA_STATUS_DEVBUSY); 316 bfa_trc(port, BFA_STATUS_DEVBUSY);
305 return BFA_STATUS_DEVBUSY; 317 return BFA_STATUS_DEVBUSY;
@@ -431,6 +443,10 @@ bfa_port_notify(void *arg, enum bfa_ioc_event_e event)
431 port->endis_cbfn = NULL; 443 port->endis_cbfn = NULL;
432 port->endis_pending = BFA_FALSE; 444 port->endis_pending = BFA_FALSE;
433 } 445 }
446
447 /* clear D-port mode */
448 if (port->dport_enabled)
449 bfa_port_set_dportenabled(port, BFA_FALSE);
434 break; 450 break;
435 default: 451 default:
436 break; 452 break;
@@ -467,6 +483,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
467 port->stats_cbfn = NULL; 483 port->stats_cbfn = NULL;
468 port->endis_cbfn = NULL; 484 port->endis_cbfn = NULL;
469 port->pbc_disabled = BFA_FALSE; 485 port->pbc_disabled = BFA_FALSE;
486 port->dport_enabled = BFA_FALSE;
470 487
471 bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port); 488 bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port);
472 bfa_q_qe_init(&port->ioc_notify); 489 bfa_q_qe_init(&port->ioc_notify);
@@ -483,6 +500,21 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
483} 500}
484 501
485/* 502/*
503 * bfa_port_set_dportenabled();
504 *
505 * Port module- set pbc disabled flag
506 *
507 * @param[in] port - Pointer to the Port module data structure
508 *
509 * @return void
510 */
511void
512bfa_port_set_dportenabled(struct bfa_port_s *port, bfa_boolean_t enabled)
513{
514 port->dport_enabled = enabled;
515}
516
517/*
486 * CEE module specific definitions 518 * CEE module specific definitions
487 */ 519 */
488 520
diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h
index 947f897328d6..2fcab6bc6280 100644
--- a/drivers/scsi/bfa/bfa_port.h
+++ b/drivers/scsi/bfa/bfa_port.h
@@ -45,6 +45,7 @@ struct bfa_port_s {
45 bfa_status_t endis_status; 45 bfa_status_t endis_status;
46 struct bfa_ioc_notify_s ioc_notify; 46 struct bfa_ioc_notify_s ioc_notify;
47 bfa_boolean_t pbc_disabled; 47 bfa_boolean_t pbc_disabled;
48 bfa_boolean_t dport_enabled;
48 struct bfa_mem_dma_s port_dma; 49 struct bfa_mem_dma_s port_dma;
49}; 50};
50 51
@@ -66,6 +67,8 @@ bfa_status_t bfa_port_disable(struct bfa_port_s *port,
66u32 bfa_port_meminfo(void); 67u32 bfa_port_meminfo(void);
67void bfa_port_mem_claim(struct bfa_port_s *port, 68void bfa_port_mem_claim(struct bfa_port_s *port,
68 u8 *dma_kva, u64 dma_pa); 69 u8 *dma_kva, u64 dma_pa);
70void bfa_port_set_dportenabled(struct bfa_port_s *port,
71 bfa_boolean_t enabled);
69 72
70/* 73/*
71 * CEE declaration 74 * CEE declaration
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index b2538d60db34..299c1c889b33 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -67,6 +67,9 @@ enum bfa_fcport_sm_event {
67 BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */ 67 BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */
68 BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */ 68 BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
69 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */ 69 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
70 BFA_FCPORT_SM_DPORTENABLE = 10, /* enable dport */
71 BFA_FCPORT_SM_DPORTDISABLE = 11,/* disable dport */
72 BFA_FCPORT_SM_FAA_MISCONFIG = 12, /* FAA misconfiguratin */
70}; 73};
71 74
72/* 75/*
@@ -197,6 +200,10 @@ static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
197 enum bfa_fcport_sm_event event); 200 enum bfa_fcport_sm_event event);
198static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, 201static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
199 enum bfa_fcport_sm_event event); 202 enum bfa_fcport_sm_event event);
203static void bfa_fcport_sm_dport(struct bfa_fcport_s *fcport,
204 enum bfa_fcport_sm_event event);
205static void bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
206 enum bfa_fcport_sm_event event);
200 207
201static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, 208static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
202 enum bfa_fcport_ln_sm_event event); 209 enum bfa_fcport_ln_sm_event event);
@@ -226,6 +233,8 @@ static struct bfa_sm_table_s hal_port_sm_table[] = {
226 {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED}, 233 {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
227 {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN}, 234 {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
228 {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN}, 235 {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
236 {BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT},
237 {BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG},
229}; 238};
230 239
231 240
@@ -1244,6 +1253,12 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
1244 * Just ignore 1253 * Just ignore
1245 */ 1254 */
1246 break; 1255 break;
1256 case BFA_LPS_SM_SET_N2N_PID:
1257 /*
1258 * When topology is set to loop, bfa_lps_set_n2n_pid() sends
1259 * this event. Ignore this event.
1260 */
1261 break;
1247 1262
1248 default: 1263 default:
1249 bfa_sm_fault(lps->bfa, event); 1264 bfa_sm_fault(lps->bfa, event);
@@ -1261,6 +1276,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1261 1276
1262 switch (event) { 1277 switch (event) {
1263 case BFA_LPS_SM_FWRSP: 1278 case BFA_LPS_SM_FWRSP:
1279 case BFA_LPS_SM_OFFLINE:
1264 if (lps->status == BFA_STATUS_OK) { 1280 if (lps->status == BFA_STATUS_OK) {
1265 bfa_sm_set_state(lps, bfa_lps_sm_online); 1281 bfa_sm_set_state(lps, bfa_lps_sm_online);
1266 if (lps->fdisc) 1282 if (lps->fdisc)
@@ -1289,7 +1305,6 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1289 bfa_lps_login_comp(lps); 1305 bfa_lps_login_comp(lps);
1290 break; 1306 break;
1291 1307
1292 case BFA_LPS_SM_OFFLINE:
1293 case BFA_LPS_SM_DELETE: 1308 case BFA_LPS_SM_DELETE:
1294 bfa_sm_set_state(lps, bfa_lps_sm_init); 1309 bfa_sm_set_state(lps, bfa_lps_sm_init);
1295 break; 1310 break;
@@ -2169,6 +2184,12 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
2169 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); 2184 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2170 break; 2185 break;
2171 2186
2187 case BFA_FCPORT_SM_FAA_MISCONFIG:
2188 bfa_fcport_reset_linkinfo(fcport);
2189 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2190 bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
2191 break;
2192
2172 default: 2193 default:
2173 bfa_sm_fault(fcport->bfa, event); 2194 bfa_sm_fault(fcport->bfa, event);
2174 } 2195 }
@@ -2225,6 +2246,12 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
2225 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); 2246 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2226 break; 2247 break;
2227 2248
2249 case BFA_FCPORT_SM_FAA_MISCONFIG:
2250 bfa_fcport_reset_linkinfo(fcport);
2251 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2252 bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
2253 break;
2254
2228 default: 2255 default:
2229 bfa_sm_fault(fcport->bfa, event); 2256 bfa_sm_fault(fcport->bfa, event);
2230 } 2257 }
@@ -2250,11 +2277,11 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2250 if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { 2277 if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
2251 2278
2252 bfa_trc(fcport->bfa, 2279 bfa_trc(fcport->bfa,
2253 pevent->link_state.vc_fcf.fcf.fipenabled); 2280 pevent->link_state.attr.vc_fcf.fcf.fipenabled);
2254 bfa_trc(fcport->bfa, 2281 bfa_trc(fcport->bfa,
2255 pevent->link_state.vc_fcf.fcf.fipfailed); 2282 pevent->link_state.attr.vc_fcf.fcf.fipfailed);
2256 2283
2257 if (pevent->link_state.vc_fcf.fcf.fipfailed) 2284 if (pevent->link_state.attr.vc_fcf.fcf.fipfailed)
2258 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 2285 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2259 BFA_PL_EID_FIP_FCF_DISC, 0, 2286 BFA_PL_EID_FIP_FCF_DISC, 0,
2260 "FIP FCF Discovery Failed"); 2287 "FIP FCF Discovery Failed");
@@ -2311,6 +2338,12 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2311 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); 2338 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2312 break; 2339 break;
2313 2340
2341 case BFA_FCPORT_SM_FAA_MISCONFIG:
2342 bfa_fcport_reset_linkinfo(fcport);
2343 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2344 bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
2345 break;
2346
2314 default: 2347 default:
2315 bfa_sm_fault(fcport->bfa, event); 2348 bfa_sm_fault(fcport->bfa, event);
2316 } 2349 }
@@ -2404,6 +2437,12 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2404 } 2437 }
2405 break; 2438 break;
2406 2439
2440 case BFA_FCPORT_SM_FAA_MISCONFIG:
2441 bfa_fcport_reset_linkinfo(fcport);
2442 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2443 bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
2444 break;
2445
2407 default: 2446 default:
2408 bfa_sm_fault(fcport->bfa, event); 2447 bfa_sm_fault(fcport->bfa, event);
2409 } 2448 }
@@ -2449,6 +2488,12 @@ bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
2449 bfa_reqq_wcancel(&fcport->reqq_wait); 2488 bfa_reqq_wcancel(&fcport->reqq_wait);
2450 break; 2489 break;
2451 2490
2491 case BFA_FCPORT_SM_FAA_MISCONFIG:
2492 bfa_fcport_reset_linkinfo(fcport);
2493 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2494 bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
2495 break;
2496
2452 default: 2497 default:
2453 bfa_sm_fault(fcport->bfa, event); 2498 bfa_sm_fault(fcport->bfa, event);
2454 } 2499 }
@@ -2600,6 +2645,10 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
2600 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); 2645 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2601 break; 2646 break;
2602 2647
2648 case BFA_FCPORT_SM_DPORTENABLE:
2649 bfa_sm_set_state(fcport, bfa_fcport_sm_dport);
2650 break;
2651
2603 default: 2652 default:
2604 bfa_sm_fault(fcport->bfa, event); 2653 bfa_sm_fault(fcport->bfa, event);
2605 } 2654 }
@@ -2680,6 +2729,81 @@ bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2680 } 2729 }
2681} 2730}
2682 2731
2732static void
2733bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event)
2734{
2735 bfa_trc(fcport->bfa, event);
2736
2737 switch (event) {
2738 case BFA_FCPORT_SM_DPORTENABLE:
2739 case BFA_FCPORT_SM_DISABLE:
2740 case BFA_FCPORT_SM_ENABLE:
2741 case BFA_FCPORT_SM_START:
2742 /*
2743 * Ignore event for a port that is dport
2744 */
2745 break;
2746
2747 case BFA_FCPORT_SM_STOP:
2748 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2749 break;
2750
2751 case BFA_FCPORT_SM_HWFAIL:
2752 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2753 break;
2754
2755 case BFA_FCPORT_SM_DPORTDISABLE:
2756 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2757 break;
2758
2759 default:
2760 bfa_sm_fault(fcport->bfa, event);
2761 }
2762}
2763
2764static void
2765bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
2766 enum bfa_fcport_sm_event event)
2767{
2768 bfa_trc(fcport->bfa, event);
2769
2770 switch (event) {
2771 case BFA_FCPORT_SM_DPORTENABLE:
2772 case BFA_FCPORT_SM_ENABLE:
2773 case BFA_FCPORT_SM_START:
2774 /*
2775 * Ignore event for a port as there is FAA misconfig
2776 */
2777 break;
2778
2779 case BFA_FCPORT_SM_DISABLE:
2780 if (bfa_fcport_send_disable(fcport))
2781 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2782 else
2783 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
2784
2785 bfa_fcport_reset_linkinfo(fcport);
2786 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2787 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2788 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2789 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2790 break;
2791
2792 case BFA_FCPORT_SM_STOP:
2793 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2794 break;
2795
2796 case BFA_FCPORT_SM_HWFAIL:
2797 bfa_fcport_reset_linkinfo(fcport);
2798 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2799 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2800 break;
2801
2802 default:
2803 bfa_sm_fault(fcport->bfa, event);
2804 }
2805}
2806
2683/* 2807/*
2684 * Link state is down 2808 * Link state is down
2685 */ 2809 */
@@ -2943,6 +3067,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2943 */ 3067 */
2944 do_gettimeofday(&tv); 3068 do_gettimeofday(&tv);
2945 fcport->stats_reset_time = tv.tv_sec; 3069 fcport->stats_reset_time = tv.tv_sec;
3070 fcport->stats_dma_ready = BFA_FALSE;
2946 3071
2947 /* 3072 /*
2948 * initialize and set default configuration 3073 * initialize and set default configuration
@@ -2953,6 +3078,9 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2953 port_cfg->maxfrsize = 0; 3078 port_cfg->maxfrsize = 0;
2954 3079
2955 port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS; 3080 port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
3081 port_cfg->qos_bw.high = BFA_QOS_BW_HIGH;
3082 port_cfg->qos_bw.med = BFA_QOS_BW_MED;
3083 port_cfg->qos_bw.low = BFA_QOS_BW_LOW;
2956 3084
2957 INIT_LIST_HEAD(&fcport->stats_pending_q); 3085 INIT_LIST_HEAD(&fcport->stats_pending_q);
2958 INIT_LIST_HEAD(&fcport->statsclr_pending_q); 3086 INIT_LIST_HEAD(&fcport->statsclr_pending_q);
@@ -2996,6 +3124,21 @@ bfa_fcport_iocdisable(struct bfa_s *bfa)
2996 bfa_trunk_iocdisable(bfa); 3124 bfa_trunk_iocdisable(bfa);
2997} 3125}
2998 3126
3127/*
3128 * Update loop info in fcport for SCN online
3129 */
3130static void
3131bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport,
3132 struct bfa_fcport_loop_info_s *loop_info)
3133{
3134 fcport->myalpa = loop_info->myalpa;
3135 fcport->alpabm_valid =
3136 loop_info->alpabm_val;
3137 memcpy(fcport->alpabm.alpa_bm,
3138 loop_info->alpabm.alpa_bm,
3139 sizeof(struct fc_alpabm_s));
3140}
3141
2999static void 3142static void
3000bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport) 3143bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
3001{ 3144{
@@ -3005,12 +3148,15 @@ bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
3005 fcport->speed = pevent->link_state.speed; 3148 fcport->speed = pevent->link_state.speed;
3006 fcport->topology = pevent->link_state.topology; 3149 fcport->topology = pevent->link_state.topology;
3007 3150
3008 if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) 3151 if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) {
3009 fcport->myalpa = 0; 3152 bfa_fcport_update_loop_info(fcport,
3153 &pevent->link_state.attr.loop_info);
3154 return;
3155 }
3010 3156
3011 /* QoS Details */ 3157 /* QoS Details */
3012 fcport->qos_attr = pevent->link_state.qos_attr; 3158 fcport->qos_attr = pevent->link_state.qos_attr;
3013 fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr; 3159 fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr;
3014 3160
3015 /* 3161 /*
3016 * update trunk state if applicable 3162 * update trunk state if applicable
@@ -3019,7 +3165,8 @@ bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
3019 trunk->attr.state = BFA_TRUNK_DISABLED; 3165 trunk->attr.state = BFA_TRUNK_DISABLED;
3020 3166
3021 /* update FCoE specific */ 3167 /* update FCoE specific */
3022 fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan); 3168 fcport->fcoe_vlan =
3169 be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan);
3023 3170
3024 bfa_trc(fcport->bfa, fcport->speed); 3171 bfa_trc(fcport->bfa, fcport->speed);
3025 bfa_trc(fcport->bfa, fcport->topology); 3172 bfa_trc(fcport->bfa, fcport->topology);
@@ -3453,6 +3600,7 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3453 case BFI_FCPORT_I2H_ENABLE_RSP: 3600 case BFI_FCPORT_I2H_ENABLE_RSP:
3454 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) { 3601 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {
3455 3602
3603 fcport->stats_dma_ready = BFA_TRUE;
3456 if (fcport->use_flash_cfg) { 3604 if (fcport->use_flash_cfg) {
3457 fcport->cfg = i2hmsg.penable_rsp->port_cfg; 3605 fcport->cfg = i2hmsg.penable_rsp->port_cfg;
3458 fcport->cfg.maxfrsize = 3606 fcport->cfg.maxfrsize =
@@ -3468,6 +3616,8 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3468 else 3616 else
3469 fcport->trunk.attr.state = 3617 fcport->trunk.attr.state =
3470 BFA_TRUNK_DISABLED; 3618 BFA_TRUNK_DISABLED;
3619 fcport->qos_attr.qos_bw =
3620 i2hmsg.penable_rsp->port_cfg.qos_bw;
3471 fcport->use_flash_cfg = BFA_FALSE; 3621 fcport->use_flash_cfg = BFA_FALSE;
3472 } 3622 }
3473 3623
@@ -3476,6 +3626,9 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3476 else 3626 else
3477 fcport->qos_attr.state = BFA_QOS_DISABLED; 3627 fcport->qos_attr.state = BFA_QOS_DISABLED;
3478 3628
3629 fcport->qos_attr.qos_bw_op =
3630 i2hmsg.penable_rsp->port_cfg.qos_bw;
3631
3479 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); 3632 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3480 } 3633 }
3481 break; 3634 break;
@@ -3488,8 +3641,17 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3488 case BFI_FCPORT_I2H_EVENT: 3641 case BFI_FCPORT_I2H_EVENT:
3489 if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP) 3642 if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
3490 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP); 3643 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
3491 else 3644 else {
3492 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN); 3645 if (i2hmsg.event->link_state.linkstate_rsn ==
3646 BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG)
3647 bfa_sm_send_event(fcport,
3648 BFA_FCPORT_SM_FAA_MISCONFIG);
3649 else
3650 bfa_sm_send_event(fcport,
3651 BFA_FCPORT_SM_LINKDOWN);
3652 }
3653 fcport->qos_attr.qos_bw_op =
3654 i2hmsg.event->link_state.qos_attr.qos_bw_op;
3493 break; 3655 break;
3494 3656
3495 case BFI_FCPORT_I2H_TRUNK_SCN: 3657 case BFI_FCPORT_I2H_TRUNK_SCN:
@@ -3609,6 +3771,9 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3609 3771
3610 if (fcport->cfg.trunked == BFA_TRUE) 3772 if (fcport->cfg.trunked == BFA_TRUE)
3611 return BFA_STATUS_TRUNK_ENABLED; 3773 return BFA_STATUS_TRUNK_ENABLED;
3774 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
3775 (speed == BFA_PORT_SPEED_16GBPS))
3776 return BFA_STATUS_UNSUPP_SPEED;
3612 if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) { 3777 if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3613 bfa_trc(bfa, fcport->speed_sup); 3778 bfa_trc(bfa, fcport->speed_sup);
3614 return BFA_STATUS_UNSUPP_SPEED; 3779 return BFA_STATUS_UNSUPP_SPEED;
@@ -3663,7 +3828,26 @@ bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3663 3828
3664 switch (topology) { 3829 switch (topology) {
3665 case BFA_PORT_TOPOLOGY_P2P: 3830 case BFA_PORT_TOPOLOGY_P2P:
3831 break;
3832
3666 case BFA_PORT_TOPOLOGY_LOOP: 3833 case BFA_PORT_TOPOLOGY_LOOP:
3834 if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) ||
3835 (fcport->qos_attr.state != BFA_QOS_DISABLED))
3836 return BFA_STATUS_ERROR_QOS_ENABLED;
3837 if (fcport->cfg.ratelimit != BFA_FALSE)
3838 return BFA_STATUS_ERROR_TRL_ENABLED;
3839 if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) ||
3840 (fcport->trunk.attr.state != BFA_TRUNK_DISABLED))
3841 return BFA_STATUS_ERROR_TRUNK_ENABLED;
3842 if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) ||
3843 (fcport->cfg.speed == BFA_PORT_SPEED_16GBPS))
3844 return BFA_STATUS_UNSUPP_SPEED;
3845 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type))
3846 return BFA_STATUS_LOOP_UNSUPP_MEZZ;
3847 if (bfa_fcport_is_dport(bfa) != BFA_FALSE)
3848 return BFA_STATUS_DPORT_ERR;
3849 break;
3850
3667 case BFA_PORT_TOPOLOGY_AUTO: 3851 case BFA_PORT_TOPOLOGY_AUTO:
3668 break; 3852 break;
3669 3853
@@ -3686,6 +3870,17 @@ bfa_fcport_get_topology(struct bfa_s *bfa)
3686 return fcport->topology; 3870 return fcport->topology;
3687} 3871}
3688 3872
3873/**
3874 * Get config topology.
3875 */
3876enum bfa_port_topology
3877bfa_fcport_get_cfg_topology(struct bfa_s *bfa)
3878{
3879 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3880
3881 return fcport->cfg.topology;
3882}
3883
3689bfa_status_t 3884bfa_status_t
3690bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa) 3885bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3691{ 3886{
@@ -3761,9 +3956,11 @@ bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3761u8 3956u8
3762bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa) 3957bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3763{ 3958{
3764 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3959 if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP)
3960 return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit;
3765 3961
3766 return fcport->cfg.rx_bbcredit; 3962 else
3963 return 0;
3767} 3964}
3768 3965
3769void 3966void
@@ -3850,8 +4047,9 @@ bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
3850{ 4047{
3851 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 4048 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3852 4049
3853 if (bfa_ioc_is_disabled(&bfa->ioc)) 4050 if (!bfa_iocfc_is_operational(bfa) ||
3854 return BFA_STATUS_IOC_DISABLED; 4051 !fcport->stats_dma_ready)
4052 return BFA_STATUS_IOC_NON_OP;
3855 4053
3856 if (!list_empty(&fcport->statsclr_pending_q)) 4054 if (!list_empty(&fcport->statsclr_pending_q))
3857 return BFA_STATUS_DEVBUSY; 4055 return BFA_STATUS_DEVBUSY;
@@ -3876,6 +4074,10 @@ bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
3876{ 4074{
3877 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 4075 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3878 4076
4077 if (!bfa_iocfc_is_operational(bfa) ||
4078 !fcport->stats_dma_ready)
4079 return BFA_STATUS_IOC_NON_OP;
4080
3879 if (!list_empty(&fcport->stats_pending_q)) 4081 if (!list_empty(&fcport->stats_pending_q))
3880 return BFA_STATUS_DEVBUSY; 4082 return BFA_STATUS_DEVBUSY;
3881 4083
@@ -3905,6 +4107,40 @@ bfa_fcport_is_disabled(struct bfa_s *bfa)
3905} 4107}
3906 4108
3907bfa_boolean_t 4109bfa_boolean_t
4110bfa_fcport_is_dport(struct bfa_s *bfa)
4111{
4112 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4113
4114 return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4115 BFA_PORT_ST_DPORT);
4116}
4117
4118bfa_status_t
4119bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw)
4120{
4121 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4122 enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
4123
4124 bfa_trc(bfa, ioc_type);
4125
4126 if ((qos_bw->high == 0) || (qos_bw->med == 0) || (qos_bw->low == 0))
4127 return BFA_STATUS_QOS_BW_INVALID;
4128
4129 if ((qos_bw->high + qos_bw->med + qos_bw->low) != 100)
4130 return BFA_STATUS_QOS_BW_INVALID;
4131
4132 if ((qos_bw->med > qos_bw->high) || (qos_bw->low > qos_bw->med) ||
4133 (qos_bw->low > qos_bw->high))
4134 return BFA_STATUS_QOS_BW_INVALID;
4135
4136 if ((ioc_type == BFA_IOC_TYPE_FC) &&
4137 (fcport->cfg.topology != BFA_PORT_TOPOLOGY_LOOP))
4138 fcport->cfg.qos_bw = *qos_bw;
4139
4140 return BFA_STATUS_OK;
4141}
4142
4143bfa_boolean_t
3908bfa_fcport_is_ratelim(struct bfa_s *bfa) 4144bfa_fcport_is_ratelim(struct bfa_s *bfa)
3909{ 4145{
3910 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 4146 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
@@ -3981,6 +4217,26 @@ bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
3981 return fcport->cfg.trunked; 4217 return fcport->cfg.trunked;
3982} 4218}
3983 4219
4220void
4221bfa_fcport_dportenable(struct bfa_s *bfa)
4222{
4223 /*
4224 * Assume caller check for port is in disable state
4225 */
4226 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE);
4227 bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE);
4228}
4229
4230void
4231bfa_fcport_dportdisable(struct bfa_s *bfa)
4232{
4233 /*
4234 * Assume caller check for port is in disable state
4235 */
4236 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE);
4237 bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
4238}
4239
3984/* 4240/*
3985 * Rport State machine functions 4241 * Rport State machine functions
3986 */ 4242 */
@@ -4707,6 +4963,21 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4707 bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN); 4963 bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
4708 break; 4964 break;
4709 4965
4966 case BFI_RPORT_I2H_LIP_SCN_ONLINE:
4967 bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa),
4968 &msg.lip_scn->loop_info);
4969 bfa_cb_rport_scn_online(bfa);
4970 break;
4971
4972 case BFI_RPORT_I2H_LIP_SCN_OFFLINE:
4973 bfa_cb_rport_scn_offline(bfa);
4974 break;
4975
4976 case BFI_RPORT_I2H_NO_DEV:
4977 rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle);
4978 bfa_cb_rport_scn_no_dev(rp->rport_drv);
4979 break;
4980
4710 default: 4981 default:
4711 bfa_trc(bfa, m->mhdr.msg_id); 4982 bfa_trc(bfa, m->mhdr.msg_id);
4712 WARN_ON(1); 4983 WARN_ON(1);
@@ -5348,6 +5619,37 @@ bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
5348} 5619}
5349 5620
5350/* 5621/*
5622 * Dport forward declaration
5623 */
5624
5625/*
5626 * BFA DPORT state machine events
5627 */
5628enum bfa_dport_sm_event {
5629 BFA_DPORT_SM_ENABLE = 1, /* dport enable event */
5630 BFA_DPORT_SM_DISABLE = 2, /* dport disable event */
5631 BFA_DPORT_SM_FWRSP = 3, /* fw enable/disable rsp */
5632 BFA_DPORT_SM_QRESUME = 4, /* CQ space available */
5633 BFA_DPORT_SM_HWFAIL = 5, /* IOC h/w failure */
5634};
5635
5636static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
5637 enum bfa_dport_sm_event event);
5638static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
5639 enum bfa_dport_sm_event event);
5640static void bfa_dport_sm_enabling(struct bfa_dport_s *dport,
5641 enum bfa_dport_sm_event event);
5642static void bfa_dport_sm_enabled(struct bfa_dport_s *dport,
5643 enum bfa_dport_sm_event event);
5644static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
5645 enum bfa_dport_sm_event event);
5646static void bfa_dport_sm_disabling(struct bfa_dport_s *dport,
5647 enum bfa_dport_sm_event event);
5648static void bfa_dport_qresume(void *cbarg);
5649static void bfa_dport_req_comp(struct bfa_dport_s *dport,
5650 bfi_diag_dport_rsp_t *msg);
5651
5652/*
5351 * BFA fcdiag module 5653 * BFA fcdiag module
5352 */ 5654 */
5353#define BFA_DIAG_QTEST_TOV 1000 /* msec */ 5655#define BFA_DIAG_QTEST_TOV 1000 /* msec */
@@ -5377,15 +5679,24 @@ bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5377 struct bfa_pcidev_s *pcidev) 5679 struct bfa_pcidev_s *pcidev)
5378{ 5680{
5379 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); 5681 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5682 struct bfa_dport_s *dport = &fcdiag->dport;
5683
5380 fcdiag->bfa = bfa; 5684 fcdiag->bfa = bfa;
5381 fcdiag->trcmod = bfa->trcmod; 5685 fcdiag->trcmod = bfa->trcmod;
5382 /* The common DIAG attach bfa_diag_attach() will do all memory claim */ 5686 /* The common DIAG attach bfa_diag_attach() will do all memory claim */
5687 dport->bfa = bfa;
5688 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
5689 bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport);
5690 dport->cbfn = NULL;
5691 dport->cbarg = NULL;
5383} 5692}
5384 5693
5385static void 5694static void
5386bfa_fcdiag_iocdisable(struct bfa_s *bfa) 5695bfa_fcdiag_iocdisable(struct bfa_s *bfa)
5387{ 5696{
5388 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); 5697 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5698 struct bfa_dport_s *dport = &fcdiag->dport;
5699
5389 bfa_trc(fcdiag, fcdiag->lb.lock); 5700 bfa_trc(fcdiag, fcdiag->lb.lock);
5390 if (fcdiag->lb.lock) { 5701 if (fcdiag->lb.lock) {
5391 fcdiag->lb.status = BFA_STATUS_IOC_FAILURE; 5702 fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
@@ -5393,6 +5704,8 @@ bfa_fcdiag_iocdisable(struct bfa_s *bfa)
5393 fcdiag->lb.lock = 0; 5704 fcdiag->lb.lock = 0;
5394 bfa_fcdiag_set_busy_status(fcdiag); 5705 bfa_fcdiag_set_busy_status(fcdiag);
5395 } 5706 }
5707
5708 bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL);
5396} 5709}
5397 5710
5398static void 5711static void
@@ -5577,6 +5890,9 @@ bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5577 case BFI_DIAG_I2H_QTEST: 5890 case BFI_DIAG_I2H_QTEST:
5578 bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg); 5891 bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
5579 break; 5892 break;
5893 case BFI_DIAG_I2H_DPORT:
5894 bfa_dport_req_comp(&fcdiag->dport, (bfi_diag_dport_rsp_t *)msg);
5895 break;
5580 default: 5896 default:
5581 bfa_trc(fcdiag, msg->mhdr.msg_id); 5897 bfa_trc(fcdiag, msg->mhdr.msg_id);
5582 WARN_ON(1); 5898 WARN_ON(1);
@@ -5646,12 +5962,18 @@ bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
5646 } 5962 }
5647 } 5963 }
5648 5964
5965 /*
5966 * For CT2, 1G is not supported
5967 */
5968 if ((speed == BFA_PORT_SPEED_1GBPS) &&
5969 (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) {
5970 bfa_trc(fcdiag, speed);
5971 return BFA_STATUS_UNSUPP_SPEED;
5972 }
5973
5649 /* For Mezz card, port speed entered needs to be checked */ 5974 /* For Mezz card, port speed entered needs to be checked */
5650 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) { 5975 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
5651 if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) { 5976 if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
5652 if ((speed == BFA_PORT_SPEED_1GBPS) &&
5653 (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
5654 return BFA_STATUS_UNSUPP_SPEED;
5655 if (!(speed == BFA_PORT_SPEED_1GBPS || 5977 if (!(speed == BFA_PORT_SPEED_1GBPS ||
5656 speed == BFA_PORT_SPEED_2GBPS || 5978 speed == BFA_PORT_SPEED_2GBPS ||
5657 speed == BFA_PORT_SPEED_4GBPS || 5979 speed == BFA_PORT_SPEED_4GBPS ||
@@ -5764,3 +6086,379 @@ bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
5764 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); 6086 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5765 return fcdiag->lb.lock ? BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK; 6087 return fcdiag->lb.lock ? BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
5766} 6088}
6089
6090/*
6091 * D-port
6092 */
6093static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
6094 enum bfi_dport_req req);
6095static void
6096bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status)
6097{
6098 if (dport->cbfn != NULL) {
6099 dport->cbfn(dport->cbarg, bfa_status);
6100 dport->cbfn = NULL;
6101 dport->cbarg = NULL;
6102 }
6103}
6104
6105static void
6106bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6107{
6108 bfa_trc(dport->bfa, event);
6109
6110 switch (event) {
6111 case BFA_DPORT_SM_ENABLE:
6112 bfa_fcport_dportenable(dport->bfa);
6113 if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE))
6114 bfa_sm_set_state(dport, bfa_dport_sm_enabling);
6115 else
6116 bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait);
6117 break;
6118
6119 case BFA_DPORT_SM_DISABLE:
6120 /* Already disabled */
6121 break;
6122
6123 case BFA_DPORT_SM_HWFAIL:
6124 /* ignore */
6125 break;
6126
6127 default:
6128 bfa_sm_fault(dport->bfa, event);
6129 }
6130}
6131
6132static void
6133bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
6134 enum bfa_dport_sm_event event)
6135{
6136 bfa_trc(dport->bfa, event);
6137
6138 switch (event) {
6139 case BFA_DPORT_SM_QRESUME:
6140 bfa_sm_set_state(dport, bfa_dport_sm_enabling);
6141 bfa_dport_send_req(dport, BFI_DPORT_ENABLE);
6142 break;
6143
6144 case BFA_DPORT_SM_HWFAIL:
6145 bfa_reqq_wcancel(&dport->reqq_wait);
6146 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6147 bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
6148 break;
6149
6150 default:
6151 bfa_sm_fault(dport->bfa, event);
6152 }
6153}
6154
6155static void
6156bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6157{
6158 bfa_trc(dport->bfa, event);
6159
6160 switch (event) {
6161 case BFA_DPORT_SM_FWRSP:
6162 bfa_sm_set_state(dport, bfa_dport_sm_enabled);
6163 break;
6164
6165 case BFA_DPORT_SM_HWFAIL:
6166 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6167 bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
6168 break;
6169
6170 default:
6171 bfa_sm_fault(dport->bfa, event);
6172 }
6173}
6174
6175static void
6176bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6177{
6178 bfa_trc(dport->bfa, event);
6179
6180 switch (event) {
6181 case BFA_DPORT_SM_ENABLE:
6182 /* Already enabled */
6183 break;
6184
6185 case BFA_DPORT_SM_DISABLE:
6186 bfa_fcport_dportdisable(dport->bfa);
6187 if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE))
6188 bfa_sm_set_state(dport, bfa_dport_sm_disabling);
6189 else
6190 bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait);
6191 break;
6192
6193 case BFA_DPORT_SM_HWFAIL:
6194 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6195 break;
6196
6197 default:
6198 bfa_sm_fault(dport->bfa, event);
6199 }
6200}
6201
6202static void
6203bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
6204 enum bfa_dport_sm_event event)
6205{
6206 bfa_trc(dport->bfa, event);
6207
6208 switch (event) {
6209 case BFA_DPORT_SM_QRESUME:
6210 bfa_sm_set_state(dport, bfa_dport_sm_disabling);
6211 bfa_dport_send_req(dport, BFI_DPORT_DISABLE);
6212 break;
6213
6214 case BFA_DPORT_SM_HWFAIL:
6215 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6216 bfa_reqq_wcancel(&dport->reqq_wait);
6217 bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6218 break;
6219
6220 default:
6221 bfa_sm_fault(dport->bfa, event);
6222 }
6223}
6224
6225static void
6226bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6227{
6228 bfa_trc(dport->bfa, event);
6229
6230 switch (event) {
6231 case BFA_DPORT_SM_FWRSP:
6232 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6233 break;
6234
6235 case BFA_DPORT_SM_HWFAIL:
6236 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6237 bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6238 break;
6239
6240 default:
6241 bfa_sm_fault(dport->bfa, event);
6242 }
6243}
6244
6245
6246static bfa_boolean_t
6247bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
6248{
6249 struct bfi_diag_dport_req_s *m;
6250
6251 /*
6252 * Increment message tag before queue check, so that responses to old
6253 * requests are discarded.
6254 */
6255 dport->msgtag++;
6256
6257 /*
6258 * check for room in queue to send request now
6259 */
6260 m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG);
6261 if (!m) {
6262 bfa_reqq_wait(dport->bfa, BFA_REQQ_PORT, &dport->reqq_wait);
6263 return BFA_FALSE;
6264 }
6265
6266 bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT,
6267 bfa_fn_lpu(dport->bfa));
6268 m->req = req;
6269 m->msgtag = dport->msgtag;
6270
6271 /*
6272 * queue I/O message to firmware
6273 */
6274 bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh);
6275
6276 return BFA_TRUE;
6277}
6278
6279static void
6280bfa_dport_qresume(void *cbarg)
6281{
6282 struct bfa_dport_s *dport = cbarg;
6283
6284 bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME);
6285}
6286
6287static void
6288bfa_dport_req_comp(struct bfa_dport_s *dport, bfi_diag_dport_rsp_t *msg)
6289{
6290 bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP);
6291 bfa_cb_fcdiag_dport(dport, msg->status);
6292}
6293
6294/*
6295 * Dport enable
6296 *
6297 * @param[in] *bfa - bfa data struct
6298 */
6299bfa_status_t
6300bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
6301{
6302 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6303 struct bfa_dport_s *dport = &fcdiag->dport;
6304
6305 /*
6306 * Dport is not support in MEZZ card
6307 */
6308 if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) {
6309 bfa_trc(dport->bfa, BFA_STATUS_PBC);
6310 return BFA_STATUS_CMD_NOTSUPP_MEZZ;
6311 }
6312
6313 /*
6314 * Check to see if IOC is down
6315 */
6316 if (!bfa_iocfc_is_operational(bfa))
6317 return BFA_STATUS_IOC_NON_OP;
6318
6319 /* if port is PBC disabled, return error */
6320 if (bfa_fcport_is_pbcdisabled(bfa)) {
6321 bfa_trc(dport->bfa, BFA_STATUS_PBC);
6322 return BFA_STATUS_PBC;
6323 }
6324
6325 /*
6326 * Check if port mode is FC port
6327 */
6328 if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) {
6329 bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc));
6330 return BFA_STATUS_CMD_NOTSUPP_CNA;
6331 }
6332
6333 /*
6334 * Check if port is in LOOP mode
6335 */
6336 if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) ||
6337 (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) {
6338 bfa_trc(dport->bfa, 0);
6339 return BFA_STATUS_TOPOLOGY_LOOP;
6340 }
6341
6342 /*
6343 * Check if port is TRUNK mode
6344 */
6345 if (bfa_fcport_is_trunk_enabled(bfa)) {
6346 bfa_trc(dport->bfa, 0);
6347 return BFA_STATUS_ERROR_TRUNK_ENABLED;
6348 }
6349
6350 /*
6351 * Check to see if port is disable or in dport state
6352 */
6353 if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6354 (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6355 bfa_trc(dport->bfa, 0);
6356 return BFA_STATUS_PORT_NOT_DISABLED;
6357 }
6358
6359 /*
6360 * Check if dport is busy
6361 */
6362 if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
6363 bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
6364 bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
6365 bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait)) {
6366 return BFA_STATUS_DEVBUSY;
6367 }
6368
6369 /*
6370 * Check if dport is already enabled
6371 */
6372 if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6373 bfa_trc(dport->bfa, 0);
6374 return BFA_STATUS_DPORT_ENABLED;
6375 }
6376
6377 dport->cbfn = cbfn;
6378 dport->cbarg = cbarg;
6379
6380 bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE);
6381 return BFA_STATUS_OK;
6382}
6383
6384/*
6385 * Dport disable
6386 *
6387 * @param[in] *bfa - bfa data struct
6388 */
6389bfa_status_t
6390bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
6391{
6392 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6393 struct bfa_dport_s *dport = &fcdiag->dport;
6394
6395 if (bfa_ioc_is_disabled(&bfa->ioc))
6396 return BFA_STATUS_IOC_DISABLED;
6397
6398 /* if port is PBC disabled, return error */
6399 if (bfa_fcport_is_pbcdisabled(bfa)) {
6400 bfa_trc(dport->bfa, BFA_STATUS_PBC);
6401 return BFA_STATUS_PBC;
6402 }
6403
6404 /*
6405 * Check to see if port is disable or in dport state
6406 */
6407 if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6408 (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6409 bfa_trc(dport->bfa, 0);
6410 return BFA_STATUS_PORT_NOT_DISABLED;
6411 }
6412
6413 /*
6414 * Check if dport is busy
6415 */
6416 if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
6417 bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
6418 bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
6419 bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait))
6420 return BFA_STATUS_DEVBUSY;
6421
6422 /*
6423 * Check if dport is already disabled
6424 */
6425 if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) {
6426 bfa_trc(dport->bfa, 0);
6427 return BFA_STATUS_DPORT_DISABLED;
6428 }
6429
6430 dport->cbfn = cbfn;
6431 dport->cbarg = cbarg;
6432
6433 bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE);
6434 return BFA_STATUS_OK;
6435}
6436
6437/*
6438 * Get D-port state
6439 *
6440 * @param[in] *bfa - bfa data struct
6441 */
6442
6443bfa_status_t
6444bfa_dport_get_state(struct bfa_s *bfa, enum bfa_dport_state *state)
6445{
6446 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6447 struct bfa_dport_s *dport = &fcdiag->dport;
6448
6449 if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled))
6450 *state = BFA_DPORT_ST_ENABLED;
6451 else if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
6452 bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait))
6453 *state = BFA_DPORT_ST_ENABLING;
6454 else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled))
6455 *state = BFA_DPORT_ST_DISABLED;
6456 else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
6457 bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait))
6458 *state = BFA_DPORT_ST_DISABLING;
6459 else {
6460 bfa_trc(dport->bfa, BFA_STATUS_EINVAL);
6461 return BFA_STATUS_EINVAL;
6462 }
6463 return BFA_STATUS_OK;
6464}
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index 1abcf7c51661..8d7fbecfcb22 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -474,8 +474,10 @@ struct bfa_fcport_s {
474 /* supported speeds */ 474 /* supported speeds */
475 enum bfa_port_speed speed; /* current speed */ 475 enum bfa_port_speed speed; /* current speed */
476 enum bfa_port_topology topology; /* current topology */ 476 enum bfa_port_topology topology; /* current topology */
477 u8 myalpa; /* my ALPA in LOOP topology */
478 u8 rsvd[3]; 477 u8 rsvd[3];
478 u8 myalpa; /* my ALPA in LOOP topology */
479 u8 alpabm_valid; /* alpa bitmap valid or not */
480 struct fc_alpabm_s alpabm; /* alpa bitmap */
479 struct bfa_port_cfg_s cfg; /* current port configuration */ 481 struct bfa_port_cfg_s cfg; /* current port configuration */
480 bfa_boolean_t use_flash_cfg; /* get port cfg from flash */ 482 bfa_boolean_t use_flash_cfg; /* get port cfg from flash */
481 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ 483 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
@@ -512,6 +514,7 @@ struct bfa_fcport_s {
512 struct bfa_fcport_trunk_s trunk; 514 struct bfa_fcport_trunk_s trunk;
513 u16 fcoe_vlan; 515 u16 fcoe_vlan;
514 struct bfa_mem_dma_s fcport_dma; 516 struct bfa_mem_dma_s fcport_dma;
517 bfa_boolean_t stats_dma_ready;
515}; 518};
516 519
517#define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport) 520#define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport)
@@ -534,6 +537,7 @@ enum bfa_port_speed bfa_fcport_get_speed(struct bfa_s *bfa);
534bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa, 537bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa,
535 enum bfa_port_topology topo); 538 enum bfa_port_topology topo);
536enum bfa_port_topology bfa_fcport_get_topology(struct bfa_s *bfa); 539enum bfa_port_topology bfa_fcport_get_topology(struct bfa_s *bfa);
540enum bfa_port_topology bfa_fcport_get_cfg_topology(struct bfa_s *bfa);
537bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa); 541bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa);
538bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa); 542bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa);
539u8 bfa_fcport_get_myalpa(struct bfa_s *bfa); 543u8 bfa_fcport_get_myalpa(struct bfa_s *bfa);
@@ -547,6 +551,9 @@ void bfa_fcport_event_register(struct bfa_s *bfa,
547 void (*event_cbfn) (void *cbarg, 551 void (*event_cbfn) (void *cbarg,
548 enum bfa_port_linkstate event), void *event_cbarg); 552 enum bfa_port_linkstate event), void *event_cbarg);
549bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa); 553bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
554bfa_boolean_t bfa_fcport_is_dport(struct bfa_s *bfa);
555bfa_status_t bfa_fcport_set_qos_bw(struct bfa_s *bfa,
556 struct bfa_qos_bw_s *qos_bw);
550enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa); 557enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
551 558
552void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn); 559void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn);
@@ -560,6 +567,8 @@ bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa,
560 struct bfa_cb_pending_q_s *cb); 567 struct bfa_cb_pending_q_s *cb);
561bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa); 568bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
562bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa); 569bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa);
570void bfa_fcport_dportenable(struct bfa_s *bfa);
571void bfa_fcport_dportdisable(struct bfa_s *bfa);
563bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa); 572bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa);
564void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state); 573void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state);
565 574
@@ -575,6 +584,9 @@ void bfa_cb_rport_offline(void *rport);
575void bfa_cb_rport_qos_scn_flowid(void *rport, 584void bfa_cb_rport_qos_scn_flowid(void *rport,
576 struct bfa_rport_qos_attr_s old_qos_attr, 585 struct bfa_rport_qos_attr_s old_qos_attr,
577 struct bfa_rport_qos_attr_s new_qos_attr); 586 struct bfa_rport_qos_attr_s new_qos_attr);
587void bfa_cb_rport_scn_online(struct bfa_s *bfa);
588void bfa_cb_rport_scn_offline(struct bfa_s *bfa);
589void bfa_cb_rport_scn_no_dev(void *rp);
578void bfa_cb_rport_qos_scn_prio(void *rport, 590void bfa_cb_rport_qos_scn_prio(void *rport,
579 struct bfa_rport_qos_attr_s old_qos_attr, 591 struct bfa_rport_qos_attr_s old_qos_attr,
580 struct bfa_rport_qos_attr_s new_qos_attr); 592 struct bfa_rport_qos_attr_s new_qos_attr);
@@ -697,11 +709,21 @@ struct bfa_fcdiag_lb_s {
697 u32 status; 709 u32 status;
698}; 710};
699 711
712struct bfa_dport_s {
713 struct bfa_s *bfa; /* Back pointer to BFA */
714 bfa_sm_t sm; /* finite state machine */
715 u32 msgtag; /* firmware msg tag for reply */
716 struct bfa_reqq_wait_s reqq_wait;
717 bfa_cb_diag_t cbfn;
718 void *cbarg;
719};
720
700struct bfa_fcdiag_s { 721struct bfa_fcdiag_s {
701 struct bfa_s *bfa; /* Back pointer to BFA */ 722 struct bfa_s *bfa; /* Back pointer to BFA */
702 struct bfa_trc_mod_s *trcmod; 723 struct bfa_trc_mod_s *trcmod;
703 struct bfa_fcdiag_lb_s lb; 724 struct bfa_fcdiag_lb_s lb;
704 struct bfa_fcdiag_qtest_s qtest; 725 struct bfa_fcdiag_qtest_s qtest;
726 struct bfa_dport_s dport;
705}; 727};
706 728
707#define BFA_FCDIAG_MOD(__bfa) (&(__bfa)->modules.fcdiag) 729#define BFA_FCDIAG_MOD(__bfa) (&(__bfa)->modules.fcdiag)
@@ -717,5 +739,11 @@ bfa_status_t bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 ignore,
717 u32 queue, struct bfa_diag_qtest_result_s *result, 739 u32 queue, struct bfa_diag_qtest_result_s *result,
718 bfa_cb_diag_t cbfn, void *cbarg); 740 bfa_cb_diag_t cbfn, void *cbarg);
719bfa_status_t bfa_fcdiag_lb_is_running(struct bfa_s *bfa); 741bfa_status_t bfa_fcdiag_lb_is_running(struct bfa_s *bfa);
742bfa_status_t bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn,
743 void *cbarg);
744bfa_status_t bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn,
745 void *cbarg);
746bfa_status_t bfa_dport_get_state(struct bfa_s *bfa,
747 enum bfa_dport_state *state);
720 748
721#endif /* __BFA_SVC_H__ */ 749#endif /* __BFA_SVC_H__ */
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index c37494916a1a..895b0e516e07 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -63,9 +63,9 @@ int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
63u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size; 63u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
64u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2; 64u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
65 65
66#define BFAD_FW_FILE_CB "cbfw.bin" 66#define BFAD_FW_FILE_CB "cbfw-3.1.0.0.bin"
67#define BFAD_FW_FILE_CT "ctfw.bin" 67#define BFAD_FW_FILE_CT "ctfw-3.1.0.0.bin"
68#define BFAD_FW_FILE_CT2 "ct2fw.bin" 68#define BFAD_FW_FILE_CT2 "ct2fw-3.1.0.0.bin"
69 69
70static u32 *bfad_load_fwimg(struct pci_dev *pdev); 70static u32 *bfad_load_fwimg(struct pci_dev *pdev);
71static void bfad_free_fwimg(void); 71static void bfad_free_fwimg(void);
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 0afa39076cef..555e7db94a1c 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -33,7 +33,7 @@ bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
33 /* If IOC is not in disabled state - return */ 33 /* If IOC is not in disabled state - return */
34 if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) { 34 if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
35 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 35 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
36 iocmd->status = BFA_STATUS_IOC_FAILURE; 36 iocmd->status = BFA_STATUS_OK;
37 return rc; 37 return rc;
38 } 38 }
39 39
@@ -54,6 +54,12 @@ bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
54 unsigned long flags; 54 unsigned long flags;
55 55
56 spin_lock_irqsave(&bfad->bfad_lock, flags); 56 spin_lock_irqsave(&bfad->bfad_lock, flags);
57 if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
58 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
59 iocmd->status = BFA_STATUS_OK;
60 return rc;
61 }
62
57 if (bfad->disable_active) { 63 if (bfad->disable_active) {
58 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 64 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
59 return -EBUSY; 65 return -EBUSY;
@@ -101,9 +107,10 @@ bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
101 107
102 /* set adapter hw path */ 108 /* set adapter hw path */
103 strcpy(iocmd->adapter_hwpath, bfad->pci_name); 109 strcpy(iocmd->adapter_hwpath, bfad->pci_name);
104 i = strlen(iocmd->adapter_hwpath) - 1; 110 for (i = 0; iocmd->adapter_hwpath[i] != ':' && i < BFA_STRING_32; i++)
105 while (iocmd->adapter_hwpath[i] != '.') 111 ;
106 i--; 112 for (; iocmd->adapter_hwpath[++i] != ':' && i < BFA_STRING_32; )
113 ;
107 iocmd->adapter_hwpath[i] = '\0'; 114 iocmd->adapter_hwpath[i] = '\0';
108 iocmd->status = BFA_STATUS_OK; 115 iocmd->status = BFA_STATUS_OK;
109 return 0; 116 return 0;
@@ -880,6 +887,19 @@ out:
880} 887}
881 888
882int 889int
890bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd)
891{
892 struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd;
893 unsigned long flags;
894
895 spin_lock_irqsave(&bfad->bfad_lock, flags);
896 iocmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &iocmd->qos_bw);
897 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
898
899 return 0;
900}
901
902int
883bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd) 903bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
884{ 904{
885 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; 905 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
@@ -888,16 +908,22 @@ bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
888 908
889 spin_lock_irqsave(&bfad->bfad_lock, flags); 909 spin_lock_irqsave(&bfad->bfad_lock, flags);
890 910
891 if (cmd == IOCMD_RATELIM_ENABLE) 911 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
892 fcport->cfg.ratelimit = BFA_TRUE; 912 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
893 else if (cmd == IOCMD_RATELIM_DISABLE) 913 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
894 fcport->cfg.ratelimit = BFA_FALSE; 914 else {
915 if (cmd == IOCMD_RATELIM_ENABLE)
916 fcport->cfg.ratelimit = BFA_TRUE;
917 else if (cmd == IOCMD_RATELIM_DISABLE)
918 fcport->cfg.ratelimit = BFA_FALSE;
895 919
896 if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN) 920 if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
897 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS; 921 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
922
923 iocmd->status = BFA_STATUS_OK;
924 }
898 925
899 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 926 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
900 iocmd->status = BFA_STATUS_OK;
901 927
902 return 0; 928 return 0;
903} 929}
@@ -919,8 +945,13 @@ bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
919 return 0; 945 return 0;
920 } 946 }
921 947
922 fcport->cfg.trl_def_speed = iocmd->speed; 948 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
923 iocmd->status = BFA_STATUS_OK; 949 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
950 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
951 else {
952 fcport->cfg.trl_def_speed = iocmd->speed;
953 iocmd->status = BFA_STATUS_OK;
954 }
924 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 955 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
925 956
926 return 0; 957 return 0;
@@ -1167,8 +1198,8 @@ bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
1167 spin_lock_irqsave(&bfad->bfad_lock, flags); 1198 spin_lock_irqsave(&bfad->bfad_lock, flags);
1168 iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk, 1199 iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
1169 &iocmd->pcifn_id, iocmd->port, 1200 &iocmd->pcifn_id, iocmd->port,
1170 iocmd->pcifn_class, iocmd->bandwidth, 1201 iocmd->pcifn_class, iocmd->bw_min,
1171 bfad_hcb_comp, &fcomp); 1202 iocmd->bw_max, bfad_hcb_comp, &fcomp);
1172 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1203 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1173 if (iocmd->status != BFA_STATUS_OK) 1204 if (iocmd->status != BFA_STATUS_OK)
1174 goto out; 1205 goto out;
@@ -1211,8 +1242,8 @@ bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
1211 init_completion(&fcomp.comp); 1242 init_completion(&fcomp.comp);
1212 spin_lock_irqsave(&bfad->bfad_lock, flags); 1243 spin_lock_irqsave(&bfad->bfad_lock, flags);
1213 iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk, 1244 iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
1214 iocmd->pcifn_id, iocmd->bandwidth, 1245 iocmd->pcifn_id, iocmd->bw_min,
1215 bfad_hcb_comp, &fcomp); 1246 iocmd->bw_max, bfad_hcb_comp, &fcomp);
1216 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1247 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1217 bfa_trc(bfad, iocmd->status); 1248 bfa_trc(bfad, iocmd->status);
1218 if (iocmd->status != BFA_STATUS_OK) 1249 if (iocmd->status != BFA_STATUS_OK)
@@ -1736,6 +1767,52 @@ bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
1736} 1767}
1737 1768
1738int 1769int
1770bfad_iocmd_diag_cfg_dport(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
1771{
1772 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
1773 unsigned long flags;
1774 struct bfad_hal_comp fcomp;
1775
1776 init_completion(&fcomp.comp);
1777 spin_lock_irqsave(&bfad->bfad_lock, flags);
1778 if (cmd == IOCMD_DIAG_DPORT_ENABLE)
1779 iocmd->status = bfa_dport_enable(&bfad->bfa,
1780 bfad_hcb_comp, &fcomp);
1781 else if (cmd == IOCMD_DIAG_DPORT_DISABLE)
1782 iocmd->status = bfa_dport_disable(&bfad->bfa,
1783 bfad_hcb_comp, &fcomp);
1784 else {
1785 bfa_trc(bfad, 0);
1786 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1787 return -EINVAL;
1788 }
1789 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1790
1791 if (iocmd->status != BFA_STATUS_OK)
1792 bfa_trc(bfad, iocmd->status);
1793 else {
1794 wait_for_completion(&fcomp.comp);
1795 iocmd->status = fcomp.status;
1796 }
1797
1798 return 0;
1799}
1800
1801int
1802bfad_iocmd_diag_dport_get_state(struct bfad_s *bfad, void *pcmd)
1803{
1804 struct bfa_bsg_diag_dport_get_state_s *iocmd =
1805 (struct bfa_bsg_diag_dport_get_state_s *)pcmd;
1806 unsigned long flags;
1807
1808 spin_lock_irqsave(&bfad->bfad_lock, flags);
1809 iocmd->status = bfa_dport_get_state(&bfad->bfa, &iocmd->state);
1810 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1811
1812 return 0;
1813}
1814
1815int
1739bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd) 1816bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
1740{ 1817{
1741 struct bfa_bsg_phy_attr_s *iocmd = 1818 struct bfa_bsg_phy_attr_s *iocmd =
@@ -2052,7 +2129,7 @@ bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
2052 init_completion(&fcomp.comp); 2129 init_completion(&fcomp.comp);
2053 spin_lock_irqsave(&bfad->bfad_lock, flags); 2130 spin_lock_irqsave(&bfad->bfad_lock, flags);
2054 iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa), 2131 iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
2055 BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn), 2132 BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
2056 &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0, 2133 &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
2057 bfad_hcb_comp, &fcomp); 2134 bfad_hcb_comp, &fcomp);
2058 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2135 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -2074,7 +2151,7 @@ bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
2074 init_completion(&fcomp.comp); 2151 init_completion(&fcomp.comp);
2075 spin_lock_irqsave(&bfad->bfad_lock, flags); 2152 spin_lock_irqsave(&bfad->bfad_lock, flags);
2076 iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), 2153 iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
2077 BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn), 2154 BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
2078 &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0, 2155 &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
2079 bfad_hcb_comp, &fcomp); 2156 bfad_hcb_comp, &fcomp);
2080 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2157 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -2161,22 +2238,31 @@ bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2161 2238
2162 spin_lock_irqsave(&bfad->bfad_lock, flags); 2239 spin_lock_irqsave(&bfad->bfad_lock, flags);
2163 2240
2164 if (v_cmd == IOCMD_TRUNK_ENABLE) { 2241 if (bfa_fcport_is_dport(&bfad->bfa))
2165 trunk->attr.state = BFA_TRUNK_OFFLINE; 2242 return BFA_STATUS_DPORT_ERR;
2166 bfa_fcport_disable(&bfad->bfa);
2167 fcport->cfg.trunked = BFA_TRUE;
2168 } else if (v_cmd == IOCMD_TRUNK_DISABLE) {
2169 trunk->attr.state = BFA_TRUNK_DISABLED;
2170 bfa_fcport_disable(&bfad->bfa);
2171 fcport->cfg.trunked = BFA_FALSE;
2172 }
2173 2243
2174 if (!bfa_fcport_is_disabled(&bfad->bfa)) 2244 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
2175 bfa_fcport_enable(&bfad->bfa); 2245 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2246 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2247 else {
2248 if (v_cmd == IOCMD_TRUNK_ENABLE) {
2249 trunk->attr.state = BFA_TRUNK_OFFLINE;
2250 bfa_fcport_disable(&bfad->bfa);
2251 fcport->cfg.trunked = BFA_TRUE;
2252 } else if (v_cmd == IOCMD_TRUNK_DISABLE) {
2253 trunk->attr.state = BFA_TRUNK_DISABLED;
2254 bfa_fcport_disable(&bfad->bfa);
2255 fcport->cfg.trunked = BFA_FALSE;
2256 }
2257
2258 if (!bfa_fcport_is_disabled(&bfad->bfa))
2259 bfa_fcport_enable(&bfad->bfa);
2260
2261 iocmd->status = BFA_STATUS_OK;
2262 }
2176 2263
2177 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2264 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2178 2265
2179 iocmd->status = BFA_STATUS_OK;
2180 return 0; 2266 return 0;
2181} 2267}
2182 2268
@@ -2189,12 +2275,17 @@ bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
2189 unsigned long flags; 2275 unsigned long flags;
2190 2276
2191 spin_lock_irqsave(&bfad->bfad_lock, flags); 2277 spin_lock_irqsave(&bfad->bfad_lock, flags);
2192 memcpy((void *)&iocmd->attr, (void *)&trunk->attr, 2278 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
2193 sizeof(struct bfa_trunk_attr_s)); 2279 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2194 iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa); 2280 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2281 else {
2282 memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
2283 sizeof(struct bfa_trunk_attr_s));
2284 iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
2285 iocmd->status = BFA_STATUS_OK;
2286 }
2195 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2287 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2196 2288
2197 iocmd->status = BFA_STATUS_OK;
2198 return 0; 2289 return 0;
2199} 2290}
2200 2291
@@ -2207,14 +2298,22 @@ bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2207 2298
2208 spin_lock_irqsave(&bfad->bfad_lock, flags); 2299 spin_lock_irqsave(&bfad->bfad_lock, flags);
2209 if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) { 2300 if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
2210 if (v_cmd == IOCMD_QOS_ENABLE) 2301 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2211 fcport->cfg.qos_enabled = BFA_TRUE; 2302 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2212 else if (v_cmd == IOCMD_QOS_DISABLE) 2303 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2213 fcport->cfg.qos_enabled = BFA_FALSE; 2304 else {
2305 if (v_cmd == IOCMD_QOS_ENABLE)
2306 fcport->cfg.qos_enabled = BFA_TRUE;
2307 else if (v_cmd == IOCMD_QOS_DISABLE) {
2308 fcport->cfg.qos_enabled = BFA_FALSE;
2309 fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH;
2310 fcport->cfg.qos_bw.med = BFA_QOS_BW_MED;
2311 fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW;
2312 }
2313 }
2214 } 2314 }
2215 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2315 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2216 2316
2217 iocmd->status = BFA_STATUS_OK;
2218 return 0; 2317 return 0;
2219} 2318}
2220 2319
@@ -2226,11 +2325,21 @@ bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
2226 unsigned long flags; 2325 unsigned long flags;
2227 2326
2228 spin_lock_irqsave(&bfad->bfad_lock, flags); 2327 spin_lock_irqsave(&bfad->bfad_lock, flags);
2229 iocmd->attr.state = fcport->qos_attr.state; 2328 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2230 iocmd->attr.total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr); 2329 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2330 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2331 else {
2332 iocmd->attr.state = fcport->qos_attr.state;
2333 iocmd->attr.total_bb_cr =
2334 be32_to_cpu(fcport->qos_attr.total_bb_cr);
2335 iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high;
2336 iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med;
2337 iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low;
2338 iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op;
2339 iocmd->status = BFA_STATUS_OK;
2340 }
2231 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2341 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2232 2342
2233 iocmd->status = BFA_STATUS_OK;
2234 return 0; 2343 return 0;
2235} 2344}
2236 2345
@@ -2274,6 +2383,7 @@ bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
2274 struct bfad_hal_comp fcomp; 2383 struct bfad_hal_comp fcomp;
2275 unsigned long flags; 2384 unsigned long flags;
2276 struct bfa_cb_pending_q_s cb_qe; 2385 struct bfa_cb_pending_q_s cb_qe;
2386 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2277 2387
2278 init_completion(&fcomp.comp); 2388 init_completion(&fcomp.comp);
2279 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, 2389 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
@@ -2281,7 +2391,11 @@ bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
2281 2391
2282 spin_lock_irqsave(&bfad->bfad_lock, flags); 2392 spin_lock_irqsave(&bfad->bfad_lock, flags);
2283 WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); 2393 WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
2284 iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe); 2394 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2395 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2396 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2397 else
2398 iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
2285 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2399 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2286 if (iocmd->status != BFA_STATUS_OK) { 2400 if (iocmd->status != BFA_STATUS_OK) {
2287 bfa_trc(bfad, iocmd->status); 2401 bfa_trc(bfad, iocmd->status);
@@ -2300,6 +2414,7 @@ bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
2300 struct bfad_hal_comp fcomp; 2414 struct bfad_hal_comp fcomp;
2301 unsigned long flags; 2415 unsigned long flags;
2302 struct bfa_cb_pending_q_s cb_qe; 2416 struct bfa_cb_pending_q_s cb_qe;
2417 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2303 2418
2304 init_completion(&fcomp.comp); 2419 init_completion(&fcomp.comp);
2305 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, 2420 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
@@ -2307,7 +2422,11 @@ bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
2307 2422
2308 spin_lock_irqsave(&bfad->bfad_lock, flags); 2423 spin_lock_irqsave(&bfad->bfad_lock, flags);
2309 WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); 2424 WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
2310 iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe); 2425 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2426 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2427 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2428 else
2429 iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
2311 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2430 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2312 if (iocmd->status != BFA_STATUS_OK) { 2431 if (iocmd->status != BFA_STATUS_OK) {
2313 bfa_trc(bfad, iocmd->status); 2432 bfa_trc(bfad, iocmd->status);
@@ -2435,6 +2554,139 @@ bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2435 return 0; 2554 return 0;
2436} 2555}
2437 2556
2557int
2558bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd)
2559{
2560 struct bfa_bsg_fcpim_throttle_s *iocmd =
2561 (struct bfa_bsg_fcpim_throttle_s *)cmd;
2562 unsigned long flags;
2563
2564 spin_lock_irqsave(&bfad->bfad_lock, flags);
2565 iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa,
2566 (void *)&iocmd->throttle);
2567 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2568
2569 return 0;
2570}
2571
2572int
2573bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd)
2574{
2575 struct bfa_bsg_fcpim_throttle_s *iocmd =
2576 (struct bfa_bsg_fcpim_throttle_s *)cmd;
2577 unsigned long flags;
2578
2579 spin_lock_irqsave(&bfad->bfad_lock, flags);
2580 iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa,
2581 iocmd->throttle.cfg_value);
2582 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2583
2584 return 0;
2585}
2586
2587int
2588bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd)
2589{
2590 struct bfa_bsg_tfru_s *iocmd =
2591 (struct bfa_bsg_tfru_s *)cmd;
2592 struct bfad_hal_comp fcomp;
2593 unsigned long flags = 0;
2594
2595 init_completion(&fcomp.comp);
2596 spin_lock_irqsave(&bfad->bfad_lock, flags);
2597 iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa),
2598 &iocmd->data, iocmd->len, iocmd->offset,
2599 bfad_hcb_comp, &fcomp);
2600 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2601 if (iocmd->status == BFA_STATUS_OK) {
2602 wait_for_completion(&fcomp.comp);
2603 iocmd->status = fcomp.status;
2604 }
2605
2606 return 0;
2607}
2608
2609int
2610bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd)
2611{
2612 struct bfa_bsg_tfru_s *iocmd =
2613 (struct bfa_bsg_tfru_s *)cmd;
2614 struct bfad_hal_comp fcomp;
2615 unsigned long flags = 0;
2616
2617 init_completion(&fcomp.comp);
2618 spin_lock_irqsave(&bfad->bfad_lock, flags);
2619 iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa),
2620 &iocmd->data, iocmd->len, iocmd->offset,
2621 bfad_hcb_comp, &fcomp);
2622 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2623 if (iocmd->status == BFA_STATUS_OK) {
2624 wait_for_completion(&fcomp.comp);
2625 iocmd->status = fcomp.status;
2626 }
2627
2628 return 0;
2629}
2630
2631int
2632bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd)
2633{
2634 struct bfa_bsg_fruvpd_s *iocmd =
2635 (struct bfa_bsg_fruvpd_s *)cmd;
2636 struct bfad_hal_comp fcomp;
2637 unsigned long flags = 0;
2638
2639 init_completion(&fcomp.comp);
2640 spin_lock_irqsave(&bfad->bfad_lock, flags);
2641 iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa),
2642 &iocmd->data, iocmd->len, iocmd->offset,
2643 bfad_hcb_comp, &fcomp);
2644 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2645 if (iocmd->status == BFA_STATUS_OK) {
2646 wait_for_completion(&fcomp.comp);
2647 iocmd->status = fcomp.status;
2648 }
2649
2650 return 0;
2651}
2652
2653int
2654bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd)
2655{
2656 struct bfa_bsg_fruvpd_s *iocmd =
2657 (struct bfa_bsg_fruvpd_s *)cmd;
2658 struct bfad_hal_comp fcomp;
2659 unsigned long flags = 0;
2660
2661 init_completion(&fcomp.comp);
2662 spin_lock_irqsave(&bfad->bfad_lock, flags);
2663 iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa),
2664 &iocmd->data, iocmd->len, iocmd->offset,
2665 bfad_hcb_comp, &fcomp);
2666 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2667 if (iocmd->status == BFA_STATUS_OK) {
2668 wait_for_completion(&fcomp.comp);
2669 iocmd->status = fcomp.status;
2670 }
2671
2672 return 0;
2673}
2674
2675int
2676bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd)
2677{
2678 struct bfa_bsg_fruvpd_max_size_s *iocmd =
2679 (struct bfa_bsg_fruvpd_max_size_s *)cmd;
2680 unsigned long flags = 0;
2681
2682 spin_lock_irqsave(&bfad->bfad_lock, flags);
2683 iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa),
2684 &iocmd->max_size);
2685 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2686
2687 return 0;
2688}
2689
2438static int 2690static int
2439bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, 2691bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
2440 unsigned int payload_len) 2692 unsigned int payload_len)
@@ -2660,6 +2912,13 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
2660 case IOCMD_DIAG_LB_STAT: 2912 case IOCMD_DIAG_LB_STAT:
2661 rc = bfad_iocmd_diag_lb_stat(bfad, iocmd); 2913 rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
2662 break; 2914 break;
2915 case IOCMD_DIAG_DPORT_ENABLE:
2916 case IOCMD_DIAG_DPORT_DISABLE:
2917 rc = bfad_iocmd_diag_cfg_dport(bfad, cmd, iocmd);
2918 break;
2919 case IOCMD_DIAG_DPORT_GET_STATE:
2920 rc = bfad_iocmd_diag_dport_get_state(bfad, iocmd);
2921 break;
2663 case IOCMD_PHY_GET_ATTR: 2922 case IOCMD_PHY_GET_ATTR:
2664 rc = bfad_iocmd_phy_get_attr(bfad, iocmd); 2923 rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
2665 break; 2924 break;
@@ -2741,6 +3000,9 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
2741 case IOCMD_QOS_RESET_STATS: 3000 case IOCMD_QOS_RESET_STATS:
2742 rc = bfad_iocmd_qos_reset_stats(bfad, iocmd); 3001 rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
2743 break; 3002 break;
3003 case IOCMD_QOS_SET_BW:
3004 rc = bfad_iocmd_qos_set_bw(bfad, iocmd);
3005 break;
2744 case IOCMD_VF_GET_STATS: 3006 case IOCMD_VF_GET_STATS:
2745 rc = bfad_iocmd_vf_get_stats(bfad, iocmd); 3007 rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
2746 break; 3008 break;
@@ -2759,6 +3021,29 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
2759 case IOCMD_FCPIM_LUNMASK_DELETE: 3021 case IOCMD_FCPIM_LUNMASK_DELETE:
2760 rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd); 3022 rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
2761 break; 3023 break;
3024 case IOCMD_FCPIM_THROTTLE_QUERY:
3025 rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd);
3026 break;
3027 case IOCMD_FCPIM_THROTTLE_SET:
3028 rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd);
3029 break;
3030 /* TFRU */
3031 case IOCMD_TFRU_READ:
3032 rc = bfad_iocmd_tfru_read(bfad, iocmd);
3033 break;
3034 case IOCMD_TFRU_WRITE:
3035 rc = bfad_iocmd_tfru_write(bfad, iocmd);
3036 break;
3037 /* FRU */
3038 case IOCMD_FRUVPD_READ:
3039 rc = bfad_iocmd_fruvpd_read(bfad, iocmd);
3040 break;
3041 case IOCMD_FRUVPD_UPDATE:
3042 rc = bfad_iocmd_fruvpd_update(bfad, iocmd);
3043 break;
3044 case IOCMD_FRUVPD_GET_MAX_SIZE:
3045 rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd);
3046 break;
2762 default: 3047 default:
2763 rc = -EINVAL; 3048 rc = -EINVAL;
2764 break; 3049 break;
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
index 8c569ddb750d..15e1fc8e796b 100644
--- a/drivers/scsi/bfa/bfad_bsg.h
+++ b/drivers/scsi/bfa/bfad_bsg.h
@@ -141,6 +141,17 @@ enum {
141 IOCMD_FCPIM_LUNMASK_QUERY, 141 IOCMD_FCPIM_LUNMASK_QUERY,
142 IOCMD_FCPIM_LUNMASK_ADD, 142 IOCMD_FCPIM_LUNMASK_ADD,
143 IOCMD_FCPIM_LUNMASK_DELETE, 143 IOCMD_FCPIM_LUNMASK_DELETE,
144 IOCMD_DIAG_DPORT_ENABLE,
145 IOCMD_DIAG_DPORT_DISABLE,
146 IOCMD_DIAG_DPORT_GET_STATE,
147 IOCMD_QOS_SET_BW,
148 IOCMD_FCPIM_THROTTLE_QUERY,
149 IOCMD_FCPIM_THROTTLE_SET,
150 IOCMD_TFRU_READ,
151 IOCMD_TFRU_WRITE,
152 IOCMD_FRUVPD_READ,
153 IOCMD_FRUVPD_UPDATE,
154 IOCMD_FRUVPD_GET_MAX_SIZE,
144}; 155};
145 156
146struct bfa_bsg_gen_s { 157struct bfa_bsg_gen_s {
@@ -463,7 +474,8 @@ struct bfa_bsg_pcifn_s {
463 bfa_status_t status; 474 bfa_status_t status;
464 u16 bfad_num; 475 u16 bfad_num;
465 u16 pcifn_id; 476 u16 pcifn_id;
466 u32 bandwidth; 477 u16 bw_min;
478 u16 bw_max;
467 u8 port; 479 u8 port;
468 enum bfi_pcifn_class pcifn_class; 480 enum bfi_pcifn_class pcifn_class;
469 u8 rsvd[1]; 481 u8 rsvd[1];
@@ -613,6 +625,13 @@ struct bfa_bsg_diag_lb_stat_s {
613 u16 rsvd; 625 u16 rsvd;
614}; 626};
615 627
628struct bfa_bsg_diag_dport_get_state_s {
629 bfa_status_t status;
630 u16 bfad_num;
631 u16 rsvd;
632 enum bfa_dport_state state;
633};
634
616struct bfa_bsg_phy_attr_s { 635struct bfa_bsg_phy_attr_s {
617 bfa_status_t status; 636 bfa_status_t status;
618 u16 bfad_num; 637 u16 bfad_num;
@@ -694,6 +713,13 @@ struct bfa_bsg_qos_vc_attr_s {
694 struct bfa_qos_vc_attr_s attr; 713 struct bfa_qos_vc_attr_s attr;
695}; 714};
696 715
716struct bfa_bsg_qos_bw_s {
717 bfa_status_t status;
718 u16 bfad_num;
719 u16 rsvd;
720 struct bfa_qos_bw_s qos_bw;
721};
722
697struct bfa_bsg_vf_stats_s { 723struct bfa_bsg_vf_stats_s {
698 bfa_status_t status; 724 bfa_status_t status;
699 u16 bfad_num; 725 u16 bfad_num;
@@ -722,6 +748,41 @@ struct bfa_bsg_fcpim_lunmask_s {
722 struct scsi_lun lun; 748 struct scsi_lun lun;
723}; 749};
724 750
751struct bfa_bsg_fcpim_throttle_s {
752 bfa_status_t status;
753 u16 bfad_num;
754 u16 vf_id;
755 struct bfa_defs_fcpim_throttle_s throttle;
756};
757
758#define BFA_TFRU_DATA_SIZE 64
759#define BFA_MAX_FRUVPD_TRANSFER_SIZE 0x1000
760
761struct bfa_bsg_tfru_s {
762 bfa_status_t status;
763 u16 bfad_num;
764 u16 rsvd;
765 u32 offset;
766 u32 len;
767 u8 data[BFA_TFRU_DATA_SIZE];
768};
769
770struct bfa_bsg_fruvpd_s {
771 bfa_status_t status;
772 u16 bfad_num;
773 u16 rsvd;
774 u32 offset;
775 u32 len;
776 u8 data[BFA_MAX_FRUVPD_TRANSFER_SIZE];
777};
778
779struct bfa_bsg_fruvpd_max_size_s {
780 bfa_status_t status;
781 u16 bfad_num;
782 u16 rsvd;
783 u32 max_size;
784};
785
725struct bfa_bsg_fcpt_s { 786struct bfa_bsg_fcpt_s {
726 bfa_status_t status; 787 bfa_status_t status;
727 u16 vf_id; 788 u16 vf_id;
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 1840651ce1d4..0c64a04f01fa 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -57,7 +57,7 @@
57#ifdef BFA_DRIVER_VERSION 57#ifdef BFA_DRIVER_VERSION
58#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION 58#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
59#else 59#else
60#define BFAD_DRIVER_VERSION "3.1.2.0" 60#define BFAD_DRIVER_VERSION "3.1.2.1"
61#endif 61#endif
62 62
63#define BFAD_PROTO_NAME FCPI_NAME 63#define BFAD_PROTO_NAME FCPI_NAME
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
index b2ba0b2e91b2..57b146bca18c 100644
--- a/drivers/scsi/bfa/bfi.h
+++ b/drivers/scsi/bfa/bfi.h
@@ -210,7 +210,8 @@ enum bfi_mclass {
210 BFI_MC_PORT = 21, /* Physical port */ 210 BFI_MC_PORT = 21, /* Physical port */
211 BFI_MC_SFP = 22, /* SFP module */ 211 BFI_MC_SFP = 22, /* SFP module */
212 BFI_MC_PHY = 25, /* External PHY message class */ 212 BFI_MC_PHY = 25, /* External PHY message class */
213 BFI_MC_MAX = 32 213 BFI_MC_FRU = 34,
214 BFI_MC_MAX = 35
214}; 215};
215 216
216#define BFI_IOC_MAX_CQS 4 217#define BFI_IOC_MAX_CQS 4
@@ -288,6 +289,9 @@ struct bfi_ioc_attr_s {
288 char optrom_version[BFA_VERSION_LEN]; 289 char optrom_version[BFA_VERSION_LEN];
289 struct bfa_mfg_vpd_s vpd; 290 struct bfa_mfg_vpd_s vpd;
290 u32 card_type; /* card type */ 291 u32 card_type; /* card type */
292 u8 mfg_day; /* manufacturing day */
293 u8 mfg_month; /* manufacturing month */
294 u16 mfg_year; /* manufacturing year */
291}; 295};
292 296
293/* 297/*
@@ -687,7 +691,8 @@ struct bfi_ablk_h2i_pf_req_s {
687 u8 pcifn; 691 u8 pcifn;
688 u8 port; 692 u8 port;
689 u16 pers; 693 u16 pers;
690 u32 bw; 694 u16 bw_min; /* percent BW @ max speed */
695 u16 bw_max; /* percent BW @ max speed */
691}; 696};
692 697
693/* BFI_ABLK_H2I_OPTROM_ENABLE, BFI_ABLK_H2I_OPTROM_DISABLE */ 698/* BFI_ABLK_H2I_OPTROM_ENABLE, BFI_ABLK_H2I_OPTROM_DISABLE */
@@ -957,6 +962,7 @@ enum bfi_diag_h2i {
957 BFI_DIAG_H2I_TEMPSENSOR = 4, 962 BFI_DIAG_H2I_TEMPSENSOR = 4,
958 BFI_DIAG_H2I_LEDTEST = 5, 963 BFI_DIAG_H2I_LEDTEST = 5,
959 BFI_DIAG_H2I_QTEST = 6, 964 BFI_DIAG_H2I_QTEST = 6,
965 BFI_DIAG_H2I_DPORT = 7,
960}; 966};
961 967
962enum bfi_diag_i2h { 968enum bfi_diag_i2h {
@@ -966,6 +972,7 @@ enum bfi_diag_i2h {
966 BFI_DIAG_I2H_TEMPSENSOR = BFA_I2HM(BFI_DIAG_H2I_TEMPSENSOR), 972 BFI_DIAG_I2H_TEMPSENSOR = BFA_I2HM(BFI_DIAG_H2I_TEMPSENSOR),
967 BFI_DIAG_I2H_LEDTEST = BFA_I2HM(BFI_DIAG_H2I_LEDTEST), 973 BFI_DIAG_I2H_LEDTEST = BFA_I2HM(BFI_DIAG_H2I_LEDTEST),
968 BFI_DIAG_I2H_QTEST = BFA_I2HM(BFI_DIAG_H2I_QTEST), 974 BFI_DIAG_I2H_QTEST = BFA_I2HM(BFI_DIAG_H2I_QTEST),
975 BFI_DIAG_I2H_DPORT = BFA_I2HM(BFI_DIAG_H2I_DPORT),
969}; 976};
970 977
971#define BFI_DIAG_MAX_SGES 2 978#define BFI_DIAG_MAX_SGES 2
@@ -1052,6 +1059,23 @@ struct bfi_diag_qtest_req_s {
1052#define bfi_diag_qtest_rsp_t struct bfi_diag_qtest_req_s 1059#define bfi_diag_qtest_rsp_t struct bfi_diag_qtest_req_s
1053 1060
1054/* 1061/*
1062 * D-port test
1063 */
1064enum bfi_dport_req {
1065 BFI_DPORT_DISABLE = 0, /* disable dport request */
1066 BFI_DPORT_ENABLE = 1, /* enable dport request */
1067};
1068
1069struct bfi_diag_dport_req_s {
1070 struct bfi_mhdr_s mh; /* 4 bytes */
1071 u8 req; /* request 1: enable 0: disable */
1072 u8 status; /* reply status */
1073 u8 rsvd[2];
1074 u32 msgtag; /* msgtag for reply */
1075};
1076#define bfi_diag_dport_rsp_t struct bfi_diag_dport_req_s
1077
1078/*
1055 * PHY module specific 1079 * PHY module specific
1056 */ 1080 */
1057enum bfi_phy_h2i_msgs_e { 1081enum bfi_phy_h2i_msgs_e {
@@ -1147,6 +1171,50 @@ struct bfi_phy_write_rsp_s {
1147 u32 length; 1171 u32 length;
1148}; 1172};
1149 1173
1174enum bfi_fru_h2i_msgs {
1175 BFI_FRUVPD_H2I_WRITE_REQ = 1,
1176 BFI_FRUVPD_H2I_READ_REQ = 2,
1177 BFI_TFRU_H2I_WRITE_REQ = 3,
1178 BFI_TFRU_H2I_READ_REQ = 4,
1179};
1180
1181enum bfi_fru_i2h_msgs {
1182 BFI_FRUVPD_I2H_WRITE_RSP = BFA_I2HM(1),
1183 BFI_FRUVPD_I2H_READ_RSP = BFA_I2HM(2),
1184 BFI_TFRU_I2H_WRITE_RSP = BFA_I2HM(3),
1185 BFI_TFRU_I2H_READ_RSP = BFA_I2HM(4),
1186};
1187
1188/*
1189 * FRU write request
1190 */
1191struct bfi_fru_write_req_s {
1192 struct bfi_mhdr_s mh; /* Common msg header */
1193 u8 last;
1194 u8 rsv[3];
1195 u32 offset;
1196 u32 length;
1197 struct bfi_alen_s alen;
1198};
1199
1200/*
1201 * FRU read request
1202 */
1203struct bfi_fru_read_req_s {
1204 struct bfi_mhdr_s mh; /* Common msg header */
1205 u32 offset;
1206 u32 length;
1207 struct bfi_alen_s alen;
1208};
1209
1210/*
1211 * FRU response
1212 */
1213struct bfi_fru_rsp_s {
1214 struct bfi_mhdr_s mh; /* Common msg header */
1215 u32 status;
1216 u32 length;
1217};
1150#pragma pack() 1218#pragma pack()
1151 1219
1152#endif /* __BFI_H__ */ 1220#endif /* __BFI_H__ */
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h
index d4220e13cafa..5ae2c167b2c8 100644
--- a/drivers/scsi/bfa/bfi_ms.h
+++ b/drivers/scsi/bfa/bfi_ms.h
@@ -426,6 +426,7 @@ struct bfi_lps_login_req_s {
426 u8 auth_en; 426 u8 auth_en;
427 u8 lps_role; 427 u8 lps_role;
428 u8 bb_scn; 428 u8 bb_scn;
429 u32 vvl_flag;
429}; 430};
430 431
431struct bfi_lps_login_rsp_s { 432struct bfi_lps_login_rsp_s {
@@ -499,6 +500,9 @@ enum bfi_rport_i2h_msgs {
499 BFI_RPORT_I2H_CREATE_RSP = BFA_I2HM(1), 500 BFI_RPORT_I2H_CREATE_RSP = BFA_I2HM(1),
500 BFI_RPORT_I2H_DELETE_RSP = BFA_I2HM(2), 501 BFI_RPORT_I2H_DELETE_RSP = BFA_I2HM(2),
501 BFI_RPORT_I2H_QOS_SCN = BFA_I2HM(3), 502 BFI_RPORT_I2H_QOS_SCN = BFA_I2HM(3),
503 BFI_RPORT_I2H_LIP_SCN_ONLINE = BFA_I2HM(4),
504 BFI_RPORT_I2H_LIP_SCN_OFFLINE = BFA_I2HM(5),
505 BFI_RPORT_I2H_NO_DEV = BFA_I2HM(6),
502}; 506};
503 507
504struct bfi_rport_create_req_s { 508struct bfi_rport_create_req_s {
@@ -551,6 +555,14 @@ struct bfi_rport_qos_scn_s {
551 struct bfa_rport_qos_attr_s new_qos_attr; /* New QoS Attributes */ 555 struct bfa_rport_qos_attr_s new_qos_attr; /* New QoS Attributes */
552}; 556};
553 557
558struct bfi_rport_lip_scn_s {
559 struct bfi_mhdr_s mh; /*!< common msg header */
560 u16 bfa_handle; /*!< host rport handle */
561 u8 status; /*!< scn online status */
562 u8 rsvd;
563 struct bfa_fcport_loop_info_s loop_info;
564};
565
554union bfi_rport_h2i_msg_u { 566union bfi_rport_h2i_msg_u {
555 struct bfi_msg_s *msg; 567 struct bfi_msg_s *msg;
556 struct bfi_rport_create_req_s *create_req; 568 struct bfi_rport_create_req_s *create_req;
@@ -563,6 +575,7 @@ union bfi_rport_i2h_msg_u {
563 struct bfi_rport_create_rsp_s *create_rsp; 575 struct bfi_rport_create_rsp_s *create_rsp;
564 struct bfi_rport_delete_rsp_s *delete_rsp; 576 struct bfi_rport_delete_rsp_s *delete_rsp;
565 struct bfi_rport_qos_scn_s *qos_scn_evt; 577 struct bfi_rport_qos_scn_s *qos_scn_evt;
578 struct bfi_rport_lip_scn_s *lip_scn;
566}; 579};
567 580
568/* 581/*
@@ -828,6 +841,7 @@ enum bfi_tskim_status {
828 */ 841 */
829 BFI_TSKIM_STS_TIMEOUT = 10, /* TM request timedout */ 842 BFI_TSKIM_STS_TIMEOUT = 10, /* TM request timedout */
830 BFI_TSKIM_STS_ABORTED = 11, /* Aborted on host request */ 843 BFI_TSKIM_STS_ABORTED = 11, /* Aborted on host request */
844 BFI_TSKIM_STS_UTAG = 12, /* unknown tag for request */
831}; 845};
832 846
833struct bfi_tskim_rsp_s { 847struct bfi_tskim_rsp_s {
diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h
index ed5f159e1867..99133bcf53f9 100644
--- a/drivers/scsi/bfa/bfi_reg.h
+++ b/drivers/scsi/bfa/bfi_reg.h
@@ -338,6 +338,7 @@ enum {
338#define __A2T_AHB_LOAD 0x00000800 338#define __A2T_AHB_LOAD 0x00000800
339#define __WGN_READY 0x00000400 339#define __WGN_READY 0x00000400
340#define __GLBL_PF_VF_CFG_RDY 0x00000200 340#define __GLBL_PF_VF_CFG_RDY 0x00000200
341#define CT2_NFC_STS_REG 0x00027410
341#define CT2_NFC_CSR_CLR_REG 0x00027420 342#define CT2_NFC_CSR_CLR_REG 0x00027420
342#define CT2_NFC_CSR_SET_REG 0x00027424 343#define CT2_NFC_CSR_SET_REG 0x00027424
343#define __HALT_NFC_CONTROLLER 0x00000002 344#define __HALT_NFC_CONTROLLER 0x00000002
@@ -355,6 +356,8 @@ enum {
355 (CT2_CSI_MAC0_CONTROL_REG + \ 356 (CT2_CSI_MAC0_CONTROL_REG + \
356 (__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG)) 357 (__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG))
357 358
359#define CT2_NFC_FLASH_STS_REG 0x00014834
360#define __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS 0x00000020
358/* 361/*
359 * Name semaphore registers based on usage 362 * Name semaphore registers based on usage
360 */ 363 */
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 078d262ac7cc..666b7ac4475f 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1643,7 +1643,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1643 skb_reset_network_header(skb); 1643 skb_reset_network_header(skb);
1644 skb->mac_len = elen; 1644 skb->mac_len = elen;
1645 skb->protocol = htons(ETH_P_FCOE); 1645 skb->protocol = htons(ETH_P_FCOE);
1646 skb->priority = port->priority; 1646 skb->priority = fcoe->priority;
1647 1647
1648 if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN && 1648 if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
1649 fcoe->realdev->features & NETIF_F_HW_VLAN_TX) { 1649 fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
@@ -1917,7 +1917,6 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
1917 struct fcoe_ctlr *ctlr; 1917 struct fcoe_ctlr *ctlr;
1918 struct fcoe_interface *fcoe; 1918 struct fcoe_interface *fcoe;
1919 struct net_device *netdev; 1919 struct net_device *netdev;
1920 struct fcoe_port *port;
1921 int prio; 1920 int prio;
1922 1921
1923 if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE) 1922 if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE)
@@ -1946,10 +1945,8 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
1946 entry->app.protocol == ETH_P_FCOE) 1945 entry->app.protocol == ETH_P_FCOE)
1947 ctlr->priority = prio; 1946 ctlr->priority = prio;
1948 1947
1949 if (entry->app.protocol == ETH_P_FCOE) { 1948 if (entry->app.protocol == ETH_P_FCOE)
1950 port = lport_priv(ctlr->lp); 1949 fcoe->priority = prio;
1951 port->priority = prio;
1952 }
1953 1950
1954 return NOTIFY_OK; 1951 return NOTIFY_OK;
1955} 1952}
@@ -2180,7 +2177,6 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
2180 u8 fup, up; 2177 u8 fup, up;
2181 struct net_device *netdev = fcoe->realdev; 2178 struct net_device *netdev = fcoe->realdev;
2182 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe); 2179 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
2183 struct fcoe_port *port = lport_priv(ctlr->lp);
2184 struct dcb_app app = { 2180 struct dcb_app app = {
2185 .priority = 0, 2181 .priority = 0,
2186 .protocol = ETH_P_FCOE 2182 .protocol = ETH_P_FCOE
@@ -2202,8 +2198,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
2202 fup = dcb_getapp(netdev, &app); 2198 fup = dcb_getapp(netdev, &app);
2203 } 2199 }
2204 2200
2205 port->priority = ffs(up) ? ffs(up) - 1 : 0; 2201 fcoe->priority = ffs(up) ? ffs(up) - 1 : 0;
2206 ctlr->priority = ffs(fup) ? ffs(fup) - 1 : port->priority; 2202 ctlr->priority = ffs(fup) ? ffs(fup) - 1 : fcoe->priority;
2207 } 2203 }
2208#endif 2204#endif
2209} 2205}
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index a624add4f8ec..b42dc32cb5eb 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -71,6 +71,7 @@ do { \
71 * @oem: The offload exchange manager for all local port 71 * @oem: The offload exchange manager for all local port
72 * instances associated with this port 72 * instances associated with this port
73 * @removed: Indicates fcoe interface removed from net device 73 * @removed: Indicates fcoe interface removed from net device
74 * @priority: Priority for the FCoE packet (DCB)
74 * This structure is 1:1 with a net device. 75 * This structure is 1:1 with a net device.
75 */ 76 */
76struct fcoe_interface { 77struct fcoe_interface {
@@ -81,6 +82,7 @@ struct fcoe_interface {
81 struct packet_type fip_packet_type; 82 struct packet_type fip_packet_type;
82 struct fc_exch_mgr *oem; 83 struct fc_exch_mgr *oem;
83 u8 removed; 84 u8 removed;
85 u8 priority;
84}; 86};
85 87
86#define fcoe_to_ctlr(x) \ 88#define fcoe_to_ctlr(x) \
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 14243fa5f8e8..fcb9d0b20ee4 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -851,7 +851,8 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
851 fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1); 851 fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1);
852 if (flags & FCP_RSP_LEN_VAL) { 852 if (flags & FCP_RSP_LEN_VAL) {
853 respl = ntohl(rp_ex->fr_rsp_len); 853 respl = ntohl(rp_ex->fr_rsp_len);
854 if (respl != sizeof(*fc_rp_info)) 854 if ((respl != FCP_RESP_RSP_INFO_LEN4) &&
855 (respl != FCP_RESP_RSP_INFO_LEN8))
855 goto len_err; 856 goto len_err;
856 if (fsp->wait_for_comp) { 857 if (fsp->wait_for_comp) {
857 /* Abuse cdb_status for rsp code */ 858 /* Abuse cdb_status for rsp code */
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index a184c2443a64..69b59935b53f 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -27,6 +27,8 @@
27 27
28struct lpfc_sli2_slim; 28struct lpfc_sli2_slim;
29 29
30#define ELX_MODEL_NAME_SIZE 80
31
30#define LPFC_PCI_DEV_LP 0x1 32#define LPFC_PCI_DEV_LP 0x1
31#define LPFC_PCI_DEV_OC 0x2 33#define LPFC_PCI_DEV_OC 0x2
32 34
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index b032562aa0d9..ad16e54ac383 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3935,6 +3935,12 @@ MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions");
3935# - Only meaningful if BG is turned on (lpfc_enable_bg=1). 3935# - Only meaningful if BG is turned on (lpfc_enable_bg=1).
3936# - Allows you to ultimately specify which profiles to use 3936# - Allows you to ultimately specify which profiles to use
3937# - Default will result in registering capabilities for all profiles. 3937# - Default will result in registering capabilities for all profiles.
3938# - SHOST_DIF_TYPE1_PROTECTION 1
3939# HBA supports T10 DIF Type 1: HBA to Target Type 1 Protection
3940# - SHOST_DIX_TYPE0_PROTECTION 8
3941# HBA supports DIX Type 0: Host to HBA protection only
3942# - SHOST_DIX_TYPE1_PROTECTION 16
3943# HBA supports DIX Type 1: Host to HBA Type 1 protection
3938# 3944#
3939*/ 3945*/
3940unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION | 3946unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION |
@@ -3947,7 +3953,7 @@ MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
3947/* 3953/*
3948# lpfc_prot_guard: i 3954# lpfc_prot_guard: i
3949# - Bit mask of protection guard types to register with the SCSI mid-layer 3955# - Bit mask of protection guard types to register with the SCSI mid-layer
3950# - Guard types are currently either 1) IP checksum 2) T10-DIF CRC 3956# - Guard types are currently either 1) T10-DIF CRC 2) IP checksum
3951# - Allows you to ultimately specify which profiles to use 3957# - Allows you to ultimately specify which profiles to use
3952# - Default will result in registering capabilities for all guard types 3958# - Default will result in registering capabilities for all guard types
3953# 3959#
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index e470c489de07..4380a44000bc 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -467,3 +467,4 @@ int lpfc_sli4_read_config(struct lpfc_hba *);
467void lpfc_sli4_node_prep(struct lpfc_hba *); 467void lpfc_sli4_node_prep(struct lpfc_hba *);
468int lpfc_sli4_xri_sgl_update(struct lpfc_hba *); 468int lpfc_sli4_xri_sgl_update(struct lpfc_hba *);
469void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *); 469void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
470uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index cfe533bc9790..f19e9b6f9f13 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -809,6 +809,8 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
809 phba->fc_ratov = FF_DEF_RATOV; 809 phba->fc_ratov = FF_DEF_RATOV;
810 rc = memcmp(&vport->fc_portname, &sp->portName, 810 rc = memcmp(&vport->fc_portname, &sp->portName,
811 sizeof(vport->fc_portname)); 811 sizeof(vport->fc_portname));
812 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
813
812 if (rc >= 0) { 814 if (rc >= 0) {
813 /* This side will initiate the PLOGI */ 815 /* This side will initiate the PLOGI */
814 spin_lock_irq(shost->host_lock); 816 spin_lock_irq(shost->host_lock);
@@ -3160,7 +3162,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3160 retry = 1; 3162 retry = 1;
3161 break; 3163 break;
3162 } 3164 }
3163 if (cmd == ELS_CMD_PLOGI) { 3165 if ((cmd == ELS_CMD_PLOGI) ||
3166 (cmd == ELS_CMD_PRLI)) {
3164 delay = 1000; 3167 delay = 1000;
3165 maxretry = lpfc_max_els_tries + 1; 3168 maxretry = lpfc_max_els_tries + 1;
3166 retry = 1; 3169 retry = 1;
@@ -3305,7 +3308,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3305 ndlp->nlp_prev_state = ndlp->nlp_state; 3308 ndlp->nlp_prev_state = ndlp->nlp_state;
3306 if (cmd == ELS_CMD_PRLI) 3309 if (cmd == ELS_CMD_PRLI)
3307 lpfc_nlp_set_state(vport, ndlp, 3310 lpfc_nlp_set_state(vport, ndlp,
3308 NLP_STE_REG_LOGIN_ISSUE); 3311 NLP_STE_PRLI_ISSUE);
3309 else 3312 else
3310 lpfc_nlp_set_state(vport, ndlp, 3313 lpfc_nlp_set_state(vport, ndlp,
3311 NLP_STE_NPR_NODE); 3314 NLP_STE_NPR_NODE);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e9845d2ecf10..d7096ad94d3f 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1506,9 +1506,10 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1506 } 1506 }
1507 } 1507 }
1508 1508
1509 /* If FCF not available return 0 */ 1509 /* FCF not valid/available or solicitation in progress */
1510 if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) || 1510 if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
1511 !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record)) 1511 !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
1512 bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
1512 return 0; 1513 return 0;
1513 1514
1514 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { 1515 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
@@ -1842,6 +1843,7 @@ lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
1842 "\tFCF_Index : x%x\n" 1843 "\tFCF_Index : x%x\n"
1843 "\tFCF_Avail : x%x\n" 1844 "\tFCF_Avail : x%x\n"
1844 "\tFCF_Valid : x%x\n" 1845 "\tFCF_Valid : x%x\n"
1846 "\tFCF_SOL : x%x\n"
1845 "\tFIP_Priority : x%x\n" 1847 "\tFIP_Priority : x%x\n"
1846 "\tMAC_Provider : x%x\n" 1848 "\tMAC_Provider : x%x\n"
1847 "\tLowest VLANID : x%x\n" 1849 "\tLowest VLANID : x%x\n"
@@ -1852,6 +1854,7 @@ lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
1852 bf_get(lpfc_fcf_record_fcf_index, fcf_record), 1854 bf_get(lpfc_fcf_record_fcf_index, fcf_record),
1853 bf_get(lpfc_fcf_record_fcf_avail, fcf_record), 1855 bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
1854 bf_get(lpfc_fcf_record_fcf_valid, fcf_record), 1856 bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
1857 bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
1855 fcf_record->fip_priority, 1858 fcf_record->fip_priority,
1856 bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record), 1859 bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
1857 vlan_id, 1860 vlan_id,
@@ -2185,12 +2188,14 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2185 new_fcf_record)); 2188 new_fcf_record));
2186 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2189 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2187 "2781 FCF (x%x) failed connection " 2190 "2781 FCF (x%x) failed connection "
2188 "list check: (x%x/x%x)\n", 2191 "list check: (x%x/x%x/%x)\n",
2189 bf_get(lpfc_fcf_record_fcf_index, 2192 bf_get(lpfc_fcf_record_fcf_index,
2190 new_fcf_record), 2193 new_fcf_record),
2191 bf_get(lpfc_fcf_record_fcf_avail, 2194 bf_get(lpfc_fcf_record_fcf_avail,
2192 new_fcf_record), 2195 new_fcf_record),
2193 bf_get(lpfc_fcf_record_fcf_valid, 2196 bf_get(lpfc_fcf_record_fcf_valid,
2197 new_fcf_record),
2198 bf_get(lpfc_fcf_record_fcf_sol,
2194 new_fcf_record)); 2199 new_fcf_record));
2195 if ((phba->fcf.fcf_flag & FCF_IN_USE) && 2200 if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
2196 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, 2201 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 834b699cac76..2cdeb5434fb7 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1305,6 +1305,11 @@ struct lpfc_mbx_mq_create_ext {
1305#define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK 1305#define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK
1306#define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001 1306#define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001
1307#define lpfc_mbx_mq_create_ext_async_evt_link_WORD async_evt_bmap 1307#define lpfc_mbx_mq_create_ext_async_evt_link_WORD async_evt_bmap
1308#define LPFC_EVT_CODE_LINK_NO_LINK 0x0
1309#define LPFC_EVT_CODE_LINK_10_MBIT 0x1
1310#define LPFC_EVT_CODE_LINK_100_MBIT 0x2
1311#define LPFC_EVT_CODE_LINK_1_GBIT 0x3
1312#define LPFC_EVT_CODE_LINK_10_GBIT 0x4
1308#define lpfc_mbx_mq_create_ext_async_evt_fip_SHIFT LPFC_TRAILER_CODE_FCOE 1313#define lpfc_mbx_mq_create_ext_async_evt_fip_SHIFT LPFC_TRAILER_CODE_FCOE
1309#define lpfc_mbx_mq_create_ext_async_evt_fip_MASK 0x00000001 1314#define lpfc_mbx_mq_create_ext_async_evt_fip_MASK 0x00000001
1310#define lpfc_mbx_mq_create_ext_async_evt_fip_WORD async_evt_bmap 1315#define lpfc_mbx_mq_create_ext_async_evt_fip_WORD async_evt_bmap
@@ -1314,6 +1319,13 @@ struct lpfc_mbx_mq_create_ext {
1314#define lpfc_mbx_mq_create_ext_async_evt_fc_SHIFT LPFC_TRAILER_CODE_FC 1319#define lpfc_mbx_mq_create_ext_async_evt_fc_SHIFT LPFC_TRAILER_CODE_FC
1315#define lpfc_mbx_mq_create_ext_async_evt_fc_MASK 0x00000001 1320#define lpfc_mbx_mq_create_ext_async_evt_fc_MASK 0x00000001
1316#define lpfc_mbx_mq_create_ext_async_evt_fc_WORD async_evt_bmap 1321#define lpfc_mbx_mq_create_ext_async_evt_fc_WORD async_evt_bmap
1322#define LPFC_EVT_CODE_FC_NO_LINK 0x0
1323#define LPFC_EVT_CODE_FC_1_GBAUD 0x1
1324#define LPFC_EVT_CODE_FC_2_GBAUD 0x2
1325#define LPFC_EVT_CODE_FC_4_GBAUD 0x4
1326#define LPFC_EVT_CODE_FC_8_GBAUD 0x8
1327#define LPFC_EVT_CODE_FC_10_GBAUD 0xA
1328#define LPFC_EVT_CODE_FC_16_GBAUD 0x10
1317#define lpfc_mbx_mq_create_ext_async_evt_sli_SHIFT LPFC_TRAILER_CODE_SLI 1329#define lpfc_mbx_mq_create_ext_async_evt_sli_SHIFT LPFC_TRAILER_CODE_SLI
1318#define lpfc_mbx_mq_create_ext_async_evt_sli_MASK 0x00000001 1330#define lpfc_mbx_mq_create_ext_async_evt_sli_MASK 0x00000001
1319#define lpfc_mbx_mq_create_ext_async_evt_sli_WORD async_evt_bmap 1331#define lpfc_mbx_mq_create_ext_async_evt_sli_WORD async_evt_bmap
@@ -1695,8 +1707,14 @@ struct fcf_record {
1695#define lpfc_fcf_record_fc_map_2_MASK 0x000000FF 1707#define lpfc_fcf_record_fc_map_2_MASK 0x000000FF
1696#define lpfc_fcf_record_fc_map_2_WORD word7 1708#define lpfc_fcf_record_fc_map_2_WORD word7
1697#define lpfc_fcf_record_fcf_valid_SHIFT 24 1709#define lpfc_fcf_record_fcf_valid_SHIFT 24
1698#define lpfc_fcf_record_fcf_valid_MASK 0x000000FF 1710#define lpfc_fcf_record_fcf_valid_MASK 0x00000001
1699#define lpfc_fcf_record_fcf_valid_WORD word7 1711#define lpfc_fcf_record_fcf_valid_WORD word7
1712#define lpfc_fcf_record_fcf_fc_SHIFT 25
1713#define lpfc_fcf_record_fcf_fc_MASK 0x00000001
1714#define lpfc_fcf_record_fcf_fc_WORD word7
1715#define lpfc_fcf_record_fcf_sol_SHIFT 31
1716#define lpfc_fcf_record_fcf_sol_MASK 0x00000001
1717#define lpfc_fcf_record_fcf_sol_WORD word7
1700 uint32_t word8; 1718 uint32_t word8;
1701#define lpfc_fcf_record_fcf_index_SHIFT 0 1719#define lpfc_fcf_record_fcf_index_SHIFT 0
1702#define lpfc_fcf_record_fcf_index_MASK 0x0000FFFF 1720#define lpfc_fcf_record_fcf_index_MASK 0x0000FFFF
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 8a55a586dd65..7dc4218d9c4c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1892,8 +1892,10 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1892 max_speed = 4; 1892 max_speed = 4;
1893 else if (phba->lmt & LMT_2Gb) 1893 else if (phba->lmt & LMT_2Gb)
1894 max_speed = 2; 1894 max_speed = 2;
1895 else 1895 else if (phba->lmt & LMT_1Gb)
1896 max_speed = 1; 1896 max_speed = 1;
1897 else
1898 max_speed = 0;
1897 1899
1898 vp = &phba->vpd; 1900 vp = &phba->vpd;
1899 1901
@@ -2078,9 +2080,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2078 if (descp && descp[0] == '\0') { 2080 if (descp && descp[0] == '\0') {
2079 if (oneConnect) 2081 if (oneConnect)
2080 snprintf(descp, 255, 2082 snprintf(descp, 255,
2081 "Emulex OneConnect %s, %s Initiator, Port %s", 2083 "Emulex OneConnect %s, %s Initiator %s",
2082 m.name, m.function, 2084 m.name, m.function,
2083 phba->Port); 2085 phba->Port);
2086 else if (max_speed == 0)
2087 snprintf(descp, 255,
2088 "Emulex %s %s %s ",
2089 m.name, m.bus, m.function);
2084 else 2090 else
2085 snprintf(descp, 255, 2091 snprintf(descp, 255,
2086 "Emulex %s %d%s %s %s", 2092 "Emulex %s %d%s %s %s",
@@ -3502,6 +3508,119 @@ lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3502} 3508}
3503 3509
3504/** 3510/**
3511 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
3512 * @phba: pointer to lpfc hba data structure.
3513 *
3514 * This routine is to get an SLI3 FC port's link speed in Mbps.
3515 *
3516 * Return: link speed in terms of Mbps.
3517 **/
3518uint32_t
3519lpfc_sli_port_speed_get(struct lpfc_hba *phba)
3520{
3521 uint32_t link_speed;
3522
3523 if (!lpfc_is_link_up(phba))
3524 return 0;
3525
3526 switch (phba->fc_linkspeed) {
3527 case LPFC_LINK_SPEED_1GHZ:
3528 link_speed = 1000;
3529 break;
3530 case LPFC_LINK_SPEED_2GHZ:
3531 link_speed = 2000;
3532 break;
3533 case LPFC_LINK_SPEED_4GHZ:
3534 link_speed = 4000;
3535 break;
3536 case LPFC_LINK_SPEED_8GHZ:
3537 link_speed = 8000;
3538 break;
3539 case LPFC_LINK_SPEED_10GHZ:
3540 link_speed = 10000;
3541 break;
3542 case LPFC_LINK_SPEED_16GHZ:
3543 link_speed = 16000;
3544 break;
3545 default:
3546 link_speed = 0;
3547 }
3548 return link_speed;
3549}
3550
3551/**
3552 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
3553 * @phba: pointer to lpfc hba data structure.
3554 * @evt_code: asynchronous event code.
3555 * @speed_code: asynchronous event link speed code.
3556 *
3557 * This routine is to parse the giving SLI4 async event link speed code into
3558 * value of Mbps for the link speed.
3559 *
3560 * Return: link speed in terms of Mbps.
3561 **/
3562static uint32_t
3563lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
3564 uint8_t speed_code)
3565{
3566 uint32_t port_speed;
3567
3568 switch (evt_code) {
3569 case LPFC_TRAILER_CODE_LINK:
3570 switch (speed_code) {
3571 case LPFC_EVT_CODE_LINK_NO_LINK:
3572 port_speed = 0;
3573 break;
3574 case LPFC_EVT_CODE_LINK_10_MBIT:
3575 port_speed = 10;
3576 break;
3577 case LPFC_EVT_CODE_LINK_100_MBIT:
3578 port_speed = 100;
3579 break;
3580 case LPFC_EVT_CODE_LINK_1_GBIT:
3581 port_speed = 1000;
3582 break;
3583 case LPFC_EVT_CODE_LINK_10_GBIT:
3584 port_speed = 10000;
3585 break;
3586 default:
3587 port_speed = 0;
3588 }
3589 break;
3590 case LPFC_TRAILER_CODE_FC:
3591 switch (speed_code) {
3592 case LPFC_EVT_CODE_FC_NO_LINK:
3593 port_speed = 0;
3594 break;
3595 case LPFC_EVT_CODE_FC_1_GBAUD:
3596 port_speed = 1000;
3597 break;
3598 case LPFC_EVT_CODE_FC_2_GBAUD:
3599 port_speed = 2000;
3600 break;
3601 case LPFC_EVT_CODE_FC_4_GBAUD:
3602 port_speed = 4000;
3603 break;
3604 case LPFC_EVT_CODE_FC_8_GBAUD:
3605 port_speed = 8000;
3606 break;
3607 case LPFC_EVT_CODE_FC_10_GBAUD:
3608 port_speed = 10000;
3609 break;
3610 case LPFC_EVT_CODE_FC_16_GBAUD:
3611 port_speed = 16000;
3612 break;
3613 default:
3614 port_speed = 0;
3615 }
3616 break;
3617 default:
3618 port_speed = 0;
3619 }
3620 return port_speed;
3621}
3622
3623/**
3505 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 3624 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
3506 * @phba: pointer to lpfc hba data structure. 3625 * @phba: pointer to lpfc hba data structure.
3507 * @acqe_link: pointer to the async link completion queue entry. 3626 * @acqe_link: pointer to the async link completion queue entry.
@@ -3558,7 +3677,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3558 3677
3559 /* Keep the link status for extra SLI4 state machine reference */ 3678 /* Keep the link status for extra SLI4 state machine reference */
3560 phba->sli4_hba.link_state.speed = 3679 phba->sli4_hba.link_state.speed =
3561 bf_get(lpfc_acqe_link_speed, acqe_link); 3680 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
3681 bf_get(lpfc_acqe_link_speed, acqe_link));
3562 phba->sli4_hba.link_state.duplex = 3682 phba->sli4_hba.link_state.duplex =
3563 bf_get(lpfc_acqe_link_duplex, acqe_link); 3683 bf_get(lpfc_acqe_link_duplex, acqe_link);
3564 phba->sli4_hba.link_state.status = 3684 phba->sli4_hba.link_state.status =
@@ -3570,7 +3690,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3570 phba->sli4_hba.link_state.fault = 3690 phba->sli4_hba.link_state.fault =
3571 bf_get(lpfc_acqe_link_fault, acqe_link); 3691 bf_get(lpfc_acqe_link_fault, acqe_link);
3572 phba->sli4_hba.link_state.logical_speed = 3692 phba->sli4_hba.link_state.logical_speed =
3573 bf_get(lpfc_acqe_logical_link_speed, acqe_link); 3693 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
3694
3574 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3695 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3575 "2900 Async FC/FCoE Link event - Speed:%dGBit " 3696 "2900 Async FC/FCoE Link event - Speed:%dGBit "
3576 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 3697 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
@@ -3580,7 +3701,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3580 phba->sli4_hba.link_state.status, 3701 phba->sli4_hba.link_state.status,
3581 phba->sli4_hba.link_state.type, 3702 phba->sli4_hba.link_state.type,
3582 phba->sli4_hba.link_state.number, 3703 phba->sli4_hba.link_state.number,
3583 phba->sli4_hba.link_state.logical_speed * 10, 3704 phba->sli4_hba.link_state.logical_speed,
3584 phba->sli4_hba.link_state.fault); 3705 phba->sli4_hba.link_state.fault);
3585 /* 3706 /*
3586 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 3707 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
@@ -3652,7 +3773,8 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
3652 } 3773 }
3653 /* Keep the link status for extra SLI4 state machine reference */ 3774 /* Keep the link status for extra SLI4 state machine reference */
3654 phba->sli4_hba.link_state.speed = 3775 phba->sli4_hba.link_state.speed =
3655 bf_get(lpfc_acqe_fc_la_speed, acqe_fc); 3776 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
3777 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
3656 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 3778 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
3657 phba->sli4_hba.link_state.topology = 3779 phba->sli4_hba.link_state.topology =
3658 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 3780 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
@@ -3665,7 +3787,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
3665 phba->sli4_hba.link_state.fault = 3787 phba->sli4_hba.link_state.fault =
3666 bf_get(lpfc_acqe_link_fault, acqe_fc); 3788 bf_get(lpfc_acqe_link_fault, acqe_fc);
3667 phba->sli4_hba.link_state.logical_speed = 3789 phba->sli4_hba.link_state.logical_speed =
3668 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc); 3790 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
3669 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3791 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3670 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 3792 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
3671 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 3793 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
@@ -3675,7 +3797,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
3675 phba->sli4_hba.link_state.status, 3797 phba->sli4_hba.link_state.status,
3676 phba->sli4_hba.link_state.type, 3798 phba->sli4_hba.link_state.type,
3677 phba->sli4_hba.link_state.number, 3799 phba->sli4_hba.link_state.number,
3678 phba->sli4_hba.link_state.logical_speed * 10, 3800 phba->sli4_hba.link_state.logical_speed,
3679 phba->sli4_hba.link_state.fault); 3801 phba->sli4_hba.link_state.fault);
3680 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3802 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3681 if (!pmb) { 3803 if (!pmb) {
@@ -3783,14 +3905,18 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
3783 case LPFC_SLI_EVENT_STATUS_VALID: 3905 case LPFC_SLI_EVENT_STATUS_VALID:
3784 return; /* no message if the sfp is okay */ 3906 return; /* no message if the sfp is okay */
3785 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 3907 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
3786 sprintf(message, "Not installed"); 3908 sprintf(message, "Optics faulted/incorrectly installed/not " \
3909 "installed - Reseat optics, if issue not "
3910 "resolved, replace.");
3787 break; 3911 break;
3788 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 3912 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
3789 sprintf(message, 3913 sprintf(message,
3790 "Optics of two types installed"); 3914 "Optics of two types installed - Remove one optic or " \
3915 "install matching pair of optics.");
3791 break; 3916 break;
3792 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 3917 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
3793 sprintf(message, "Incompatible optics"); 3918 sprintf(message, "Incompatible optics - Replace with " \
3919 "compatible optics for card to function.");
3794 break; 3920 break;
3795 default: 3921 default:
3796 /* firmware is reporting a status we don't know about */ 3922 /* firmware is reporting a status we don't know about */
@@ -4161,11 +4287,11 @@ lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
4161 phba->fcoe_eventtag = acqe_grp5->event_tag; 4287 phba->fcoe_eventtag = acqe_grp5->event_tag;
4162 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 4288 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
4163 phba->sli4_hba.link_state.logical_speed = 4289 phba->sli4_hba.link_state.logical_speed =
4164 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)); 4290 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
4165 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4291 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4166 "2789 GRP5 Async Event: Updating logical link speed " 4292 "2789 GRP5 Async Event: Updating logical link speed "
4167 "from %dMbps to %dMbps\n", (prev_ll_spd * 10), 4293 "from %dMbps to %dMbps\n", prev_ll_spd,
4168 (phba->sli4_hba.link_state.logical_speed*10)); 4294 phba->sli4_hba.link_state.logical_speed);
4169} 4295}
4170 4296
4171/** 4297/**
@@ -4947,7 +5073,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4947 } 5073 }
4948 5074
4949 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 5075 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4950 phba->sli4_hba.cfg_eqn), GFP_KERNEL); 5076 phba->cfg_fcp_io_channel), GFP_KERNEL);
4951 if (!phba->sli4_hba.msix_entries) { 5077 if (!phba->sli4_hba.msix_entries) {
4952 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5078 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4953 "2573 Failed allocate memory for msi-x " 5079 "2573 Failed allocate memory for msi-x "
@@ -6559,7 +6685,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6559 i++; 6685 i++;
6560 } 6686 }
6561 if (i < cfg_fcp_io_channel) { 6687 if (i < cfg_fcp_io_channel) {
6562 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6688 lpfc_printf_log(phba,
6689 KERN_ERR, LOG_INIT,
6563 "3188 Reducing IO channels to match number of " 6690 "3188 Reducing IO channels to match number of "
6564 "CPUs: from %d to %d\n", cfg_fcp_io_channel, i); 6691 "CPUs: from %d to %d\n", cfg_fcp_io_channel, i);
6565 cfg_fcp_io_channel = i; 6692 cfg_fcp_io_channel = i;
@@ -6567,8 +6694,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6567 6694
6568 if (cfg_fcp_io_channel > 6695 if (cfg_fcp_io_channel >
6569 phba->sli4_hba.max_cfg_param.max_eq) { 6696 phba->sli4_hba.max_cfg_param.max_eq) {
6570 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq; 6697 if (phba->sli4_hba.max_cfg_param.max_eq <
6571 if (cfg_fcp_io_channel < LPFC_FCP_IO_CHAN_MIN) { 6698 LPFC_FCP_IO_CHAN_MIN) {
6572 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6699 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6573 "2574 Not enough EQs (%d) from the " 6700 "2574 Not enough EQs (%d) from the "
6574 "pci function for supporting FCP " 6701 "pci function for supporting FCP "
@@ -6577,13 +6704,12 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6577 phba->cfg_fcp_io_channel); 6704 phba->cfg_fcp_io_channel);
6578 goto out_error; 6705 goto out_error;
6579 } 6706 }
6580 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6581 "2575 Not enough EQs (%d) from the pci " 6708 "2575 Reducing IO channels to match number of "
6582 "function for supporting the requested " 6709 "available EQs: from %d to %d\n",
6583 "FCP EQs (%d), the actual FCP EQs can " 6710 cfg_fcp_io_channel,
6584 "be supported: %d\n", 6711 phba->sli4_hba.max_cfg_param.max_eq);
6585 phba->sli4_hba.max_cfg_param.max_eq, 6712 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
6586 phba->cfg_fcp_io_channel, cfg_fcp_io_channel);
6587 } 6713 }
6588 6714
6589 /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be depricated */ 6715 /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be depricated */
@@ -6592,7 +6718,6 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6592 phba->cfg_fcp_eq_count = cfg_fcp_io_channel; 6718 phba->cfg_fcp_eq_count = cfg_fcp_io_channel;
6593 phba->cfg_fcp_wq_count = cfg_fcp_io_channel; 6719 phba->cfg_fcp_wq_count = cfg_fcp_io_channel;
6594 phba->cfg_fcp_io_channel = cfg_fcp_io_channel; 6720 phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
6595 phba->sli4_hba.cfg_eqn = cfg_fcp_io_channel;
6596 6721
6597 /* Get EQ depth from module parameter, fake the default for now */ 6722 /* Get EQ depth from module parameter, fake the default for now */
6598 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 6723 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
@@ -8095,11 +8220,11 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
8095 int vectors, rc, index; 8220 int vectors, rc, index;
8096 8221
8097 /* Set up MSI-X multi-message vectors */ 8222 /* Set up MSI-X multi-message vectors */
8098 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) 8223 for (index = 0; index < phba->cfg_fcp_io_channel; index++)
8099 phba->sli4_hba.msix_entries[index].entry = index; 8224 phba->sli4_hba.msix_entries[index].entry = index;
8100 8225
8101 /* Configure MSI-X capability structure */ 8226 /* Configure MSI-X capability structure */
8102 vectors = phba->sli4_hba.cfg_eqn; 8227 vectors = phba->cfg_fcp_io_channel;
8103enable_msix_vectors: 8228enable_msix_vectors:
8104 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, 8229 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
8105 vectors); 8230 vectors);
@@ -8142,8 +8267,14 @@ enable_msix_vectors:
8142 goto cfg_fail_out; 8267 goto cfg_fail_out;
8143 } 8268 }
8144 } 8269 }
8145 phba->sli4_hba.msix_vec_nr = vectors;
8146 8270
8271 if (vectors != phba->cfg_fcp_io_channel) {
8272 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8273 "3238 Reducing IO channels to match number of "
8274 "MSI-X vectors, requested %d got %d\n",
8275 phba->cfg_fcp_io_channel, vectors);
8276 phba->cfg_fcp_io_channel = vectors;
8277 }
8147 return rc; 8278 return rc;
8148 8279
8149cfg_fail_out: 8280cfg_fail_out:
@@ -8171,7 +8302,7 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
8171 int index; 8302 int index;
8172 8303
8173 /* Free up MSI-X multi-message vectors */ 8304 /* Free up MSI-X multi-message vectors */
8174 for (index = 0; index < phba->sli4_hba.msix_vec_nr; index++) 8305 for (index = 0; index < phba->cfg_fcp_io_channel; index++)
8175 free_irq(phba->sli4_hba.msix_entries[index].vector, 8306 free_irq(phba->sli4_hba.msix_entries[index].vector,
8176 &phba->sli4_hba.fcp_eq_hdl[index]); 8307 &phba->sli4_hba.fcp_eq_hdl[index]);
8177 8308
@@ -9304,23 +9435,28 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
9304 9435
9305/** 9436/**
9306 * lpfc_write_firmware - attempt to write a firmware image to the port 9437 * lpfc_write_firmware - attempt to write a firmware image to the port
9307 * @phba: pointer to lpfc hba data structure.
9308 * @fw: pointer to firmware image returned from request_firmware. 9438 * @fw: pointer to firmware image returned from request_firmware.
9439 * @phba: pointer to lpfc hba data structure.
9309 * 9440 *
9310 * returns the number of bytes written if write is successful.
9311 * returns a negative error value if there were errors.
9312 * returns 0 if firmware matches currently active firmware on port.
9313 **/ 9441 **/
9314int 9442static void
9315lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw) 9443lpfc_write_firmware(const struct firmware *fw, void *context)
9316{ 9444{
9445 struct lpfc_hba *phba = (struct lpfc_hba *)context;
9317 char fwrev[FW_REV_STR_SIZE]; 9446 char fwrev[FW_REV_STR_SIZE];
9318 struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data; 9447 struct lpfc_grp_hdr *image;
9319 struct list_head dma_buffer_list; 9448 struct list_head dma_buffer_list;
9320 int i, rc = 0; 9449 int i, rc = 0;
9321 struct lpfc_dmabuf *dmabuf, *next; 9450 struct lpfc_dmabuf *dmabuf, *next;
9322 uint32_t offset = 0, temp_offset = 0; 9451 uint32_t offset = 0, temp_offset = 0;
9323 9452
9453 /* It can be null, sanity check */
9454 if (!fw) {
9455 rc = -ENXIO;
9456 goto out;
9457 }
9458 image = (struct lpfc_grp_hdr *)fw->data;
9459
9324 INIT_LIST_HEAD(&dma_buffer_list); 9460 INIT_LIST_HEAD(&dma_buffer_list);
9325 if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) || 9461 if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
9326 (bf_get_be32(lpfc_grp_hdr_file_type, image) != 9462 (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
@@ -9333,12 +9469,13 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
9333 be32_to_cpu(image->magic_number), 9469 be32_to_cpu(image->magic_number),
9334 bf_get_be32(lpfc_grp_hdr_file_type, image), 9470 bf_get_be32(lpfc_grp_hdr_file_type, image),
9335 bf_get_be32(lpfc_grp_hdr_id, image)); 9471 bf_get_be32(lpfc_grp_hdr_id, image));
9336 return -EINVAL; 9472 rc = -EINVAL;
9473 goto release_out;
9337 } 9474 }
9338 lpfc_decode_firmware_rev(phba, fwrev, 1); 9475 lpfc_decode_firmware_rev(phba, fwrev, 1);
9339 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 9476 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
9340 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9477 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9341 "3023 Updating Firmware. Current Version:%s " 9478 "3023 Updating Firmware, Current Version:%s "
9342 "New Version:%s\n", 9479 "New Version:%s\n",
9343 fwrev, image->revision); 9480 fwrev, image->revision);
9344 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 9481 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
@@ -9346,7 +9483,7 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
9346 GFP_KERNEL); 9483 GFP_KERNEL);
9347 if (!dmabuf) { 9484 if (!dmabuf) {
9348 rc = -ENOMEM; 9485 rc = -ENOMEM;
9349 goto out; 9486 goto release_out;
9350 } 9487 }
9351 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 9488 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
9352 SLI4_PAGE_SIZE, 9489 SLI4_PAGE_SIZE,
@@ -9355,7 +9492,7 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
9355 if (!dmabuf->virt) { 9492 if (!dmabuf->virt) {
9356 kfree(dmabuf); 9493 kfree(dmabuf);
9357 rc = -ENOMEM; 9494 rc = -ENOMEM;
9358 goto out; 9495 goto release_out;
9359 } 9496 }
9360 list_add_tail(&dmabuf->list, &dma_buffer_list); 9497 list_add_tail(&dmabuf->list, &dma_buffer_list);
9361 } 9498 }
@@ -9375,23 +9512,24 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
9375 } 9512 }
9376 rc = lpfc_wr_object(phba, &dma_buffer_list, 9513 rc = lpfc_wr_object(phba, &dma_buffer_list,
9377 (fw->size - offset), &offset); 9514 (fw->size - offset), &offset);
9378 if (rc) { 9515 if (rc)
9379 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9516 goto release_out;
9380 "3024 Firmware update failed. "
9381 "%d\n", rc);
9382 goto out;
9383 }
9384 } 9517 }
9385 rc = offset; 9518 rc = offset;
9386 } 9519 }
9387out: 9520
9521release_out:
9388 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 9522 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
9389 list_del(&dmabuf->list); 9523 list_del(&dmabuf->list);
9390 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 9524 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
9391 dmabuf->virt, dmabuf->phys); 9525 dmabuf->virt, dmabuf->phys);
9392 kfree(dmabuf); 9526 kfree(dmabuf);
9393 } 9527 }
9394 return rc; 9528 release_firmware(fw);
9529out:
9530 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9531 "3024 Firmware update done: %d.", rc);
9532 return;
9395} 9533}
9396 9534
9397/** 9535/**
@@ -9418,12 +9556,11 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9418 struct lpfc_hba *phba; 9556 struct lpfc_hba *phba;
9419 struct lpfc_vport *vport = NULL; 9557 struct lpfc_vport *vport = NULL;
9420 struct Scsi_Host *shost = NULL; 9558 struct Scsi_Host *shost = NULL;
9421 int error; 9559 int error, ret;
9422 uint32_t cfg_mode, intr_mode; 9560 uint32_t cfg_mode, intr_mode;
9423 int mcnt; 9561 int mcnt;
9424 int adjusted_fcp_io_channel; 9562 int adjusted_fcp_io_channel;
9425 const struct firmware *fw; 9563 uint8_t file_name[ELX_MODEL_NAME_SIZE];
9426 uint8_t file_name[16];
9427 9564
9428 /* Allocate memory for HBA structure */ 9565 /* Allocate memory for HBA structure */
9429 phba = lpfc_hba_alloc(pdev); 9566 phba = lpfc_hba_alloc(pdev);
@@ -9525,9 +9662,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9525 /* Default to single EQ for non-MSI-X */ 9662 /* Default to single EQ for non-MSI-X */
9526 if (phba->intr_type != MSIX) 9663 if (phba->intr_type != MSIX)
9527 adjusted_fcp_io_channel = 1; 9664 adjusted_fcp_io_channel = 1;
9528 else if (phba->sli4_hba.msix_vec_nr <
9529 phba->cfg_fcp_io_channel)
9530 adjusted_fcp_io_channel = phba->sli4_hba.msix_vec_nr;
9531 else 9665 else
9532 adjusted_fcp_io_channel = phba->cfg_fcp_io_channel; 9666 adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
9533 phba->cfg_fcp_io_channel = adjusted_fcp_io_channel; 9667 phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
@@ -9572,12 +9706,12 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9572 /* check for firmware upgrade or downgrade (if_type 2 only) */ 9706 /* check for firmware upgrade or downgrade (if_type 2 only) */
9573 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 9707 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9574 LPFC_SLI_INTF_IF_TYPE_2) { 9708 LPFC_SLI_INTF_IF_TYPE_2) {
9575 snprintf(file_name, 16, "%s.grp", phba->ModelName); 9709 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp",
9576 error = request_firmware(&fw, file_name, &phba->pcidev->dev); 9710 phba->ModelName);
9577 if (!error) { 9711 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
9578 lpfc_write_firmware(phba, fw); 9712 file_name, &phba->pcidev->dev,
9579 release_firmware(fw); 9713 GFP_KERNEL, (void *)phba,
9580 } 9714 lpfc_write_firmware);
9581 } 9715 }
9582 9716
9583 /* Check if there are static vports to be created. */ 9717 /* Check if there are static vports to be created. */
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 64013f3097ad..7f45ac9964a9 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3829,9 +3829,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3829 cmd->scsi_done(cmd); 3829 cmd->scsi_done(cmd);
3830 3830
3831 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 3831 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
3832 spin_lock_irq(&phba->hbalock); 3832 spin_lock_irqsave(&phba->hbalock, flags);
3833 lpfc_cmd->pCmd = NULL; 3833 lpfc_cmd->pCmd = NULL;
3834 spin_unlock_irq(&phba->hbalock); 3834 spin_unlock_irqrestore(&phba->hbalock, flags);
3835 3835
3836 /* 3836 /*
3837 * If there is a thread waiting for command completion 3837 * If there is a thread waiting for command completion
@@ -3871,9 +3871,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3871 } 3871 }
3872 } 3872 }
3873 3873
3874 spin_lock_irq(&phba->hbalock); 3874 spin_lock_irqsave(&phba->hbalock, flags);
3875 lpfc_cmd->pCmd = NULL; 3875 lpfc_cmd->pCmd = NULL;
3876 spin_unlock_irq(&phba->hbalock); 3876 spin_unlock_irqrestore(&phba->hbalock, flags);
3877 3877
3878 /* 3878 /*
3879 * If there is a thread waiting for command completion 3879 * If there is a thread waiting for command completion
@@ -4163,7 +4163,7 @@ lpfc_info(struct Scsi_Host *host)
4163{ 4163{
4164 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata; 4164 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4165 struct lpfc_hba *phba = vport->phba; 4165 struct lpfc_hba *phba = vport->phba;
4166 int len; 4166 int len, link_speed = 0;
4167 static char lpfcinfobuf[384]; 4167 static char lpfcinfobuf[384];
4168 4168
4169 memset(lpfcinfobuf,0,384); 4169 memset(lpfcinfobuf,0,384);
@@ -4184,12 +4184,18 @@ lpfc_info(struct Scsi_Host *host)
4184 phba->Port); 4184 phba->Port);
4185 } 4185 }
4186 len = strlen(lpfcinfobuf); 4186 len = strlen(lpfcinfobuf);
4187 if (phba->sli4_hba.link_state.logical_speed) { 4187 if (phba->sli_rev <= LPFC_SLI_REV3) {
4188 snprintf(lpfcinfobuf + len, 4188 link_speed = lpfc_sli_port_speed_get(phba);
4189 384-len, 4189 } else {
4190 " Logical Link Speed: %d Mbps", 4190 if (phba->sli4_hba.link_state.logical_speed)
4191 phba->sli4_hba.link_state.logical_speed * 10); 4191 link_speed =
4192 phba->sli4_hba.link_state.logical_speed;
4193 else
4194 link_speed = phba->sli4_hba.link_state.speed;
4192 } 4195 }
4196 if (link_speed != 0)
4197 snprintf(lpfcinfobuf + len, 384-len,
4198 " Logical Link Speed: %d Mbps", link_speed);
4193 } 4199 }
4194 return lpfcinfobuf; 4200 return lpfcinfobuf;
4195} 4201}
@@ -4398,16 +4404,17 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4398 struct lpfc_scsi_buf *lpfc_cmd; 4404 struct lpfc_scsi_buf *lpfc_cmd;
4399 IOCB_t *cmd, *icmd; 4405 IOCB_t *cmd, *icmd;
4400 int ret = SUCCESS, status = 0; 4406 int ret = SUCCESS, status = 0;
4407 unsigned long flags;
4401 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 4408 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
4402 4409
4403 status = fc_block_scsi_eh(cmnd); 4410 status = fc_block_scsi_eh(cmnd);
4404 if (status != 0 && status != SUCCESS) 4411 if (status != 0 && status != SUCCESS)
4405 return status; 4412 return status;
4406 4413
4407 spin_lock_irq(&phba->hbalock); 4414 spin_lock_irqsave(&phba->hbalock, flags);
4408 /* driver queued commands are in process of being flushed */ 4415 /* driver queued commands are in process of being flushed */
4409 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { 4416 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
4410 spin_unlock_irq(&phba->hbalock); 4417 spin_unlock_irqrestore(&phba->hbalock, flags);
4411 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4418 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4412 "3168 SCSI Layer abort requested I/O has been " 4419 "3168 SCSI Layer abort requested I/O has been "
4413 "flushed by LLD.\n"); 4420 "flushed by LLD.\n");
@@ -4416,7 +4423,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4416 4423
4417 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; 4424 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
4418 if (!lpfc_cmd || !lpfc_cmd->pCmd) { 4425 if (!lpfc_cmd || !lpfc_cmd->pCmd) {
4419 spin_unlock_irq(&phba->hbalock); 4426 spin_unlock_irqrestore(&phba->hbalock, flags);
4420 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4427 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4421 "2873 SCSI Layer I/O Abort Request IO CMPL Status " 4428 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
4422 "x%x ID %d LUN %d\n", 4429 "x%x ID %d LUN %d\n",
@@ -4427,7 +4434,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4427 iocb = &lpfc_cmd->cur_iocbq; 4434 iocb = &lpfc_cmd->cur_iocbq;
4428 /* the command is in process of being cancelled */ 4435 /* the command is in process of being cancelled */
4429 if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { 4436 if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
4430 spin_unlock_irq(&phba->hbalock); 4437 spin_unlock_irqrestore(&phba->hbalock, flags);
4431 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4438 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4432 "3169 SCSI Layer abort requested I/O has been " 4439 "3169 SCSI Layer abort requested I/O has been "
4433 "cancelled by LLD.\n"); 4440 "cancelled by LLD.\n");
@@ -4484,7 +4491,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4484 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 4491 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4485 abtsiocb->vport = vport; 4492 abtsiocb->vport = vport;
4486 /* no longer need the lock after this point */ 4493 /* no longer need the lock after this point */
4487 spin_unlock_irq(&phba->hbalock); 4494 spin_unlock_irqrestore(&phba->hbalock, flags);
4488 4495
4489 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) == 4496 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
4490 IOCB_ERROR) { 4497 IOCB_ERROR) {
@@ -4516,7 +4523,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4516 goto out; 4523 goto out;
4517 4524
4518out_unlock: 4525out_unlock:
4519 spin_unlock_irq(&phba->hbalock); 4526 spin_unlock_irqrestore(&phba->hbalock, flags);
4520out: 4527out:
4521 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4528 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4522 "0749 SCSI Layer I/O Abort Request Status x%x ID %d " 4529 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 219bf534ef99..d7f3313ef886 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -3964,9 +3964,9 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3964 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & 3964 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
3965 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3965 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3966 3966
3967 /* Perform FCoE PCI function reset */ 3967 /* Perform FCoE PCI function reset before freeing queue memory */
3968 lpfc_sli4_queue_destroy(phba);
3969 rc = lpfc_pci_function_reset(phba); 3968 rc = lpfc_pci_function_reset(phba);
3969 lpfc_sli4_queue_destroy(phba);
3970 3970
3971 /* Restore PCI cmd register */ 3971 /* Restore PCI cmd register */
3972 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 3972 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
@@ -7072,6 +7072,40 @@ lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
7072} 7072}
7073 7073
7074/** 7074/**
7075 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
7076 * @phba: Pointer to HBA context object.
7077 * @mboxq: Pointer to mailbox object.
7078 *
7079 * The function waits for the bootstrap mailbox register ready bit from
7080 * port for twice the regular mailbox command timeout value.
7081 *
7082 * 0 - no timeout on waiting for bootstrap mailbox register ready.
7083 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
7084 **/
7085static int
7086lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7087{
7088 uint32_t db_ready;
7089 unsigned long timeout;
7090 struct lpfc_register bmbx_reg;
7091
7092 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
7093 * 1000) + jiffies;
7094
7095 do {
7096 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7097 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7098 if (!db_ready)
7099 msleep(2);
7100
7101 if (time_after(jiffies, timeout))
7102 return MBXERR_ERROR;
7103 } while (!db_ready);
7104
7105 return 0;
7106}
7107
7108/**
7075 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox 7109 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
7076 * @phba: Pointer to HBA context object. 7110 * @phba: Pointer to HBA context object.
7077 * @mboxq: Pointer to mailbox object. 7111 * @mboxq: Pointer to mailbox object.
@@ -7092,15 +7126,12 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7092{ 7126{
7093 int rc = MBX_SUCCESS; 7127 int rc = MBX_SUCCESS;
7094 unsigned long iflag; 7128 unsigned long iflag;
7095 uint32_t db_ready;
7096 uint32_t mcqe_status; 7129 uint32_t mcqe_status;
7097 uint32_t mbx_cmnd; 7130 uint32_t mbx_cmnd;
7098 unsigned long timeout;
7099 struct lpfc_sli *psli = &phba->sli; 7131 struct lpfc_sli *psli = &phba->sli;
7100 struct lpfc_mqe *mb = &mboxq->u.mqe; 7132 struct lpfc_mqe *mb = &mboxq->u.mqe;
7101 struct lpfc_bmbx_create *mbox_rgn; 7133 struct lpfc_bmbx_create *mbox_rgn;
7102 struct dma_address *dma_address; 7134 struct dma_address *dma_address;
7103 struct lpfc_register bmbx_reg;
7104 7135
7105 /* 7136 /*
7106 * Only one mailbox can be active to the bootstrap mailbox region 7137 * Only one mailbox can be active to the bootstrap mailbox region
@@ -7124,6 +7155,11 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7124 phba->sli.mbox_active = mboxq; 7155 phba->sli.mbox_active = mboxq;
7125 spin_unlock_irqrestore(&phba->hbalock, iflag); 7156 spin_unlock_irqrestore(&phba->hbalock, iflag);
7126 7157
7158 /* wait for bootstrap mbox register for readyness */
7159 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7160 if (rc)
7161 goto exit;
7162
7127 /* 7163 /*
7128 * Initialize the bootstrap memory region to avoid stale data areas 7164 * Initialize the bootstrap memory region to avoid stale data areas
7129 * in the mailbox post. Then copy the caller's mailbox contents to 7165 * in the mailbox post. Then copy the caller's mailbox contents to
@@ -7138,35 +7174,18 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7138 dma_address = &phba->sli4_hba.bmbx.dma_address; 7174 dma_address = &phba->sli4_hba.bmbx.dma_address;
7139 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); 7175 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
7140 7176
7141 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) 7177 /* wait for bootstrap mbox register for hi-address write done */
7142 * 1000) + jiffies; 7178 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7143 do { 7179 if (rc)
7144 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 7180 goto exit;
7145 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7146 if (!db_ready)
7147 msleep(2);
7148
7149 if (time_after(jiffies, timeout)) {
7150 rc = MBXERR_ERROR;
7151 goto exit;
7152 }
7153 } while (!db_ready);
7154 7181
7155 /* Post the low mailbox dma address to the port. */ 7182 /* Post the low mailbox dma address to the port. */
7156 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); 7183 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
7157 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
7158 * 1000) + jiffies;
7159 do {
7160 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7161 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7162 if (!db_ready)
7163 msleep(2);
7164 7184
7165 if (time_after(jiffies, timeout)) { 7185 /* wait for bootstrap mbox register for low address write done */
7166 rc = MBXERR_ERROR; 7186 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7167 goto exit; 7187 if (rc)
7168 } 7188 goto exit;
7169 } while (!db_ready);
7170 7189
7171 /* 7190 /*
7172 * Read the CQ to ensure the mailbox has completed. 7191 * Read the CQ to ensure the mailbox has completed.
@@ -8090,6 +8109,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8090 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 8109 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
8091 LPFC_WQE_LENLOC_NONE); 8110 LPFC_WQE_LENLOC_NONE);
8092 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); 8111 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
8112 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
8113 iocbq->iocb.ulpFCP2Rcvy);
8093 break; 8114 break;
8094 case CMD_GEN_REQUEST64_CR: 8115 case CMD_GEN_REQUEST64_CR:
8095 /* For this command calculate the xmit length of the 8116 /* For this command calculate the xmit length of the
@@ -12099,6 +12120,7 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
12099 struct lpfc_queue *eq; 12120 struct lpfc_queue *eq;
12100 int cnt, rc, length, status = 0; 12121 int cnt, rc, length, status = 0;
12101 uint32_t shdr_status, shdr_add_status; 12122 uint32_t shdr_status, shdr_add_status;
12123 uint32_t result;
12102 int fcp_eqidx; 12124 int fcp_eqidx;
12103 union lpfc_sli4_cfg_shdr *shdr; 12125 union lpfc_sli4_cfg_shdr *shdr;
12104 uint16_t dmult; 12126 uint16_t dmult;
@@ -12117,8 +12139,11 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
12117 eq_delay = &mbox->u.mqe.un.eq_delay; 12139 eq_delay = &mbox->u.mqe.un.eq_delay;
12118 12140
12119 /* Calculate delay multiper from maximum interrupt per second */ 12141 /* Calculate delay multiper from maximum interrupt per second */
12120 dmult = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel; 12142 result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
12121 dmult = LPFC_DMULT_CONST/dmult - 1; 12143 if (result > LPFC_DMULT_CONST)
12144 dmult = 0;
12145 else
12146 dmult = LPFC_DMULT_CONST/result - 1;
12122 12147
12123 cnt = 0; 12148 cnt = 0;
12124 for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel; 12149 for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
@@ -12174,7 +12199,7 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
12174 * fails this function will return -ENXIO. 12199 * fails this function will return -ENXIO.
12175 **/ 12200 **/
12176uint32_t 12201uint32_t
12177lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) 12202lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
12178{ 12203{
12179 struct lpfc_mbx_eq_create *eq_create; 12204 struct lpfc_mbx_eq_create *eq_create;
12180 LPFC_MBOXQ_t *mbox; 12205 LPFC_MBOXQ_t *mbox;
@@ -12206,7 +12231,10 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
12206 LPFC_EQE_SIZE); 12231 LPFC_EQE_SIZE);
12207 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 12232 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
12208 /* Calculate delay multiper from maximum interrupt per second */ 12233 /* Calculate delay multiper from maximum interrupt per second */
12209 dmult = LPFC_DMULT_CONST/imax - 1; 12234 if (imax > LPFC_DMULT_CONST)
12235 dmult = 0;
12236 else
12237 dmult = LPFC_DMULT_CONST/imax - 1;
12210 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 12238 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
12211 dmult); 12239 dmult);
12212 switch (eq->entry_count) { 12240 switch (eq->entry_count) {
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index bd4bc4342ae2..f44a06a4c6e7 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -37,7 +37,7 @@
37/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */ 37/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
38#define LPFC_FCP_IO_CHAN_DEF 4 38#define LPFC_FCP_IO_CHAN_DEF 4
39#define LPFC_FCP_IO_CHAN_MIN 1 39#define LPFC_FCP_IO_CHAN_MIN 1
40#define LPFC_FCP_IO_CHAN_MAX 8 40#define LPFC_FCP_IO_CHAN_MAX 16
41 41
42/* 42/*
43 * Provide the default FCF Record attributes used by the driver 43 * Provide the default FCF Record attributes used by the driver
@@ -168,7 +168,7 @@ struct lpfc_queue {
168}; 168};
169 169
170struct lpfc_sli4_link { 170struct lpfc_sli4_link {
171 uint8_t speed; 171 uint16_t speed;
172 uint8_t duplex; 172 uint8_t duplex;
173 uint8_t status; 173 uint8_t status;
174 uint8_t type; 174 uint8_t type;
@@ -490,8 +490,6 @@ struct lpfc_sli4_hba {
490 struct lpfc_pc_sli4_params pc_sli4_params; 490 struct lpfc_pc_sli4_params pc_sli4_params;
491 struct msix_entry *msix_entries; 491 struct msix_entry *msix_entries;
492 uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ]; 492 uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ];
493 uint32_t cfg_eqn;
494 uint32_t msix_vec_nr;
495 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ 493 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
496 494
497 /* Pointers to the constructed SLI4 queues */ 495 /* Pointers to the constructed SLI4 queues */
@@ -626,7 +624,7 @@ void lpfc_sli4_hba_reset(struct lpfc_hba *);
626struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, 624struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
627 uint32_t); 625 uint32_t);
628void lpfc_sli4_queue_free(struct lpfc_queue *); 626void lpfc_sli4_queue_free(struct lpfc_queue *);
629uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t); 627uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
630uint32_t lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t); 628uint32_t lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t);
631uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, 629uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
632 struct lpfc_queue *, uint32_t, uint32_t); 630 struct lpfc_queue *, uint32_t, uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 04265a1c4e52..0c2149189dda 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.34" 21#define LPFC_DRIVER_VERSION "8.3.35"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23 23
24/* Used for SLI 2/3 */ 24/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index fcb005fa4bd1..16b7a72a70c4 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2009-2011 LSI Corporation. 4 * Copyright (c) 2003-2012 LSI Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
@@ -33,9 +33,9 @@
33/* 33/*
34 * MegaRAID SAS Driver meta data 34 * MegaRAID SAS Driver meta data
35 */ 35 */
36#define MEGASAS_VERSION "00.00.06.18-rc1" 36#define MEGASAS_VERSION "06.504.01.00-rc1"
37#define MEGASAS_RELDATE "Jun. 17, 2012" 37#define MEGASAS_RELDATE "Oct. 1, 2012"
38#define MEGASAS_EXT_VERSION "Tue. Jun. 17 17:00:00 PDT 2012" 38#define MEGASAS_EXT_VERSION "Mon. Oct. 1 17:00:00 PDT 2012"
39 39
40/* 40/*
41 * Device IDs 41 * Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 0393ec478cdf..d2c5366aff7f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2009-2011 LSI Corporation. 4 * Copyright (c) 2003-2012 LSI Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
@@ -18,7 +18,7 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_base.c 20 * FILE: megaraid_sas_base.c
21 * Version : v00.00.06.18-rc1 21 * Version : v06.504.01.00-rc1
22 * 22 *
23 * Authors: LSI Corporation 23 * Authors: LSI Corporation
24 * Sreenivas Bagalkote 24 * Sreenivas Bagalkote
@@ -71,6 +71,10 @@ static int msix_disable;
71module_param(msix_disable, int, S_IRUGO); 71module_param(msix_disable, int, S_IRUGO);
72MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0"); 72MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
73 73
74static unsigned int msix_vectors;
75module_param(msix_vectors, int, S_IRUGO);
76MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
77
74static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; 78static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
75module_param(throttlequeuedepth, int, S_IRUGO); 79module_param(throttlequeuedepth, int, S_IRUGO);
76MODULE_PARM_DESC(throttlequeuedepth, 80MODULE_PARM_DESC(throttlequeuedepth,
@@ -3520,6 +3524,10 @@ static int megasas_init_fw(struct megasas_instance *instance)
3520 instance->msix_vectors = (readl(&instance->reg_set-> 3524 instance->msix_vectors = (readl(&instance->reg_set->
3521 outbound_scratch_pad_2 3525 outbound_scratch_pad_2
3522 ) & 0x1F) + 1; 3526 ) & 0x1F) + 1;
3527 if (msix_vectors)
3528 instance->msix_vectors =
3529 min(msix_vectors,
3530 instance->msix_vectors);
3523 } else 3531 } else
3524 instance->msix_vectors = 1; 3532 instance->msix_vectors = 1;
3525 /* Don't bother allocating more MSI-X vectors than cpus */ 3533 /* Don't bother allocating more MSI-X vectors than cpus */
@@ -5233,7 +5241,6 @@ megasas_aen_polling(struct work_struct *work)
5233 5241
5234 case MR_EVT_PD_REMOVED: 5242 case MR_EVT_PD_REMOVED:
5235 if (megasas_get_pd_list(instance) == 0) { 5243 if (megasas_get_pd_list(instance) == 0) {
5236 megasas_get_pd_list(instance);
5237 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { 5244 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
5238 for (j = 0; 5245 for (j = 0;
5239 j < MEGASAS_MAX_DEV_PER_CHANNEL; 5246 j < MEGASAS_MAX_DEV_PER_CHANNEL;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index e3d251a2e26a..a11df82474ef 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2009-2011 LSI Corporation. 4 * Copyright (c) 2009-2012 LSI Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index ddf094e7d0ac..74030aff69ad 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2009-2011 LSI Corporation. 4 * Copyright (c) 2009-2012 LSI Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
@@ -1184,8 +1184,6 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1184 io_request->CDB.EEDP32.PrimaryReferenceTag = 1184 io_request->CDB.EEDP32.PrimaryReferenceTag =
1185 cpu_to_be32(ref_tag); 1185 cpu_to_be32(ref_tag);
1186 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff; 1186 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;
1187
1188 io_request->DataLength = num_blocks * 512;
1189 io_request->IoFlags = 32; /* Specify 32-byte cdb */ 1187 io_request->IoFlags = 32; /* Specify 32-byte cdb */
1190 1188
1191 /* Transfer length */ 1189 /* Transfer length */
@@ -1329,7 +1327,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1329 struct megasas_cmd_fusion *cmd) 1327 struct megasas_cmd_fusion *cmd)
1330{ 1328{
1331 u8 fp_possible; 1329 u8 fp_possible;
1332 u32 start_lba_lo, start_lba_hi, device_id; 1330 u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
1333 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 1331 struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1334 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 1332 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
1335 struct IO_REQUEST_INFO io_info; 1333 struct IO_REQUEST_INFO io_info;
@@ -1355,7 +1353,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1355 * 6-byte READ(0x08) or WRITE(0x0A) cdb 1353 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1356 */ 1354 */
1357 if (scp->cmd_len == 6) { 1355 if (scp->cmd_len == 6) {
1358 io_request->DataLength = (u32) scp->cmnd[4]; 1356 datalength = (u32) scp->cmnd[4];
1359 start_lba_lo = ((u32) scp->cmnd[1] << 16) | 1357 start_lba_lo = ((u32) scp->cmnd[1] << 16) |
1360 ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3]; 1358 ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
1361 1359
@@ -1366,7 +1364,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1366 * 10-byte READ(0x28) or WRITE(0x2A) cdb 1364 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1367 */ 1365 */
1368 else if (scp->cmd_len == 10) { 1366 else if (scp->cmd_len == 10) {
1369 io_request->DataLength = (u32) scp->cmnd[8] | 1367 datalength = (u32) scp->cmnd[8] |
1370 ((u32) scp->cmnd[7] << 8); 1368 ((u32) scp->cmnd[7] << 8);
1371 start_lba_lo = ((u32) scp->cmnd[2] << 24) | 1369 start_lba_lo = ((u32) scp->cmnd[2] << 24) |
1372 ((u32) scp->cmnd[3] << 16) | 1370 ((u32) scp->cmnd[3] << 16) |
@@ -1377,7 +1375,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1377 * 12-byte READ(0xA8) or WRITE(0xAA) cdb 1375 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1378 */ 1376 */
1379 else if (scp->cmd_len == 12) { 1377 else if (scp->cmd_len == 12) {
1380 io_request->DataLength = ((u32) scp->cmnd[6] << 24) | 1378 datalength = ((u32) scp->cmnd[6] << 24) |
1381 ((u32) scp->cmnd[7] << 16) | 1379 ((u32) scp->cmnd[7] << 16) |
1382 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; 1380 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
1383 start_lba_lo = ((u32) scp->cmnd[2] << 24) | 1381 start_lba_lo = ((u32) scp->cmnd[2] << 24) |
@@ -1389,7 +1387,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1389 * 16-byte READ(0x88) or WRITE(0x8A) cdb 1387 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1390 */ 1388 */
1391 else if (scp->cmd_len == 16) { 1389 else if (scp->cmd_len == 16) {
1392 io_request->DataLength = ((u32) scp->cmnd[10] << 24) | 1390 datalength = ((u32) scp->cmnd[10] << 24) |
1393 ((u32) scp->cmnd[11] << 16) | 1391 ((u32) scp->cmnd[11] << 16) |
1394 ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13]; 1392 ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
1395 start_lba_lo = ((u32) scp->cmnd[6] << 24) | 1393 start_lba_lo = ((u32) scp->cmnd[6] << 24) |
@@ -1403,8 +1401,9 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1403 1401
1404 memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO)); 1402 memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
1405 io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; 1403 io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
1406 io_info.numBlocks = io_request->DataLength; 1404 io_info.numBlocks = datalength;
1407 io_info.ldTgtId = device_id; 1405 io_info.ldTgtId = device_id;
1406 io_request->DataLength = scsi_bufflen(scp);
1408 1407
1409 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 1408 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1410 io_info.isRead = 1; 1409 io_info.isRead = 1;
@@ -1431,7 +1430,6 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1431 if (fp_possible) { 1430 if (fp_possible) {
1432 megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp, 1431 megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
1433 local_map_ptr, start_lba_lo); 1432 local_map_ptr, start_lba_lo);
1434 io_request->DataLength = scsi_bufflen(scp);
1435 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 1433 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1436 cmd->request_desc->SCSIIO.RequestFlags = 1434 cmd->request_desc->SCSIIO.RequestFlags =
1437 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY 1435 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
@@ -1510,7 +1508,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
1510 local_map_ptr = fusion->ld_map[(instance->map_id & 1)]; 1508 local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
1511 1509
1512 /* Check if this is a system PD I/O */ 1510 /* Check if this is a system PD I/O */
1513 if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { 1511 if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
1512 instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
1514 io_request->Function = 0; 1513 io_request->Function = 0;
1515 io_request->DevHandle = 1514 io_request->DevHandle =
1516 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; 1515 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
@@ -1525,6 +1524,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
1525 cmd->request_desc->SCSIIO.RequestFlags = 1524 cmd->request_desc->SCSIIO.RequestFlags =
1526 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << 1525 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1527 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1526 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1527 cmd->request_desc->SCSIIO.DevHandle =
1528 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
1528 } else { 1529 } else {
1529 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 1530 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1530 io_request->DevHandle = device_id; 1531 io_request->DevHandle = device_id;
@@ -1732,8 +1733,6 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
1732 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 1733 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1733 return IRQ_NONE; 1734 return IRQ_NONE;
1734 1735
1735 d_val.word = desc->Words;
1736
1737 num_completed = 0; 1736 num_completed = 0;
1738 1737
1739 while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) { 1738 while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) {
@@ -1855,10 +1854,8 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
1855 } 1854 }
1856 spin_unlock_irqrestore(&instance->hba_lock, flags); 1855 spin_unlock_irqrestore(&instance->hba_lock, flags);
1857 1856
1858 spin_lock_irqsave(&instance->completion_lock, flags);
1859 for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++) 1857 for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
1860 complete_cmd_fusion(instance, MSIxIndex); 1858 complete_cmd_fusion(instance, MSIxIndex);
1861 spin_unlock_irqrestore(&instance->completion_lock, flags);
1862} 1859}
1863 1860
1864/** 1861/**
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 088c9f91da95..a7c64f051996 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2009-2011 LSI Corporation. 4 * Copyright (c) 2009-2012 LSI Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 783edc7c6b98..c585a925b3cd 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -35,10 +35,12 @@
35#include <linux/io.h> 35#include <linux/io.h>
36#include <scsi/scsi.h> 36#include <scsi/scsi.h>
37#include <scsi/scsi_cmnd.h> 37#include <scsi/scsi_cmnd.h>
38#include <scsi/scsi_device.h>
38#include <scsi/scsi_host.h> 39#include <scsi/scsi_host.h>
39#include <scsi/scsi_transport.h> 40#include <scsi/scsi_transport.h>
40#include <scsi/scsi_eh.h> 41#include <scsi/scsi_eh.h>
41#include <linux/uaccess.h> 42#include <linux/uaccess.h>
43#include <linux/kthread.h>
42 44
43#include "mvumi.h" 45#include "mvumi.h"
44 46
@@ -48,6 +50,7 @@ MODULE_DESCRIPTION("Marvell UMI Driver");
48 50
49static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = { 51static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = {
50 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9143) }, 52 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9143) },
53 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9580) },
51 { 0 } 54 { 0 }
52}; 55};
53 56
@@ -118,7 +121,7 @@ static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
118static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba, 121static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
119 enum resource_type type, unsigned int size) 122 enum resource_type type, unsigned int size)
120{ 123{
121 struct mvumi_res *res = kzalloc(sizeof(*res), GFP_KERNEL); 124 struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);
122 125
123 if (!res) { 126 if (!res) {
124 dev_err(&mhba->pdev->dev, 127 dev_err(&mhba->pdev->dev,
@@ -128,7 +131,7 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
128 131
129 switch (type) { 132 switch (type) {
130 case RESOURCE_CACHED_MEMORY: 133 case RESOURCE_CACHED_MEMORY:
131 res->virt_addr = kzalloc(size, GFP_KERNEL); 134 res->virt_addr = kzalloc(size, GFP_ATOMIC);
132 if (!res->virt_addr) { 135 if (!res->virt_addr) {
133 dev_err(&mhba->pdev->dev, 136 dev_err(&mhba->pdev->dev,
134 "unable to allocate memory,size = %d.\n", size); 137 "unable to allocate memory,size = %d.\n", size);
@@ -222,11 +225,11 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
222 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr)); 225 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
223 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr)); 226 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
224 m_sg->flags = 0; 227 m_sg->flags = 0;
225 m_sg->size = cpu_to_le32(sg_dma_len(&sg[i])); 228 sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
226 if ((i + 1) == *sg_count) 229 if ((i + 1) == *sg_count)
227 m_sg->flags |= SGD_EOT; 230 m_sg->flags |= 1U << mhba->eot_flag;
228 231
229 m_sg++; 232 sgd_inc(mhba, m_sg);
230 } 233 }
231 } else { 234 } else {
232 scmd->SCp.dma_handle = scsi_bufflen(scmd) ? 235 scmd->SCp.dma_handle = scsi_bufflen(scmd) ?
@@ -237,8 +240,8 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
237 busaddr = scmd->SCp.dma_handle; 240 busaddr = scmd->SCp.dma_handle;
238 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr)); 241 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
239 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr)); 242 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
240 m_sg->flags = SGD_EOT; 243 m_sg->flags = 1U << mhba->eot_flag;
241 m_sg->size = cpu_to_le32(scsi_bufflen(scmd)); 244 sgd_setsz(mhba, m_sg, cpu_to_le32(scsi_bufflen(scmd)));
242 *sg_count = 1; 245 *sg_count = 1;
243 } 246 }
244 247
@@ -267,8 +270,8 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
267 270
268 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr)); 271 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
269 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr)); 272 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
270 m_sg->flags = SGD_EOT; 273 m_sg->flags = 1U << mhba->eot_flag;
271 m_sg->size = cpu_to_le32(size); 274 sgd_setsz(mhba, m_sg, cpu_to_le32(size));
272 275
273 return 0; 276 return 0;
274} 277}
@@ -285,7 +288,8 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
285 } 288 }
286 INIT_LIST_HEAD(&cmd->queue_pointer); 289 INIT_LIST_HEAD(&cmd->queue_pointer);
287 290
288 cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL); 291 cmd->frame = pci_alloc_consistent(mhba->pdev,
292 mhba->ib_max_size, &cmd->frame_phys);
289 if (!cmd->frame) { 293 if (!cmd->frame) {
290 dev_err(&mhba->pdev->dev, "failed to allocate memory for FW" 294 dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
291 " frame,size = %d.\n", mhba->ib_max_size); 295 " frame,size = %d.\n", mhba->ib_max_size);
@@ -297,7 +301,8 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
297 if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) { 301 if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
298 dev_err(&mhba->pdev->dev, "failed to allocate memory" 302 dev_err(&mhba->pdev->dev, "failed to allocate memory"
299 " for internal frame\n"); 303 " for internal frame\n");
300 kfree(cmd->frame); 304 pci_free_consistent(mhba->pdev, mhba->ib_max_size,
305 cmd->frame, cmd->frame_phys);
301 kfree(cmd); 306 kfree(cmd);
302 return NULL; 307 return NULL;
303 } 308 }
@@ -317,7 +322,7 @@ static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
317 if (cmd && cmd->frame) { 322 if (cmd && cmd->frame) {
318 if (cmd->frame->sg_counts) { 323 if (cmd->frame->sg_counts) {
319 m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0]; 324 m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
320 size = m_sg->size; 325 sgd_getsz(mhba, m_sg, size);
321 326
322 phy_addr = (dma_addr_t) m_sg->baseaddr_l | 327 phy_addr = (dma_addr_t) m_sg->baseaddr_l |
323 (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16); 328 (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
@@ -325,7 +330,8 @@ static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
325 pci_free_consistent(mhba->pdev, size, cmd->data_buf, 330 pci_free_consistent(mhba->pdev, size, cmd->data_buf,
326 phy_addr); 331 phy_addr);
327 } 332 }
328 kfree(cmd->frame); 333 pci_free_consistent(mhba->pdev, mhba->ib_max_size,
334 cmd->frame, cmd->frame_phys);
329 kfree(cmd); 335 kfree(cmd);
330 } 336 }
331} 337}
@@ -374,7 +380,8 @@ static void mvumi_free_cmds(struct mvumi_hba *mhba)
374 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd, 380 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
375 queue_pointer); 381 queue_pointer);
376 list_del(&cmd->queue_pointer); 382 list_del(&cmd->queue_pointer);
377 kfree(cmd->frame); 383 if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
384 kfree(cmd->frame);
378 kfree(cmd); 385 kfree(cmd);
379 } 386 }
380} 387}
@@ -396,7 +403,12 @@ static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
396 403
397 INIT_LIST_HEAD(&cmd->queue_pointer); 404 INIT_LIST_HEAD(&cmd->queue_pointer);
398 list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool); 405 list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
399 cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL); 406 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
407 cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
408 cmd->frame_phys = mhba->ib_frame_phys
409 + i * mhba->ib_max_size;
410 } else
411 cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
400 if (!cmd->frame) 412 if (!cmd->frame)
401 goto err_exit; 413 goto err_exit;
402 } 414 }
@@ -409,48 +421,71 @@ err_exit:
409 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd, 421 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
410 queue_pointer); 422 queue_pointer);
411 list_del(&cmd->queue_pointer); 423 list_del(&cmd->queue_pointer);
412 kfree(cmd->frame); 424 if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
425 kfree(cmd->frame);
413 kfree(cmd); 426 kfree(cmd);
414 } 427 }
415 return -ENOMEM; 428 return -ENOMEM;
416} 429}
417 430
418static int mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry) 431static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
419{ 432{
420 unsigned int ib_rp_reg, cur_ib_entry; 433 unsigned int ib_rp_reg;
434 struct mvumi_hw_regs *regs = mhba->regs;
435
436 ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);
421 437
438 if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
439 (mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
440 ((ib_rp_reg & regs->cl_pointer_toggle)
441 != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
442 dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
443 return 0;
444 }
422 if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) { 445 if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
423 dev_warn(&mhba->pdev->dev, "firmware io overflow.\n"); 446 dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
424 return -1; 447 return 0;
448 } else {
449 return mhba->max_io - atomic_read(&mhba->fw_outstanding);
425 } 450 }
426 ib_rp_reg = ioread32(mhba->mmio + CLA_INB_READ_POINTER); 451}
427 452
428 if (unlikely(((ib_rp_reg & CL_SLOT_NUM_MASK) == 453static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
429 (mhba->ib_cur_slot & CL_SLOT_NUM_MASK)) && 454{
430 ((ib_rp_reg & CL_POINTER_TOGGLE) != 455 unsigned int count;
431 (mhba->ib_cur_slot & CL_POINTER_TOGGLE)))) { 456 if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
432 dev_warn(&mhba->pdev->dev, "no free slot to use.\n"); 457 return 0;
433 return -1; 458 count = ioread32(mhba->ib_shadow);
434 } 459 if (count == 0xffff)
460 return 0;
461 return count;
462}
463
464static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
465{
466 unsigned int cur_ib_entry;
435 467
436 cur_ib_entry = mhba->ib_cur_slot & CL_SLOT_NUM_MASK; 468 cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
437 cur_ib_entry++; 469 cur_ib_entry++;
438 if (cur_ib_entry >= mhba->list_num_io) { 470 if (cur_ib_entry >= mhba->list_num_io) {
439 cur_ib_entry -= mhba->list_num_io; 471 cur_ib_entry -= mhba->list_num_io;
440 mhba->ib_cur_slot ^= CL_POINTER_TOGGLE; 472 mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
473 }
474 mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
475 mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
476 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
477 *ib_entry = mhba->ib_list + cur_ib_entry *
478 sizeof(struct mvumi_dyn_list_entry);
479 } else {
480 *ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
441 } 481 }
442 mhba->ib_cur_slot &= ~CL_SLOT_NUM_MASK;
443 mhba->ib_cur_slot |= (cur_ib_entry & CL_SLOT_NUM_MASK);
444 *ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
445 atomic_inc(&mhba->fw_outstanding); 482 atomic_inc(&mhba->fw_outstanding);
446
447 return 0;
448} 483}
449 484
450static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba) 485static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
451{ 486{
452 iowrite32(0xfff, mhba->ib_shadow); 487 iowrite32(0xffff, mhba->ib_shadow);
453 iowrite32(mhba->ib_cur_slot, mhba->mmio + CLA_INB_WRITE_POINTER); 488 iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
454} 489}
455 490
456static char mvumi_check_ob_frame(struct mvumi_hba *mhba, 491static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
@@ -480,31 +515,59 @@ static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
480 return 0; 515 return 0;
481} 516}
482 517
483static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba) 518static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
519 unsigned int *cur_obf, unsigned int *assign_obf_end)
484{ 520{
485 unsigned int ob_write_reg, ob_write_shadow_reg; 521 unsigned int ob_write, ob_write_shadow;
486 unsigned int cur_obf, assign_obf_end, i; 522 struct mvumi_hw_regs *regs = mhba->regs;
487 struct mvumi_ob_data *ob_data;
488 struct mvumi_rsp_frame *p_outb_frame;
489 523
490 do { 524 do {
491 ob_write_reg = ioread32(mhba->mmio + CLA_OUTB_COPY_POINTER); 525 ob_write = ioread32(regs->outb_copy_pointer);
492 ob_write_shadow_reg = ioread32(mhba->ob_shadow); 526 ob_write_shadow = ioread32(mhba->ob_shadow);
493 } while ((ob_write_reg & CL_SLOT_NUM_MASK) != ob_write_shadow_reg); 527 } while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);
494 528
495 cur_obf = mhba->ob_cur_slot & CL_SLOT_NUM_MASK; 529 *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
496 assign_obf_end = ob_write_reg & CL_SLOT_NUM_MASK; 530 *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
497 531
498 if ((ob_write_reg & CL_POINTER_TOGGLE) != 532 if ((ob_write & regs->cl_pointer_toggle) !=
499 (mhba->ob_cur_slot & CL_POINTER_TOGGLE)) { 533 (mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
500 assign_obf_end += mhba->list_num_io; 534 *assign_obf_end += mhba->list_num_io;
501 } 535 }
536 return 0;
537}
538
539static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
540 unsigned int *cur_obf, unsigned int *assign_obf_end)
541{
542 unsigned int ob_write;
543 struct mvumi_hw_regs *regs = mhba->regs;
544
545 ob_write = ioread32(regs->outb_read_pointer);
546 ob_write = ioread32(regs->outb_copy_pointer);
547 *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
548 *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
549 if (*assign_obf_end < *cur_obf)
550 *assign_obf_end += mhba->list_num_io;
551 else if (*assign_obf_end == *cur_obf)
552 return -1;
553 return 0;
554}
555
556static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
557{
558 unsigned int cur_obf, assign_obf_end, i;
559 struct mvumi_ob_data *ob_data;
560 struct mvumi_rsp_frame *p_outb_frame;
561 struct mvumi_hw_regs *regs = mhba->regs;
562
563 if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
564 return;
502 565
503 for (i = (assign_obf_end - cur_obf); i != 0; i--) { 566 for (i = (assign_obf_end - cur_obf); i != 0; i--) {
504 cur_obf++; 567 cur_obf++;
505 if (cur_obf >= mhba->list_num_io) { 568 if (cur_obf >= mhba->list_num_io) {
506 cur_obf -= mhba->list_num_io; 569 cur_obf -= mhba->list_num_io;
507 mhba->ob_cur_slot ^= CL_POINTER_TOGGLE; 570 mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
508 } 571 }
509 572
510 p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size; 573 p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
@@ -528,7 +591,7 @@ static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
528 ob_data = NULL; 591 ob_data = NULL;
529 if (cur_obf == 0) { 592 if (cur_obf == 0) {
530 cur_obf = mhba->list_num_io - 1; 593 cur_obf = mhba->list_num_io - 1;
531 mhba->ob_cur_slot ^= CL_POINTER_TOGGLE; 594 mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
532 } else 595 } else
533 cur_obf -= 1; 596 cur_obf -= 1;
534 break; 597 break;
@@ -539,18 +602,20 @@ static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
539 602
540 list_add_tail(&ob_data->list, &mhba->free_ob_list); 603 list_add_tail(&ob_data->list, &mhba->free_ob_list);
541 } 604 }
542 mhba->ob_cur_slot &= ~CL_SLOT_NUM_MASK; 605 mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
543 mhba->ob_cur_slot |= (cur_obf & CL_SLOT_NUM_MASK); 606 mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
544 iowrite32(mhba->ob_cur_slot, mhba->mmio + CLA_OUTB_READ_POINTER); 607 iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
545} 608}
546 609
547static void mvumi_reset(void *regs) 610static void mvumi_reset(struct mvumi_hba *mhba)
548{ 611{
549 iowrite32(0, regs + CPU_ENPOINTA_MASK_REG); 612 struct mvumi_hw_regs *regs = mhba->regs;
550 if (ioread32(regs + CPU_ARM_TO_PCIEA_MSG1) != HANDSHAKE_DONESTATE) 613
614 iowrite32(0, regs->enpointa_mask_reg);
615 if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
551 return; 616 return;
552 617
553 iowrite32(DRBL_SOFT_RESET, regs + CPU_PCIEA_TO_ARM_DRBL_REG); 618 iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
554} 619}
555 620
556static unsigned char mvumi_start(struct mvumi_hba *mhba); 621static unsigned char mvumi_start(struct mvumi_hba *mhba);
@@ -558,7 +623,7 @@ static unsigned char mvumi_start(struct mvumi_hba *mhba);
558static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba) 623static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
559{ 624{
560 mhba->fw_state = FW_STATE_ABORT; 625 mhba->fw_state = FW_STATE_ABORT;
561 mvumi_reset(mhba->mmio); 626 mvumi_reset(mhba);
562 627
563 if (mvumi_start(mhba)) 628 if (mvumi_start(mhba))
564 return FAILED; 629 return FAILED;
@@ -566,6 +631,98 @@ static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
566 return SUCCESS; 631 return SUCCESS;
567} 632}
568 633
634static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
635{
636 struct mvumi_hw_regs *regs = mhba->regs;
637 u32 tmp;
638 unsigned long before;
639 before = jiffies;
640
641 iowrite32(0, regs->enpointa_mask_reg);
642 tmp = ioread32(regs->arm_to_pciea_msg1);
643 while (tmp != HANDSHAKE_READYSTATE) {
644 iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
645 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
646 dev_err(&mhba->pdev->dev,
647 "FW reset failed [0x%x].\n", tmp);
648 return FAILED;
649 }
650
651 msleep(500);
652 rmb();
653 tmp = ioread32(regs->arm_to_pciea_msg1);
654 }
655
656 return SUCCESS;
657}
658
659static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
660{
661 unsigned char i;
662
663 for (i = 0; i < MAX_BASE_ADDRESS; i++) {
664 pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
665 &mhba->pci_base[i]);
666 }
667}
668
669static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
670{
671 unsigned char i;
672
673 for (i = 0; i < MAX_BASE_ADDRESS; i++) {
674 if (mhba->pci_base[i])
675 pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
676 mhba->pci_base[i]);
677 }
678}
679
680static unsigned int mvumi_pci_set_master(struct pci_dev *pdev)
681{
682 unsigned int ret = 0;
683 pci_set_master(pdev);
684
685 if (IS_DMA64) {
686 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
687 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
688 } else
689 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
690
691 return ret;
692}
693
694static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
695{
696 mhba->fw_state = FW_STATE_ABORT;
697
698 iowrite32(0, mhba->regs->reset_enable);
699 iowrite32(0xf, mhba->regs->reset_request);
700
701 iowrite32(0x10, mhba->regs->reset_enable);
702 iowrite32(0x10, mhba->regs->reset_request);
703 msleep(100);
704 pci_disable_device(mhba->pdev);
705
706 if (pci_enable_device(mhba->pdev)) {
707 dev_err(&mhba->pdev->dev, "enable device failed\n");
708 return FAILED;
709 }
710 if (mvumi_pci_set_master(mhba->pdev)) {
711 dev_err(&mhba->pdev->dev, "set master failed\n");
712 return FAILED;
713 }
714 mvumi_restore_bar_addr(mhba);
715 if (mvumi_wait_for_fw(mhba) == FAILED)
716 return FAILED;
717
718 return mvumi_wait_for_outstanding(mhba);
719}
720
721static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
722{
723 return mvumi_wait_for_outstanding(mhba);
724}
725
569static int mvumi_host_reset(struct scsi_cmnd *scmd) 726static int mvumi_host_reset(struct scsi_cmnd *scmd)
570{ 727{
571 struct mvumi_hba *mhba; 728 struct mvumi_hba *mhba;
@@ -575,7 +732,7 @@ static int mvumi_host_reset(struct scsi_cmnd *scmd)
575 scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n", 732 scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
576 scmd->serial_number, scmd->cmnd[0], scmd->retries); 733 scmd->serial_number, scmd->cmnd[0], scmd->retries);
577 734
578 return mvumi_wait_for_outstanding(mhba); 735 return mhba->instancet->reset_host(mhba);
579} 736}
580 737
581static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba, 738static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
@@ -628,7 +785,9 @@ static void mvumi_release_fw(struct mvumi_hba *mhba)
628 mvumi_free_cmds(mhba); 785 mvumi_free_cmds(mhba);
629 mvumi_release_mem_resource(mhba); 786 mvumi_release_mem_resource(mhba);
630 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); 787 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
631 kfree(mhba->handshake_page); 788 pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
789 mhba->handshake_page, mhba->handshake_page_phys);
790 kfree(mhba->regs);
632 pci_release_regions(mhba->pdev); 791 pci_release_regions(mhba->pdev);
633} 792}
634 793
@@ -665,6 +824,7 @@ get_cmd: cmd = mvumi_create_internal_cmd(mhba, 0);
665 frame->cdb_length = MAX_COMMAND_SIZE; 824 frame->cdb_length = MAX_COMMAND_SIZE;
666 memset(frame->cdb, 0, MAX_COMMAND_SIZE); 825 memset(frame->cdb, 0, MAX_COMMAND_SIZE);
667 frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC; 826 frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
827 frame->cdb[1] = CDB_CORE_MODULE;
668 frame->cdb[2] = CDB_CORE_SHUTDOWN; 828 frame->cdb[2] = CDB_CORE_SHUTDOWN;
669 829
670 mvumi_issue_blocked_cmd(mhba, cmd); 830 mvumi_issue_blocked_cmd(mhba, cmd);
@@ -695,7 +855,7 @@ mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
695 return ret; 855 return ret;
696} 856}
697 857
698void mvumi_hs_build_page(struct mvumi_hba *mhba, 858static void mvumi_hs_build_page(struct mvumi_hba *mhba,
699 struct mvumi_hs_header *hs_header) 859 struct mvumi_hs_header *hs_header)
700{ 860{
701 struct mvumi_hs_page2 *hs_page2; 861 struct mvumi_hs_page2 *hs_page2;
@@ -710,6 +870,8 @@ void mvumi_hs_build_page(struct mvumi_hba *mhba,
710 hs_header->frame_length = sizeof(*hs_page2) - 4; 870 hs_header->frame_length = sizeof(*hs_page2) - 4;
711 memset(hs_header->frame_content, 0, hs_header->frame_length); 871 memset(hs_header->frame_content, 0, hs_header->frame_length);
712 hs_page2->host_type = 3; /* 3 mean linux*/ 872 hs_page2->host_type = 3; /* 3 mean linux*/
873 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
874 hs_page2->host_cap = 0x08;/* host dynamic source mode */
713 hs_page2->host_ver.ver_major = VER_MAJOR; 875 hs_page2->host_ver.ver_major = VER_MAJOR;
714 hs_page2->host_ver.ver_minor = VER_MINOR; 876 hs_page2->host_ver.ver_minor = VER_MINOR;
715 hs_page2->host_ver.ver_oem = VER_OEM; 877 hs_page2->host_ver.ver_oem = VER_OEM;
@@ -745,8 +907,18 @@ void mvumi_hs_build_page(struct mvumi_hba *mhba,
745 hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys); 907 hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
746 hs_page4->ib_entry_size = mhba->ib_max_size_setting; 908 hs_page4->ib_entry_size = mhba->ib_max_size_setting;
747 hs_page4->ob_entry_size = mhba->ob_max_size_setting; 909 hs_page4->ob_entry_size = mhba->ob_max_size_setting;
748 hs_page4->ob_depth = mhba->list_num_io; 910 if (mhba->hba_capability
749 hs_page4->ib_depth = mhba->list_num_io; 911 & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
912 hs_page4->ob_depth = find_first_bit((unsigned long *)
913 &mhba->list_num_io,
914 BITS_PER_LONG);
915 hs_page4->ib_depth = find_first_bit((unsigned long *)
916 &mhba->list_num_io,
917 BITS_PER_LONG);
918 } else {
919 hs_page4->ob_depth = (u8) mhba->list_num_io;
920 hs_page4->ib_depth = (u8) mhba->list_num_io;
921 }
750 hs_header->checksum = mvumi_calculate_checksum(hs_header, 922 hs_header->checksum = mvumi_calculate_checksum(hs_header,
751 hs_header->frame_length); 923 hs_header->frame_length);
752 break; 924 break;
@@ -774,8 +946,11 @@ static int mvumi_init_data(struct mvumi_hba *mhba)
774 return 0; 946 return 0;
775 947
776 tmp_size = mhba->ib_max_size * mhba->max_io; 948 tmp_size = mhba->ib_max_size * mhba->max_io;
949 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
950 tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
951
777 tmp_size += 128 + mhba->ob_max_size * mhba->max_io; 952 tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
778 tmp_size += 8 + sizeof(u32) + 16; 953 tmp_size += 8 + sizeof(u32)*2 + 16;
779 954
780 res_mgnt = mvumi_alloc_mem_resource(mhba, 955 res_mgnt = mvumi_alloc_mem_resource(mhba,
781 RESOURCE_UNCACHED_MEMORY, tmp_size); 956 RESOURCE_UNCACHED_MEMORY, tmp_size);
@@ -793,24 +968,41 @@ static int mvumi_init_data(struct mvumi_hba *mhba)
793 v += offset; 968 v += offset;
794 mhba->ib_list = v; 969 mhba->ib_list = v;
795 mhba->ib_list_phys = p; 970 mhba->ib_list_phys = p;
971 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
972 v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
973 p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
974 mhba->ib_frame = v;
975 mhba->ib_frame_phys = p;
976 }
796 v += mhba->ib_max_size * mhba->max_io; 977 v += mhba->ib_max_size * mhba->max_io;
797 p += mhba->ib_max_size * mhba->max_io; 978 p += mhba->ib_max_size * mhba->max_io;
979
798 /* ib shadow */ 980 /* ib shadow */
799 offset = round_up(p, 8) - p; 981 offset = round_up(p, 8) - p;
800 p += offset; 982 p += offset;
801 v += offset; 983 v += offset;
802 mhba->ib_shadow = v; 984 mhba->ib_shadow = v;
803 mhba->ib_shadow_phys = p; 985 mhba->ib_shadow_phys = p;
804 p += sizeof(u32); 986 p += sizeof(u32)*2;
805 v += sizeof(u32); 987 v += sizeof(u32)*2;
806 /* ob shadow */ 988 /* ob shadow */
807 offset = round_up(p, 8) - p; 989 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
808 p += offset; 990 offset = round_up(p, 8) - p;
809 v += offset; 991 p += offset;
810 mhba->ob_shadow = v; 992 v += offset;
811 mhba->ob_shadow_phys = p; 993 mhba->ob_shadow = v;
812 p += 8; 994 mhba->ob_shadow_phys = p;
813 v += 8; 995 p += 8;
996 v += 8;
997 } else {
998 offset = round_up(p, 4) - p;
999 p += offset;
1000 v += offset;
1001 mhba->ob_shadow = v;
1002 mhba->ob_shadow_phys = p;
1003 p += 4;
1004 v += 4;
1005 }
814 1006
815 /* ob list */ 1007 /* ob list */
816 offset = round_up(p, 128) - p; 1008 offset = round_up(p, 128) - p;
@@ -902,6 +1094,12 @@ static int mvumi_hs_process_page(struct mvumi_hba *mhba,
902 dev_dbg(&mhba->pdev->dev, "FW version:%d\n", 1094 dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
903 hs_page1->fw_ver.ver_build); 1095 hs_page1->fw_ver.ver_build);
904 1096
1097 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
1098 mhba->eot_flag = 22;
1099 else
1100 mhba->eot_flag = 27;
1101 if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
1102 mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
905 break; 1103 break;
906 default: 1104 default:
907 dev_err(&mhba->pdev->dev, "handshake: page code error\n"); 1105 dev_err(&mhba->pdev->dev, "handshake: page code error\n");
@@ -923,12 +1121,12 @@ static int mvumi_handshake(struct mvumi_hba *mhba)
923{ 1121{
924 unsigned int hs_state, tmp, hs_fun; 1122 unsigned int hs_state, tmp, hs_fun;
925 struct mvumi_hs_header *hs_header; 1123 struct mvumi_hs_header *hs_header;
926 void *regs = mhba->mmio; 1124 struct mvumi_hw_regs *regs = mhba->regs;
927 1125
928 if (mhba->fw_state == FW_STATE_STARTING) 1126 if (mhba->fw_state == FW_STATE_STARTING)
929 hs_state = HS_S_START; 1127 hs_state = HS_S_START;
930 else { 1128 else {
931 tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG0); 1129 tmp = ioread32(regs->arm_to_pciea_msg0);
932 hs_state = HS_GET_STATE(tmp); 1130 hs_state = HS_GET_STATE(tmp);
933 dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state); 1131 dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
934 if (HS_GET_STATUS(tmp) != HS_STATUS_OK) { 1132 if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
@@ -943,21 +1141,20 @@ static int mvumi_handshake(struct mvumi_hba *mhba)
943 mhba->fw_state = FW_STATE_HANDSHAKING; 1141 mhba->fw_state = FW_STATE_HANDSHAKING;
944 HS_SET_STATUS(hs_fun, HS_STATUS_OK); 1142 HS_SET_STATUS(hs_fun, HS_STATUS_OK);
945 HS_SET_STATE(hs_fun, HS_S_RESET); 1143 HS_SET_STATE(hs_fun, HS_S_RESET);
946 iowrite32(HANDSHAKE_SIGNATURE, regs + CPU_PCIEA_TO_ARM_MSG1); 1144 iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
947 iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0); 1145 iowrite32(hs_fun, regs->pciea_to_arm_msg0);
948 iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG); 1146 iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
949 break; 1147 break;
950 1148
951 case HS_S_RESET: 1149 case HS_S_RESET:
952 iowrite32(lower_32_bits(mhba->handshake_page_phys), 1150 iowrite32(lower_32_bits(mhba->handshake_page_phys),
953 regs + CPU_PCIEA_TO_ARM_MSG1); 1151 regs->pciea_to_arm_msg1);
954 iowrite32(upper_32_bits(mhba->handshake_page_phys), 1152 iowrite32(upper_32_bits(mhba->handshake_page_phys),
955 regs + CPU_ARM_TO_PCIEA_MSG1); 1153 regs->arm_to_pciea_msg1);
956 HS_SET_STATUS(hs_fun, HS_STATUS_OK); 1154 HS_SET_STATUS(hs_fun, HS_STATUS_OK);
957 HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR); 1155 HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
958 iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0); 1156 iowrite32(hs_fun, regs->pciea_to_arm_msg0);
959 iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG); 1157 iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
960
961 break; 1158 break;
962 1159
963 case HS_S_PAGE_ADDR: 1160 case HS_S_PAGE_ADDR:
@@ -997,30 +1194,37 @@ static int mvumi_handshake(struct mvumi_hba *mhba)
997 HS_SET_STATE(hs_fun, HS_S_END); 1194 HS_SET_STATE(hs_fun, HS_S_END);
998 1195
999 HS_SET_STATUS(hs_fun, HS_STATUS_OK); 1196 HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1000 iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0); 1197 iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1001 iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG); 1198 iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1002 break; 1199 break;
1003 1200
1004 case HS_S_END: 1201 case HS_S_END:
1005 /* Set communication list ISR */ 1202 /* Set communication list ISR */
1006 tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG); 1203 tmp = ioread32(regs->enpointa_mask_reg);
1007 tmp |= INT_MAP_COMAOUT | INT_MAP_COMAERR; 1204 tmp |= regs->int_comaout | regs->int_comaerr;
1008 iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG); 1205 iowrite32(tmp, regs->enpointa_mask_reg);
1009 iowrite32(mhba->list_num_io, mhba->ib_shadow); 1206 iowrite32(mhba->list_num_io, mhba->ib_shadow);
1010 /* Set InBound List Available count shadow */ 1207 /* Set InBound List Available count shadow */
1011 iowrite32(lower_32_bits(mhba->ib_shadow_phys), 1208 iowrite32(lower_32_bits(mhba->ib_shadow_phys),
1012 regs + CLA_INB_AVAL_COUNT_BASEL); 1209 regs->inb_aval_count_basel);
1013 iowrite32(upper_32_bits(mhba->ib_shadow_phys), 1210 iowrite32(upper_32_bits(mhba->ib_shadow_phys),
1014 regs + CLA_INB_AVAL_COUNT_BASEH); 1211 regs->inb_aval_count_baseh);
1015 1212
1016 /* Set OutBound List Available count shadow */ 1213 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
1017 iowrite32((mhba->list_num_io-1) | CL_POINTER_TOGGLE, 1214 /* Set OutBound List Available count shadow */
1018 mhba->ob_shadow); 1215 iowrite32((mhba->list_num_io-1) |
1019 iowrite32(lower_32_bits(mhba->ob_shadow_phys), regs + 0x5B0); 1216 regs->cl_pointer_toggle,
1020 iowrite32(upper_32_bits(mhba->ob_shadow_phys), regs + 0x5B4); 1217 mhba->ob_shadow);
1218 iowrite32(lower_32_bits(mhba->ob_shadow_phys),
1219 regs->outb_copy_basel);
1220 iowrite32(upper_32_bits(mhba->ob_shadow_phys),
1221 regs->outb_copy_baseh);
1222 }
1021 1223
1022 mhba->ib_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE; 1224 mhba->ib_cur_slot = (mhba->list_num_io - 1) |
1023 mhba->ob_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE; 1225 regs->cl_pointer_toggle;
1226 mhba->ob_cur_slot = (mhba->list_num_io - 1) |
1227 regs->cl_pointer_toggle;
1024 mhba->fw_state = FW_STATE_STARTED; 1228 mhba->fw_state = FW_STATE_STARTED;
1025 1229
1026 break; 1230 break;
@@ -1040,7 +1244,7 @@ static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
1040 before = jiffies; 1244 before = jiffies;
1041 mvumi_handshake(mhba); 1245 mvumi_handshake(mhba);
1042 do { 1246 do {
1043 isr_status = mhba->instancet->read_fw_status_reg(mhba->mmio); 1247 isr_status = mhba->instancet->read_fw_status_reg(mhba);
1044 1248
1045 if (mhba->fw_state == FW_STATE_STARTED) 1249 if (mhba->fw_state == FW_STATE_STARTED)
1046 return 0; 1250 return 0;
@@ -1062,16 +1266,15 @@ static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
1062 1266
1063static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba) 1267static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
1064{ 1268{
1065 void *regs = mhba->mmio;
1066 unsigned int tmp; 1269 unsigned int tmp;
1067 unsigned long before; 1270 unsigned long before;
1068 1271
1069 before = jiffies; 1272 before = jiffies;
1070 tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1); 1273 tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1071 while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) { 1274 while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
1072 if (tmp != HANDSHAKE_READYSTATE) 1275 if (tmp != HANDSHAKE_READYSTATE)
1073 iowrite32(DRBL_MU_RESET, 1276 iowrite32(DRBL_MU_RESET,
1074 regs + CPU_PCIEA_TO_ARM_DRBL_REG); 1277 mhba->regs->pciea_to_arm_drbl_reg);
1075 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) { 1278 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1076 dev_err(&mhba->pdev->dev, 1279 dev_err(&mhba->pdev->dev,
1077 "invalid signature [0x%x].\n", tmp); 1280 "invalid signature [0x%x].\n", tmp);
@@ -1079,7 +1282,7 @@ static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
1079 } 1282 }
1080 usleep_range(1000, 2000); 1283 usleep_range(1000, 2000);
1081 rmb(); 1284 rmb();
1082 tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1); 1285 tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1083 } 1286 }
1084 1287
1085 mhba->fw_state = FW_STATE_STARTING; 1288 mhba->fw_state = FW_STATE_STARTING;
@@ -1100,15 +1303,17 @@ static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
1100 1303
1101static unsigned char mvumi_start(struct mvumi_hba *mhba) 1304static unsigned char mvumi_start(struct mvumi_hba *mhba)
1102{ 1305{
1103 void *regs = mhba->mmio;
1104 unsigned int tmp; 1306 unsigned int tmp;
1307 struct mvumi_hw_regs *regs = mhba->regs;
1308
1105 /* clear Door bell */ 1309 /* clear Door bell */
1106 tmp = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG); 1310 tmp = ioread32(regs->arm_to_pciea_drbl_reg);
1107 iowrite32(tmp, regs + CPU_ARM_TO_PCIEA_DRBL_REG); 1311 iowrite32(tmp, regs->arm_to_pciea_drbl_reg);
1108 1312
1109 iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG); 1313 iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1110 tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG) | INT_MAP_DL_CPU2PCIEA; 1314 tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
1111 iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG); 1315 iowrite32(tmp, regs->enpointa_mask_reg);
1316 msleep(100);
1112 if (mvumi_check_handshake(mhba)) 1317 if (mvumi_check_handshake(mhba))
1113 return -1; 1318 return -1;
1114 1319
@@ -1166,6 +1371,7 @@ static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
1166 cmd->scmd->scsi_done(scmd); 1371 cmd->scmd->scsi_done(scmd);
1167 mvumi_return_cmd(mhba, cmd); 1372 mvumi_return_cmd(mhba, cmd);
1168} 1373}
1374
1169static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba, 1375static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
1170 struct mvumi_cmd *cmd, 1376 struct mvumi_cmd *cmd,
1171 struct mvumi_rsp_frame *ob_frame) 1377 struct mvumi_rsp_frame *ob_frame)
@@ -1210,6 +1416,304 @@ static void mvumi_show_event(struct mvumi_hba *mhba,
1210 } 1416 }
1211} 1417}
1212 1418
1419static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
1420{
1421 struct scsi_device *sdev;
1422 int ret = -1;
1423
1424 if (status == DEVICE_OFFLINE) {
1425 sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1426 if (sdev) {
1427 dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
1428 sdev->id, 0);
1429 scsi_remove_device(sdev);
1430 scsi_device_put(sdev);
1431 ret = 0;
1432 } else
1433 dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n",
1434 devid);
1435 } else if (status == DEVICE_ONLINE) {
1436 sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1437 if (!sdev) {
1438 scsi_add_device(mhba->shost, 0, devid, 0);
1439 dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0,
1440 devid, 0);
1441 ret = 0;
1442 } else {
1443 dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n",
1444 0, devid, 0);
1445 scsi_device_put(sdev);
1446 }
1447 }
1448 return ret;
1449}
1450
1451static u64 mvumi_inquiry(struct mvumi_hba *mhba,
1452 unsigned int id, struct mvumi_cmd *cmd)
1453{
1454 struct mvumi_msg_frame *frame;
1455 u64 wwid = 0;
1456 int cmd_alloc = 0;
1457 int data_buf_len = 64;
1458
1459 if (!cmd) {
1460 cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
1461 if (cmd)
1462 cmd_alloc = 1;
1463 else
1464 return 0;
1465 } else {
1466 memset(cmd->data_buf, 0, data_buf_len);
1467 }
1468 cmd->scmd = NULL;
1469 cmd->cmd_status = REQ_STATUS_PENDING;
1470 atomic_set(&cmd->sync_cmd, 0);
1471 frame = cmd->frame;
1472 frame->device_id = (u16) id;
1473 frame->cmd_flag = CMD_FLAG_DATA_IN;
1474 frame->req_function = CL_FUN_SCSI_CMD;
1475 frame->cdb_length = 6;
1476 frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
1477 memset(frame->cdb, 0, frame->cdb_length);
1478 frame->cdb[0] = INQUIRY;
1479 frame->cdb[4] = frame->data_transfer_length;
1480
1481 mvumi_issue_blocked_cmd(mhba, cmd);
1482
1483 if (cmd->cmd_status == SAM_STAT_GOOD) {
1484 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1485 wwid = id + 1;
1486 else
1487 memcpy((void *)&wwid,
1488 (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
1489 MVUMI_INQUIRY_UUID_LEN);
1490 dev_dbg(&mhba->pdev->dev,
1491 "inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);
1492 } else {
1493 wwid = 0;
1494 }
1495 if (cmd_alloc)
1496 mvumi_delete_internal_cmd(mhba, cmd);
1497
1498 return wwid;
1499}
1500
1501static void mvumi_detach_devices(struct mvumi_hba *mhba)
1502{
1503 struct mvumi_device *mv_dev = NULL , *dev_next;
1504 struct scsi_device *sdev = NULL;
1505
1506 mutex_lock(&mhba->device_lock);
1507
1508 /* detach Hard Disk */
1509 list_for_each_entry_safe(mv_dev, dev_next,
1510 &mhba->shost_dev_list, list) {
1511 mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1512 list_del_init(&mv_dev->list);
1513 dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1514 mv_dev->id, mv_dev->wwid);
1515 kfree(mv_dev);
1516 }
1517 list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
1518 list_del_init(&mv_dev->list);
1519 dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1520 mv_dev->id, mv_dev->wwid);
1521 kfree(mv_dev);
1522 }
1523
1524 /* detach virtual device */
1525 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
1526 sdev = scsi_device_lookup(mhba->shost, 0,
1527 mhba->max_target_id - 1, 0);
1528
1529 if (sdev) {
1530 scsi_remove_device(sdev);
1531 scsi_device_put(sdev);
1532 }
1533
1534 mutex_unlock(&mhba->device_lock);
1535}
1536
1537static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
1538{
1539 struct scsi_device *sdev;
1540
1541 sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
1542 if (sdev) {
1543 scsi_rescan_device(&sdev->sdev_gendev);
1544 scsi_device_put(sdev);
1545 }
1546}
1547
1548static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
1549{
1550 struct mvumi_device *mv_dev = NULL;
1551
1552 list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
1553 if (mv_dev->wwid == wwid) {
1554 if (mv_dev->id != id) {
1555 dev_err(&mhba->pdev->dev,
1556 "%s has same wwid[%llx] ,"
1557 " but different id[%d %d]\n",
1558 __func__, mv_dev->wwid, mv_dev->id, id);
1559 return -1;
1560 } else {
1561 if (mhba->pdev->device ==
1562 PCI_DEVICE_ID_MARVELL_MV9143)
1563 mvumi_rescan_devices(mhba, id);
1564 return 1;
1565 }
1566 }
1567 }
1568 return 0;
1569}
1570
1571static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
1572{
1573 struct mvumi_device *mv_dev = NULL, *dev_next;
1574
1575 list_for_each_entry_safe(mv_dev, dev_next,
1576 &mhba->shost_dev_list, list) {
1577 if (mv_dev->id == id) {
1578 dev_dbg(&mhba->pdev->dev,
1579 "detach device(0:%d:0) wwid(%llx) from HOST\n",
1580 mv_dev->id, mv_dev->wwid);
1581 mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1582 list_del_init(&mv_dev->list);
1583 kfree(mv_dev);
1584 }
1585 }
1586}
1587
1588static int mvumi_probe_devices(struct mvumi_hba *mhba)
1589{
1590 int id, maxid;
1591 u64 wwid = 0;
1592 struct mvumi_device *mv_dev = NULL;
1593 struct mvumi_cmd *cmd = NULL;
1594 int found = 0;
1595
1596 cmd = mvumi_create_internal_cmd(mhba, 64);
1597 if (!cmd)
1598 return -1;
1599
1600 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1601 maxid = mhba->max_target_id;
1602 else
1603 maxid = mhba->max_target_id - 1;
1604
1605 for (id = 0; id < maxid; id++) {
1606 wwid = mvumi_inquiry(mhba, id, cmd);
1607 if (!wwid) {
1608 /* device no response, remove it */
1609 mvumi_remove_devices(mhba, id);
1610 } else {
1611 /* device response, add it */
1612 found = mvumi_match_devices(mhba, id, wwid);
1613 if (!found) {
1614 mvumi_remove_devices(mhba, id);
1615 mv_dev = kzalloc(sizeof(struct mvumi_device),
1616 GFP_KERNEL);
1617 if (!mv_dev) {
1618 dev_err(&mhba->pdev->dev,
1619 "%s alloc mv_dev failed\n",
1620 __func__);
1621 continue;
1622 }
1623 mv_dev->id = id;
1624 mv_dev->wwid = wwid;
1625 mv_dev->sdev = NULL;
1626 INIT_LIST_HEAD(&mv_dev->list);
1627 list_add_tail(&mv_dev->list,
1628 &mhba->mhba_dev_list);
1629 dev_dbg(&mhba->pdev->dev,
1630 "probe a new device(0:%d:0)"
1631 " wwid(%llx)\n", id, mv_dev->wwid);
1632 } else if (found == -1)
1633 return -1;
1634 else
1635 continue;
1636 }
1637 }
1638
1639 if (cmd)
1640 mvumi_delete_internal_cmd(mhba, cmd);
1641
1642 return 0;
1643}
1644
1645static int mvumi_rescan_bus(void *data)
1646{
1647 int ret = 0;
1648 struct mvumi_hba *mhba = (struct mvumi_hba *) data;
1649 struct mvumi_device *mv_dev = NULL , *dev_next;
1650
1651 while (!kthread_should_stop()) {
1652
1653 set_current_state(TASK_INTERRUPTIBLE);
1654 if (!atomic_read(&mhba->pnp_count))
1655 schedule();
1656 msleep(1000);
1657 atomic_set(&mhba->pnp_count, 0);
1658 __set_current_state(TASK_RUNNING);
1659
1660 mutex_lock(&mhba->device_lock);
1661 ret = mvumi_probe_devices(mhba);
1662 if (!ret) {
1663 list_for_each_entry_safe(mv_dev, dev_next,
1664 &mhba->mhba_dev_list, list) {
1665 if (mvumi_handle_hotplug(mhba, mv_dev->id,
1666 DEVICE_ONLINE)) {
1667 dev_err(&mhba->pdev->dev,
1668 "%s add device(0:%d:0) failed"
1669 "wwid(%llx) has exist\n",
1670 __func__,
1671 mv_dev->id, mv_dev->wwid);
1672 list_del_init(&mv_dev->list);
1673 kfree(mv_dev);
1674 } else {
1675 list_move_tail(&mv_dev->list,
1676 &mhba->shost_dev_list);
1677 }
1678 }
1679 }
1680 mutex_unlock(&mhba->device_lock);
1681 }
1682 return 0;
1683}
1684
1685static void mvumi_proc_msg(struct mvumi_hba *mhba,
1686 struct mvumi_hotplug_event *param)
1687{
1688 u16 size = param->size;
1689 const unsigned long *ar_bitmap;
1690 const unsigned long *re_bitmap;
1691 int index;
1692
1693 if (mhba->fw_flag & MVUMI_FW_ATTACH) {
1694 index = -1;
1695 ar_bitmap = (const unsigned long *) param->bitmap;
1696 re_bitmap = (const unsigned long *) &param->bitmap[size >> 3];
1697
1698 mutex_lock(&mhba->sas_discovery_mutex);
1699 do {
1700 index = find_next_zero_bit(ar_bitmap, size, index + 1);
1701 if (index >= size)
1702 break;
1703 mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
1704 } while (1);
1705
1706 index = -1;
1707 do {
1708 index = find_next_zero_bit(re_bitmap, size, index + 1);
1709 if (index >= size)
1710 break;
1711 mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
1712 } while (1);
1713 mutex_unlock(&mhba->sas_discovery_mutex);
1714 }
1715}
1716
1213static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer) 1717static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
1214{ 1718{
1215 if (msg == APICDB1_EVENT_GETEVENT) { 1719 if (msg == APICDB1_EVENT_GETEVENT) {
@@ -1227,6 +1731,8 @@ static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
1227 param = &er->events[i]; 1731 param = &er->events[i];
1228 mvumi_show_event(mhba, param); 1732 mvumi_show_event(mhba, param);
1229 } 1733 }
1734 } else if (msg == APICDB1_HOST_GETEVENT) {
1735 mvumi_proc_msg(mhba, buffer);
1230 } 1736 }
1231} 1737}
1232 1738
@@ -1271,17 +1777,27 @@ static void mvumi_scan_events(struct work_struct *work)
1271 kfree(mu_ev); 1777 kfree(mu_ev);
1272} 1778}
1273 1779
1274static void mvumi_launch_events(struct mvumi_hba *mhba, u8 msg) 1780static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status)
1275{ 1781{
1276 struct mvumi_events_wq *mu_ev; 1782 struct mvumi_events_wq *mu_ev;
1277 1783
1278 mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC); 1784 while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) {
1279 if (mu_ev) { 1785 if (isr_status & DRBL_BUS_CHANGE) {
1280 INIT_WORK(&mu_ev->work_q, mvumi_scan_events); 1786 atomic_inc(&mhba->pnp_count);
1281 mu_ev->mhba = mhba; 1787 wake_up_process(mhba->dm_thread);
1282 mu_ev->event = msg; 1788 isr_status &= ~(DRBL_BUS_CHANGE);
1283 mu_ev->param = NULL; 1789 continue;
1284 schedule_work(&mu_ev->work_q); 1790 }
1791
1792 mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
1793 if (mu_ev) {
1794 INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
1795 mu_ev->mhba = mhba;
1796 mu_ev->event = APICDB1_EVENT_GETEVENT;
1797 isr_status &= ~(DRBL_EVENT_NOTIFY);
1798 mu_ev->param = NULL;
1799 schedule_work(&mu_ev->work_q);
1800 }
1285 } 1801 }
1286} 1802}
1287 1803
@@ -1322,16 +1838,17 @@ static irqreturn_t mvumi_isr_handler(int irq, void *devp)
1322 return IRQ_NONE; 1838 return IRQ_NONE;
1323 } 1839 }
1324 1840
1325 if (mhba->global_isr & INT_MAP_DL_CPU2PCIEA) { 1841 if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
1842 if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
1843 mvumi_launch_events(mhba, mhba->isr_status);
1326 if (mhba->isr_status & DRBL_HANDSHAKE_ISR) { 1844 if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
1327 dev_warn(&mhba->pdev->dev, "enter handshake again!\n"); 1845 dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
1328 mvumi_handshake(mhba); 1846 mvumi_handshake(mhba);
1329 } 1847 }
1330 if (mhba->isr_status & DRBL_EVENT_NOTIFY) 1848
1331 mvumi_launch_events(mhba, APICDB1_EVENT_GETEVENT);
1332 } 1849 }
1333 1850
1334 if (mhba->global_isr & INT_MAP_COMAOUT) 1851 if (mhba->global_isr & mhba->regs->int_comaout)
1335 mvumi_receive_ob_list_entry(mhba); 1852 mvumi_receive_ob_list_entry(mhba);
1336 1853
1337 mhba->global_isr = 0; 1854 mhba->global_isr = 0;
@@ -1358,8 +1875,7 @@ static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
1358 dev_dbg(&mhba->pdev->dev, "no free tag.\n"); 1875 dev_dbg(&mhba->pdev->dev, "no free tag.\n");
1359 return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE; 1876 return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1360 } 1877 }
1361 if (mvumi_get_ib_list_entry(mhba, &ib_entry)) 1878 mvumi_get_ib_list_entry(mhba, &ib_entry);
1362 return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1363 1879
1364 cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool); 1880 cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
1365 cmd->frame->request_id = mhba->io_seq++; 1881 cmd->frame->request_id = mhba->io_seq++;
@@ -1367,21 +1883,35 @@ static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
1367 mhba->tag_cmd[cmd->frame->tag] = cmd; 1883 mhba->tag_cmd[cmd->frame->tag] = cmd;
1368 frame_len = sizeof(*ib_frame) - 4 + 1884 frame_len = sizeof(*ib_frame) - 4 +
1369 ib_frame->sg_counts * sizeof(struct mvumi_sgl); 1885 ib_frame->sg_counts * sizeof(struct mvumi_sgl);
1370 memcpy(ib_entry, ib_frame, frame_len); 1886 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
1887 struct mvumi_dyn_list_entry *dle;
1888 dle = ib_entry;
1889 dle->src_low_addr =
1890 cpu_to_le32(lower_32_bits(cmd->frame_phys));
1891 dle->src_high_addr =
1892 cpu_to_le32(upper_32_bits(cmd->frame_phys));
1893 dle->if_length = (frame_len >> 2) & 0xFFF;
1894 } else {
1895 memcpy(ib_entry, ib_frame, frame_len);
1896 }
1371 return MV_QUEUE_COMMAND_RESULT_SENT; 1897 return MV_QUEUE_COMMAND_RESULT_SENT;
1372} 1898}
1373 1899
1374static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd) 1900static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
1375{ 1901{
1376 unsigned short num_of_cl_sent = 0; 1902 unsigned short num_of_cl_sent = 0;
1903 unsigned int count;
1377 enum mvumi_qc_result result; 1904 enum mvumi_qc_result result;
1378 1905
1379 if (cmd) 1906 if (cmd)
1380 list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list); 1907 list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
1908 count = mhba->instancet->check_ib_list(mhba);
1909 if (list_empty(&mhba->waiting_req_list) || !count)
1910 return;
1381 1911
1382 while (!list_empty(&mhba->waiting_req_list)) { 1912 do {
1383 cmd = list_first_entry(&mhba->waiting_req_list, 1913 cmd = list_first_entry(&mhba->waiting_req_list,
1384 struct mvumi_cmd, queue_pointer); 1914 struct mvumi_cmd, queue_pointer);
1385 list_del_init(&cmd->queue_pointer); 1915 list_del_init(&cmd->queue_pointer);
1386 result = mvumi_send_command(mhba, cmd); 1916 result = mvumi_send_command(mhba, cmd);
1387 switch (result) { 1917 switch (result) {
@@ -1395,65 +1925,77 @@ static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
1395 1925
1396 return; 1926 return;
1397 } 1927 }
1398 } 1928 } while (!list_empty(&mhba->waiting_req_list) && count--);
1929
1399 if (num_of_cl_sent > 0) 1930 if (num_of_cl_sent > 0)
1400 mvumi_send_ib_list_entry(mhba); 1931 mvumi_send_ib_list_entry(mhba);
1401} 1932}
1402 1933
1403/** 1934/**
1404 * mvumi_enable_intr - Enables interrupts 1935 * mvumi_enable_intr - Enables interrupts
1405 * @regs: FW register set 1936 * @mhba: Adapter soft state
1406 */ 1937 */
1407static void mvumi_enable_intr(void *regs) 1938static void mvumi_enable_intr(struct mvumi_hba *mhba)
1408{ 1939{
1409 unsigned int mask; 1940 unsigned int mask;
1941 struct mvumi_hw_regs *regs = mhba->regs;
1410 1942
1411 iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG); 1943 iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1412 mask = ioread32(regs + CPU_ENPOINTA_MASK_REG); 1944 mask = ioread32(regs->enpointa_mask_reg);
1413 mask |= INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR; 1945 mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr;
1414 iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG); 1946 iowrite32(mask, regs->enpointa_mask_reg);
1415} 1947}
1416 1948
1417/** 1949/**
1418 * mvumi_disable_intr -Disables interrupt 1950 * mvumi_disable_intr -Disables interrupt
1419 * @regs: FW register set 1951 * @mhba: Adapter soft state
1420 */ 1952 */
1421static void mvumi_disable_intr(void *regs) 1953static void mvumi_disable_intr(struct mvumi_hba *mhba)
1422{ 1954{
1423 unsigned int mask; 1955 unsigned int mask;
1956 struct mvumi_hw_regs *regs = mhba->regs;
1424 1957
1425 iowrite32(0, regs + CPU_ARM_TO_PCIEA_MASK_REG); 1958 iowrite32(0, regs->arm_to_pciea_mask_reg);
1426 mask = ioread32(regs + CPU_ENPOINTA_MASK_REG); 1959 mask = ioread32(regs->enpointa_mask_reg);
1427 mask &= ~(INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR); 1960 mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout |
1428 iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG); 1961 regs->int_comaerr);
1962 iowrite32(mask, regs->enpointa_mask_reg);
1429} 1963}
1430 1964
1431static int mvumi_clear_intr(void *extend) 1965static int mvumi_clear_intr(void *extend)
1432{ 1966{
1433 struct mvumi_hba *mhba = (struct mvumi_hba *) extend; 1967 struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
1434 unsigned int status, isr_status = 0, tmp = 0; 1968 unsigned int status, isr_status = 0, tmp = 0;
1435 void *regs = mhba->mmio; 1969 struct mvumi_hw_regs *regs = mhba->regs;
1436 1970
1437 status = ioread32(regs + CPU_MAIN_INT_CAUSE_REG); 1971 status = ioread32(regs->main_int_cause_reg);
1438 if (!(status & INT_MAP_MU) || status == 0xFFFFFFFF) 1972 if (!(status & regs->int_mu) || status == 0xFFFFFFFF)
1439 return 1; 1973 return 1;
1440 if (unlikely(status & INT_MAP_COMAERR)) { 1974 if (unlikely(status & regs->int_comaerr)) {
1441 tmp = ioread32(regs + CLA_ISR_CAUSE); 1975 tmp = ioread32(regs->outb_isr_cause);
1442 if (tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ)) 1976 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
1443 iowrite32(tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ), 1977 if (tmp & regs->clic_out_err) {
1444 regs + CLA_ISR_CAUSE); 1978 iowrite32(tmp & regs->clic_out_err,
1445 status ^= INT_MAP_COMAERR; 1979 regs->outb_isr_cause);
1980 }
1981 } else {
1982 if (tmp & (regs->clic_in_err | regs->clic_out_err))
1983 iowrite32(tmp & (regs->clic_in_err |
1984 regs->clic_out_err),
1985 regs->outb_isr_cause);
1986 }
1987 status ^= mhba->regs->int_comaerr;
1446 /* inbound or outbound parity error, command will timeout */ 1988 /* inbound or outbound parity error, command will timeout */
1447 } 1989 }
1448 if (status & INT_MAP_COMAOUT) { 1990 if (status & regs->int_comaout) {
1449 tmp = ioread32(regs + CLA_ISR_CAUSE); 1991 tmp = ioread32(regs->outb_isr_cause);
1450 if (tmp & CLIC_OUT_IRQ) 1992 if (tmp & regs->clic_irq)
1451 iowrite32(tmp & CLIC_OUT_IRQ, regs + CLA_ISR_CAUSE); 1993 iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause);
1452 } 1994 }
1453 if (status & INT_MAP_DL_CPU2PCIEA) { 1995 if (status & regs->int_dl_cpu2pciea) {
1454 isr_status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG); 1996 isr_status = ioread32(regs->arm_to_pciea_drbl_reg);
1455 if (isr_status) 1997 if (isr_status)
1456 iowrite32(isr_status, regs + CPU_ARM_TO_PCIEA_DRBL_REG); 1998 iowrite32(isr_status, regs->arm_to_pciea_drbl_reg);
1457 } 1999 }
1458 2000
1459 mhba->global_isr = status; 2001 mhba->global_isr = status;
@@ -1464,24 +2006,38 @@ static int mvumi_clear_intr(void *extend)
1464 2006
1465/** 2007/**
1466 * mvumi_read_fw_status_reg - returns the current FW status value 2008 * mvumi_read_fw_status_reg - returns the current FW status value
1467 * @regs: FW register set 2009 * @mhba: Adapter soft state
1468 */ 2010 */
1469static unsigned int mvumi_read_fw_status_reg(void *regs) 2011static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)
1470{ 2012{
1471 unsigned int status; 2013 unsigned int status;
1472 2014
1473 status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG); 2015 status = ioread32(mhba->regs->arm_to_pciea_drbl_reg);
1474 if (status) 2016 if (status)
1475 iowrite32(status, regs + CPU_ARM_TO_PCIEA_DRBL_REG); 2017 iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg);
1476 return status; 2018 return status;
1477} 2019}
1478 2020
1479static struct mvumi_instance_template mvumi_instance_template = { 2021static struct mvumi_instance_template mvumi_instance_9143 = {
1480 .fire_cmd = mvumi_fire_cmd, 2022 .fire_cmd = mvumi_fire_cmd,
1481 .enable_intr = mvumi_enable_intr, 2023 .enable_intr = mvumi_enable_intr,
1482 .disable_intr = mvumi_disable_intr, 2024 .disable_intr = mvumi_disable_intr,
1483 .clear_intr = mvumi_clear_intr, 2025 .clear_intr = mvumi_clear_intr,
1484 .read_fw_status_reg = mvumi_read_fw_status_reg, 2026 .read_fw_status_reg = mvumi_read_fw_status_reg,
2027 .check_ib_list = mvumi_check_ib_list_9143,
2028 .check_ob_list = mvumi_check_ob_list_9143,
2029 .reset_host = mvumi_reset_host_9143,
2030};
2031
2032static struct mvumi_instance_template mvumi_instance_9580 = {
2033 .fire_cmd = mvumi_fire_cmd,
2034 .enable_intr = mvumi_enable_intr,
2035 .disable_intr = mvumi_disable_intr,
2036 .clear_intr = mvumi_clear_intr,
2037 .read_fw_status_reg = mvumi_read_fw_status_reg,
2038 .check_ib_list = mvumi_check_ib_list_9580,
2039 .check_ob_list = mvumi_check_ob_list_9580,
2040 .reset_host = mvumi_reset_host_9580,
1485}; 2041};
1486 2042
1487static int mvumi_slave_configure(struct scsi_device *sdev) 2043static int mvumi_slave_configure(struct scsi_device *sdev)
@@ -1681,6 +2237,124 @@ static struct scsi_transport_template mvumi_transport_template = {
1681 .eh_timed_out = mvumi_timed_out, 2237 .eh_timed_out = mvumi_timed_out,
1682}; 2238};
1683 2239
2240static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
2241{
2242 void *base = NULL;
2243 struct mvumi_hw_regs *regs;
2244
2245 switch (mhba->pdev->device) {
2246 case PCI_DEVICE_ID_MARVELL_MV9143:
2247 mhba->mmio = mhba->base_addr[0];
2248 base = mhba->mmio;
2249 if (!mhba->regs) {
2250 mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2251 if (mhba->regs == NULL)
2252 return -ENOMEM;
2253 }
2254 regs = mhba->regs;
2255
2256 /* For Arm */
2257 regs->ctrl_sts_reg = base + 0x20104;
2258 regs->rstoutn_mask_reg = base + 0x20108;
2259 regs->sys_soft_rst_reg = base + 0x2010C;
2260 regs->main_int_cause_reg = base + 0x20200;
2261 regs->enpointa_mask_reg = base + 0x2020C;
2262 regs->rstoutn_en_reg = base + 0xF1400;
2263 /* For Doorbell */
2264 regs->pciea_to_arm_drbl_reg = base + 0x20400;
2265 regs->arm_to_pciea_drbl_reg = base + 0x20408;
2266 regs->arm_to_pciea_mask_reg = base + 0x2040C;
2267 regs->pciea_to_arm_msg0 = base + 0x20430;
2268 regs->pciea_to_arm_msg1 = base + 0x20434;
2269 regs->arm_to_pciea_msg0 = base + 0x20438;
2270 regs->arm_to_pciea_msg1 = base + 0x2043C;
2271
2272 /* For Message Unit */
2273
2274 regs->inb_aval_count_basel = base + 0x508;
2275 regs->inb_aval_count_baseh = base + 0x50C;
2276 regs->inb_write_pointer = base + 0x518;
2277 regs->inb_read_pointer = base + 0x51C;
2278 regs->outb_coal_cfg = base + 0x568;
2279 regs->outb_copy_basel = base + 0x5B0;
2280 regs->outb_copy_baseh = base + 0x5B4;
2281 regs->outb_copy_pointer = base + 0x544;
2282 regs->outb_read_pointer = base + 0x548;
2283 regs->outb_isr_cause = base + 0x560;
2284 regs->outb_coal_cfg = base + 0x568;
2285 /* Bit setting for HW */
2286 regs->int_comaout = 1 << 8;
2287 regs->int_comaerr = 1 << 6;
2288 regs->int_dl_cpu2pciea = 1 << 1;
2289 regs->cl_pointer_toggle = 1 << 12;
2290 regs->clic_irq = 1 << 1;
2291 regs->clic_in_err = 1 << 8;
2292 regs->clic_out_err = 1 << 12;
2293 regs->cl_slot_num_mask = 0xFFF;
2294 regs->int_drbl_int_mask = 0x3FFFFFFF;
2295 regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout |
2296 regs->int_comaerr;
2297 break;
2298 case PCI_DEVICE_ID_MARVELL_MV9580:
2299 mhba->mmio = mhba->base_addr[2];
2300 base = mhba->mmio;
2301 if (!mhba->regs) {
2302 mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2303 if (mhba->regs == NULL)
2304 return -ENOMEM;
2305 }
2306 regs = mhba->regs;
2307 /* For Arm */
2308 regs->ctrl_sts_reg = base + 0x20104;
2309 regs->rstoutn_mask_reg = base + 0x1010C;
2310 regs->sys_soft_rst_reg = base + 0x10108;
2311 regs->main_int_cause_reg = base + 0x10200;
2312 regs->enpointa_mask_reg = base + 0x1020C;
2313 regs->rstoutn_en_reg = base + 0xF1400;
2314
2315 /* For Doorbell */
2316 regs->pciea_to_arm_drbl_reg = base + 0x10460;
2317 regs->arm_to_pciea_drbl_reg = base + 0x10480;
2318 regs->arm_to_pciea_mask_reg = base + 0x10484;
2319 regs->pciea_to_arm_msg0 = base + 0x10400;
2320 regs->pciea_to_arm_msg1 = base + 0x10404;
2321 regs->arm_to_pciea_msg0 = base + 0x10420;
2322 regs->arm_to_pciea_msg1 = base + 0x10424;
2323
2324 /* For reset*/
2325 regs->reset_request = base + 0x10108;
2326 regs->reset_enable = base + 0x1010c;
2327
2328 /* For Message Unit */
2329 regs->inb_aval_count_basel = base + 0x4008;
2330 regs->inb_aval_count_baseh = base + 0x400C;
2331 regs->inb_write_pointer = base + 0x4018;
2332 regs->inb_read_pointer = base + 0x401C;
2333 regs->outb_copy_basel = base + 0x4058;
2334 regs->outb_copy_baseh = base + 0x405C;
2335 regs->outb_copy_pointer = base + 0x406C;
2336 regs->outb_read_pointer = base + 0x4070;
2337 regs->outb_coal_cfg = base + 0x4080;
2338 regs->outb_isr_cause = base + 0x4088;
2339 /* Bit setting for HW */
2340 regs->int_comaout = 1 << 4;
2341 regs->int_dl_cpu2pciea = 1 << 12;
2342 regs->int_comaerr = 1 << 29;
2343 regs->cl_pointer_toggle = 1 << 14;
2344 regs->cl_slot_num_mask = 0x3FFF;
2345 regs->clic_irq = 1 << 0;
2346 regs->clic_out_err = 1 << 1;
2347 regs->int_drbl_int_mask = 0x3FFFFFFF;
2348 regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout;
2349 break;
2350 default:
2351 return -1;
2352 break;
2353 }
2354
2355 return 0;
2356}
2357
1684/** 2358/**
1685 * mvumi_init_fw - Initializes the FW 2359 * mvumi_init_fw - Initializes the FW
1686 * @mhba: Adapter soft state 2360 * @mhba: Adapter soft state
@@ -1699,15 +2373,18 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
1699 if (ret) 2373 if (ret)
1700 goto fail_ioremap; 2374 goto fail_ioremap;
1701 2375
1702 mhba->mmio = mhba->base_addr[0];
1703
1704 switch (mhba->pdev->device) { 2376 switch (mhba->pdev->device) {
1705 case PCI_DEVICE_ID_MARVELL_MV9143: 2377 case PCI_DEVICE_ID_MARVELL_MV9143:
1706 mhba->instancet = &mvumi_instance_template; 2378 mhba->instancet = &mvumi_instance_9143;
1707 mhba->io_seq = 0; 2379 mhba->io_seq = 0;
1708 mhba->max_sge = MVUMI_MAX_SG_ENTRY; 2380 mhba->max_sge = MVUMI_MAX_SG_ENTRY;
1709 mhba->request_id_enabled = 1; 2381 mhba->request_id_enabled = 1;
1710 break; 2382 break;
2383 case PCI_DEVICE_ID_MARVELL_MV9580:
2384 mhba->instancet = &mvumi_instance_9580;
2385 mhba->io_seq = 0;
2386 mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2387 break;
1711 default: 2388 default:
1712 dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n", 2389 dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
1713 mhba->pdev->device); 2390 mhba->pdev->device);
@@ -1717,15 +2394,21 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
1717 } 2394 }
1718 dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n", 2395 dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
1719 mhba->pdev->device); 2396 mhba->pdev->device);
1720 2397 ret = mvumi_cfg_hw_reg(mhba);
1721 mhba->handshake_page = kzalloc(HSP_MAX_SIZE, GFP_KERNEL); 2398 if (ret) {
2399 dev_err(&mhba->pdev->dev,
2400 "failed to allocate memory for reg\n");
2401 ret = -ENOMEM;
2402 goto fail_alloc_mem;
2403 }
2404 mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE,
2405 &mhba->handshake_page_phys);
1722 if (!mhba->handshake_page) { 2406 if (!mhba->handshake_page) {
1723 dev_err(&mhba->pdev->dev, 2407 dev_err(&mhba->pdev->dev,
1724 "failed to allocate memory for handshake\n"); 2408 "failed to allocate memory for handshake\n");
1725 ret = -ENOMEM; 2409 ret = -ENOMEM;
1726 goto fail_alloc_mem; 2410 goto fail_alloc_page;
1727 } 2411 }
1728 mhba->handshake_page_phys = virt_to_phys(mhba->handshake_page);
1729 2412
1730 if (mvumi_start(mhba)) { 2413 if (mvumi_start(mhba)) {
1731 ret = -EINVAL; 2414 ret = -EINVAL;
@@ -1739,7 +2422,10 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
1739 2422
1740fail_ready_state: 2423fail_ready_state:
1741 mvumi_release_mem_resource(mhba); 2424 mvumi_release_mem_resource(mhba);
1742 kfree(mhba->handshake_page); 2425 pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
2426 mhba->handshake_page, mhba->handshake_page_phys);
2427fail_alloc_page:
2428 kfree(mhba->regs);
1743fail_alloc_mem: 2429fail_alloc_mem:
1744 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); 2430 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
1745fail_ioremap: 2431fail_ioremap:
@@ -1755,6 +2441,7 @@ fail_ioremap:
1755static int mvumi_io_attach(struct mvumi_hba *mhba) 2441static int mvumi_io_attach(struct mvumi_hba *mhba)
1756{ 2442{
1757 struct Scsi_Host *host = mhba->shost; 2443 struct Scsi_Host *host = mhba->shost;
2444 struct scsi_device *sdev = NULL;
1758 int ret; 2445 int ret;
1759 unsigned int max_sg = (mhba->ib_max_size + 4 - 2446 unsigned int max_sg = (mhba->ib_max_size + 4 -
1760 sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl); 2447 sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
@@ -1764,7 +2451,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
1764 host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; 2451 host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
1765 host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge; 2452 host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
1766 host->max_sectors = mhba->max_transfer_size / 512; 2453 host->max_sectors = mhba->max_transfer_size / 512;
1767 host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; 2454 host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
1768 host->max_id = mhba->max_target_id; 2455 host->max_id = mhba->max_target_id;
1769 host->max_cmd_len = MAX_COMMAND_SIZE; 2456 host->max_cmd_len = MAX_COMMAND_SIZE;
1770 host->transportt = &mvumi_transport_template; 2457 host->transportt = &mvumi_transport_template;
@@ -1775,9 +2462,43 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
1775 return ret; 2462 return ret;
1776 } 2463 }
1777 mhba->fw_flag |= MVUMI_FW_ATTACH; 2464 mhba->fw_flag |= MVUMI_FW_ATTACH;
1778 scsi_scan_host(host);
1779 2465
2466 mutex_lock(&mhba->sas_discovery_mutex);
2467 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2468 ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
2469 else
2470 ret = 0;
2471 if (ret) {
2472 dev_err(&mhba->pdev->dev, "add virtual device failed\n");
2473 mutex_unlock(&mhba->sas_discovery_mutex);
2474 goto fail_add_device;
2475 }
2476
2477 mhba->dm_thread = kthread_create(mvumi_rescan_bus,
2478 mhba, "mvumi_scanthread");
2479 if (IS_ERR(mhba->dm_thread)) {
2480 dev_err(&mhba->pdev->dev,
2481 "failed to create device scan thread\n");
2482 mutex_unlock(&mhba->sas_discovery_mutex);
2483 goto fail_create_thread;
2484 }
2485 atomic_set(&mhba->pnp_count, 1);
2486 wake_up_process(mhba->dm_thread);
2487
2488 mutex_unlock(&mhba->sas_discovery_mutex);
1780 return 0; 2489 return 0;
2490
2491fail_create_thread:
2492 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2493 sdev = scsi_device_lookup(mhba->shost, 0,
2494 mhba->max_target_id - 1, 0);
2495 if (sdev) {
2496 scsi_remove_device(sdev);
2497 scsi_device_put(sdev);
2498 }
2499fail_add_device:
2500 scsi_remove_host(mhba->shost);
2501 return ret;
1781} 2502}
1782 2503
1783/** 2504/**
@@ -1828,8 +2549,12 @@ static int __devinit mvumi_probe_one(struct pci_dev *pdev,
1828 INIT_LIST_HEAD(&mhba->free_ob_list); 2549 INIT_LIST_HEAD(&mhba->free_ob_list);
1829 INIT_LIST_HEAD(&mhba->res_list); 2550 INIT_LIST_HEAD(&mhba->res_list);
1830 INIT_LIST_HEAD(&mhba->waiting_req_list); 2551 INIT_LIST_HEAD(&mhba->waiting_req_list);
2552 mutex_init(&mhba->device_lock);
2553 INIT_LIST_HEAD(&mhba->mhba_dev_list);
2554 INIT_LIST_HEAD(&mhba->shost_dev_list);
1831 atomic_set(&mhba->fw_outstanding, 0); 2555 atomic_set(&mhba->fw_outstanding, 0);
1832 init_waitqueue_head(&mhba->int_cmd_wait_q); 2556 init_waitqueue_head(&mhba->int_cmd_wait_q);
2557 mutex_init(&mhba->sas_discovery_mutex);
1833 2558
1834 mhba->pdev = pdev; 2559 mhba->pdev = pdev;
1835 mhba->shost = host; 2560 mhba->shost = host;
@@ -1845,19 +2570,22 @@ static int __devinit mvumi_probe_one(struct pci_dev *pdev,
1845 dev_err(&pdev->dev, "failed to register IRQ\n"); 2570 dev_err(&pdev->dev, "failed to register IRQ\n");
1846 goto fail_init_irq; 2571 goto fail_init_irq;
1847 } 2572 }
1848 mhba->instancet->enable_intr(mhba->mmio); 2573
2574 mhba->instancet->enable_intr(mhba);
1849 pci_set_drvdata(pdev, mhba); 2575 pci_set_drvdata(pdev, mhba);
1850 2576
1851 ret = mvumi_io_attach(mhba); 2577 ret = mvumi_io_attach(mhba);
1852 if (ret) 2578 if (ret)
1853 goto fail_io_attach; 2579 goto fail_io_attach;
2580
2581 mvumi_backup_bar_addr(mhba);
1854 dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n"); 2582 dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");
1855 2583
1856 return 0; 2584 return 0;
1857 2585
1858fail_io_attach: 2586fail_io_attach:
1859 pci_set_drvdata(pdev, NULL); 2587 pci_set_drvdata(pdev, NULL);
1860 mhba->instancet->disable_intr(mhba->mmio); 2588 mhba->instancet->disable_intr(mhba);
1861 free_irq(mhba->pdev->irq, mhba); 2589 free_irq(mhba->pdev->irq, mhba);
1862fail_init_irq: 2590fail_init_irq:
1863 mvumi_release_fw(mhba); 2591 mvumi_release_fw(mhba);
@@ -1877,11 +2605,17 @@ static void mvumi_detach_one(struct pci_dev *pdev)
1877 struct mvumi_hba *mhba; 2605 struct mvumi_hba *mhba;
1878 2606
1879 mhba = pci_get_drvdata(pdev); 2607 mhba = pci_get_drvdata(pdev);
2608 if (mhba->dm_thread) {
2609 kthread_stop(mhba->dm_thread);
2610 mhba->dm_thread = NULL;
2611 }
2612
2613 mvumi_detach_devices(mhba);
1880 host = mhba->shost; 2614 host = mhba->shost;
1881 scsi_remove_host(mhba->shost); 2615 scsi_remove_host(mhba->shost);
1882 mvumi_flush_cache(mhba); 2616 mvumi_flush_cache(mhba);
1883 2617
1884 mhba->instancet->disable_intr(mhba->mmio); 2618 mhba->instancet->disable_intr(mhba);
1885 free_irq(mhba->pdev->irq, mhba); 2619 free_irq(mhba->pdev->irq, mhba);
1886 mvumi_release_fw(mhba); 2620 mvumi_release_fw(mhba);
1887 scsi_host_put(host); 2621 scsi_host_put(host);
@@ -1909,7 +2643,7 @@ static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
1909 mvumi_flush_cache(mhba); 2643 mvumi_flush_cache(mhba);
1910 2644
1911 pci_set_drvdata(pdev, mhba); 2645 pci_set_drvdata(pdev, mhba);
1912 mhba->instancet->disable_intr(mhba->mmio); 2646 mhba->instancet->disable_intr(mhba);
1913 free_irq(mhba->pdev->irq, mhba); 2647 free_irq(mhba->pdev->irq, mhba);
1914 mvumi_unmap_pci_addr(pdev, mhba->base_addr); 2648 mvumi_unmap_pci_addr(pdev, mhba->base_addr);
1915 pci_release_regions(pdev); 2649 pci_release_regions(pdev);
@@ -1956,8 +2690,13 @@ static int mvumi_resume(struct pci_dev *pdev)
1956 if (ret) 2690 if (ret)
1957 goto release_regions; 2691 goto release_regions;
1958 2692
2693 if (mvumi_cfg_hw_reg(mhba)) {
2694 ret = -EINVAL;
2695 goto unmap_pci_addr;
2696 }
2697
1959 mhba->mmio = mhba->base_addr[0]; 2698 mhba->mmio = mhba->base_addr[0];
1960 mvumi_reset(mhba->mmio); 2699 mvumi_reset(mhba);
1961 2700
1962 if (mvumi_start(mhba)) { 2701 if (mvumi_start(mhba)) {
1963 ret = -EINVAL; 2702 ret = -EINVAL;
@@ -1970,7 +2709,7 @@ static int mvumi_resume(struct pci_dev *pdev)
1970 dev_err(&pdev->dev, "failed to register IRQ\n"); 2709 dev_err(&pdev->dev, "failed to register IRQ\n");
1971 goto unmap_pci_addr; 2710 goto unmap_pci_addr;
1972 } 2711 }
1973 mhba->instancet->enable_intr(mhba->mmio); 2712 mhba->instancet->enable_intr(mhba);
1974 2713
1975 return 0; 2714 return 0;
1976 2715
diff --git a/drivers/scsi/mvumi.h b/drivers/scsi/mvumi.h
index 10b9237566f0..e360135fd1bd 100644
--- a/drivers/scsi/mvumi.h
+++ b/drivers/scsi/mvumi.h
@@ -34,51 +34,87 @@
34#define MV_DRIVER_NAME "mvumi" 34#define MV_DRIVER_NAME "mvumi"
35#define PCI_VENDOR_ID_MARVELL_2 0x1b4b 35#define PCI_VENDOR_ID_MARVELL_2 0x1b4b
36#define PCI_DEVICE_ID_MARVELL_MV9143 0x9143 36#define PCI_DEVICE_ID_MARVELL_MV9143 0x9143
37#define PCI_DEVICE_ID_MARVELL_MV9580 0x9580
37 38
38#define MVUMI_INTERNAL_CMD_WAIT_TIME 45 39#define MVUMI_INTERNAL_CMD_WAIT_TIME 45
40#define MVUMI_INQUIRY_LENGTH 44
41#define MVUMI_INQUIRY_UUID_OFF 36
42#define MVUMI_INQUIRY_UUID_LEN 8
39 43
40#define IS_DMA64 (sizeof(dma_addr_t) == 8) 44#define IS_DMA64 (sizeof(dma_addr_t) == 8)
41 45
42enum mvumi_qc_result { 46enum mvumi_qc_result {
43 MV_QUEUE_COMMAND_RESULT_SENT = 0, 47 MV_QUEUE_COMMAND_RESULT_SENT = 0,
44 MV_QUEUE_COMMAND_RESULT_NO_RESOURCE, 48 MV_QUEUE_COMMAND_RESULT_NO_RESOURCE,
45}; 49};
46 50
47enum { 51struct mvumi_hw_regs {
48 /*******************************************/ 52 /* For CPU */
49 53 void *main_int_cause_reg;
50 /* ARM Mbus Registers Map */ 54 void *enpointa_mask_reg;
51 55 void *enpointb_mask_reg;
52 /*******************************************/ 56 void *rstoutn_en_reg;
53 CPU_MAIN_INT_CAUSE_REG = 0x20200, 57 void *ctrl_sts_reg;
54 CPU_MAIN_IRQ_MASK_REG = 0x20204, 58 void *rstoutn_mask_reg;
55 CPU_MAIN_FIQ_MASK_REG = 0x20208, 59 void *sys_soft_rst_reg;
56 CPU_ENPOINTA_MASK_REG = 0x2020C, 60
57 CPU_ENPOINTB_MASK_REG = 0x20210, 61 /* For Doorbell */
58 62 void *pciea_to_arm_drbl_reg;
59 INT_MAP_COMAERR = 1 << 6, 63 void *arm_to_pciea_drbl_reg;
60 INT_MAP_COMAIN = 1 << 7, 64 void *arm_to_pciea_mask_reg;
61 INT_MAP_COMAOUT = 1 << 8, 65 void *pciea_to_arm_msg0;
62 INT_MAP_COMBERR = 1 << 9, 66 void *pciea_to_arm_msg1;
63 INT_MAP_COMBIN = 1 << 10, 67 void *arm_to_pciea_msg0;
64 INT_MAP_COMBOUT = 1 << 11, 68 void *arm_to_pciea_msg1;
65 69
66 INT_MAP_COMAINT = (INT_MAP_COMAOUT | INT_MAP_COMAERR), 70 /* reset register */
67 INT_MAP_COMBINT = (INT_MAP_COMBOUT | INT_MAP_COMBIN | INT_MAP_COMBERR), 71 void *reset_request;
68 72 void *reset_enable;
69 INT_MAP_DL_PCIEA2CPU = 1 << 0, 73
70 INT_MAP_DL_CPU2PCIEA = 1 << 1, 74 /* For Message Unit */
71 75 void *inb_list_basel;
72 /***************************************/ 76 void *inb_list_baseh;
77 void *inb_aval_count_basel;
78 void *inb_aval_count_baseh;
79 void *inb_write_pointer;
80 void *inb_read_pointer;
81 void *outb_list_basel;
82 void *outb_list_baseh;
83 void *outb_copy_basel;
84 void *outb_copy_baseh;
85 void *outb_copy_pointer;
86 void *outb_read_pointer;
87 void *inb_isr_cause;
88 void *outb_isr_cause;
89 void *outb_coal_cfg;
90 void *outb_coal_timeout;
91
92 /* Bit setting for HW */
93 u32 int_comaout;
94 u32 int_comaerr;
95 u32 int_dl_cpu2pciea;
96 u32 int_mu;
97 u32 int_drbl_int_mask;
98 u32 int_main_int_mask;
99 u32 cl_pointer_toggle;
100 u32 cl_slot_num_mask;
101 u32 clic_irq;
102 u32 clic_in_err;
103 u32 clic_out_err;
104};
73 105
74 /* ARM Doorbell Registers Map */ 106struct mvumi_dyn_list_entry {
107 u32 src_low_addr;
108 u32 src_high_addr;
109 u32 if_length;
110 u32 reserve;
111};
75 112
76 /***************************************/ 113#define SCSI_CMD_MARVELL_SPECIFIC 0xE1
77 CPU_PCIEA_TO_ARM_DRBL_REG = 0x20400, 114#define CDB_CORE_MODULE 0x1
78 CPU_PCIEA_TO_ARM_MASK_REG = 0x20404, 115#define CDB_CORE_SHUTDOWN 0xB
79 CPU_ARM_TO_PCIEA_DRBL_REG = 0x20408,
80 CPU_ARM_TO_PCIEA_MASK_REG = 0x2040C,
81 116
117enum {
82 DRBL_HANDSHAKE = 1 << 0, 118 DRBL_HANDSHAKE = 1 << 0,
83 DRBL_SOFT_RESET = 1 << 1, 119 DRBL_SOFT_RESET = 1 << 1,
84 DRBL_BUS_CHANGE = 1 << 2, 120 DRBL_BUS_CHANGE = 1 << 2,
@@ -86,46 +122,6 @@ enum {
86 DRBL_MU_RESET = 1 << 4, 122 DRBL_MU_RESET = 1 << 4,
87 DRBL_HANDSHAKE_ISR = DRBL_HANDSHAKE, 123 DRBL_HANDSHAKE_ISR = DRBL_HANDSHAKE,
88 124
89 CPU_PCIEA_TO_ARM_MSG0 = 0x20430,
90 CPU_PCIEA_TO_ARM_MSG1 = 0x20434,
91 CPU_ARM_TO_PCIEA_MSG0 = 0x20438,
92 CPU_ARM_TO_PCIEA_MSG1 = 0x2043C,
93
94 /*******************************************/
95
96 /* ARM Communication List Registers Map */
97
98 /*******************************************/
99 CLA_INB_LIST_BASEL = 0x500,
100 CLA_INB_LIST_BASEH = 0x504,
101 CLA_INB_AVAL_COUNT_BASEL = 0x508,
102 CLA_INB_AVAL_COUNT_BASEH = 0x50C,
103 CLA_INB_DESTI_LIST_BASEL = 0x510,
104 CLA_INB_DESTI_LIST_BASEH = 0x514,
105 CLA_INB_WRITE_POINTER = 0x518,
106 CLA_INB_READ_POINTER = 0x51C,
107
108 CLA_OUTB_LIST_BASEL = 0x530,
109 CLA_OUTB_LIST_BASEH = 0x534,
110 CLA_OUTB_SOURCE_LIST_BASEL = 0x538,
111 CLA_OUTB_SOURCE_LIST_BASEH = 0x53C,
112 CLA_OUTB_COPY_POINTER = 0x544,
113 CLA_OUTB_READ_POINTER = 0x548,
114
115 CLA_ISR_CAUSE = 0x560,
116 CLA_ISR_MASK = 0x564,
117
118 INT_MAP_MU = (INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAINT),
119
120 CL_POINTER_TOGGLE = 1 << 12,
121
122 CLIC_IN_IRQ = 1 << 0,
123 CLIC_OUT_IRQ = 1 << 1,
124 CLIC_IN_ERR_IRQ = 1 << 8,
125 CLIC_OUT_ERR_IRQ = 1 << 12,
126
127 CL_SLOT_NUM_MASK = 0xFFF,
128
129 /* 125 /*
130 * Command flag is the flag for the CDB command itself 126 * Command flag is the flag for the CDB command itself
131 */ 127 */
@@ -137,15 +133,23 @@ enum {
137 CMD_FLAG_DATA_IN = 1 << 3, 133 CMD_FLAG_DATA_IN = 1 << 3,
138 /* 1-host write data */ 134 /* 1-host write data */
139 CMD_FLAG_DATA_OUT = 1 << 4, 135 CMD_FLAG_DATA_OUT = 1 << 4,
140 136 CMD_FLAG_PRDT_IN_HOST = 1 << 5,
141 SCSI_CMD_MARVELL_SPECIFIC = 0xE1,
142 CDB_CORE_SHUTDOWN = 0xB,
143}; 137};
144 138
145#define APICDB0_EVENT 0xF4 139#define APICDB0_EVENT 0xF4
146#define APICDB1_EVENT_GETEVENT 0 140#define APICDB1_EVENT_GETEVENT 0
141#define APICDB1_HOST_GETEVENT 1
147#define MAX_EVENTS_RETURNED 6 142#define MAX_EVENTS_RETURNED 6
148 143
144#define DEVICE_OFFLINE 0
145#define DEVICE_ONLINE 1
146
147struct mvumi_hotplug_event {
148 u16 size;
149 u8 dummy[2];
150 u8 bitmap[0];
151};
152
149struct mvumi_driver_event { 153struct mvumi_driver_event {
150 u32 time_stamp; 154 u32 time_stamp;
151 u32 sequence_no; 155 u32 sequence_no;
@@ -172,8 +176,14 @@ struct mvumi_events_wq {
172 void *param; 176 void *param;
173}; 177};
174 178
179#define HS_CAPABILITY_SUPPORT_COMPACT_SG (1U << 4)
180#define HS_CAPABILITY_SUPPORT_PRD_HOST (1U << 5)
181#define HS_CAPABILITY_SUPPORT_DYN_SRC (1U << 6)
182#define HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF (1U << 14)
183
175#define MVUMI_MAX_SG_ENTRY 32 184#define MVUMI_MAX_SG_ENTRY 32
176#define SGD_EOT (1L << 27) 185#define SGD_EOT (1L << 27)
186#define SGD_EOT_CP (1L << 22)
177 187
178struct mvumi_sgl { 188struct mvumi_sgl {
179 u32 baseaddr_l; 189 u32 baseaddr_l;
@@ -181,6 +191,39 @@ struct mvumi_sgl {
181 u32 flags; 191 u32 flags;
182 u32 size; 192 u32 size;
183}; 193};
194struct mvumi_compact_sgl {
195 u32 baseaddr_l;
196 u32 baseaddr_h;
197 u32 flags;
198};
199
200#define GET_COMPACT_SGD_SIZE(sgd) \
201 ((((struct mvumi_compact_sgl *)(sgd))->flags) & 0x3FFFFFL)
202
203#define SET_COMPACT_SGD_SIZE(sgd, sz) do { \
204 (((struct mvumi_compact_sgl *)(sgd))->flags) &= ~0x3FFFFFL; \
205 (((struct mvumi_compact_sgl *)(sgd))->flags) |= (sz); \
206} while (0)
207#define sgd_getsz(_mhba, sgd, sz) do { \
208 if (_mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) \
209 (sz) = GET_COMPACT_SGD_SIZE(sgd); \
210 else \
211 (sz) = (sgd)->size; \
212} while (0)
213
214#define sgd_setsz(_mhba, sgd, sz) do { \
215 if (_mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) \
216 SET_COMPACT_SGD_SIZE(sgd, sz); \
217 else \
218 (sgd)->size = (sz); \
219} while (0)
220
221#define sgd_inc(_mhba, sgd) do { \
222 if (_mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) \
223 sgd = (struct mvumi_sgl *)(((unsigned char *) (sgd)) + 12); \
224 else \
225 sgd = (struct mvumi_sgl *)(((unsigned char *) (sgd)) + 16); \
226} while (0)
184 227
185struct mvumi_res { 228struct mvumi_res {
186 struct list_head entry; 229 struct list_head entry;
@@ -197,7 +240,7 @@ enum resource_type {
197}; 240};
198 241
199struct mvumi_sense_data { 242struct mvumi_sense_data {
200 u8 error_eode:7; 243 u8 error_code:7;
201 u8 valid:1; 244 u8 valid:1;
202 u8 segment_number; 245 u8 segment_number;
203 u8 sense_key:4; 246 u8 sense_key:4;
@@ -220,6 +263,7 @@ struct mvumi_sense_data {
220struct mvumi_cmd { 263struct mvumi_cmd {
221 struct list_head queue_pointer; 264 struct list_head queue_pointer;
222 struct mvumi_msg_frame *frame; 265 struct mvumi_msg_frame *frame;
266 dma_addr_t frame_phys;
223 struct scsi_cmnd *scmd; 267 struct scsi_cmnd *scmd;
224 atomic_t sync_cmd; 268 atomic_t sync_cmd;
225 void *data_buf; 269 void *data_buf;
@@ -393,7 +437,8 @@ struct mvumi_hs_page2 {
393 u16 frame_length; 437 u16 frame_length;
394 438
395 u8 host_type; 439 u8 host_type;
396 u8 reserved[3]; 440 u8 host_cap;
441 u8 reserved[2];
397 struct version_info host_ver; 442 struct version_info host_ver;
398 u32 system_io_bus; 443 u32 system_io_bus;
399 u32 slot_number; 444 u32 slot_number;
@@ -435,8 +480,17 @@ struct mvumi_tag {
435 unsigned short size; 480 unsigned short size;
436}; 481};
437 482
483struct mvumi_device {
484 struct list_head list;
485 struct scsi_device *sdev;
486 u64 wwid;
487 u8 dev_type;
488 int id;
489};
490
438struct mvumi_hba { 491struct mvumi_hba {
439 void *base_addr[MAX_BASE_ADDRESS]; 492 void *base_addr[MAX_BASE_ADDRESS];
493 u32 pci_base[MAX_BASE_ADDRESS];
440 void *mmio; 494 void *mmio;
441 struct list_head cmd_pool; 495 struct list_head cmd_pool;
442 struct Scsi_Host *shost; 496 struct Scsi_Host *shost;
@@ -449,6 +503,9 @@ struct mvumi_hba {
449 void *ib_list; 503 void *ib_list;
450 dma_addr_t ib_list_phys; 504 dma_addr_t ib_list_phys;
451 505
506 void *ib_frame;
507 dma_addr_t ib_frame_phys;
508
452 void *ob_list; 509 void *ob_list;
453 dma_addr_t ob_list_phys; 510 dma_addr_t ob_list_phys;
454 511
@@ -477,12 +534,14 @@ struct mvumi_hba {
477 unsigned char hba_total_pages; 534 unsigned char hba_total_pages;
478 unsigned char fw_flag; 535 unsigned char fw_flag;
479 unsigned char request_id_enabled; 536 unsigned char request_id_enabled;
537 unsigned char eot_flag;
480 unsigned short hba_capability; 538 unsigned short hba_capability;
481 unsigned short io_seq; 539 unsigned short io_seq;
482 540
483 unsigned int ib_cur_slot; 541 unsigned int ib_cur_slot;
484 unsigned int ob_cur_slot; 542 unsigned int ob_cur_slot;
485 unsigned int fw_state; 543 unsigned int fw_state;
544 struct mutex sas_discovery_mutex;
486 545
487 struct list_head ob_data_list; 546 struct list_head ob_data_list;
488 struct list_head free_ob_list; 547 struct list_head free_ob_list;
@@ -491,14 +550,24 @@ struct mvumi_hba {
491 550
492 struct mvumi_tag tag_pool; 551 struct mvumi_tag tag_pool;
493 struct mvumi_cmd **tag_cmd; 552 struct mvumi_cmd **tag_cmd;
553 struct mvumi_hw_regs *regs;
554 struct mutex device_lock;
555 struct list_head mhba_dev_list;
556 struct list_head shost_dev_list;
557 struct task_struct *dm_thread;
558 atomic_t pnp_count;
494}; 559};
495 560
496struct mvumi_instance_template { 561struct mvumi_instance_template {
497 void (*fire_cmd)(struct mvumi_hba *, struct mvumi_cmd *); 562 void (*fire_cmd) (struct mvumi_hba *, struct mvumi_cmd *);
498 void (*enable_intr)(void *) ; 563 void (*enable_intr) (struct mvumi_hba *);
499 void (*disable_intr)(void *); 564 void (*disable_intr) (struct mvumi_hba *);
500 int (*clear_intr)(void *); 565 int (*clear_intr) (void *);
501 unsigned int (*read_fw_status_reg)(void *); 566 unsigned int (*read_fw_status_reg) (struct mvumi_hba *);
567 unsigned int (*check_ib_list) (struct mvumi_hba *);
568 int (*check_ob_list) (struct mvumi_hba *, unsigned int *,
569 unsigned int *);
570 int (*reset_host) (struct mvumi_hba *);
502}; 571};
503 572
504extern struct timezone sys_tz; 573extern struct timezone sys_tz;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 799a58bb9859..48fca47384b7 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2080,6 +2080,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2080 uint8_t domain; 2080 uint8_t domain;
2081 char connect_type[22]; 2081 char connect_type[22];
2082 struct qla_hw_data *ha = vha->hw; 2082 struct qla_hw_data *ha = vha->hw;
2083 unsigned long flags;
2083 2084
2084 /* Get host addresses. */ 2085 /* Get host addresses. */
2085 rval = qla2x00_get_adapter_id(vha, 2086 rval = qla2x00_get_adapter_id(vha,
@@ -2154,9 +2155,9 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2154 vha->d_id.b.area = area; 2155 vha->d_id.b.area = area;
2155 vha->d_id.b.al_pa = al_pa; 2156 vha->d_id.b.al_pa = al_pa;
2156 2157
2157 spin_lock(&ha->vport_slock); 2158 spin_lock_irqsave(&ha->vport_slock, flags);
2158 qlt_update_vp_map(vha, SET_AL_PA); 2159 qlt_update_vp_map(vha, SET_AL_PA);
2159 spin_unlock(&ha->vport_slock); 2160 spin_unlock_irqrestore(&ha->vport_slock, flags);
2160 2161
2161 if (!vha->flags.init_done) 2162 if (!vha->flags.init_done)
2162 ql_log(ql_log_info, vha, 0x2010, 2163 ql_log(ql_log_info, vha, 0x2010,
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index bd4708a422cd..20fd974f903a 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -149,6 +149,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
149int 149int
150qla24xx_disable_vp(scsi_qla_host_t *vha) 150qla24xx_disable_vp(scsi_qla_host_t *vha)
151{ 151{
152 unsigned long flags;
152 int ret; 153 int ret;
153 154
154 ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); 155 ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
@@ -156,7 +157,9 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
156 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 157 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
157 158
158 /* Remove port id from vp target map */ 159 /* Remove port id from vp target map */
160 spin_lock_irqsave(&vha->hw->vport_slock, flags);
159 qlt_update_vp_map(vha, RESET_AL_PA); 161 qlt_update_vp_map(vha, RESET_AL_PA);
162 spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
160 163
161 qla2x00_mark_vp_devices_dead(vha); 164 qla2x00_mark_vp_devices_dead(vha);
162 atomic_set(&vha->vp_state, VP_FAILED); 165 atomic_set(&vha->vp_state, VP_FAILED);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index bddc97c5c8e9..62aa5584f644 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -557,6 +557,7 @@ static bool qlt_check_fcport_exist(struct scsi_qla_host *vha,
557 int pmap_len; 557 int pmap_len;
558 fc_port_t *fcport; 558 fc_port_t *fcport;
559 int global_resets; 559 int global_resets;
560 unsigned long flags;
560 561
561retry: 562retry:
562 global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count); 563 global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
@@ -625,10 +626,10 @@ retry:
625 sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain, 626 sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain,
626 fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id); 627 fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id);
627 628
628 sess->s_id = fcport->d_id; 629 spin_lock_irqsave(&ha->hardware_lock, flags);
629 sess->loop_id = fcport->loop_id; 630 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
630 sess->conf_compl_supported = !!(fcport->flags & 631 (fcport->flags & FCF_CONF_COMP_SUPPORTED));
631 FCF_CONF_COMP_SUPPORTED); 632 spin_unlock_irqrestore(&ha->hardware_lock, flags);
632 633
633 res = true; 634 res = true;
634 635
@@ -740,10 +741,9 @@ static struct qla_tgt_sess *qlt_create_sess(
740 qlt_undelete_sess(sess); 741 qlt_undelete_sess(sess);
741 742
742 kref_get(&sess->se_sess->sess_kref); 743 kref_get(&sess->se_sess->sess_kref);
743 sess->s_id = fcport->d_id; 744 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
744 sess->loop_id = fcport->loop_id; 745 (fcport->flags & FCF_CONF_COMP_SUPPORTED));
745 sess->conf_compl_supported = !!(fcport->flags & 746
746 FCF_CONF_COMP_SUPPORTED);
747 if (sess->local && !local) 747 if (sess->local && !local)
748 sess->local = 0; 748 sess->local = 0;
749 spin_unlock_irqrestore(&ha->hardware_lock, flags); 749 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -796,8 +796,7 @@ static struct qla_tgt_sess *qlt_create_sess(
796 */ 796 */
797 kref_get(&sess->se_sess->sess_kref); 797 kref_get(&sess->se_sess->sess_kref);
798 798
799 sess->conf_compl_supported = !!(fcport->flags & 799 sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
800 FCF_CONF_COMP_SUPPORTED);
801 BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name)); 800 BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
802 memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name)); 801 memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
803 802
@@ -869,10 +868,8 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
869 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, 868 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
870 "Reappeared sess %p\n", sess); 869 "Reappeared sess %p\n", sess);
871 } 870 }
872 sess->s_id = fcport->d_id; 871 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
873 sess->loop_id = fcport->loop_id; 872 (fcport->flags & FCF_CONF_COMP_SUPPORTED));
874 sess->conf_compl_supported = !!(fcport->flags &
875 FCF_CONF_COMP_SUPPORTED);
876 } 873 }
877 874
878 if (sess && sess->local) { 875 if (sess && sess->local) {
@@ -1403,7 +1400,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
1403 ctio->u.status1.scsi_status = 1400 ctio->u.status1.scsi_status =
1404 __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID); 1401 __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
1405 ctio->u.status1.response_len = __constant_cpu_to_le16(8); 1402 ctio->u.status1.response_len = __constant_cpu_to_le16(8);
1406 ((uint32_t *)ctio->u.status1.sense_data)[0] = cpu_to_be32(resp_code); 1403 ctio->u.status1.sense_data[0] = resp_code;
1407 1404
1408 qla2x00_start_iocbs(ha, ha->req); 1405 qla2x00_start_iocbs(ha, ha->req);
1409} 1406}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 170af1571214..bad749561ec2 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -648,6 +648,7 @@ struct qla_tgt_func_tmpl {
648 648
649 int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *, 649 int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
650 void *, uint8_t *, uint16_t); 650 void *, uint8_t *, uint16_t);
651 void (*update_sess)(struct qla_tgt_sess *, port_id_t, uint16_t, bool);
651 struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *, 652 struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *,
652 const uint16_t); 653 const uint16_t);
653 struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *, 654 struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *,
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 4752f65a9272..3d74f2f39ae1 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -237,7 +237,7 @@ static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
237 struct tcm_qla2xxx_tpg, se_tpg); 237 struct tcm_qla2xxx_tpg, se_tpg);
238 struct tcm_qla2xxx_lport *lport = tpg->lport; 238 struct tcm_qla2xxx_lport *lport = tpg->lport;
239 239
240 return &lport->lport_name[0]; 240 return lport->lport_naa_name;
241} 241}
242 242
243static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg) 243static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
@@ -735,17 +735,6 @@ static int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
735 return 0; 735 return 0;
736} 736}
737 737
738static u16 tcm_qla2xxx_get_fabric_sense_len(void)
739{
740 return 0;
741}
742
743static u16 tcm_qla2xxx_set_fabric_sense_len(struct se_cmd *se_cmd,
744 u32 sense_length)
745{
746 return 0;
747}
748
749/* Local pointer to allocated TCM configfs fabric module */ 738/* Local pointer to allocated TCM configfs fabric module */
750struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs; 739struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
751struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs; 740struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
@@ -1468,6 +1457,78 @@ static int tcm_qla2xxx_check_initiator_node_acl(
1468 return 0; 1457 return 0;
1469} 1458}
1470 1459
1460static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
1461 uint16_t loop_id, bool conf_compl_supported)
1462{
1463 struct qla_tgt *tgt = sess->tgt;
1464 struct qla_hw_data *ha = tgt->ha;
1465 struct tcm_qla2xxx_lport *lport = ha->tgt.target_lport_ptr;
1466 struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
1467 struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
1468 struct tcm_qla2xxx_nacl, se_node_acl);
1469 u32 key;
1470
1471
1472 if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24)
1473 pr_info("Updating session %p from port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n",
1474 sess,
1475 sess->port_name[0], sess->port_name[1],
1476 sess->port_name[2], sess->port_name[3],
1477 sess->port_name[4], sess->port_name[5],
1478 sess->port_name[6], sess->port_name[7],
1479 sess->loop_id, loop_id,
1480 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
1481 s_id.b.domain, s_id.b.area, s_id.b.al_pa);
1482
1483 if (sess->loop_id != loop_id) {
1484 /*
1485 * Because we can shuffle loop IDs around and we
1486 * update different sessions non-atomically, we might
1487 * have overwritten this session's old loop ID
1488 * already, and we might end up overwriting some other
1489 * session that will be updated later. So we have to
1490 * be extra careful and we can't warn about those things...
1491 */
1492 if (lport->lport_loopid_map[sess->loop_id].se_nacl == se_nacl)
1493 lport->lport_loopid_map[sess->loop_id].se_nacl = NULL;
1494
1495 lport->lport_loopid_map[loop_id].se_nacl = se_nacl;
1496
1497 sess->loop_id = loop_id;
1498 }
1499
1500 if (sess->s_id.b24 != s_id.b24) {
1501 key = (((u32) sess->s_id.b.domain << 16) |
1502 ((u32) sess->s_id.b.area << 8) |
1503 ((u32) sess->s_id.b.al_pa));
1504
1505 if (btree_lookup32(&lport->lport_fcport_map, key))
1506 WARN(btree_remove32(&lport->lport_fcport_map, key) != se_nacl,
1507 "Found wrong se_nacl when updating s_id %x:%x:%x\n",
1508 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);
1509 else
1510 WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n",
1511 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);
1512
1513 key = (((u32) s_id.b.domain << 16) |
1514 ((u32) s_id.b.area << 8) |
1515 ((u32) s_id.b.al_pa));
1516
1517 if (btree_lookup32(&lport->lport_fcport_map, key)) {
1518 WARN(1, "Already have lport_fcport_map entry for s_id %x:%x:%x\n",
1519 s_id.b.domain, s_id.b.area, s_id.b.al_pa);
1520 btree_update32(&lport->lport_fcport_map, key, se_nacl);
1521 } else {
1522 btree_insert32(&lport->lport_fcport_map, key, se_nacl, GFP_ATOMIC);
1523 }
1524
1525 sess->s_id = s_id;
1526 nacl->nport_id = key;
1527 }
1528
1529 sess->conf_compl_supported = conf_compl_supported;
1530}
1531
1471/* 1532/*
1472 * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path. 1533 * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
1473 */ 1534 */
@@ -1478,6 +1539,7 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
1478 .free_cmd = tcm_qla2xxx_free_cmd, 1539 .free_cmd = tcm_qla2xxx_free_cmd,
1479 .free_mcmd = tcm_qla2xxx_free_mcmd, 1540 .free_mcmd = tcm_qla2xxx_free_mcmd,
1480 .free_session = tcm_qla2xxx_free_session, 1541 .free_session = tcm_qla2xxx_free_session,
1542 .update_sess = tcm_qla2xxx_update_sess,
1481 .check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl, 1543 .check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
1482 .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id, 1544 .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
1483 .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id, 1545 .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
@@ -1545,6 +1607,7 @@ static struct se_wwn *tcm_qla2xxx_make_lport(
1545 lport->lport_wwpn = wwpn; 1607 lport->lport_wwpn = wwpn;
1546 tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN, 1608 tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
1547 wwpn); 1609 wwpn);
1610 sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) wwpn);
1548 1611
1549 ret = tcm_qla2xxx_init_lport(lport); 1612 ret = tcm_qla2xxx_init_lport(lport);
1550 if (ret != 0) 1613 if (ret != 0)
@@ -1612,6 +1675,7 @@ static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
1612 lport->lport_npiv_wwnn = npiv_wwnn; 1675 lport->lport_npiv_wwnn = npiv_wwnn;
1613 tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0], 1676 tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
1614 TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn); 1677 TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
1678 sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);
1615 1679
1616/* FIXME: tcm_qla2xxx_npiv_make_lport */ 1680/* FIXME: tcm_qla2xxx_npiv_make_lport */
1617 ret = -ENOSYS; 1681 ret = -ENOSYS;
@@ -1691,8 +1755,6 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
1691 .queue_data_in = tcm_qla2xxx_queue_data_in, 1755 .queue_data_in = tcm_qla2xxx_queue_data_in,
1692 .queue_status = tcm_qla2xxx_queue_status, 1756 .queue_status = tcm_qla2xxx_queue_status,
1693 .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, 1757 .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
1694 .get_fabric_sense_len = tcm_qla2xxx_get_fabric_sense_len,
1695 .set_fabric_sense_len = tcm_qla2xxx_set_fabric_sense_len,
1696 /* 1758 /*
1697 * Setup function pointers for generic logic in 1759 * Setup function pointers for generic logic in
1698 * target_core_fabric_configfs.c 1760 * target_core_fabric_configfs.c
@@ -1740,8 +1802,6 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
1740 .queue_data_in = tcm_qla2xxx_queue_data_in, 1802 .queue_data_in = tcm_qla2xxx_queue_data_in,
1741 .queue_status = tcm_qla2xxx_queue_status, 1803 .queue_status = tcm_qla2xxx_queue_status,
1742 .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, 1804 .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
1743 .get_fabric_sense_len = tcm_qla2xxx_get_fabric_sense_len,
1744 .set_fabric_sense_len = tcm_qla2xxx_set_fabric_sense_len,
1745 /* 1805 /*
1746 * Setup function pointers for generic logic in 1806 * Setup function pointers for generic logic in
1747 * target_core_fabric_configfs.c 1807 * target_core_fabric_configfs.c
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 825498103352..9ba075fe9781 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -61,6 +61,8 @@ struct tcm_qla2xxx_lport {
61 u64 lport_npiv_wwnn; 61 u64 lport_npiv_wwnn;
62 /* ASCII formatted WWPN for FC Target Lport */ 62 /* ASCII formatted WWPN for FC Target Lport */
63 char lport_name[TCM_QLA2XXX_NAMELEN]; 63 char lport_name[TCM_QLA2XXX_NAMELEN];
64 /* ASCII formatted naa WWPN for VPD page 83 etc */
65 char lport_naa_name[TCM_QLA2XXX_NAMELEN];
64 /* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */ 66 /* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */
65 char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN]; 67 char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];
66 /* map for fc_port pointers in 24-bit FC Port ID space */ 68 /* map for fc_port pointers in 24-bit FC Port ID space */
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 57fbd5a3d4e2..5cda11c07c68 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2055,7 +2055,7 @@ static void unmap_region(sector_t lba, unsigned int len)
2055 block = lba + alignment; 2055 block = lba + alignment;
2056 rem = do_div(block, granularity); 2056 rem = do_div(block, granularity);
2057 2057
2058 if (rem == 0 && lba + granularity <= end && block < map_size) { 2058 if (rem == 0 && lba + granularity < end && block < map_size) {
2059 clear_bit(block, map_storep); 2059 clear_bit(block, map_storep);
2060 if (scsi_debug_lbprz) 2060 if (scsi_debug_lbprz)
2061 memset(fake_storep + 2061 memset(fake_storep +
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index de2337f255a7..c1b05a83d403 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -789,7 +789,6 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
789 int cmnd_size, int timeout, unsigned sense_bytes) 789 int cmnd_size, int timeout, unsigned sense_bytes)
790{ 790{
791 struct scsi_device *sdev = scmd->device; 791 struct scsi_device *sdev = scmd->device;
792 struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
793 struct Scsi_Host *shost = sdev->host; 792 struct Scsi_Host *shost = sdev->host;
794 DECLARE_COMPLETION_ONSTACK(done); 793 DECLARE_COMPLETION_ONSTACK(done);
795 unsigned long timeleft; 794 unsigned long timeleft;
@@ -845,8 +844,11 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
845 844
846 scsi_eh_restore_cmnd(scmd, &ses); 845 scsi_eh_restore_cmnd(scmd, &ses);
847 846
848 if (sdrv && sdrv->eh_action) 847 if (scmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
849 rtn = sdrv->eh_action(scmd, cmnd, cmnd_size, rtn); 848 struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
849 if (sdrv->eh_action)
850 rtn = sdrv->eh_action(scmd, cmnd, cmnd_size, rtn);
851 }
850 852
851 return rtn; 853 return rtn;
852} 854}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 528d52beaa1c..01440782feb2 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1221,7 +1221,12 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
1221 /* 1221 /*
1222 * At this point, all outstanding requests in the adapter 1222 * At this point, all outstanding requests in the adapter
1223 * should have been flushed out and return to us 1223 * should have been flushed out and return to us
1224 * There is a potential race here where the host may be in
1225 * the process of responding when we return from here.
1226 * Just wait for all in-transit packets to be accounted for
1227 * before we return from here.
1224 */ 1228 */
1229 storvsc_wait_to_drain(stor_device);
1225 1230
1226 return SUCCESS; 1231 return SUCCESS;
1227} 1232}
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 3e79a2f00042..595af1ae4421 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -219,7 +219,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
219 struct scatterlist sg; 219 struct scatterlist sg;
220 unsigned long flags; 220 unsigned long flags;
221 221
222 sg_set_buf(&sg, &event_node->event, sizeof(struct virtio_scsi_event)); 222 sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
223 223
224 spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); 224 spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
225 225
@@ -279,6 +279,31 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
279 } 279 }
280} 280}
281 281
282static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
283 struct virtio_scsi_event *event)
284{
285 struct scsi_device *sdev;
286 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
287 unsigned int target = event->lun[1];
288 unsigned int lun = (event->lun[2] << 8) | event->lun[3];
289 u8 asc = event->reason & 255;
290 u8 ascq = event->reason >> 8;
291
292 sdev = scsi_device_lookup(shost, 0, target, lun);
293 if (!sdev) {
294 pr_err("SCSI device %d 0 %d %d not found\n",
295 shost->host_no, target, lun);
296 return;
297 }
298
299 /* Handle "Parameters changed", "Mode parameters changed", and
300 "Capacity data has changed". */
301 if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
302 scsi_rescan_device(&sdev->sdev_gendev);
303
304 scsi_device_put(sdev);
305}
306
282static void virtscsi_handle_event(struct work_struct *work) 307static void virtscsi_handle_event(struct work_struct *work)
283{ 308{
284 struct virtio_scsi_event_node *event_node = 309 struct virtio_scsi_event_node *event_node =
@@ -297,6 +322,9 @@ static void virtscsi_handle_event(struct work_struct *work)
297 case VIRTIO_SCSI_T_TRANSPORT_RESET: 322 case VIRTIO_SCSI_T_TRANSPORT_RESET:
298 virtscsi_handle_transport_reset(vscsi, event); 323 virtscsi_handle_transport_reset(vscsi, event);
299 break; 324 break;
325 case VIRTIO_SCSI_T_PARAM_CHANGE:
326 virtscsi_handle_param_change(vscsi, event);
327 break;
300 default: 328 default:
301 pr_err("Unsupport virtio scsi event %x\n", event->event); 329 pr_err("Unsupport virtio scsi event %x\n", event->event);
302 } 330 }
@@ -677,7 +705,11 @@ static int __devinit virtscsi_probe(struct virtio_device *vdev)
677 cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1; 705 cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
678 shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue); 706 shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
679 shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF; 707 shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
680 shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1; 708
709 /* LUNs > 256 are reported with format 1, so they go in the range
710 * 16640-32767.
711 */
712 shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
681 shost->max_id = num_targets; 713 shost->max_id = num_targets;
682 shost->max_channel = 0; 714 shost->max_channel = 0;
683 shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE; 715 shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
@@ -733,7 +765,8 @@ static struct virtio_device_id id_table[] = {
733}; 765};
734 766
735static unsigned int features[] = { 767static unsigned int features[] = {
736 VIRTIO_SCSI_F_HOTPLUG 768 VIRTIO_SCSI_F_HOTPLUG,
769 VIRTIO_SCSI_F_CHANGE,
737}; 770};
738 771
739static struct virtio_driver virtio_scsi_driver = { 772static struct virtio_driver virtio_scsi_driver = {
diff --git a/drivers/sh/intc/access.c b/drivers/sh/intc/access.c
index f892ae1d212a..114390f967d2 100644
--- a/drivers/sh/intc/access.c
+++ b/drivers/sh/intc/access.c
@@ -75,54 +75,61 @@ unsigned long intc_get_field_from_handle(unsigned int value, unsigned int handle
75static unsigned long test_8(unsigned long addr, unsigned long h, 75static unsigned long test_8(unsigned long addr, unsigned long h,
76 unsigned long ignore) 76 unsigned long ignore)
77{ 77{
78 return intc_get_field_from_handle(__raw_readb(addr), h); 78 void __iomem *ptr = (void __iomem *)addr;
79 return intc_get_field_from_handle(__raw_readb(ptr), h);
79} 80}
80 81
81static unsigned long test_16(unsigned long addr, unsigned long h, 82static unsigned long test_16(unsigned long addr, unsigned long h,
82 unsigned long ignore) 83 unsigned long ignore)
83{ 84{
84 return intc_get_field_from_handle(__raw_readw(addr), h); 85 void __iomem *ptr = (void __iomem *)addr;
86 return intc_get_field_from_handle(__raw_readw(ptr), h);
85} 87}
86 88
87static unsigned long test_32(unsigned long addr, unsigned long h, 89static unsigned long test_32(unsigned long addr, unsigned long h,
88 unsigned long ignore) 90 unsigned long ignore)
89{ 91{
90 return intc_get_field_from_handle(__raw_readl(addr), h); 92 void __iomem *ptr = (void __iomem *)addr;
93 return intc_get_field_from_handle(__raw_readl(ptr), h);
91} 94}
92 95
93static unsigned long write_8(unsigned long addr, unsigned long h, 96static unsigned long write_8(unsigned long addr, unsigned long h,
94 unsigned long data) 97 unsigned long data)
95{ 98{
96 __raw_writeb(intc_set_field_from_handle(0, data, h), addr); 99 void __iomem *ptr = (void __iomem *)addr;
97 (void)__raw_readb(addr); /* Defeat write posting */ 100 __raw_writeb(intc_set_field_from_handle(0, data, h), ptr);
101 (void)__raw_readb(ptr); /* Defeat write posting */
98 return 0; 102 return 0;
99} 103}
100 104
101static unsigned long write_16(unsigned long addr, unsigned long h, 105static unsigned long write_16(unsigned long addr, unsigned long h,
102 unsigned long data) 106 unsigned long data)
103{ 107{
104 __raw_writew(intc_set_field_from_handle(0, data, h), addr); 108 void __iomem *ptr = (void __iomem *)addr;
105 (void)__raw_readw(addr); /* Defeat write posting */ 109 __raw_writew(intc_set_field_from_handle(0, data, h), ptr);
110 (void)__raw_readw(ptr); /* Defeat write posting */
106 return 0; 111 return 0;
107} 112}
108 113
109static unsigned long write_32(unsigned long addr, unsigned long h, 114static unsigned long write_32(unsigned long addr, unsigned long h,
110 unsigned long data) 115 unsigned long data)
111{ 116{
112 __raw_writel(intc_set_field_from_handle(0, data, h), addr); 117 void __iomem *ptr = (void __iomem *)addr;
113 (void)__raw_readl(addr); /* Defeat write posting */ 118 __raw_writel(intc_set_field_from_handle(0, data, h), ptr);
119 (void)__raw_readl(ptr); /* Defeat write posting */
114 return 0; 120 return 0;
115} 121}
116 122
117static unsigned long modify_8(unsigned long addr, unsigned long h, 123static unsigned long modify_8(unsigned long addr, unsigned long h,
118 unsigned long data) 124 unsigned long data)
119{ 125{
126 void __iomem *ptr = (void __iomem *)addr;
120 unsigned long flags; 127 unsigned long flags;
121 unsigned int value; 128 unsigned int value;
122 local_irq_save(flags); 129 local_irq_save(flags);
123 value = intc_set_field_from_handle(__raw_readb(addr), data, h); 130 value = intc_set_field_from_handle(__raw_readb(ptr), data, h);
124 __raw_writeb(value, addr); 131 __raw_writeb(value, ptr);
125 (void)__raw_readb(addr); /* Defeat write posting */ 132 (void)__raw_readb(ptr); /* Defeat write posting */
126 local_irq_restore(flags); 133 local_irq_restore(flags);
127 return 0; 134 return 0;
128} 135}
@@ -130,12 +137,13 @@ static unsigned long modify_8(unsigned long addr, unsigned long h,
130static unsigned long modify_16(unsigned long addr, unsigned long h, 137static unsigned long modify_16(unsigned long addr, unsigned long h,
131 unsigned long data) 138 unsigned long data)
132{ 139{
140 void __iomem *ptr = (void __iomem *)addr;
133 unsigned long flags; 141 unsigned long flags;
134 unsigned int value; 142 unsigned int value;
135 local_irq_save(flags); 143 local_irq_save(flags);
136 value = intc_set_field_from_handle(__raw_readw(addr), data, h); 144 value = intc_set_field_from_handle(__raw_readw(ptr), data, h);
137 __raw_writew(value, addr); 145 __raw_writew(value, ptr);
138 (void)__raw_readw(addr); /* Defeat write posting */ 146 (void)__raw_readw(ptr); /* Defeat write posting */
139 local_irq_restore(flags); 147 local_irq_restore(flags);
140 return 0; 148 return 0;
141} 149}
@@ -143,12 +151,13 @@ static unsigned long modify_16(unsigned long addr, unsigned long h,
143static unsigned long modify_32(unsigned long addr, unsigned long h, 151static unsigned long modify_32(unsigned long addr, unsigned long h,
144 unsigned long data) 152 unsigned long data)
145{ 153{
154 void __iomem *ptr = (void __iomem *)addr;
146 unsigned long flags; 155 unsigned long flags;
147 unsigned int value; 156 unsigned int value;
148 local_irq_save(flags); 157 local_irq_save(flags);
149 value = intc_set_field_from_handle(__raw_readl(addr), data, h); 158 value = intc_set_field_from_handle(__raw_readl(ptr), data, h);
150 __raw_writel(value, addr); 159 __raw_writel(value, ptr);
151 (void)__raw_readl(addr); /* Defeat write posting */ 160 (void)__raw_readl(ptr); /* Defeat write posting */
152 local_irq_restore(flags); 161 local_irq_restore(flags);
153 return 0; 162 return 0;
154} 163}
diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c
index 012df2676a26..46427b48e2f1 100644
--- a/drivers/sh/intc/chip.c
+++ b/drivers/sh/intc/chip.c
@@ -83,7 +83,7 @@ static void intc_mask_ack(struct irq_data *data)
83 unsigned int irq = data->irq; 83 unsigned int irq = data->irq;
84 struct intc_desc_int *d = get_intc_desc(irq); 84 struct intc_desc_int *d = get_intc_desc(irq);
85 unsigned long handle = intc_get_ack_handle(irq); 85 unsigned long handle = intc_get_ack_handle(irq);
86 unsigned long addr; 86 void __iomem *addr;
87 87
88 intc_disable(data); 88 intc_disable(data);
89 89
@@ -91,7 +91,7 @@ static void intc_mask_ack(struct irq_data *data)
91 if (handle) { 91 if (handle) {
92 unsigned int value; 92 unsigned int value;
93 93
94 addr = INTC_REG(d, _INTC_ADDR_D(handle), 0); 94 addr = (void __iomem *)INTC_REG(d, _INTC_ADDR_D(handle), 0);
95 value = intc_set_field_from_handle(0, 1, handle); 95 value = intc_set_field_from_handle(0, 1, handle);
96 96
97 switch (_INTC_FN(handle)) { 97 switch (_INTC_FN(handle)) {
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 8c2ff2490d99..1acae359cabe 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -134,6 +134,7 @@ config SPI_DAVINCI
134 tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller" 134 tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
135 depends on ARCH_DAVINCI 135 depends on ARCH_DAVINCI
136 select SPI_BITBANG 136 select SPI_BITBANG
137 select TI_EDMA
137 help 138 help
138 SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules. 139 SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
139 140
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 3afe2f4f5b8e..147dfa87a64b 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -25,13 +25,14 @@
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/err.h> 26#include <linux/err.h>
27#include <linux/clk.h> 27#include <linux/clk.h>
28#include <linux/dmaengine.h>
28#include <linux/dma-mapping.h> 29#include <linux/dma-mapping.h>
30#include <linux/edma.h>
29#include <linux/spi/spi.h> 31#include <linux/spi/spi.h>
30#include <linux/spi/spi_bitbang.h> 32#include <linux/spi/spi_bitbang.h>
31#include <linux/slab.h> 33#include <linux/slab.h>
32 34
33#include <linux/platform_data/spi-davinci.h> 35#include <linux/platform_data/spi-davinci.h>
34#include <mach/edma.h>
35 36
36#define SPI_NO_RESOURCE ((resource_size_t)-1) 37#define SPI_NO_RESOURCE ((resource_size_t)-1)
37 38
@@ -113,14 +114,6 @@
113#define SPIDEF 0x4c 114#define SPIDEF 0x4c
114#define SPIFMT0 0x50 115#define SPIFMT0 0x50
115 116
116/* We have 2 DMA channels per CS, one for RX and one for TX */
117struct davinci_spi_dma {
118 int tx_channel;
119 int rx_channel;
120 int dummy_param_slot;
121 enum dma_event_q eventq;
122};
123
124/* SPI Controller driver's private data. */ 117/* SPI Controller driver's private data. */
125struct davinci_spi { 118struct davinci_spi {
126 struct spi_bitbang bitbang; 119 struct spi_bitbang bitbang;
@@ -134,11 +127,14 @@ struct davinci_spi {
134 127
135 const void *tx; 128 const void *tx;
136 void *rx; 129 void *rx;
137#define SPI_TMP_BUFSZ (SMP_CACHE_BYTES + 1)
138 u8 rx_tmp_buf[SPI_TMP_BUFSZ];
139 int rcount; 130 int rcount;
140 int wcount; 131 int wcount;
141 struct davinci_spi_dma dma; 132
133 struct dma_chan *dma_rx;
134 struct dma_chan *dma_tx;
135 int dma_rx_chnum;
136 int dma_tx_chnum;
137
142 struct davinci_spi_platform_data *pdata; 138 struct davinci_spi_platform_data *pdata;
143 139
144 void (*get_rx)(u32 rx_data, struct davinci_spi *); 140 void (*get_rx)(u32 rx_data, struct davinci_spi *);
@@ -496,21 +492,23 @@ out:
496 return errors; 492 return errors;
497} 493}
498 494
499static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data) 495static void davinci_spi_dma_rx_callback(void *data)
500{ 496{
501 struct davinci_spi *dspi = data; 497 struct davinci_spi *dspi = (struct davinci_spi *)data;
502 struct davinci_spi_dma *dma = &dspi->dma;
503 498
504 edma_stop(lch); 499 dspi->rcount = 0;
505 500
506 if (status == DMA_COMPLETE) { 501 if (!dspi->wcount && !dspi->rcount)
507 if (lch == dma->rx_channel) 502 complete(&dspi->done);
508 dspi->rcount = 0; 503}
509 if (lch == dma->tx_channel)
510 dspi->wcount = 0;
511 }
512 504
513 if ((!dspi->wcount && !dspi->rcount) || (status != DMA_COMPLETE)) 505static void davinci_spi_dma_tx_callback(void *data)
506{
507 struct davinci_spi *dspi = (struct davinci_spi *)data;
508
509 dspi->wcount = 0;
510
511 if (!dspi->wcount && !dspi->rcount)
514 complete(&dspi->done); 512 complete(&dspi->done);
515} 513}
516 514
@@ -526,20 +524,20 @@ static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data)
526static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) 524static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
527{ 525{
528 struct davinci_spi *dspi; 526 struct davinci_spi *dspi;
529 int data_type, ret; 527 int data_type, ret = -ENOMEM;
530 u32 tx_data, spidat1; 528 u32 tx_data, spidat1;
531 u32 errors = 0; 529 u32 errors = 0;
532 struct davinci_spi_config *spicfg; 530 struct davinci_spi_config *spicfg;
533 struct davinci_spi_platform_data *pdata; 531 struct davinci_spi_platform_data *pdata;
534 unsigned uninitialized_var(rx_buf_count); 532 unsigned uninitialized_var(rx_buf_count);
535 struct device *sdev; 533 void *dummy_buf = NULL;
534 struct scatterlist sg_rx, sg_tx;
536 535
537 dspi = spi_master_get_devdata(spi->master); 536 dspi = spi_master_get_devdata(spi->master);
538 pdata = dspi->pdata; 537 pdata = dspi->pdata;
539 spicfg = (struct davinci_spi_config *)spi->controller_data; 538 spicfg = (struct davinci_spi_config *)spi->controller_data;
540 if (!spicfg) 539 if (!spicfg)
541 spicfg = &davinci_spi_default_cfg; 540 spicfg = &davinci_spi_default_cfg;
542 sdev = dspi->bitbang.master->dev.parent;
543 541
544 /* convert len to words based on bits_per_word */ 542 /* convert len to words based on bits_per_word */
545 data_type = dspi->bytes_per_word[spi->chip_select]; 543 data_type = dspi->bytes_per_word[spi->chip_select];
@@ -567,112 +565,83 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
567 spidat1 |= tx_data & 0xFFFF; 565 spidat1 |= tx_data & 0xFFFF;
568 iowrite32(spidat1, dspi->base + SPIDAT1); 566 iowrite32(spidat1, dspi->base + SPIDAT1);
569 } else { 567 } else {
570 struct davinci_spi_dma *dma; 568 struct dma_slave_config dma_rx_conf = {
571 unsigned long tx_reg, rx_reg; 569 .direction = DMA_DEV_TO_MEM,
572 struct edmacc_param param; 570 .src_addr = (unsigned long)dspi->pbase + SPIBUF,
573 void *rx_buf; 571 .src_addr_width = data_type,
574 int b, c; 572 .src_maxburst = 1,
575 573 };
576 dma = &dspi->dma; 574 struct dma_slave_config dma_tx_conf = {
577 575 .direction = DMA_MEM_TO_DEV,
578 tx_reg = (unsigned long)dspi->pbase + SPIDAT1; 576 .dst_addr = (unsigned long)dspi->pbase + SPIDAT1,
579 rx_reg = (unsigned long)dspi->pbase + SPIBUF; 577 .dst_addr_width = data_type,
580 578 .dst_maxburst = 1,
581 /* 579 };
582 * Transmit DMA setup 580 struct dma_async_tx_descriptor *rxdesc;
583 * 581 struct dma_async_tx_descriptor *txdesc;
584 * If there is transmit data, map the transmit buffer, set it 582 void *buf;
585 * as the source of data and set the source B index to data 583
586 * size. If there is no transmit data, set the transmit register 584 dummy_buf = kzalloc(t->len, GFP_KERNEL);
587 * as the source of data, and set the source B index to zero. 585 if (!dummy_buf)
588 * 586 goto err_alloc_dummy_buf;
589 * The destination is always the transmit register itself. And 587
590 * the destination never increments. 588 dmaengine_slave_config(dspi->dma_rx, &dma_rx_conf);
591 */ 589 dmaengine_slave_config(dspi->dma_tx, &dma_tx_conf);
592 590
593 if (t->tx_buf) { 591 sg_init_table(&sg_rx, 1);
594 t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, 592 if (!t->rx_buf)
595 t->len, DMA_TO_DEVICE); 593 buf = dummy_buf;
596 if (dma_mapping_error(&spi->dev, t->tx_dma)) {
597 dev_dbg(sdev, "Unable to DMA map %d bytes"
598 "TX buffer\n", t->len);
599 return -ENOMEM;
600 }
601 }
602
603 /*
604 * If number of words is greater than 65535, then we need
605 * to configure a 3 dimension transfer. Use the BCNTRLD
606 * feature to allow for transfers that aren't even multiples
607 * of 65535 (or any other possible b size) by first transferring
608 * the remainder amount then grabbing the next N blocks of
609 * 65535 words.
610 */
611
612 c = dspi->wcount / (SZ_64K - 1); /* N 65535 Blocks */
613 b = dspi->wcount - c * (SZ_64K - 1); /* Remainder */
614 if (b)
615 c++;
616 else 594 else
617 b = SZ_64K - 1; 595 buf = t->rx_buf;
618 596 t->rx_dma = dma_map_single(&spi->dev, buf,
619 param.opt = TCINTEN | EDMA_TCC(dma->tx_channel); 597 t->len, DMA_FROM_DEVICE);
620 param.src = t->tx_buf ? t->tx_dma : tx_reg; 598 if (!t->rx_dma) {
621 param.a_b_cnt = b << 16 | data_type; 599 ret = -EFAULT;
622 param.dst = tx_reg; 600 goto err_rx_map;
623 param.src_dst_bidx = t->tx_buf ? data_type : 0;
624 param.link_bcntrld = 0xffffffff;
625 param.src_dst_cidx = t->tx_buf ? data_type : 0;
626 param.ccnt = c;
627 edma_write_slot(dma->tx_channel, &param);
628 edma_link(dma->tx_channel, dma->dummy_param_slot);
629
630 /*
631 * Receive DMA setup
632 *
633 * If there is receive buffer, use it to receive data. If there
634 * is none provided, use a temporary receive buffer. Set the
635 * destination B index to 0 so effectively only one byte is used
636 * in the temporary buffer (address does not increment).
637 *
638 * The source of receive data is the receive data register. The
639 * source address never increments.
640 */
641
642 if (t->rx_buf) {
643 rx_buf = t->rx_buf;
644 rx_buf_count = t->len;
645 } else {
646 rx_buf = dspi->rx_tmp_buf;
647 rx_buf_count = sizeof(dspi->rx_tmp_buf);
648 } 601 }
602 sg_dma_address(&sg_rx) = t->rx_dma;
603 sg_dma_len(&sg_rx) = t->len;
649 604
650 t->rx_dma = dma_map_single(&spi->dev, rx_buf, rx_buf_count, 605 sg_init_table(&sg_tx, 1);
651 DMA_FROM_DEVICE); 606 if (!t->tx_buf)
652 if (dma_mapping_error(&spi->dev, t->rx_dma)) { 607 buf = dummy_buf;
653 dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", 608 else
654 rx_buf_count); 609 buf = (void *)t->tx_buf;
655 if (t->tx_buf) 610 t->tx_dma = dma_map_single(&spi->dev, buf,
656 dma_unmap_single(&spi->dev, t->tx_dma, t->len, 611 t->len, DMA_FROM_DEVICE);
657 DMA_TO_DEVICE); 612 if (!t->tx_dma) {
658 return -ENOMEM; 613 ret = -EFAULT;
614 goto err_tx_map;
659 } 615 }
660 616 sg_dma_address(&sg_tx) = t->tx_dma;
661 param.opt = TCINTEN | EDMA_TCC(dma->rx_channel); 617 sg_dma_len(&sg_tx) = t->len;
662 param.src = rx_reg; 618
663 param.a_b_cnt = b << 16 | data_type; 619 rxdesc = dmaengine_prep_slave_sg(dspi->dma_rx,
664 param.dst = t->rx_dma; 620 &sg_rx, 1, DMA_DEV_TO_MEM,
665 param.src_dst_bidx = (t->rx_buf ? data_type : 0) << 16; 621 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
666 param.link_bcntrld = 0xffffffff; 622 if (!rxdesc)
667 param.src_dst_cidx = (t->rx_buf ? data_type : 0) << 16; 623 goto err_desc;
668 param.ccnt = c; 624
669 edma_write_slot(dma->rx_channel, &param); 625 txdesc = dmaengine_prep_slave_sg(dspi->dma_tx,
626 &sg_tx, 1, DMA_MEM_TO_DEV,
627 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
628 if (!txdesc)
629 goto err_desc;
630
631 rxdesc->callback = davinci_spi_dma_rx_callback;
632 rxdesc->callback_param = (void *)dspi;
633 txdesc->callback = davinci_spi_dma_tx_callback;
634 txdesc->callback_param = (void *)dspi;
670 635
671 if (pdata->cshold_bug) 636 if (pdata->cshold_bug)
672 iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2); 637 iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2);
673 638
674 edma_start(dma->rx_channel); 639 dmaengine_submit(rxdesc);
675 edma_start(dma->tx_channel); 640 dmaengine_submit(txdesc);
641
642 dma_async_issue_pending(dspi->dma_rx);
643 dma_async_issue_pending(dspi->dma_tx);
644
676 set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); 645 set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
677 } 646 }
678 647
@@ -690,15 +659,13 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
690 659
691 clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL); 660 clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL);
692 if (spicfg->io_type == SPI_IO_TYPE_DMA) { 661 if (spicfg->io_type == SPI_IO_TYPE_DMA) {
693
694 if (t->tx_buf)
695 dma_unmap_single(&spi->dev, t->tx_dma, t->len,
696 DMA_TO_DEVICE);
697
698 dma_unmap_single(&spi->dev, t->rx_dma, rx_buf_count,
699 DMA_FROM_DEVICE);
700
701 clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); 662 clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
663
664 dma_unmap_single(&spi->dev, t->rx_dma,
665 t->len, DMA_FROM_DEVICE);
666 dma_unmap_single(&spi->dev, t->tx_dma,
667 t->len, DMA_TO_DEVICE);
668 kfree(dummy_buf);
702 } 669 }
703 670
704 clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); 671 clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
@@ -716,11 +683,20 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
716 } 683 }
717 684
718 if (dspi->rcount != 0 || dspi->wcount != 0) { 685 if (dspi->rcount != 0 || dspi->wcount != 0) {
719 dev_err(sdev, "SPI data transfer error\n"); 686 dev_err(&spi->dev, "SPI data transfer error\n");
720 return -EIO; 687 return -EIO;
721 } 688 }
722 689
723 return t->len; 690 return t->len;
691
692err_desc:
693 dma_unmap_single(&spi->dev, t->tx_dma, t->len, DMA_TO_DEVICE);
694err_tx_map:
695 dma_unmap_single(&spi->dev, t->rx_dma, t->len, DMA_FROM_DEVICE);
696err_rx_map:
697 kfree(dummy_buf);
698err_alloc_dummy_buf:
699 return ret;
724} 700}
725 701
726/** 702/**
@@ -751,39 +727,33 @@ static irqreturn_t davinci_spi_irq(s32 irq, void *data)
751 727
752static int davinci_spi_request_dma(struct davinci_spi *dspi) 728static int davinci_spi_request_dma(struct davinci_spi *dspi)
753{ 729{
730 dma_cap_mask_t mask;
731 struct device *sdev = dspi->bitbang.master->dev.parent;
754 int r; 732 int r;
755 struct davinci_spi_dma *dma = &dspi->dma;
756 733
757 r = edma_alloc_channel(dma->rx_channel, davinci_spi_dma_callback, dspi, 734 dma_cap_zero(mask);
758 dma->eventq); 735 dma_cap_set(DMA_SLAVE, mask);
759 if (r < 0) { 736
760 pr_err("Unable to request DMA channel for SPI RX\n"); 737 dspi->dma_rx = dma_request_channel(mask, edma_filter_fn,
761 r = -EAGAIN; 738 &dspi->dma_rx_chnum);
739 if (!dspi->dma_rx) {
740 dev_err(sdev, "request RX DMA channel failed\n");
741 r = -ENODEV;
762 goto rx_dma_failed; 742 goto rx_dma_failed;
763 } 743 }
764 744
765 r = edma_alloc_channel(dma->tx_channel, davinci_spi_dma_callback, dspi, 745 dspi->dma_tx = dma_request_channel(mask, edma_filter_fn,
766 dma->eventq); 746 &dspi->dma_tx_chnum);
767 if (r < 0) { 747 if (!dspi->dma_tx) {
768 pr_err("Unable to request DMA channel for SPI TX\n"); 748 dev_err(sdev, "request TX DMA channel failed\n");
769 r = -EAGAIN; 749 r = -ENODEV;
770 goto tx_dma_failed; 750 goto tx_dma_failed;
771 } 751 }
772 752
773 r = edma_alloc_slot(EDMA_CTLR(dma->tx_channel), EDMA_SLOT_ANY);
774 if (r < 0) {
775 pr_err("Unable to request SPI TX DMA param slot\n");
776 r = -EAGAIN;
777 goto param_failed;
778 }
779 dma->dummy_param_slot = r;
780 edma_link(dma->dummy_param_slot, dma->dummy_param_slot);
781
782 return 0; 753 return 0;
783param_failed: 754
784 edma_free_channel(dma->tx_channel);
785tx_dma_failed: 755tx_dma_failed:
786 edma_free_channel(dma->rx_channel); 756 dma_release_channel(dspi->dma_rx);
787rx_dma_failed: 757rx_dma_failed:
788 return r; 758 return r;
789} 759}
@@ -898,9 +868,8 @@ static int __devinit davinci_spi_probe(struct platform_device *pdev)
898 dspi->bitbang.txrx_bufs = davinci_spi_bufs; 868 dspi->bitbang.txrx_bufs = davinci_spi_bufs;
899 if (dma_rx_chan != SPI_NO_RESOURCE && 869 if (dma_rx_chan != SPI_NO_RESOURCE &&
900 dma_tx_chan != SPI_NO_RESOURCE) { 870 dma_tx_chan != SPI_NO_RESOURCE) {
901 dspi->dma.rx_channel = dma_rx_chan; 871 dspi->dma_rx_chnum = dma_rx_chan;
902 dspi->dma.tx_channel = dma_tx_chan; 872 dspi->dma_tx_chnum = dma_tx_chan;
903 dspi->dma.eventq = pdata->dma_event_q;
904 873
905 ret = davinci_spi_request_dma(dspi); 874 ret = davinci_spi_request_dma(dspi);
906 if (ret) 875 if (ret)
@@ -955,9 +924,8 @@ static int __devinit davinci_spi_probe(struct platform_device *pdev)
955 return ret; 924 return ret;
956 925
957free_dma: 926free_dma:
958 edma_free_channel(dspi->dma.tx_channel); 927 dma_release_channel(dspi->dma_rx);
959 edma_free_channel(dspi->dma.rx_channel); 928 dma_release_channel(dspi->dma_tx);
960 edma_free_slot(dspi->dma.dummy_param_slot);
961free_clk: 929free_clk:
962 clk_disable(dspi->clk); 930 clk_disable(dspi->clk);
963 clk_put(dspi->clk); 931 clk_put(dspi->clk);
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index edf1360ab09e..86dd04d6bc87 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -323,6 +323,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
323 if (!ret) { 323 if (!ret) {
324 dev_err(ssp->dev, "DMA transfer timeout\n"); 324 dev_err(ssp->dev, "DMA transfer timeout\n");
325 ret = -ETIMEDOUT; 325 ret = -ETIMEDOUT;
326 dmaengine_terminate_all(ssp->dmach);
326 goto err_vmalloc; 327 goto err_vmalloc;
327 } 328 }
328 329
@@ -480,7 +481,7 @@ static int mxs_spi_transfer_one(struct spi_master *master,
480 first = last = 0; 481 first = last = 0;
481 } 482 }
482 483
483 m->status = 0; 484 m->status = status;
484 spi_finalize_current_message(master); 485 spi_finalize_current_message(master);
485 486
486 return status; 487 return status;
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 919464102d33..a1db91a99b89 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -2186,8 +2186,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
2186 printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n", 2186 printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
2187 adev->res.start, pl022->virtbase); 2187 adev->res.start, pl022->virtbase);
2188 2188
2189 pm_runtime_resume(dev);
2190
2191 pl022->clk = devm_clk_get(&adev->dev, NULL); 2189 pl022->clk = devm_clk_get(&adev->dev, NULL);
2192 if (IS_ERR(pl022->clk)) { 2190 if (IS_ERR(pl022->clk)) {
2193 status = PTR_ERR(pl022->clk); 2191 status = PTR_ERR(pl022->clk);
@@ -2292,7 +2290,6 @@ pl022_remove(struct amba_device *adev)
2292 2290
2293 clk_disable(pl022->clk); 2291 clk_disable(pl022->clk);
2294 clk_unprepare(pl022->clk); 2292 clk_unprepare(pl022->clk);
2295 pm_runtime_disable(&adev->dev);
2296 amba_release_regions(adev); 2293 amba_release_regions(adev);
2297 tasklet_disable(&pl022->pump_transfers); 2294 tasklet_disable(&pl022->pump_transfers);
2298 spi_unregister_master(pl022->master); 2295 spi_unregister_master(pl022->master);
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 4894bde4bbff..30faf6d4ab91 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -147,8 +147,6 @@ struct rspi_data {
147 unsigned char spsr; 147 unsigned char spsr;
148 148
149 /* for dmaengine */ 149 /* for dmaengine */
150 struct sh_dmae_slave dma_tx;
151 struct sh_dmae_slave dma_rx;
152 struct dma_chan *chan_tx; 150 struct dma_chan *chan_tx;
153 struct dma_chan *chan_rx; 151 struct dma_chan *chan_rx;
154 int irq; 152 int irq;
@@ -663,20 +661,16 @@ static irqreturn_t rspi_irq(int irq, void *_sr)
663 return ret; 661 return ret;
664} 662}
665 663
666static bool rspi_filter(struct dma_chan *chan, void *filter_param) 664static int __devinit rspi_request_dma(struct rspi_data *rspi,
667{ 665 struct platform_device *pdev)
668 chan->private = filter_param;
669 return true;
670}
671
672static void __devinit rspi_request_dma(struct rspi_data *rspi,
673 struct platform_device *pdev)
674{ 666{
675 struct rspi_plat_data *rspi_pd = pdev->dev.platform_data; 667 struct rspi_plat_data *rspi_pd = pdev->dev.platform_data;
676 dma_cap_mask_t mask; 668 dma_cap_mask_t mask;
669 struct dma_slave_config cfg;
670 int ret;
677 671
678 if (!rspi_pd) 672 if (!rspi_pd)
679 return; 673 return 0; /* The driver assumes no error. */
680 674
681 rspi->dma_width_16bit = rspi_pd->dma_width_16bit; 675 rspi->dma_width_16bit = rspi_pd->dma_width_16bit;
682 676
@@ -684,21 +678,35 @@ static void __devinit rspi_request_dma(struct rspi_data *rspi,
684 if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) { 678 if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) {
685 dma_cap_zero(mask); 679 dma_cap_zero(mask);
686 dma_cap_set(DMA_SLAVE, mask); 680 dma_cap_set(DMA_SLAVE, mask);
687 rspi->dma_rx.slave_id = rspi_pd->dma_rx_id; 681 rspi->chan_rx = dma_request_channel(mask, shdma_chan_filter,
688 rspi->chan_rx = dma_request_channel(mask, rspi_filter, 682 (void *)rspi_pd->dma_rx_id);
689 &rspi->dma_rx); 683 if (rspi->chan_rx) {
690 if (rspi->chan_rx) 684 cfg.slave_id = rspi_pd->dma_rx_id;
691 dev_info(&pdev->dev, "Use DMA when rx.\n"); 685 cfg.direction = DMA_DEV_TO_MEM;
686 ret = dmaengine_slave_config(rspi->chan_rx, &cfg);
687 if (!ret)
688 dev_info(&pdev->dev, "Use DMA when rx.\n");
689 else
690 return ret;
691 }
692 } 692 }
693 if (rspi_pd->dma_tx_id) { 693 if (rspi_pd->dma_tx_id) {
694 dma_cap_zero(mask); 694 dma_cap_zero(mask);
695 dma_cap_set(DMA_SLAVE, mask); 695 dma_cap_set(DMA_SLAVE, mask);
696 rspi->dma_tx.slave_id = rspi_pd->dma_tx_id; 696 rspi->chan_tx = dma_request_channel(mask, shdma_chan_filter,
697 rspi->chan_tx = dma_request_channel(mask, rspi_filter, 697 (void *)rspi_pd->dma_tx_id);
698 &rspi->dma_tx); 698 if (rspi->chan_tx) {
699 if (rspi->chan_tx) 699 cfg.slave_id = rspi_pd->dma_tx_id;
700 dev_info(&pdev->dev, "Use DMA when tx\n"); 700 cfg.direction = DMA_MEM_TO_DEV;
701 ret = dmaengine_slave_config(rspi->chan_tx, &cfg);
702 if (!ret)
703 dev_info(&pdev->dev, "Use DMA when tx\n");
704 else
705 return ret;
706 }
701 } 707 }
708
709 return 0;
702} 710}
703 711
704static void __devexit rspi_release_dma(struct rspi_data *rspi) 712static void __devexit rspi_release_dma(struct rspi_data *rspi)
@@ -788,7 +796,11 @@ static int __devinit rspi_probe(struct platform_device *pdev)
788 } 796 }
789 797
790 rspi->irq = irq; 798 rspi->irq = irq;
791 rspi_request_dma(rspi, pdev); 799 ret = rspi_request_dma(rspi, pdev);
800 if (ret < 0) {
801 dev_err(&pdev->dev, "rspi_request_dma failed.\n");
802 goto error4;
803 }
792 804
793 ret = spi_register_master(master); 805 ret = spi_register_master(master);
794 if (ret < 0) { 806 if (ret < 0) {
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 1a81c90a4a71..6e7a805d324d 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -132,7 +132,7 @@
132 132
133struct s3c64xx_spi_dma_data { 133struct s3c64xx_spi_dma_data {
134 unsigned ch; 134 unsigned ch;
135 enum dma_data_direction direction; 135 enum dma_transfer_direction direction;
136 enum dma_ch dmach; 136 enum dma_ch dmach;
137 struct property *dma_prop; 137 struct property *dma_prop;
138}; 138};
@@ -1067,11 +1067,11 @@ static int __devinit s3c64xx_spi_get_dmares(
1067 1067
1068 if (tx) { 1068 if (tx) {
1069 dma_data = &sdd->tx_dma; 1069 dma_data = &sdd->tx_dma;
1070 dma_data->direction = DMA_TO_DEVICE; 1070 dma_data->direction = DMA_MEM_TO_DEV;
1071 chan_str = "tx"; 1071 chan_str = "tx";
1072 } else { 1072 } else {
1073 dma_data = &sdd->rx_dma; 1073 dma_data = &sdd->rx_dma;
1074 dma_data->direction = DMA_FROM_DEVICE; 1074 dma_data->direction = DMA_DEV_TO_MEM;
1075 chan_str = "rx"; 1075 chan_str = "rx";
1076 } 1076 }
1077 1077
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 7b0ba92e7e46..5d4610babd8a 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -567,7 +567,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
567 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; 567 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
568 568
569 BUG_ON(*page); 569 BUG_ON(*page);
570 *page = alloc_page(GFP_KERNEL | __GFP_ZERO); 570 *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
571 if (*page == NULL) { 571 if (*page == NULL) {
572 pr_err("binder: %d: binder_alloc_buf failed " 572 pr_err("binder: %d: binder_alloc_buf failed "
573 "for page at %p\n", proc->pid, page_addr); 573 "for page at %p\n", proc->pid, page_addr);
@@ -2419,14 +2419,38 @@ static void binder_release_work(struct list_head *list)
2419 struct binder_transaction *t; 2419 struct binder_transaction *t;
2420 2420
2421 t = container_of(w, struct binder_transaction, work); 2421 t = container_of(w, struct binder_transaction, work);
2422 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) 2422 if (t->buffer->target_node &&
2423 !(t->flags & TF_ONE_WAY)) {
2423 binder_send_failed_reply(t, BR_DEAD_REPLY); 2424 binder_send_failed_reply(t, BR_DEAD_REPLY);
2425 } else {
2426 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2427 "binder: undelivered transaction %d\n",
2428 t->debug_id);
2429 t->buffer->transaction = NULL;
2430 kfree(t);
2431 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2432 }
2424 } break; 2433 } break;
2425 case BINDER_WORK_TRANSACTION_COMPLETE: { 2434 case BINDER_WORK_TRANSACTION_COMPLETE: {
2435 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2436 "binder: undelivered TRANSACTION_COMPLETE\n");
2426 kfree(w); 2437 kfree(w);
2427 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 2438 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2428 } break; 2439 } break;
2440 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2441 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2442 struct binder_ref_death *death;
2443
2444 death = container_of(w, struct binder_ref_death, work);
2445 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2446 "binder: undelivered death notification, %p\n",
2447 death->cookie);
2448 kfree(death);
2449 binder_stats_deleted(BINDER_STAT_DEATH);
2450 } break;
2429 default: 2451 default:
2452 pr_err("binder: unexpected work type, %d, not freed\n",
2453 w->type);
2430 break; 2454 break;
2431 } 2455 }
2432 } 2456 }
@@ -2899,6 +2923,7 @@ static void binder_deferred_release(struct binder_proc *proc)
2899 nodes++; 2923 nodes++;
2900 rb_erase(&node->rb_node, &proc->nodes); 2924 rb_erase(&node->rb_node, &proc->nodes);
2901 list_del_init(&node->work.entry); 2925 list_del_init(&node->work.entry);
2926 binder_release_work(&node->async_todo);
2902 if (hlist_empty(&node->refs)) { 2927 if (hlist_empty(&node->refs)) {
2903 kfree(node); 2928 kfree(node);
2904 binder_stats_deleted(BINDER_STAT_NODE); 2929 binder_stats_deleted(BINDER_STAT_NODE);
@@ -2937,6 +2962,7 @@ static void binder_deferred_release(struct binder_proc *proc)
2937 binder_delete_ref(ref); 2962 binder_delete_ref(ref);
2938 } 2963 }
2939 binder_release_work(&proc->todo); 2964 binder_release_work(&proc->todo);
2965 binder_release_work(&proc->delivered_death);
2940 buffers = 0; 2966 buffers = 0;
2941 2967
2942 while ((n = rb_first(&proc->allocated_buffers))) { 2968 while ((n = rb_first(&proc->allocated_buffers))) {
diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
index 7dff3c01dc29..d00aff6671df 100644
--- a/drivers/staging/comedi/drivers/8255_pci.c
+++ b/drivers/staging/comedi/drivers/8255_pci.c
@@ -289,6 +289,8 @@ static void pci_8255_detach(struct comedi_device *dev)
289 struct comedi_subdevice *s; 289 struct comedi_subdevice *s;
290 int i; 290 int i;
291 291
292 if (!board || !devpriv)
293 return;
292 if (dev->subdevices) { 294 if (dev->subdevices) {
293 for (i = 0; i < board->n_8255; i++) { 295 for (i = 0; i < board->n_8255; i++) {
294 s = &dev->subdevices[i]; 296 s = &dev->subdevices[i];
diff --git a/drivers/staging/comedi/drivers/amplc_dio200.c b/drivers/staging/comedi/drivers/amplc_dio200.c
index 08f305210a69..29eb52d11d2f 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200.c
@@ -1410,6 +1410,8 @@ static void dio200_detach(struct comedi_device *dev)
1410 const struct dio200_layout_struct *layout; 1410 const struct dio200_layout_struct *layout;
1411 unsigned n; 1411 unsigned n;
1412 1412
1413 if (!thisboard)
1414 return;
1413 if (dev->irq) 1415 if (dev->irq)
1414 free_irq(dev->irq, dev); 1416 free_irq(dev->irq, dev);
1415 if (dev->subdevices) { 1417 if (dev->subdevices) {
diff --git a/drivers/staging/comedi/drivers/amplc_pc236.c b/drivers/staging/comedi/drivers/amplc_pc236.c
index eacb5e4735d7..4e4f3c15df87 100644
--- a/drivers/staging/comedi/drivers/amplc_pc236.c
+++ b/drivers/staging/comedi/drivers/amplc_pc236.c
@@ -573,9 +573,10 @@ static int __devinit pc236_attach_pci(struct comedi_device *dev,
573static void pc236_detach(struct comedi_device *dev) 573static void pc236_detach(struct comedi_device *dev)
574{ 574{
575 const struct pc236_board *thisboard = comedi_board(dev); 575 const struct pc236_board *thisboard = comedi_board(dev);
576 struct pc236_private *devpriv = dev->private;
577 576
578 if (devpriv) 577 if (!thisboard)
578 return;
579 if (dev->iobase)
579 pc236_intr_disable(dev); 580 pc236_intr_disable(dev);
580 if (dev->irq) 581 if (dev->irq)
581 free_irq(dev->irq, dev); 582 free_irq(dev->irq, dev);
diff --git a/drivers/staging/comedi/drivers/amplc_pc263.c b/drivers/staging/comedi/drivers/amplc_pc263.c
index 60830ccfb903..d0a4c441228b 100644
--- a/drivers/staging/comedi/drivers/amplc_pc263.c
+++ b/drivers/staging/comedi/drivers/amplc_pc263.c
@@ -323,6 +323,8 @@ static void pc263_detach(struct comedi_device *dev)
323{ 323{
324 const struct pc263_board *thisboard = comedi_board(dev); 324 const struct pc263_board *thisboard = comedi_board(dev);
325 325
326 if (!thisboard)
327 return;
326 if (is_isa_board(thisboard)) { 328 if (is_isa_board(thisboard)) {
327 if (dev->iobase) 329 if (dev->iobase)
328 release_region(dev->iobase, PC263_IO_SIZE); 330 release_region(dev->iobase, PC263_IO_SIZE);
diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
index 5fd21fa6c1c7..c304528cfb13 100644
--- a/drivers/staging/comedi/drivers/das08.c
+++ b/drivers/staging/comedi/drivers/das08.c
@@ -846,6 +846,8 @@ static void __maybe_unused das08_detach(struct comedi_device *dev)
846{ 846{
847 const struct das08_board_struct *thisboard = comedi_board(dev); 847 const struct das08_board_struct *thisboard = comedi_board(dev);
848 848
849 if (!thisboard)
850 return;
849 das08_common_detach(dev); 851 das08_common_detach(dev);
850 if (is_isa_board(thisboard)) { 852 if (is_isa_board(thisboard)) {
851 if (dev->iobase) 853 if (dev->iobase)
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index 2ba0ade45c64..68d7c6a5db7d 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -95,7 +95,7 @@ static int daq700_dio_insn_bits(struct comedi_device *dev,
95 } 95 }
96 96
97 data[1] = s->state & 0xff; 97 data[1] = s->state & 0xff;
98 data[1] |= inb(dev->iobase + DIO_R); 98 data[1] |= inb(dev->iobase + DIO_R) << 8;
99 99
100 return insn->n; 100 return insn->n;
101} 101}
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index 28b91a6c3789..b5a19a0863fb 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -772,6 +772,8 @@ void labpc_common_detach(struct comedi_device *dev)
772{ 772{
773 struct comedi_subdevice *s; 773 struct comedi_subdevice *s;
774 774
775 if (!thisboard)
776 return;
775 if (dev->subdevices) { 777 if (dev->subdevices) {
776 s = &dev->subdevices[2]; 778 s = &dev->subdevices[2];
777 subdev_8255_cleanup(dev, s); 779 subdev_8255_cleanup(dev, s);
diff --git a/drivers/staging/dgrp/dgrp_mon_ops.c b/drivers/staging/dgrp/dgrp_mon_ops.c
index 268dcb95204b..4792d056a365 100644
--- a/drivers/staging/dgrp/dgrp_mon_ops.c
+++ b/drivers/staging/dgrp/dgrp_mon_ops.c
@@ -38,6 +38,7 @@
38#include <linux/sched.h> 38#include <linux/sched.h>
39#include <asm/unaligned.h> 39#include <asm/unaligned.h>
40#include <linux/proc_fs.h> 40#include <linux/proc_fs.h>
41#include <linux/uaccess.h>
41 42
42#include "dgrp_common.h" 43#include "dgrp_common.h"
43 44
diff --git a/drivers/staging/dgrp/dgrp_specproc.c b/drivers/staging/dgrp/dgrp_specproc.c
index 28f5c9ab6b43..24327c3bad83 100644
--- a/drivers/staging/dgrp/dgrp_specproc.c
+++ b/drivers/staging/dgrp/dgrp_specproc.c
@@ -39,6 +39,7 @@
39#include <linux/proc_fs.h> 39#include <linux/proc_fs.h>
40#include <linux/ctype.h> 40#include <linux/ctype.h>
41#include <linux/seq_file.h> 41#include <linux/seq_file.h>
42#include <linux/uaccess.h>
42#include <linux/vmalloc.h> 43#include <linux/vmalloc.h>
43 44
44#include "dgrp_common.h" 45#include "dgrp_common.h"
@@ -228,6 +229,9 @@ static void register_proc_table(struct dgrp_proc_entry *table,
228 int len; 229 int len;
229 mode_t mode; 230 mode_t mode;
230 231
232 if (table == NULL)
233 return;
234
231 for (; table->id; table++) { 235 for (; table->id; table++) {
232 /* Can't do anything without a proc name. */ 236 /* Can't do anything without a proc name. */
233 if (!table->name) 237 if (!table->name)
@@ -296,6 +300,9 @@ static void unregister_proc_table(struct dgrp_proc_entry *table,
296 struct proc_dir_entry *de; 300 struct proc_dir_entry *de;
297 struct nd_struct *tmp; 301 struct nd_struct *tmp;
298 302
303 if (table == NULL)
304 return;
305
299 list_for_each_entry(tmp, &nd_struct_list, list) { 306 list_for_each_entry(tmp, &nd_struct_list, list) {
300 if ((table == dgrp_net_table) && (tmp->nd_net_de)) { 307 if ((table == dgrp_net_table) && (tmp->nd_net_de)) {
301 unregister_dgrp_device(tmp->nd_net_de); 308 unregister_dgrp_device(tmp->nd_net_de);
diff --git a/drivers/staging/dgrp/dgrp_tty.c b/drivers/staging/dgrp/dgrp_tty.c
index 7d7de873870c..e125b03598d7 100644
--- a/drivers/staging/dgrp/dgrp_tty.c
+++ b/drivers/staging/dgrp/dgrp_tty.c
@@ -40,6 +40,7 @@
40#include <linux/tty.h> 40#include <linux/tty.h>
41#include <linux/tty_flip.h> 41#include <linux/tty_flip.h>
42#include <linux/sched.h> 42#include <linux/sched.h>
43#include <linux/uaccess.h>
43 44
44#include "dgrp_common.h" 45#include "dgrp_common.h"
45 46
@@ -3172,6 +3173,9 @@ dgrp_tty_init(struct nd_struct *nd)
3172 */ 3173 */
3173 3174
3174 nd->nd_serial_ttdriver = alloc_tty_driver(CHAN_MAX); 3175 nd->nd_serial_ttdriver = alloc_tty_driver(CHAN_MAX);
3176 if (!nd->nd_serial_ttdriver)
3177 return -ENOMEM;
3178
3175 sprintf(nd->nd_serial_name, "tty_dgrp_%s_", id); 3179 sprintf(nd->nd_serial_name, "tty_dgrp_%s_", id);
3176 3180
3177 nd->nd_serial_ttdriver->owner = THIS_MODULE; 3181 nd->nd_serial_ttdriver->owner = THIS_MODULE;
@@ -3231,6 +3235,9 @@ dgrp_tty_init(struct nd_struct *nd)
3231 } 3235 }
3232 3236
3233 nd->nd_callout_ttdriver = alloc_tty_driver(CHAN_MAX); 3237 nd->nd_callout_ttdriver = alloc_tty_driver(CHAN_MAX);
3238 if (!nd->nd_callout_ttdriver)
3239 return -ENOMEM;
3240
3234 sprintf(nd->nd_callout_name, "cu_dgrp_%s_", id); 3241 sprintf(nd->nd_callout_name, "cu_dgrp_%s_", id);
3235 3242
3236 nd->nd_callout_ttdriver->owner = THIS_MODULE; 3243 nd->nd_callout_ttdriver->owner = THIS_MODULE;
@@ -3268,6 +3275,9 @@ dgrp_tty_init(struct nd_struct *nd)
3268 3275
3269 3276
3270 nd->nd_xprint_ttdriver = alloc_tty_driver(CHAN_MAX); 3277 nd->nd_xprint_ttdriver = alloc_tty_driver(CHAN_MAX);
3278 if (!nd->nd_xprint_ttdriver)
3279 return -ENOMEM;
3280
3271 sprintf(nd->nd_xprint_name, "pr_dgrp_%s_", id); 3281 sprintf(nd->nd_xprint_name, "pr_dgrp_%s_", id);
3272 3282
3273 nd->nd_xprint_ttdriver->owner = THIS_MODULE; 3283 nd->nd_xprint_ttdriver->owner = THIS_MODULE;
diff --git a/drivers/staging/iio/accel/adis16201_core.c b/drivers/staging/iio/accel/adis16201_core.c
index 8e37d6e04277..b12ca68cd9e4 100644
--- a/drivers/staging/iio/accel/adis16201_core.c
+++ b/drivers/staging/iio/accel/adis16201_core.c
@@ -310,30 +310,32 @@ static int adis16201_read_raw(struct iio_dev *indio_dev,
310 case IIO_CHAN_INFO_SCALE: 310 case IIO_CHAN_INFO_SCALE:
311 switch (chan->type) { 311 switch (chan->type) {
312 case IIO_VOLTAGE: 312 case IIO_VOLTAGE:
313 *val = 0; 313 if (chan->channel == 0) {
314 if (chan->channel == 0) 314 *val = 1;
315 *val2 = 1220; 315 *val2 = 220000; /* 1.22 mV */
316 else 316 } else {
317 *val2 = 610; 317 *val = 0;
318 *val2 = 610000; /* 0.610 mV */
319 }
318 return IIO_VAL_INT_PLUS_MICRO; 320 return IIO_VAL_INT_PLUS_MICRO;
319 case IIO_TEMP: 321 case IIO_TEMP:
320 *val = 0; 322 *val = -470; /* 0.47 C */
321 *val2 = -470000; 323 *val2 = 0;
322 return IIO_VAL_INT_PLUS_MICRO; 324 return IIO_VAL_INT_PLUS_MICRO;
323 case IIO_ACCEL: 325 case IIO_ACCEL:
324 *val = 0; 326 *val = 0;
325 *val2 = 462500; 327 *val2 = IIO_G_TO_M_S_2(462400); /* 0.4624 mg */
326 return IIO_VAL_INT_PLUS_MICRO; 328 return IIO_VAL_INT_PLUS_NANO;
327 case IIO_INCLI: 329 case IIO_INCLI:
328 *val = 0; 330 *val = 0;
329 *val2 = 100000; 331 *val2 = 100000; /* 0.1 degree */
330 return IIO_VAL_INT_PLUS_MICRO; 332 return IIO_VAL_INT_PLUS_MICRO;
331 default: 333 default:
332 return -EINVAL; 334 return -EINVAL;
333 } 335 }
334 break; 336 break;
335 case IIO_CHAN_INFO_OFFSET: 337 case IIO_CHAN_INFO_OFFSET:
336 *val = 25; 338 *val = 25000 / -470 - 1278; /* 25 C = 1278 */
337 return IIO_VAL_INT; 339 return IIO_VAL_INT;
338 case IIO_CHAN_INFO_CALIBBIAS: 340 case IIO_CHAN_INFO_CALIBBIAS:
339 switch (chan->type) { 341 switch (chan->type) {
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c
index 002fa9dfc375..e7b3441115ae 100644
--- a/drivers/staging/iio/accel/adis16203_core.c
+++ b/drivers/staging/iio/accel/adis16203_core.c
@@ -316,25 +316,27 @@ static int adis16203_read_raw(struct iio_dev *indio_dev,
316 case IIO_CHAN_INFO_SCALE: 316 case IIO_CHAN_INFO_SCALE:
317 switch (chan->type) { 317 switch (chan->type) {
318 case IIO_VOLTAGE: 318 case IIO_VOLTAGE:
319 *val = 0; 319 if (chan->channel == 0) {
320 if (chan->channel == 0) 320 *val = 1;
321 *val2 = 1220; 321 *val2 = 220000; /* 1.22 mV */
322 else 322 } else {
323 *val2 = 610; 323 *val = 0;
324 *val2 = 610000; /* 0.61 mV */
325 }
324 return IIO_VAL_INT_PLUS_MICRO; 326 return IIO_VAL_INT_PLUS_MICRO;
325 case IIO_TEMP: 327 case IIO_TEMP:
326 *val = 0; 328 *val = -470; /* -0.47 C */
327 *val2 = -470000; 329 *val2 = 0;
328 return IIO_VAL_INT_PLUS_MICRO; 330 return IIO_VAL_INT_PLUS_MICRO;
329 case IIO_INCLI: 331 case IIO_INCLI:
330 *val = 0; 332 *val = 0;
331 *val2 = 25000; 333 *val2 = 25000; /* 0.025 degree */
332 return IIO_VAL_INT_PLUS_MICRO; 334 return IIO_VAL_INT_PLUS_MICRO;
333 default: 335 default:
334 return -EINVAL; 336 return -EINVAL;
335 } 337 }
336 case IIO_CHAN_INFO_OFFSET: 338 case IIO_CHAN_INFO_OFFSET:
337 *val = 25; 339 *val = 25000 / -470 - 1278; /* 25 C = 1278 */
338 return IIO_VAL_INT; 340 return IIO_VAL_INT;
339 case IIO_CHAN_INFO_CALIBBIAS: 341 case IIO_CHAN_INFO_CALIBBIAS:
340 bits = 14; 342 bits = 14;
diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c
index 05bdb7c2c8e3..c6234c2f46aa 100644
--- a/drivers/staging/iio/accel/adis16204_core.c
+++ b/drivers/staging/iio/accel/adis16204_core.c
@@ -317,26 +317,28 @@ static int adis16204_read_raw(struct iio_dev *indio_dev,
317 case IIO_CHAN_INFO_SCALE: 317 case IIO_CHAN_INFO_SCALE:
318 switch (chan->type) { 318 switch (chan->type) {
319 case IIO_VOLTAGE: 319 case IIO_VOLTAGE:
320 *val = 0; 320 if (chan->channel == 0) {
321 if (chan->channel == 0) 321 *val = 1;
322 *val2 = 1220; 322 *val2 = 220000; /* 1.22 mV */
323 else 323 } else {
324 *val2 = 610; 324 *val = 0;
325 *val2 = 610000; /* 0.61 mV */
326 }
325 return IIO_VAL_INT_PLUS_MICRO; 327 return IIO_VAL_INT_PLUS_MICRO;
326 case IIO_TEMP: 328 case IIO_TEMP:
327 *val = 0; 329 *val = -470; /* 0.47 C */
328 *val2 = -470000; 330 *val2 = 0;
329 return IIO_VAL_INT_PLUS_MICRO; 331 return IIO_VAL_INT_PLUS_MICRO;
330 case IIO_ACCEL: 332 case IIO_ACCEL:
331 *val = 0; 333 *val = 0;
332 switch (chan->channel2) { 334 switch (chan->channel2) {
333 case IIO_MOD_X: 335 case IIO_MOD_X:
334 case IIO_MOD_ROOT_SUM_SQUARED_X_Y: 336 case IIO_MOD_ROOT_SUM_SQUARED_X_Y:
335 *val2 = 17125; 337 *val2 = IIO_G_TO_M_S_2(17125); /* 17.125 mg */
336 break; 338 break;
337 case IIO_MOD_Y: 339 case IIO_MOD_Y:
338 case IIO_MOD_Z: 340 case IIO_MOD_Z:
339 *val2 = 8407; 341 *val2 = IIO_G_TO_M_S_2(8407); /* 8.407 mg */
340 break; 342 break;
341 } 343 }
342 return IIO_VAL_INT_PLUS_MICRO; 344 return IIO_VAL_INT_PLUS_MICRO;
@@ -345,7 +347,7 @@ static int adis16204_read_raw(struct iio_dev *indio_dev,
345 } 347 }
346 break; 348 break;
347 case IIO_CHAN_INFO_OFFSET: 349 case IIO_CHAN_INFO_OFFSET:
348 *val = 25; 350 *val = 25000 / -470 - 1278; /* 25 C = 1278 */
349 return IIO_VAL_INT; 351 return IIO_VAL_INT;
350 case IIO_CHAN_INFO_CALIBBIAS: 352 case IIO_CHAN_INFO_CALIBBIAS:
351 case IIO_CHAN_INFO_PEAK: 353 case IIO_CHAN_INFO_PEAK:
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c
index b7333bfe0b2f..7ee974b45d7d 100644
--- a/drivers/staging/iio/accel/adis16209_core.c
+++ b/drivers/staging/iio/accel/adis16209_core.c
@@ -343,28 +343,29 @@ static int adis16209_read_raw(struct iio_dev *indio_dev,
343 case IIO_VOLTAGE: 343 case IIO_VOLTAGE:
344 *val = 0; 344 *val = 0;
345 if (chan->channel == 0) 345 if (chan->channel == 0)
346 *val2 = 305180; 346 *val2 = 305180; /* 0.30518 mV */
347 else 347 else
348 *val2 = 610500; 348 *val2 = 610500; /* 0.6105 mV */
349 return IIO_VAL_INT_PLUS_MICRO; 349 return IIO_VAL_INT_PLUS_MICRO;
350 case IIO_TEMP: 350 case IIO_TEMP:
351 *val = 0; 351 *val = -470; /* -0.47 C */
352 *val2 = -470000; 352 *val2 = 0;
353 return IIO_VAL_INT_PLUS_MICRO; 353 return IIO_VAL_INT_PLUS_MICRO;
354 case IIO_ACCEL: 354 case IIO_ACCEL:
355 *val = 0; 355 *val = 0;
356 *val2 = 2394; 356 *val2 = IIO_G_TO_M_S_2(244140); /* 0.244140 mg */
357 return IIO_VAL_INT_PLUS_MICRO; 357 return IIO_VAL_INT_PLUS_NANO;
358 case IIO_INCLI: 358 case IIO_INCLI:
359 case IIO_ROT:
359 *val = 0; 360 *val = 0;
360 *val2 = 436; 361 *val2 = 25000; /* 0.025 degree */
361 return IIO_VAL_INT_PLUS_MICRO; 362 return IIO_VAL_INT_PLUS_MICRO;
362 default: 363 default:
363 return -EINVAL; 364 return -EINVAL;
364 } 365 }
365 break; 366 break;
366 case IIO_CHAN_INFO_OFFSET: 367 case IIO_CHAN_INFO_OFFSET:
367 *val = 25; 368 *val = 25000 / -470 - 0x4FE; /* 25 C = 0x4FE */
368 return IIO_VAL_INT; 369 return IIO_VAL_INT;
369 case IIO_CHAN_INFO_CALIBBIAS: 370 case IIO_CHAN_INFO_CALIBBIAS:
370 switch (chan->type) { 371 switch (chan->type) {
@@ -491,6 +492,7 @@ static const struct iio_chan_spec adis16209_channels[] = {
491 .modified = 1, 492 .modified = 1,
492 .channel2 = IIO_MOD_X, 493 .channel2 = IIO_MOD_X,
493 .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT, 494 .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT,
495 IIO_CHAN_INFO_SCALE_SHARED_BIT,
494 .address = rot, 496 .address = rot,
495 .scan_index = ADIS16209_SCAN_ROT, 497 .scan_index = ADIS16209_SCAN_ROT,
496 .scan_type = { 498 .scan_type = {
diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
index c755089c7117..eaadd9df3f78 100644
--- a/drivers/staging/iio/accel/adis16220_core.c
+++ b/drivers/staging/iio/accel/adis16220_core.c
@@ -486,7 +486,7 @@ static int adis16220_read_raw(struct iio_dev *indio_dev,
486 break; 486 break;
487 case IIO_CHAN_INFO_OFFSET: 487 case IIO_CHAN_INFO_OFFSET:
488 if (chan->type == IIO_TEMP) { 488 if (chan->type == IIO_TEMP) {
489 *val = 25; 489 *val = 25000 / -470 - 1278; /* 25 C = 1278 */
490 return IIO_VAL_INT; 490 return IIO_VAL_INT;
491 } 491 }
492 addrind = 1; 492 addrind = 1;
@@ -495,19 +495,22 @@ static int adis16220_read_raw(struct iio_dev *indio_dev,
495 addrind = 2; 495 addrind = 2;
496 break; 496 break;
497 case IIO_CHAN_INFO_SCALE: 497 case IIO_CHAN_INFO_SCALE:
498 *val = 0;
499 switch (chan->type) { 498 switch (chan->type) {
500 case IIO_TEMP: 499 case IIO_TEMP:
501 *val2 = -470000; 500 *val = -470; /* -0.47 C */
501 *val2 = 0;
502 return IIO_VAL_INT_PLUS_MICRO; 502 return IIO_VAL_INT_PLUS_MICRO;
503 case IIO_ACCEL: 503 case IIO_ACCEL:
504 *val2 = 1887042; 504 *val2 = IIO_G_TO_M_S_2(19073); /* 19.073 g */
505 return IIO_VAL_INT_PLUS_MICRO; 505 return IIO_VAL_INT_PLUS_MICRO;
506 case IIO_VOLTAGE: 506 case IIO_VOLTAGE:
507 if (chan->channel == 0) 507 if (chan->channel == 0) {
508 *val2 = 0012221; 508 *val = 1;
509 else /* Should really be dependent on VDD */ 509 *val2 = 220700; /* 1.2207 mV */
510 *val2 = 305; 510 } else {
511 /* Should really be dependent on VDD */
512 *val2 = 305180; /* 305.18 uV */
513 }
511 return IIO_VAL_INT_PLUS_MICRO; 514 return IIO_VAL_INT_PLUS_MICRO;
512 default: 515 default:
513 return -EINVAL; 516 return -EINVAL;
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c
index 0fc26a49d681..35e093973d5c 100644
--- a/drivers/staging/iio/accel/adis16240_core.c
+++ b/drivers/staging/iio/accel/adis16240_core.c
@@ -373,30 +373,31 @@ static int adis16240_read_raw(struct iio_dev *indio_dev,
373 case IIO_CHAN_INFO_SCALE: 373 case IIO_CHAN_INFO_SCALE:
374 switch (chan->type) { 374 switch (chan->type) {
375 case IIO_VOLTAGE: 375 case IIO_VOLTAGE:
376 *val = 0; 376 if (chan->channel == 0) {
377 if (chan->channel == 0) 377 *val = 4;
378 *val2 = 4880; 378 *val2 = 880000; /* 4.88 mV */
379 else 379 return IIO_VAL_INT_PLUS_MICRO;
380 } else {
380 return -EINVAL; 381 return -EINVAL;
381 return IIO_VAL_INT_PLUS_MICRO; 382 }
382 case IIO_TEMP: 383 case IIO_TEMP:
383 *val = 0; 384 *val = 244; /* 0.244 C */
384 *val2 = 244000; 385 *val2 = 0;
385 return IIO_VAL_INT_PLUS_MICRO; 386 return IIO_VAL_INT_PLUS_MICRO;
386 case IIO_ACCEL: 387 case IIO_ACCEL:
387 *val = 0; 388 *val = 0;
388 *val2 = 504062; 389 *val2 = IIO_G_TO_M_S_2(51400); /* 51.4 mg */
389 return IIO_VAL_INT_PLUS_MICRO; 390 return IIO_VAL_INT_PLUS_MICRO;
390 default: 391 default:
391 return -EINVAL; 392 return -EINVAL;
392 } 393 }
393 break; 394 break;
394 case IIO_CHAN_INFO_PEAK_SCALE: 395 case IIO_CHAN_INFO_PEAK_SCALE:
395 *val = 6; 396 *val = 0;
396 *val2 = 629295; 397 *val2 = IIO_G_TO_M_S_2(51400); /* 51.4 mg */
397 return IIO_VAL_INT_PLUS_MICRO; 398 return IIO_VAL_INT_PLUS_MICRO;
398 case IIO_CHAN_INFO_OFFSET: 399 case IIO_CHAN_INFO_OFFSET:
399 *val = 25; 400 *val = 25000 / 244 - 0x133; /* 25 C = 0x133 */
400 return IIO_VAL_INT; 401 return IIO_VAL_INT;
401 case IIO_CHAN_INFO_CALIBBIAS: 402 case IIO_CHAN_INFO_CALIBBIAS:
402 bits = 10; 403 bits = 10;
diff --git a/drivers/staging/iio/gyro/adis16260_core.c b/drivers/staging/iio/gyro/adis16260_core.c
index 9571c03aa4cc..aa964a2d8290 100644
--- a/drivers/staging/iio/gyro/adis16260_core.c
+++ b/drivers/staging/iio/gyro/adis16260_core.c
@@ -498,28 +498,33 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
498 switch (chan->type) { 498 switch (chan->type) {
499 case IIO_ANGL_VEL: 499 case IIO_ANGL_VEL:
500 *val = 0; 500 *val = 0;
501 if (spi_get_device_id(st->us)->driver_data) 501 if (spi_get_device_id(st->us)->driver_data) {
502 *val2 = 320; 502 /* 0.01832 degree / sec */
503 else 503 *val2 = IIO_DEGREE_TO_RAD(18320);
504 *val2 = 1278; 504 } else {
505 /* 0.07326 degree / sec */
506 *val2 = IIO_DEGREE_TO_RAD(73260);
507 }
505 return IIO_VAL_INT_PLUS_MICRO; 508 return IIO_VAL_INT_PLUS_MICRO;
506 case IIO_VOLTAGE: 509 case IIO_VOLTAGE:
507 *val = 0; 510 if (chan->channel == 0) {
508 if (chan->channel == 0) 511 *val = 1;
509 *val2 = 18315; 512 *val2 = 831500; /* 1.8315 mV */
510 else 513 } else {
511 *val2 = 610500; 514 *val = 0;
515 *val2 = 610500; /* 610.5 uV */
516 }
512 return IIO_VAL_INT_PLUS_MICRO; 517 return IIO_VAL_INT_PLUS_MICRO;
513 case IIO_TEMP: 518 case IIO_TEMP:
514 *val = 0; 519 *val = 145;
515 *val2 = 145300; 520 *val2 = 300000; /* 0.1453 C */
516 return IIO_VAL_INT_PLUS_MICRO; 521 return IIO_VAL_INT_PLUS_MICRO;
517 default: 522 default:
518 return -EINVAL; 523 return -EINVAL;
519 } 524 }
520 break; 525 break;
521 case IIO_CHAN_INFO_OFFSET: 526 case IIO_CHAN_INFO_OFFSET:
522 *val = 25; 527 *val = 250000 / 1453; /* 25 C = 0x00 */
523 return IIO_VAL_INT; 528 return IIO_VAL_INT;
524 case IIO_CHAN_INFO_CALIBBIAS: 529 case IIO_CHAN_INFO_CALIBBIAS:
525 switch (chan->type) { 530 switch (chan->type) {
diff --git a/drivers/staging/iio/imu/adis16400.h b/drivers/staging/iio/imu/adis16400.h
index d59d7ac856a9..77c601da1846 100644
--- a/drivers/staging/iio/imu/adis16400.h
+++ b/drivers/staging/iio/imu/adis16400.h
@@ -139,6 +139,8 @@ struct adis16400_chip_info {
139 const long flags; 139 const long flags;
140 unsigned int gyro_scale_micro; 140 unsigned int gyro_scale_micro;
141 unsigned int accel_scale_micro; 141 unsigned int accel_scale_micro;
142 int temp_scale_nano;
143 int temp_offset;
142 unsigned long default_scan_mask; 144 unsigned long default_scan_mask;
143}; 145};
144 146
diff --git a/drivers/staging/iio/imu/adis16400_core.c b/drivers/staging/iio/imu/adis16400_core.c
index b302c9ba2712..3144a7b1e1c4 100644
--- a/drivers/staging/iio/imu/adis16400_core.c
+++ b/drivers/staging/iio/imu/adis16400_core.c
@@ -553,10 +553,13 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
553 return IIO_VAL_INT_PLUS_MICRO; 553 return IIO_VAL_INT_PLUS_MICRO;
554 case IIO_VOLTAGE: 554 case IIO_VOLTAGE:
555 *val = 0; 555 *val = 0;
556 if (chan->channel == 0) 556 if (chan->channel == 0) {
557 *val2 = 2418; 557 *val = 2;
558 else 558 *val2 = 418000; /* 2.418 mV */
559 *val2 = 806; 559 } else {
560 *val = 0;
561 *val2 = 805800; /* 805.8 uV */
562 }
560 return IIO_VAL_INT_PLUS_MICRO; 563 return IIO_VAL_INT_PLUS_MICRO;
561 case IIO_ACCEL: 564 case IIO_ACCEL:
562 *val = 0; 565 *val = 0;
@@ -564,11 +567,11 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
564 return IIO_VAL_INT_PLUS_MICRO; 567 return IIO_VAL_INT_PLUS_MICRO;
565 case IIO_MAGN: 568 case IIO_MAGN:
566 *val = 0; 569 *val = 0;
567 *val2 = 500; 570 *val2 = 500; /* 0.5 mgauss */
568 return IIO_VAL_INT_PLUS_MICRO; 571 return IIO_VAL_INT_PLUS_MICRO;
569 case IIO_TEMP: 572 case IIO_TEMP:
570 *val = 0; 573 *val = st->variant->temp_scale_nano / 1000000;
571 *val2 = 140000; 574 *val2 = (st->variant->temp_scale_nano % 1000000);
572 return IIO_VAL_INT_PLUS_MICRO; 575 return IIO_VAL_INT_PLUS_MICRO;
573 default: 576 default:
574 return -EINVAL; 577 return -EINVAL;
@@ -586,9 +589,8 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
586 return IIO_VAL_INT; 589 return IIO_VAL_INT;
587 case IIO_CHAN_INFO_OFFSET: 590 case IIO_CHAN_INFO_OFFSET:
588 /* currently only temperature */ 591 /* currently only temperature */
589 *val = 198; 592 *val = st->variant->temp_offset;
590 *val2 = 160000; 593 return IIO_VAL_INT;
591 return IIO_VAL_INT_PLUS_MICRO;
592 case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: 594 case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
593 mutex_lock(&indio_dev->mlock); 595 mutex_lock(&indio_dev->mlock);
594 /* Need both the number of taps and the sampling frequency */ 596 /* Need both the number of taps and the sampling frequency */
@@ -1035,7 +1037,7 @@ static const struct iio_chan_spec adis16334_channels[] = {
1035 .indexed = 1, 1037 .indexed = 1,
1036 .channel = 0, 1038 .channel = 0,
1037 .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | 1039 .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
1038 IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT | 1040 IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
1039 IIO_CHAN_INFO_SCALE_SHARED_BIT, 1041 IIO_CHAN_INFO_SCALE_SHARED_BIT,
1040 .address = temp0, 1042 .address = temp0,
1041 .scan_index = ADIS16400_SCAN_TEMP, 1043 .scan_index = ADIS16400_SCAN_TEMP,
@@ -1058,8 +1060,10 @@ static struct adis16400_chip_info adis16400_chips[] = {
1058 [ADIS16300] = { 1060 [ADIS16300] = {
1059 .channels = adis16300_channels, 1061 .channels = adis16300_channels,
1060 .num_channels = ARRAY_SIZE(adis16300_channels), 1062 .num_channels = ARRAY_SIZE(adis16300_channels),
1061 .gyro_scale_micro = 873, 1063 .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
1062 .accel_scale_micro = 5884, 1064 .accel_scale_micro = 5884,
1065 .temp_scale_nano = 140000000, /* 0.14 C */
1066 .temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
1063 .default_scan_mask = (1 << ADIS16400_SCAN_SUPPLY) | 1067 .default_scan_mask = (1 << ADIS16400_SCAN_SUPPLY) |
1064 (1 << ADIS16400_SCAN_GYRO_X) | (1 << ADIS16400_SCAN_ACC_X) | 1068 (1 << ADIS16400_SCAN_GYRO_X) | (1 << ADIS16400_SCAN_ACC_X) |
1065 (1 << ADIS16400_SCAN_ACC_Y) | (1 << ADIS16400_SCAN_ACC_Z) | 1069 (1 << ADIS16400_SCAN_ACC_Y) | (1 << ADIS16400_SCAN_ACC_Z) |
@@ -1070,8 +1074,10 @@ static struct adis16400_chip_info adis16400_chips[] = {
1070 [ADIS16334] = { 1074 [ADIS16334] = {
1071 .channels = adis16334_channels, 1075 .channels = adis16334_channels,
1072 .num_channels = ARRAY_SIZE(adis16334_channels), 1076 .num_channels = ARRAY_SIZE(adis16334_channels),
1073 .gyro_scale_micro = 873, 1077 .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
1074 .accel_scale_micro = 981, 1078 .accel_scale_micro = IIO_G_TO_M_S_2(1000), /* 1 mg */
1079 .temp_scale_nano = 67850000, /* 0.06785 C */
1080 .temp_offset = 25000000 / 67850, /* 25 C = 0x00 */
1075 .default_scan_mask = (1 << ADIS16400_SCAN_GYRO_X) | 1081 .default_scan_mask = (1 << ADIS16400_SCAN_GYRO_X) |
1076 (1 << ADIS16400_SCAN_GYRO_Y) | (1 << ADIS16400_SCAN_GYRO_Z) | 1082 (1 << ADIS16400_SCAN_GYRO_Y) | (1 << ADIS16400_SCAN_GYRO_Z) |
1077 (1 << ADIS16400_SCAN_ACC_X) | (1 << ADIS16400_SCAN_ACC_Y) | 1083 (1 << ADIS16400_SCAN_ACC_X) | (1 << ADIS16400_SCAN_ACC_Y) |
@@ -1080,8 +1086,10 @@ static struct adis16400_chip_info adis16400_chips[] = {
1080 [ADIS16350] = { 1086 [ADIS16350] = {
1081 .channels = adis16350_channels, 1087 .channels = adis16350_channels,
1082 .num_channels = ARRAY_SIZE(adis16350_channels), 1088 .num_channels = ARRAY_SIZE(adis16350_channels),
1083 .gyro_scale_micro = 872664, 1089 .gyro_scale_micro = IIO_DEGREE_TO_RAD(73260), /* 0.07326 deg/s */
1084 .accel_scale_micro = 24732, 1090 .accel_scale_micro = IIO_G_TO_M_S_2(2522), /* 0.002522 g */
1091 .temp_scale_nano = 145300000, /* 0.1453 C */
1092 .temp_offset = 25000000 / 145300, /* 25 C = 0x00 */
1085 .default_scan_mask = 0x7FF, 1093 .default_scan_mask = 0x7FF,
1086 .flags = ADIS16400_NO_BURST, 1094 .flags = ADIS16400_NO_BURST,
1087 }, 1095 },
@@ -1090,8 +1098,10 @@ static struct adis16400_chip_info adis16400_chips[] = {
1090 .num_channels = ARRAY_SIZE(adis16350_channels), 1098 .num_channels = ARRAY_SIZE(adis16350_channels),
1091 .flags = ADIS16400_HAS_PROD_ID, 1099 .flags = ADIS16400_HAS_PROD_ID,
1092 .product_id = 0x3FE8, 1100 .product_id = 0x3FE8,
1093 .gyro_scale_micro = 1279, 1101 .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
1094 .accel_scale_micro = 24732, 1102 .accel_scale_micro = IIO_G_TO_M_S_2(3333), /* 3.333 mg */
1103 .temp_scale_nano = 136000000, /* 0.136 C */
1104 .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
1095 .default_scan_mask = 0x7FF, 1105 .default_scan_mask = 0x7FF,
1096 }, 1106 },
1097 [ADIS16362] = { 1107 [ADIS16362] = {
@@ -1099,8 +1109,10 @@ static struct adis16400_chip_info adis16400_chips[] = {
1099 .num_channels = ARRAY_SIZE(adis16350_channels), 1109 .num_channels = ARRAY_SIZE(adis16350_channels),
1100 .flags = ADIS16400_HAS_PROD_ID, 1110 .flags = ADIS16400_HAS_PROD_ID,
1101 .product_id = 0x3FEA, 1111 .product_id = 0x3FEA,
1102 .gyro_scale_micro = 1279, 1112 .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
1103 .accel_scale_micro = 24732, 1113 .accel_scale_micro = IIO_G_TO_M_S_2(333), /* 0.333 mg */
1114 .temp_scale_nano = 136000000, /* 0.136 C */
1115 .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
1104 .default_scan_mask = 0x7FF, 1116 .default_scan_mask = 0x7FF,
1105 }, 1117 },
1106 [ADIS16364] = { 1118 [ADIS16364] = {
@@ -1108,8 +1120,10 @@ static struct adis16400_chip_info adis16400_chips[] = {
1108 .num_channels = ARRAY_SIZE(adis16350_channels), 1120 .num_channels = ARRAY_SIZE(adis16350_channels),
1109 .flags = ADIS16400_HAS_PROD_ID, 1121 .flags = ADIS16400_HAS_PROD_ID,
1110 .product_id = 0x3FEC, 1122 .product_id = 0x3FEC,
1111 .gyro_scale_micro = 1279, 1123 .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
1112 .accel_scale_micro = 24732, 1124 .accel_scale_micro = IIO_G_TO_M_S_2(1000), /* 1 mg */
1125 .temp_scale_nano = 136000000, /* 0.136 C */
1126 .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
1113 .default_scan_mask = 0x7FF, 1127 .default_scan_mask = 0x7FF,
1114 }, 1128 },
1115 [ADIS16365] = { 1129 [ADIS16365] = {
@@ -1117,8 +1131,10 @@ static struct adis16400_chip_info adis16400_chips[] = {
1117 .num_channels = ARRAY_SIZE(adis16350_channels), 1131 .num_channels = ARRAY_SIZE(adis16350_channels),
1118 .flags = ADIS16400_HAS_PROD_ID, 1132 .flags = ADIS16400_HAS_PROD_ID,
1119 .product_id = 0x3FED, 1133 .product_id = 0x3FED,
1120 .gyro_scale_micro = 1279, 1134 .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
1121 .accel_scale_micro = 24732, 1135 .accel_scale_micro = IIO_G_TO_M_S_2(1000), /* 1 mg */
1136 .temp_scale_nano = 136000000, /* 0.136 C */
1137 .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
1122 .default_scan_mask = 0x7FF, 1138 .default_scan_mask = 0x7FF,
1123 }, 1139 },
1124 [ADIS16400] = { 1140 [ADIS16400] = {
@@ -1126,9 +1142,11 @@ static struct adis16400_chip_info adis16400_chips[] = {
1126 .num_channels = ARRAY_SIZE(adis16400_channels), 1142 .num_channels = ARRAY_SIZE(adis16400_channels),
1127 .flags = ADIS16400_HAS_PROD_ID, 1143 .flags = ADIS16400_HAS_PROD_ID,
1128 .product_id = 0x4015, 1144 .product_id = 0x4015,
1129 .gyro_scale_micro = 873, 1145 .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
1130 .accel_scale_micro = 32656, 1146 .accel_scale_micro = IIO_G_TO_M_S_2(3333), /* 3.333 mg */
1131 .default_scan_mask = 0xFFF, 1147 .default_scan_mask = 0xFFF,
1148 .temp_scale_nano = 140000000, /* 0.14 C */
1149 .temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
1132 } 1150 }
1133}; 1151};
1134 1152
diff --git a/drivers/staging/ipack/bridges/tpci200.c b/drivers/staging/ipack/bridges/tpci200.c
index bb8aa70281cd..46d6657280b8 100644
--- a/drivers/staging/ipack/bridges/tpci200.c
+++ b/drivers/staging/ipack/bridges/tpci200.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/slab.h>
15#include "tpci200.h" 16#include "tpci200.h"
16 17
17static u16 tpci200_status_timeout[] = { 18static u16 tpci200_status_timeout[] = {
diff --git a/drivers/staging/omap-thermal/omap-thermal-common.c b/drivers/staging/omap-thermal/omap-thermal-common.c
index 46ee0a9f49d9..5c0c203b887f 100644
--- a/drivers/staging/omap-thermal/omap-thermal-common.c
+++ b/drivers/staging/omap-thermal/omap-thermal-common.c
@@ -126,7 +126,9 @@ static int omap_thermal_bind(struct thermal_zone_device *thermal,
126 126
127 /* TODO: bind with min and max states */ 127 /* TODO: bind with min and max states */
128 /* Simple thing, two trips, one passive another critical */ 128 /* Simple thing, two trips, one passive another critical */
129 return thermal_zone_bind_cooling_device(thermal, 0, cdev); 129 return thermal_zone_bind_cooling_device(thermal, 0, cdev,
130 THERMAL_NO_LIMIT,
131 THERMAL_NO_LIMIT);
130} 132}
131 133
132/* Unbind callback functions for thermal zone */ 134/* Unbind callback functions for thermal zone */
@@ -268,7 +270,6 @@ int omap_thermal_expose_sensor(struct omap_bandgap *bg_ptr, int id,
268 /* Create thermal zone */ 270 /* Create thermal zone */
269 data->omap_thermal = thermal_zone_device_register(domain, 271 data->omap_thermal = thermal_zone_device_register(domain,
270 OMAP_TRIP_NUMBER, 0, data, &omap_thermal_ops, 272 OMAP_TRIP_NUMBER, 0, data, &omap_thermal_ops,
271 1, 2, /*TODO: remove this when FW allows */
272 FAST_TEMP_MONITORING_RATE, 273 FAST_TEMP_MONITORING_RATE,
273 FAST_TEMP_MONITORING_RATE); 274 FAST_TEMP_MONITORING_RATE);
274 if (IS_ERR_OR_NULL(data->omap_thermal)) { 275 if (IS_ERR_OR_NULL(data->omap_thermal)) {
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/staging/omapdrm/omap_drv.c
index 2ec5264dd002..ebdb0b676737 100644
--- a/drivers/staging/omapdrm/omap_drv.c
+++ b/drivers/staging/omapdrm/omap_drv.c
@@ -106,7 +106,8 @@ static void dump_video_chains(void)
106 for (i = 0; i < omap_dss_get_num_overlays(); i++) { 106 for (i = 0; i < omap_dss_get_num_overlays(); i++) {
107 struct omap_overlay *ovl = omap_dss_get_overlay(i); 107 struct omap_overlay *ovl = omap_dss_get_overlay(i);
108 struct omap_overlay_manager *mgr = ovl->manager; 108 struct omap_overlay_manager *mgr = ovl->manager;
109 struct omap_dss_device *dssdev = mgr ? mgr->device : NULL; 109 struct omap_dss_device *dssdev = mgr ?
110 mgr->get_device(mgr) : NULL;
110 if (dssdev) { 111 if (dssdev) {
111 DBG("%d: %s -> %s -> %s", i, ovl->name, mgr->name, 112 DBG("%d: %s -> %s -> %s", i, ovl->name, mgr->name,
112 dssdev->name); 113 dssdev->name);
@@ -185,7 +186,7 @@ static int create_connector(struct drm_device *dev,
185 for (j = 0; j < priv->num_encoders; j++) { 186 for (j = 0; j < priv->num_encoders; j++) {
186 struct omap_overlay_manager *mgr = 187 struct omap_overlay_manager *mgr =
187 omap_encoder_get_manager(priv->encoders[j]); 188 omap_encoder_get_manager(priv->encoders[j]);
188 if (mgr->device == dssdev) { 189 if (mgr->get_device(mgr) == dssdev) {
189 drm_mode_connector_attach_encoder(connector, 190 drm_mode_connector_attach_encoder(connector,
190 priv->encoders[j]); 191 priv->encoders[j]);
191 } 192 }
diff --git a/drivers/staging/omapdrm/omap_gem.c b/drivers/staging/omapdrm/omap_gem.c
index 3434e6ec0142..66e2c2f8a239 100644
--- a/drivers/staging/omapdrm/omap_gem.c
+++ b/drivers/staging/omapdrm/omap_gem.c
@@ -246,7 +246,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
246 * DSS, GPU, etc. are not cache coherent: 246 * DSS, GPU, etc. are not cache coherent:
247 */ 247 */
248 if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) { 248 if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
249 addrs = kmalloc(npages * sizeof(addrs), GFP_KERNEL); 249 addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
250 if (!addrs) { 250 if (!addrs) {
251 ret = -ENOMEM; 251 ret = -ENOMEM;
252 goto free_pages; 252 goto free_pages;
@@ -257,7 +257,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
257 0, PAGE_SIZE, DMA_BIDIRECTIONAL); 257 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
258 } 258 }
259 } else { 259 } else {
260 addrs = kzalloc(npages * sizeof(addrs), GFP_KERNEL); 260 addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
261 if (!addrs) { 261 if (!addrs) {
262 ret = -ENOMEM; 262 ret = -ENOMEM;
263 goto free_pages; 263 goto free_pages;
diff --git a/drivers/staging/ramster/Kconfig b/drivers/staging/ramster/Kconfig
index 843c54101438..3abf6619dace 100644
--- a/drivers/staging/ramster/Kconfig
+++ b/drivers/staging/ramster/Kconfig
@@ -18,6 +18,7 @@ config ZCACHE2
18config RAMSTER 18config RAMSTER
19 bool "Cross-machine RAM capacity sharing, aka peer-to-peer tmem" 19 bool "Cross-machine RAM capacity sharing, aka peer-to-peer tmem"
20 depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE2=y 20 depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE2=y
21 depends on NET
21 # must ensure struct page is 8-byte aligned 22 # must ensure struct page is 8-byte aligned
22 select HAVE_ALIGNED_STRUCT_PAGE if !64_BIT 23 select HAVE_ALIGNED_STRUCT_PAGE if !64_BIT
23 default n 24 default n
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index 066a3ceec65e..f619fb3c56d2 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -126,7 +126,8 @@ static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
126 u32 ul_num_bytes, 126 u32 ul_num_bytes,
127 struct hw_mmu_map_attrs_t *hw_attrs); 127 struct hw_mmu_map_attrs_t *hw_attrs);
128 128
129bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr); 129bool wait_for_start(struct bridge_dev_context *dev_context,
130 void __iomem *sync_addr);
130 131
131/* ----------------------------------- Globals */ 132/* ----------------------------------- Globals */
132 133
@@ -363,10 +364,11 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
363{ 364{
364 int status = 0; 365 int status = 0;
365 struct bridge_dev_context *dev_context = dev_ctxt; 366 struct bridge_dev_context *dev_context = dev_ctxt;
366 u32 dw_sync_addr = 0; 367 void __iomem *sync_addr;
367 u32 ul_shm_base; /* Gpp Phys SM base addr(byte) */ 368 u32 ul_shm_base; /* Gpp Phys SM base addr(byte) */
368 u32 ul_shm_base_virt; /* Dsp Virt SM base addr */ 369 u32 ul_shm_base_virt; /* Dsp Virt SM base addr */
369 u32 ul_tlb_base_virt; /* Base of MMU TLB entry */ 370 u32 ul_tlb_base_virt; /* Base of MMU TLB entry */
371 u32 shm_sync_pa;
370 /* Offset of shm_base_virt from tlb_base_virt */ 372 /* Offset of shm_base_virt from tlb_base_virt */
371 u32 ul_shm_offset_virt; 373 u32 ul_shm_offset_virt;
372 s32 entry_ndx; 374 s32 entry_ndx;
@@ -397,15 +399,22 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
397 /* Kernel logical address */ 399 /* Kernel logical address */
398 ul_shm_base = dev_context->atlb_entry[0].gpp_va + ul_shm_offset_virt; 400 ul_shm_base = dev_context->atlb_entry[0].gpp_va + ul_shm_offset_virt;
399 401
402 /* SHM physical sync address */
403 shm_sync_pa = dev_context->atlb_entry[0].gpp_pa + ul_shm_offset_virt +
404 SHMSYNCOFFSET;
405
400 /* 2nd wd is used as sync field */ 406 /* 2nd wd is used as sync field */
401 dw_sync_addr = ul_shm_base + SHMSYNCOFFSET; 407 sync_addr = ioremap(shm_sync_pa, SZ_32);
408 if (!sync_addr)
409 return -ENOMEM;
410
402 /* Write a signature into the shm base + offset; this will 411 /* Write a signature into the shm base + offset; this will
403 * get cleared when the DSP program starts. */ 412 * get cleared when the DSP program starts. */
404 if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) { 413 if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) {
405 pr_err("%s: Illegal SM base\n", __func__); 414 pr_err("%s: Illegal SM base\n", __func__);
406 status = -EPERM; 415 status = -EPERM;
407 } else 416 } else
408 __raw_writel(0xffffffff, dw_sync_addr); 417 __raw_writel(0xffffffff, sync_addr);
409 418
410 if (!status) { 419 if (!status) {
411 resources = dev_context->resources; 420 resources = dev_context->resources;
@@ -419,8 +428,10 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
419 * function is made available. 428 * function is made available.
420 */ 429 */
421 void __iomem *ctrl = ioremap(0x48002000, SZ_4K); 430 void __iomem *ctrl = ioremap(0x48002000, SZ_4K);
422 if (!ctrl) 431 if (!ctrl) {
432 iounmap(sync_addr);
423 return -ENOMEM; 433 return -ENOMEM;
434 }
424 435
425 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 436 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
426 OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, 437 OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
@@ -588,15 +599,15 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
588 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0, 599 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
589 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); 600 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
590 601
591 dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", dw_sync_addr); 602 dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", *(u32 *)sync_addr);
592 dev_dbg(bridge, "DSP c_int00 Address = 0x%x\n", dsp_addr); 603 dev_dbg(bridge, "DSP c_int00 Address = 0x%x\n", dsp_addr);
593 if (dsp_debug) 604 if (dsp_debug)
594 while (__raw_readw(dw_sync_addr)) 605 while (__raw_readw(sync_addr))
595 ; 606 ;
596 607
597 /* Wait for DSP to clear word in shared memory */ 608 /* Wait for DSP to clear word in shared memory */
598 /* Read the Location */ 609 /* Read the Location */
599 if (!wait_for_start(dev_context, dw_sync_addr)) 610 if (!wait_for_start(dev_context, sync_addr))
600 status = -ETIMEDOUT; 611 status = -ETIMEDOUT;
601 612
602 dev_get_symbol(dev_context->dev_obj, "_WDT_enable", &wdt_en); 613 dev_get_symbol(dev_context->dev_obj, "_WDT_enable", &wdt_en);
@@ -612,7 +623,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
612 /* Write the synchronization bit to indicate the 623 /* Write the synchronization bit to indicate the
613 * completion of OPP table update to DSP 624 * completion of OPP table update to DSP
614 */ 625 */
615 __raw_writel(0XCAFECAFE, dw_sync_addr); 626 __raw_writel(0XCAFECAFE, sync_addr);
616 627
617 /* update board state */ 628 /* update board state */
618 dev_context->brd_state = BRD_RUNNING; 629 dev_context->brd_state = BRD_RUNNING;
@@ -621,6 +632,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
621 dev_context->brd_state = BRD_UNKNOWN; 632 dev_context->brd_state = BRD_UNKNOWN;
622 } 633 }
623 } 634 }
635
636 iounmap(sync_addr);
637
624 return status; 638 return status;
625} 639}
626 640
@@ -1796,12 +1810,13 @@ static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
1796 * ======== wait_for_start ======== 1810 * ======== wait_for_start ========
1797 * Wait for the singal from DSP that it has started, or time out. 1811 * Wait for the singal from DSP that it has started, or time out.
1798 */ 1812 */
1799bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr) 1813bool wait_for_start(struct bridge_dev_context *dev_context,
1814 void __iomem *sync_addr)
1800{ 1815{
1801 u16 timeout = TIHELEN_ACKTIMEOUT; 1816 u16 timeout = TIHELEN_ACKTIMEOUT;
1802 1817
1803 /* Wait for response from board */ 1818 /* Wait for response from board */
1804 while (__raw_readw(dw_sync_addr) && --timeout) 1819 while (__raw_readw(sync_addr) && --timeout)
1805 udelay(10); 1820 udelay(10);
1806 1821
1807 /* If timed out: return false */ 1822 /* If timed out: return false */
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.c b/drivers/staging/tidspbridge/hw/hw_mmu.c
index 71cb82293649..50244a474178 100644
--- a/drivers/staging/tidspbridge/hw/hw_mmu.c
+++ b/drivers/staging/tidspbridge/hw/hw_mmu.c
@@ -48,37 +48,12 @@ enum hw_mmu_page_size_t {
48}; 48};
49 49
50/* 50/*
51 * FUNCTION : mmu_flush_entry
52 *
53 * INPUTS:
54 *
55 * Identifier : base_address
56 * Type : const u32
57 * Description : Base Address of instance of MMU module
58 *
59 * RETURNS:
60 *
61 * Type : hw_status
62 * Description : 0 -- No errors occurred
63 * RET_BAD_NULL_PARAM -- A Pointer
64 * Parameter was set to NULL
65 *
66 * PURPOSE: : Flush the TLB entry pointed by the
67 * lock counter register
68 * even if this entry is set protected
69 *
70 * METHOD: : Check the Input parameter and Flush a
71 * single entry in the TLB.
72 */
73static hw_status mmu_flush_entry(const void __iomem *base_address);
74
75/*
76 * FUNCTION : mmu_set_cam_entry 51 * FUNCTION : mmu_set_cam_entry
77 * 52 *
78 * INPUTS: 53 * INPUTS:
79 * 54 *
80 * Identifier : base_address 55 * Identifier : base_address
81 * TypE : const u32 56 * Type : void __iomem *
82 * Description : Base Address of instance of MMU module 57 * Description : Base Address of instance of MMU module
83 * 58 *
84 * Identifier : page_sz 59 * Identifier : page_sz
@@ -112,7 +87,7 @@ static hw_status mmu_flush_entry(const void __iomem *base_address);
112 * 87 *
113 * METHOD: : Check the Input parameters and set the CAM entry. 88 * METHOD: : Check the Input parameters and set the CAM entry.
114 */ 89 */
115static hw_status mmu_set_cam_entry(const void __iomem *base_address, 90static hw_status mmu_set_cam_entry(void __iomem *base_address,
116 const u32 page_sz, 91 const u32 page_sz,
117 const u32 preserved_bit, 92 const u32 preserved_bit,
118 const u32 valid_bit, 93 const u32 valid_bit,
@@ -124,7 +99,7 @@ static hw_status mmu_set_cam_entry(const void __iomem *base_address,
124 * INPUTS: 99 * INPUTS:
125 * 100 *
126 * Identifier : base_address 101 * Identifier : base_address
127 * Type : const u32 102 * Type : void __iomem *
128 * Description : Base Address of instance of MMU module 103 * Description : Base Address of instance of MMU module
129 * 104 *
130 * Identifier : physical_addr 105 * Identifier : physical_addr
@@ -157,7 +132,7 @@ static hw_status mmu_set_cam_entry(const void __iomem *base_address,
157 * 132 *
158 * METHOD: : Check the Input parameters and set the RAM entry. 133 * METHOD: : Check the Input parameters and set the RAM entry.
159 */ 134 */
160static hw_status mmu_set_ram_entry(const void __iomem *base_address, 135static hw_status mmu_set_ram_entry(void __iomem *base_address,
161 const u32 physical_addr, 136 const u32 physical_addr,
162 enum hw_endianism_t endianism, 137 enum hw_endianism_t endianism,
163 enum hw_element_size_t element_size, 138 enum hw_element_size_t element_size,
@@ -165,7 +140,7 @@ static hw_status mmu_set_ram_entry(const void __iomem *base_address,
165 140
166/* HW FUNCTIONS */ 141/* HW FUNCTIONS */
167 142
168hw_status hw_mmu_enable(const void __iomem *base_address) 143hw_status hw_mmu_enable(void __iomem *base_address)
169{ 144{
170 hw_status status = 0; 145 hw_status status = 0;
171 146
@@ -174,7 +149,7 @@ hw_status hw_mmu_enable(const void __iomem *base_address)
174 return status; 149 return status;
175} 150}
176 151
177hw_status hw_mmu_disable(const void __iomem *base_address) 152hw_status hw_mmu_disable(void __iomem *base_address)
178{ 153{
179 hw_status status = 0; 154 hw_status status = 0;
180 155
@@ -183,7 +158,7 @@ hw_status hw_mmu_disable(const void __iomem *base_address)
183 return status; 158 return status;
184} 159}
185 160
186hw_status hw_mmu_num_locked_set(const void __iomem *base_address, 161hw_status hw_mmu_num_locked_set(void __iomem *base_address,
187 u32 num_locked_entries) 162 u32 num_locked_entries)
188{ 163{
189 hw_status status = 0; 164 hw_status status = 0;
@@ -193,7 +168,7 @@ hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
193 return status; 168 return status;
194} 169}
195 170
196hw_status hw_mmu_victim_num_set(const void __iomem *base_address, 171hw_status hw_mmu_victim_num_set(void __iomem *base_address,
197 u32 victim_entry_num) 172 u32 victim_entry_num)
198{ 173{
199 hw_status status = 0; 174 hw_status status = 0;
@@ -203,7 +178,7 @@ hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
203 return status; 178 return status;
204} 179}
205 180
206hw_status hw_mmu_event_ack(const void __iomem *base_address, u32 irq_mask) 181hw_status hw_mmu_event_ack(void __iomem *base_address, u32 irq_mask)
207{ 182{
208 hw_status status = 0; 183 hw_status status = 0;
209 184
@@ -212,7 +187,7 @@ hw_status hw_mmu_event_ack(const void __iomem *base_address, u32 irq_mask)
212 return status; 187 return status;
213} 188}
214 189
215hw_status hw_mmu_event_disable(const void __iomem *base_address, u32 irq_mask) 190hw_status hw_mmu_event_disable(void __iomem *base_address, u32 irq_mask)
216{ 191{
217 hw_status status = 0; 192 hw_status status = 0;
218 u32 irq_reg; 193 u32 irq_reg;
@@ -224,7 +199,7 @@ hw_status hw_mmu_event_disable(const void __iomem *base_address, u32 irq_mask)
224 return status; 199 return status;
225} 200}
226 201
227hw_status hw_mmu_event_enable(const void __iomem *base_address, u32 irq_mask) 202hw_status hw_mmu_event_enable(void __iomem *base_address, u32 irq_mask)
228{ 203{
229 hw_status status = 0; 204 hw_status status = 0;
230 u32 irq_reg; 205 u32 irq_reg;
@@ -236,7 +211,7 @@ hw_status hw_mmu_event_enable(const void __iomem *base_address, u32 irq_mask)
236 return status; 211 return status;
237} 212}
238 213
239hw_status hw_mmu_event_status(const void __iomem *base_address, u32 *irq_mask) 214hw_status hw_mmu_event_status(void __iomem *base_address, u32 *irq_mask)
240{ 215{
241 hw_status status = 0; 216 hw_status status = 0;
242 217
@@ -245,7 +220,7 @@ hw_status hw_mmu_event_status(const void __iomem *base_address, u32 *irq_mask)
245 return status; 220 return status;
246} 221}
247 222
248hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, u32 *addr) 223hw_status hw_mmu_fault_addr_read(void __iomem *base_address, u32 *addr)
249{ 224{
250 hw_status status = 0; 225 hw_status status = 0;
251 226
@@ -255,7 +230,7 @@ hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, u32 *addr)
255 return status; 230 return status;
256} 231}
257 232
258hw_status hw_mmu_ttb_set(const void __iomem *base_address, u32 ttb_phys_addr) 233hw_status hw_mmu_ttb_set(void __iomem *base_address, u32 ttb_phys_addr)
259{ 234{
260 hw_status status = 0; 235 hw_status status = 0;
261 u32 load_ttb; 236 u32 load_ttb;
@@ -267,7 +242,7 @@ hw_status hw_mmu_ttb_set(const void __iomem *base_address, u32 ttb_phys_addr)
267 return status; 242 return status;
268} 243}
269 244
270hw_status hw_mmu_twl_enable(const void __iomem *base_address) 245hw_status hw_mmu_twl_enable(void __iomem *base_address)
271{ 246{
272 hw_status status = 0; 247 hw_status status = 0;
273 248
@@ -276,7 +251,7 @@ hw_status hw_mmu_twl_enable(const void __iomem *base_address)
276 return status; 251 return status;
277} 252}
278 253
279hw_status hw_mmu_twl_disable(const void __iomem *base_address) 254hw_status hw_mmu_twl_disable(void __iomem *base_address)
280{ 255{
281 hw_status status = 0; 256 hw_status status = 0;
282 257
@@ -285,45 +260,7 @@ hw_status hw_mmu_twl_disable(const void __iomem *base_address)
285 return status; 260 return status;
286} 261}
287 262
288hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtual_addr, 263hw_status hw_mmu_tlb_add(void __iomem *base_address,
289 u32 page_sz)
290{
291 hw_status status = 0;
292 u32 virtual_addr_tag;
293 enum hw_mmu_page_size_t pg_size_bits;
294
295 switch (page_sz) {
296 case HW_PAGE_SIZE4KB:
297 pg_size_bits = HW_MMU_SMALL_PAGE;
298 break;
299
300 case HW_PAGE_SIZE64KB:
301 pg_size_bits = HW_MMU_LARGE_PAGE;
302 break;
303
304 case HW_PAGE_SIZE1MB:
305 pg_size_bits = HW_MMU_SECTION;
306 break;
307
308 case HW_PAGE_SIZE16MB:
309 pg_size_bits = HW_MMU_SUPERSECTION;
310 break;
311
312 default:
313 return -EINVAL;
314 }
315
316 /* Generate the 20-bit tag from virtual address */
317 virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
318
319 mmu_set_cam_entry(base_address, pg_size_bits, 0, 0, virtual_addr_tag);
320
321 mmu_flush_entry(base_address);
322
323 return status;
324}
325
326hw_status hw_mmu_tlb_add(const void __iomem *base_address,
327 u32 physical_addr, 264 u32 physical_addr,
328 u32 virtual_addr, 265 u32 virtual_addr,
329 u32 page_sz, 266 u32 page_sz,
@@ -503,20 +440,8 @@ hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size)
503 return status; 440 return status;
504} 441}
505 442
506/* mmu_flush_entry */
507static hw_status mmu_flush_entry(const void __iomem *base_address)
508{
509 hw_status status = 0;
510 u32 flush_entry_data = 0x1;
511
512 /* write values to register */
513 MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, flush_entry_data);
514
515 return status;
516}
517
518/* mmu_set_cam_entry */ 443/* mmu_set_cam_entry */
519static hw_status mmu_set_cam_entry(const void __iomem *base_address, 444static hw_status mmu_set_cam_entry(void __iomem *base_address,
520 const u32 page_sz, 445 const u32 page_sz,
521 const u32 preserved_bit, 446 const u32 preserved_bit,
522 const u32 valid_bit, 447 const u32 valid_bit,
@@ -536,7 +461,7 @@ static hw_status mmu_set_cam_entry(const void __iomem *base_address,
536} 461}
537 462
538/* mmu_set_ram_entry */ 463/* mmu_set_ram_entry */
539static hw_status mmu_set_ram_entry(const void __iomem *base_address, 464static hw_status mmu_set_ram_entry(void __iomem *base_address,
540 const u32 physical_addr, 465 const u32 physical_addr,
541 enum hw_endianism_t endianism, 466 enum hw_endianism_t endianism,
542 enum hw_element_size_t element_size, 467 enum hw_element_size_t element_size,
@@ -556,7 +481,7 @@ static hw_status mmu_set_ram_entry(const void __iomem *base_address,
556 481
557} 482}
558 483
559void hw_mmu_tlb_flush_all(const void __iomem *base) 484void hw_mmu_tlb_flush_all(void __iomem *base)
560{ 485{
561 __raw_writel(1, base + MMU_GFLUSH); 486 __raw_writel(1, base + MMU_GFLUSH);
562} 487}
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.h b/drivers/staging/tidspbridge/hw/hw_mmu.h
index 1458a2c6027b..1c50bb36edfe 100644
--- a/drivers/staging/tidspbridge/hw/hw_mmu.h
+++ b/drivers/staging/tidspbridge/hw/hw_mmu.h
@@ -42,44 +42,41 @@ struct hw_mmu_map_attrs_t {
42 bool donotlockmpupage; 42 bool donotlockmpupage;
43}; 43};
44 44
45extern hw_status hw_mmu_enable(const void __iomem *base_address); 45extern hw_status hw_mmu_enable(void __iomem *base_address);
46 46
47extern hw_status hw_mmu_disable(const void __iomem *base_address); 47extern hw_status hw_mmu_disable(void __iomem *base_address);
48 48
49extern hw_status hw_mmu_num_locked_set(const void __iomem *base_address, 49extern hw_status hw_mmu_num_locked_set(void __iomem *base_address,
50 u32 num_locked_entries); 50 u32 num_locked_entries);
51 51
52extern hw_status hw_mmu_victim_num_set(const void __iomem *base_address, 52extern hw_status hw_mmu_victim_num_set(void __iomem *base_address,
53 u32 victim_entry_num); 53 u32 victim_entry_num);
54 54
55/* For MMU faults */ 55/* For MMU faults */
56extern hw_status hw_mmu_event_ack(const void __iomem *base_address, 56extern hw_status hw_mmu_event_ack(void __iomem *base_address,
57 u32 irq_mask); 57 u32 irq_mask);
58 58
59extern hw_status hw_mmu_event_disable(const void __iomem *base_address, 59extern hw_status hw_mmu_event_disable(void __iomem *base_address,
60 u32 irq_mask); 60 u32 irq_mask);
61 61
62extern hw_status hw_mmu_event_enable(const void __iomem *base_address, 62extern hw_status hw_mmu_event_enable(void __iomem *base_address,
63 u32 irq_mask); 63 u32 irq_mask);
64 64
65extern hw_status hw_mmu_event_status(const void __iomem *base_address, 65extern hw_status hw_mmu_event_status(void __iomem *base_address,
66 u32 *irq_mask); 66 u32 *irq_mask);
67 67
68extern hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, 68extern hw_status hw_mmu_fault_addr_read(void __iomem *base_address,
69 u32 *addr); 69 u32 *addr);
70 70
71/* Set the TT base address */ 71/* Set the TT base address */
72extern hw_status hw_mmu_ttb_set(const void __iomem *base_address, 72extern hw_status hw_mmu_ttb_set(void __iomem *base_address,
73 u32 ttb_phys_addr); 73 u32 ttb_phys_addr);
74 74
75extern hw_status hw_mmu_twl_enable(const void __iomem *base_address); 75extern hw_status hw_mmu_twl_enable(void __iomem *base_address);
76 76
77extern hw_status hw_mmu_twl_disable(const void __iomem *base_address); 77extern hw_status hw_mmu_twl_disable(void __iomem *base_address);
78 78
79extern hw_status hw_mmu_tlb_flush(const void __iomem *base_address, 79extern hw_status hw_mmu_tlb_add(void __iomem *base_address,
80 u32 virtual_addr, u32 page_sz);
81
82extern hw_status hw_mmu_tlb_add(const void __iomem *base_address,
83 u32 physical_addr, 80 u32 physical_addr,
84 u32 virtual_addr, 81 u32 virtual_addr,
85 u32 page_sz, 82 u32 page_sz,
@@ -97,7 +94,7 @@ extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
97extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, 94extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
98 u32 virtual_addr, u32 page_size); 95 u32 virtual_addr, u32 page_size);
99 96
100void hw_mmu_tlb_flush_all(const void __iomem *base); 97void hw_mmu_tlb_flush_all(void __iomem *base);
101 98
102static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va) 99static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va)
103{ 100{
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
index 60a278136bdf..b32c75673ab4 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
@@ -53,8 +53,8 @@ struct cfg_hostres {
53 u32 chnl_buf_size; 53 u32 chnl_buf_size;
54 u32 num_chnls; 54 u32 num_chnls;
55 void __iomem *per_base; 55 void __iomem *per_base;
56 u32 per_pm_base; 56 void __iomem *per_pm_base;
57 u32 core_pm_base; 57 void __iomem *core_pm_base;
58 void __iomem *dmmu_base; 58 void __iomem *dmmu_base;
59}; 59};
60 60
diff --git a/drivers/staging/tidspbridge/include/dspbridge/host_os.h b/drivers/staging/tidspbridge/include/dspbridge/host_os.h
index ed00d3da3205..5e2f4d82d925 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/host_os.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/host_os.h
@@ -47,8 +47,8 @@
47#include <asm/cacheflush.h> 47#include <asm/cacheflush.h>
48#include <linux/dma-mapping.h> 48#include <linux/dma-mapping.h>
49 49
50/* TODO -- Remove, once BP defines them */ 50/* TODO -- Remove, once omap-iommu is used */
51#define INT_DSP_MMU_IRQ 28 51#define INT_DSP_MMU_IRQ (28 + NR_IRQS)
52 52
53#define PRCM_VDD1 1 53#define PRCM_VDD1 1
54 54
diff --git a/drivers/staging/tidspbridge/rmgr/drv.c b/drivers/staging/tidspbridge/rmgr/drv.c
index 6795205b0155..db1da28cecba 100644
--- a/drivers/staging/tidspbridge/rmgr/drv.c
+++ b/drivers/staging/tidspbridge/rmgr/drv.c
@@ -667,10 +667,10 @@ int drv_request_bridge_res_dsp(void **phost_resources)
667 OMAP_DSP_MEM3_SIZE); 667 OMAP_DSP_MEM3_SIZE);
668 host_res->per_base = ioremap(OMAP_PER_CM_BASE, 668 host_res->per_base = ioremap(OMAP_PER_CM_BASE,
669 OMAP_PER_CM_SIZE); 669 OMAP_PER_CM_SIZE);
670 host_res->per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE, 670 host_res->per_pm_base = ioremap(OMAP_PER_PRM_BASE,
671 OMAP_PER_PRM_SIZE); 671 OMAP_PER_PRM_SIZE);
672 host_res->core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE, 672 host_res->core_pm_base = ioremap(OMAP_CORE_PRM_BASE,
673 OMAP_CORE_PRM_SIZE); 673 OMAP_CORE_PRM_SIZE);
674 host_res->dmmu_base = ioremap(OMAP_DMMU_BASE, 674 host_res->dmmu_base = ioremap(OMAP_DMMU_BASE,
675 OMAP_DMMU_SIZE); 675 OMAP_DMMU_SIZE);
676 676
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c
index c2fc6137c770..294e9b40f516 100644
--- a/drivers/staging/tidspbridge/rmgr/node.c
+++ b/drivers/staging/tidspbridge/rmgr/node.c
@@ -304,8 +304,7 @@ int node_allocate(struct proc_object *hprocessor,
304 u32 pul_value; 304 u32 pul_value;
305 u32 dynext_base; 305 u32 dynext_base;
306 u32 off_set = 0; 306 u32 off_set = 0;
307 u32 ul_stack_seg_addr, ul_stack_seg_val; 307 u32 ul_stack_seg_val;
308 u32 ul_gpp_mem_base;
309 struct cfg_hostres *host_res; 308 struct cfg_hostres *host_res;
310 struct bridge_dev_context *pbridge_context; 309 struct bridge_dev_context *pbridge_context;
311 u32 mapped_addr = 0; 310 u32 mapped_addr = 0;
@@ -581,6 +580,9 @@ func_cont:
581 if (strcmp((char *) 580 if (strcmp((char *)
582 pnode->dcd_props.obj_data.node_obj.ndb_props. 581 pnode->dcd_props.obj_data.node_obj.ndb_props.
583 stack_seg_name, STACKSEGLABEL) == 0) { 582 stack_seg_name, STACKSEGLABEL) == 0) {
583 void __iomem *stack_seg;
584 u32 stack_seg_pa;
585
584 status = 586 status =
585 hnode_mgr->nldr_fxns. 587 hnode_mgr->nldr_fxns.
586 get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG", 588 get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
@@ -608,14 +610,21 @@ func_cont:
608 goto func_end; 610 goto func_end;
609 } 611 }
610 612
611 ul_gpp_mem_base = (u32) host_res->mem_base[1];
612 off_set = pul_value - dynext_base; 613 off_set = pul_value - dynext_base;
613 ul_stack_seg_addr = ul_gpp_mem_base + off_set; 614 stack_seg_pa = host_res->mem_phys[1] + off_set;
614 ul_stack_seg_val = readl(ul_stack_seg_addr); 615 stack_seg = ioremap(stack_seg_pa, SZ_32);
616 if (!stack_seg) {
617 status = -ENOMEM;
618 goto func_end;
619 }
620
621 ul_stack_seg_val = readl(stack_seg);
622
623 iounmap(stack_seg);
615 624
616 dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr =" 625 dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
617 " 0x%x\n", __func__, ul_stack_seg_val, 626 " 0x%x\n", __func__, ul_stack_seg_val,
618 ul_stack_seg_addr); 627 host_res->mem_base[1] + off_set);
619 628
620 pnode->create_args.asa.task_arg_obj.stack_seg = 629 pnode->create_args.asa.task_arg_obj.stack_seg =
621 ul_stack_seg_val; 630 ul_stack_seg_val;
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 653b074035f7..6edefde23722 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -223,8 +223,13 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
223 cmem = zs_map_object(zram->mem_pool, zram->table[index].handle, 223 cmem = zs_map_object(zram->mem_pool, zram->table[index].handle,
224 ZS_MM_RO); 224 ZS_MM_RO);
225 225
226 ret = lzo1x_decompress_safe(cmem, zram->table[index].size, 226 if (zram->table[index].size == PAGE_SIZE) {
227 memcpy(uncmem, cmem, PAGE_SIZE);
228 ret = LZO_E_OK;
229 } else {
230 ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
227 uncmem, &clen); 231 uncmem, &clen);
232 }
228 233
229 if (is_partial_io(bvec)) { 234 if (is_partial_io(bvec)) {
230 memcpy(user_mem + bvec->bv_offset, uncmem + offset, 235 memcpy(user_mem + bvec->bv_offset, uncmem + offset,
@@ -342,8 +347,11 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
342 goto out; 347 goto out;
343 } 348 }
344 349
345 if (unlikely(clen > max_zpage_size)) 350 if (unlikely(clen > max_zpage_size)) {
346 zram_stat_inc(&zram->stats.bad_compress); 351 zram_stat_inc(&zram->stats.bad_compress);
352 src = uncmem;
353 clen = PAGE_SIZE;
354 }
347 355
348 handle = zs_malloc(zram->mem_pool, clen); 356 handle = zs_malloc(zram->mem_pool, clen);
349 if (!handle) { 357 if (!handle) {
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 97c0f78c3c9c..035c2c762537 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -427,7 +427,7 @@ int iscsit_reset_np_thread(
427 return 0; 427 return 0;
428} 428}
429 429
430int iscsit_del_np_comm(struct iscsi_np *np) 430static int iscsit_del_np_comm(struct iscsi_np *np)
431{ 431{
432 if (np->np_socket) 432 if (np->np_socket)
433 sock_release(np->np_socket); 433 sock_release(np->np_socket);
@@ -785,10 +785,6 @@ static int iscsit_handle_scsi_cmd(
785 785
786 hdr = (struct iscsi_scsi_req *) buf; 786 hdr = (struct iscsi_scsi_req *) buf;
787 payload_length = ntoh24(hdr->dlength); 787 payload_length = ntoh24(hdr->dlength);
788 hdr->itt = be32_to_cpu(hdr->itt);
789 hdr->data_length = be32_to_cpu(hdr->data_length);
790 hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
791 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
792 788
793 /* FIXME; Add checks for AdditionalHeaderSegment */ 789 /* FIXME; Add checks for AdditionalHeaderSegment */
794 790
@@ -852,7 +848,7 @@ done:
852 buf, conn); 848 buf, conn);
853 } 849 }
854 850
855 if ((hdr->data_length == payload_length) && 851 if ((be32_to_cpu(hdr->data_length )== payload_length) &&
856 (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) { 852 (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
857 pr_err("Expected Data Transfer Length and Length of" 853 pr_err("Expected Data Transfer Length and Length of"
858 " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL" 854 " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
@@ -861,7 +857,7 @@ done:
861 buf, conn); 857 buf, conn);
862 } 858 }
863 859
864 if (payload_length > hdr->data_length) { 860 if (payload_length > be32_to_cpu(hdr->data_length)) {
865 pr_err("DataSegmentLength: %u is greater than" 861 pr_err("DataSegmentLength: %u is greater than"
866 " EDTL: %u, protocol error.\n", payload_length, 862 " EDTL: %u, protocol error.\n", payload_length,
867 hdr->data_length); 863 hdr->data_length);
@@ -869,10 +865,10 @@ done:
869 buf, conn); 865 buf, conn);
870 } 866 }
871 867
872 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) { 868 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
873 pr_err("DataSegmentLength: %u is greater than" 869 pr_err("DataSegmentLength: %u is greater than"
874 " MaxRecvDataSegmentLength: %u, protocol error.\n", 870 " MaxXmitDataSegmentLength: %u, protocol error.\n",
875 payload_length, conn->conn_ops->MaxRecvDataSegmentLength); 871 payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
876 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 872 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
877 buf, conn); 873 buf, conn);
878 } 874 }
@@ -932,8 +928,8 @@ done:
932 spin_unlock_bh(&conn->sess->ttt_lock); 928 spin_unlock_bh(&conn->sess->ttt_lock);
933 } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE) 929 } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
934 cmd->targ_xfer_tag = 0xFFFFFFFF; 930 cmd->targ_xfer_tag = 0xFFFFFFFF;
935 cmd->cmd_sn = hdr->cmdsn; 931 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
936 cmd->exp_stat_sn = hdr->exp_statsn; 932 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
937 cmd->first_burst_len = payload_length; 933 cmd->first_burst_len = payload_length;
938 934
939 if (cmd->data_direction == DMA_FROM_DEVICE) { 935 if (cmd->data_direction == DMA_FROM_DEVICE) {
@@ -952,8 +948,9 @@ done:
952 * Initialize struct se_cmd descriptor from target_core_mod infrastructure 948 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
953 */ 949 */
954 transport_init_se_cmd(&cmd->se_cmd, &lio_target_fabric_configfs->tf_ops, 950 transport_init_se_cmd(&cmd->se_cmd, &lio_target_fabric_configfs->tf_ops,
955 conn->sess->se_sess, hdr->data_length, cmd->data_direction, 951 conn->sess->se_sess, be32_to_cpu(hdr->data_length),
956 sam_task_attr, &cmd->sense_buffer[0]); 952 cmd->data_direction, sam_task_attr,
953 cmd->sense_buffer + 2);
957 954
958 pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x," 955 pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
959 " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt, 956 " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
@@ -1028,7 +1025,7 @@ attach_cmd:
1028 1, 0, buf, cmd); 1025 1, 0, buf, cmd);
1029 } 1026 }
1030 1027
1031 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 1028 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
1032 1029
1033 /* 1030 /*
1034 * If no Immediate Data is attached, it's OK to return now. 1031 * If no Immediate Data is attached, it's OK to return now.
@@ -1194,11 +1191,6 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
1194 1191
1195 hdr = (struct iscsi_data *) buf; 1192 hdr = (struct iscsi_data *) buf;
1196 payload_length = ntoh24(hdr->dlength); 1193 payload_length = ntoh24(hdr->dlength);
1197 hdr->itt = be32_to_cpu(hdr->itt);
1198 hdr->ttt = be32_to_cpu(hdr->ttt);
1199 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
1200 hdr->datasn = be32_to_cpu(hdr->datasn);
1201 hdr->offset = be32_to_cpu(hdr->offset);
1202 1194
1203 if (!payload_length) { 1195 if (!payload_length) {
1204 pr_err("DataOUT payload is ZERO, protocol error.\n"); 1196 pr_err("DataOUT payload is ZERO, protocol error.\n");
@@ -1216,10 +1208,10 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
1216 } 1208 }
1217 spin_unlock_bh(&conn->sess->session_stats_lock); 1209 spin_unlock_bh(&conn->sess->session_stats_lock);
1218 1210
1219 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) { 1211 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1220 pr_err("DataSegmentLength: %u is greater than" 1212 pr_err("DataSegmentLength: %u is greater than"
1221 " MaxRecvDataSegmentLength: %u\n", payload_length, 1213 " MaxXmitDataSegmentLength: %u\n", payload_length,
1222 conn->conn_ops->MaxRecvDataSegmentLength); 1214 conn->conn_ops->MaxXmitDataSegmentLength);
1223 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 1215 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1224 buf, conn); 1216 buf, conn);
1225 } 1217 }
@@ -1250,7 +1242,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
1250 se_cmd = &cmd->se_cmd; 1242 se_cmd = &cmd->se_cmd;
1251 iscsit_mod_dataout_timer(cmd); 1243 iscsit_mod_dataout_timer(cmd);
1252 1244
1253 if ((hdr->offset + payload_length) > cmd->se_cmd.data_length) { 1245 if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
1254 pr_err("DataOut Offset: %u, Length %u greater than" 1246 pr_err("DataOut Offset: %u, Length %u greater than"
1255 " iSCSI Command EDTL %u, protocol error.\n", 1247 " iSCSI Command EDTL %u, protocol error.\n",
1256 hdr->offset, payload_length, cmd->se_cmd.data_length); 1248 hdr->offset, payload_length, cmd->se_cmd.data_length);
@@ -1333,7 +1325,8 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
1333 rx_size += payload_length; 1325 rx_size += payload_length;
1334 iov = &cmd->iov_data[0]; 1326 iov = &cmd->iov_data[0];
1335 1327
1336 iov_ret = iscsit_map_iovec(cmd, iov, hdr->offset, payload_length); 1328 iov_ret = iscsit_map_iovec(cmd, iov, be32_to_cpu(hdr->offset),
1329 payload_length);
1337 if (iov_ret < 0) 1330 if (iov_ret < 0)
1338 return -1; 1331 return -1;
1339 1332
@@ -1364,7 +1357,8 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
1364 u32 data_crc; 1357 u32 data_crc;
1365 1358
1366 data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd, 1359 data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
1367 hdr->offset, payload_length, padding, 1360 be32_to_cpu(hdr->offset),
1361 payload_length, padding,
1368 cmd->pad_bytes); 1362 cmd->pad_bytes);
1369 1363
1370 if (checksum != data_crc) { 1364 if (checksum != data_crc) {
@@ -1425,30 +1419,26 @@ static int iscsit_handle_nop_out(
1425 1419
1426 hdr = (struct iscsi_nopout *) buf; 1420 hdr = (struct iscsi_nopout *) buf;
1427 payload_length = ntoh24(hdr->dlength); 1421 payload_length = ntoh24(hdr->dlength);
1428 hdr->itt = be32_to_cpu(hdr->itt);
1429 hdr->ttt = be32_to_cpu(hdr->ttt);
1430 hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
1431 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
1432 1422
1433 if ((hdr->itt == 0xFFFFFFFF) && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 1423 if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1434 pr_err("NOPOUT ITT is reserved, but Immediate Bit is" 1424 pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
1435 " not set, protocol error.\n"); 1425 " not set, protocol error.\n");
1436 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 1426 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1437 buf, conn); 1427 buf, conn);
1438 } 1428 }
1439 1429
1440 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) { 1430 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1441 pr_err("NOPOUT Ping Data DataSegmentLength: %u is" 1431 pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
1442 " greater than MaxRecvDataSegmentLength: %u, protocol" 1432 " greater than MaxXmitDataSegmentLength: %u, protocol"
1443 " error.\n", payload_length, 1433 " error.\n", payload_length,
1444 conn->conn_ops->MaxRecvDataSegmentLength); 1434 conn->conn_ops->MaxXmitDataSegmentLength);
1445 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 1435 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1446 buf, conn); 1436 buf, conn);
1447 } 1437 }
1448 1438
1449 pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%09x," 1439 pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%09x,"
1450 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n", 1440 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
1451 (hdr->itt == 0xFFFFFFFF) ? "Response" : "Request", 1441 hdr->itt == RESERVED_ITT ? "Response" : "Request",
1452 hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn, 1442 hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
1453 payload_length); 1443 payload_length);
1454 /* 1444 /*
@@ -1458,7 +1448,7 @@ static int iscsit_handle_nop_out(
1458 * Either way, make sure we allocate an struct iscsi_cmd, as both 1448 * Either way, make sure we allocate an struct iscsi_cmd, as both
1459 * can contain ping data. 1449 * can contain ping data.
1460 */ 1450 */
1461 if (hdr->ttt == 0xFFFFFFFF) { 1451 if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
1462 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 1452 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
1463 if (!cmd) 1453 if (!cmd)
1464 return iscsit_add_reject( 1454 return iscsit_add_reject(
@@ -1471,12 +1461,12 @@ static int iscsit_handle_nop_out(
1471 1 : 0); 1461 1 : 0);
1472 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 1462 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1473 cmd->targ_xfer_tag = 0xFFFFFFFF; 1463 cmd->targ_xfer_tag = 0xFFFFFFFF;
1474 cmd->cmd_sn = hdr->cmdsn; 1464 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
1475 cmd->exp_stat_sn = hdr->exp_statsn; 1465 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
1476 cmd->data_direction = DMA_NONE; 1466 cmd->data_direction = DMA_NONE;
1477 } 1467 }
1478 1468
1479 if (payload_length && (hdr->ttt == 0xFFFFFFFF)) { 1469 if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
1480 rx_size = payload_length; 1470 rx_size = payload_length;
1481 ping_data = kzalloc(payload_length + 1, GFP_KERNEL); 1471 ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
1482 if (!ping_data) { 1472 if (!ping_data) {
@@ -1556,7 +1546,7 @@ static int iscsit_handle_nop_out(
1556 pr_debug("Ping Data: \"%s\"\n", ping_data); 1546 pr_debug("Ping Data: \"%s\"\n", ping_data);
1557 } 1547 }
1558 1548
1559 if (hdr->itt != 0xFFFFFFFF) { 1549 if (hdr->itt != RESERVED_ITT) {
1560 if (!cmd) { 1550 if (!cmd) {
1561 pr_err("Checking CmdSN for NOPOUT," 1551 pr_err("Checking CmdSN for NOPOUT,"
1562 " but cmd is NULL!\n"); 1552 " but cmd is NULL!\n");
@@ -1569,7 +1559,7 @@ static int iscsit_handle_nop_out(
1569 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); 1559 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
1570 spin_unlock_bh(&conn->cmd_lock); 1560 spin_unlock_bh(&conn->cmd_lock);
1571 1561
1572 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 1562 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
1573 1563
1574 if (hdr->opcode & ISCSI_OP_IMMEDIATE) { 1564 if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
1575 iscsit_add_cmd_to_response_queue(cmd, conn, 1565 iscsit_add_cmd_to_response_queue(cmd, conn,
@@ -1590,11 +1580,11 @@ static int iscsit_handle_nop_out(
1590 return 0; 1580 return 0;
1591 } 1581 }
1592 1582
1593 if (hdr->ttt != 0xFFFFFFFF) { 1583 if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
1594 /* 1584 /*
1595 * This was a response to a unsolicited NOPIN ping. 1585 * This was a response to a unsolicited NOPIN ping.
1596 */ 1586 */
1597 cmd = iscsit_find_cmd_from_ttt(conn, hdr->ttt); 1587 cmd = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt));
1598 if (!cmd) 1588 if (!cmd)
1599 return -1; 1589 return -1;
1600 1590
@@ -1639,12 +1629,6 @@ static int iscsit_handle_task_mgt_cmd(
1639 u8 function; 1629 u8 function;
1640 1630
1641 hdr = (struct iscsi_tm *) buf; 1631 hdr = (struct iscsi_tm *) buf;
1642 hdr->itt = be32_to_cpu(hdr->itt);
1643 hdr->rtt = be32_to_cpu(hdr->rtt);
1644 hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
1645 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
1646 hdr->refcmdsn = be32_to_cpu(hdr->refcmdsn);
1647 hdr->exp_datasn = be32_to_cpu(hdr->exp_datasn);
1648 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL; 1632 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
1649 function = hdr->flags; 1633 function = hdr->flags;
1650 1634
@@ -1655,9 +1639,9 @@ static int iscsit_handle_task_mgt_cmd(
1655 1639
1656 if ((function != ISCSI_TM_FUNC_ABORT_TASK) && 1640 if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
1657 ((function != ISCSI_TM_FUNC_TASK_REASSIGN) && 1641 ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
1658 (hdr->rtt != ISCSI_RESERVED_TAG))) { 1642 hdr->rtt != RESERVED_ITT)) {
1659 pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n"); 1643 pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
1660 hdr->rtt = ISCSI_RESERVED_TAG; 1644 hdr->rtt = RESERVED_ITT;
1661 } 1645 }
1662 1646
1663 if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) && 1647 if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
@@ -1669,8 +1653,8 @@ static int iscsit_handle_task_mgt_cmd(
1669 buf, conn); 1653 buf, conn);
1670 } 1654 }
1671 if ((function != ISCSI_TM_FUNC_ABORT_TASK) && 1655 if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
1672 (hdr->refcmdsn != ISCSI_RESERVED_TAG)) 1656 be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG)
1673 hdr->refcmdsn = ISCSI_RESERVED_TAG; 1657 hdr->refcmdsn = cpu_to_be32(ISCSI_RESERVED_TAG);
1674 1658
1675 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 1659 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
1676 if (!cmd) 1660 if (!cmd)
@@ -1700,7 +1684,7 @@ static int iscsit_handle_task_mgt_cmd(
1700 transport_init_se_cmd(&cmd->se_cmd, 1684 transport_init_se_cmd(&cmd->se_cmd,
1701 &lio_target_fabric_configfs->tf_ops, 1685 &lio_target_fabric_configfs->tf_ops,
1702 conn->sess->se_sess, 0, DMA_NONE, 1686 conn->sess->se_sess, 0, DMA_NONE,
1703 MSG_SIMPLE_TAG, &cmd->sense_buffer[0]); 1687 MSG_SIMPLE_TAG, cmd->sense_buffer + 2);
1704 1688
1705 switch (function) { 1689 switch (function) {
1706 case ISCSI_TM_FUNC_ABORT_TASK: 1690 case ISCSI_TM_FUNC_ABORT_TASK:
@@ -1747,8 +1731,8 @@ static int iscsit_handle_task_mgt_cmd(
1747 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 1731 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
1748 cmd->init_task_tag = hdr->itt; 1732 cmd->init_task_tag = hdr->itt;
1749 cmd->targ_xfer_tag = 0xFFFFFFFF; 1733 cmd->targ_xfer_tag = 0xFFFFFFFF;
1750 cmd->cmd_sn = hdr->cmdsn; 1734 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
1751 cmd->exp_stat_sn = hdr->exp_statsn; 1735 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
1752 se_tmr = cmd->se_cmd.se_tmr_req; 1736 se_tmr = cmd->se_cmd.se_tmr_req;
1753 tmr_req = cmd->tmr_req; 1737 tmr_req = cmd->tmr_req;
1754 /* 1738 /*
@@ -1832,7 +1816,7 @@ attach:
1832 ISCSI_REASON_PROTOCOL_ERROR, 1816 ISCSI_REASON_PROTOCOL_ERROR,
1833 1, 0, buf, cmd); 1817 1, 0, buf, cmd);
1834 } 1818 }
1835 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 1819 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
1836 1820
1837 if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE)) 1821 if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE))
1838 return 0; 1822 return 0;
@@ -1869,15 +1853,11 @@ static int iscsit_handle_text_cmd(
1869 1853
1870 hdr = (struct iscsi_text *) buf; 1854 hdr = (struct iscsi_text *) buf;
1871 payload_length = ntoh24(hdr->dlength); 1855 payload_length = ntoh24(hdr->dlength);
1872 hdr->itt = be32_to_cpu(hdr->itt);
1873 hdr->ttt = be32_to_cpu(hdr->ttt);
1874 hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
1875 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
1876 1856
1877 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) { 1857 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1878 pr_err("Unable to accept text parameter length: %u" 1858 pr_err("Unable to accept text parameter length: %u"
1879 "greater than MaxRecvDataSegmentLength %u.\n", 1859 "greater than MaxXmitDataSegmentLength %u.\n",
1880 payload_length, conn->conn_ops->MaxRecvDataSegmentLength); 1860 payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
1881 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 1861 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1882 buf, conn); 1862 buf, conn);
1883 } 1863 }
@@ -1989,15 +1969,15 @@ static int iscsit_handle_text_cmd(
1989 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 1969 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
1990 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 1970 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1991 cmd->targ_xfer_tag = 0xFFFFFFFF; 1971 cmd->targ_xfer_tag = 0xFFFFFFFF;
1992 cmd->cmd_sn = hdr->cmdsn; 1972 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
1993 cmd->exp_stat_sn = hdr->exp_statsn; 1973 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
1994 cmd->data_direction = DMA_NONE; 1974 cmd->data_direction = DMA_NONE;
1995 1975
1996 spin_lock_bh(&conn->cmd_lock); 1976 spin_lock_bh(&conn->cmd_lock);
1997 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); 1977 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
1998 spin_unlock_bh(&conn->cmd_lock); 1978 spin_unlock_bh(&conn->cmd_lock);
1999 1979
2000 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 1980 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2001 1981
2002 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 1982 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
2003 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1983 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
@@ -2131,10 +2111,6 @@ static int iscsit_handle_logout_cmd(
2131 2111
2132 hdr = (struct iscsi_logout *) buf; 2112 hdr = (struct iscsi_logout *) buf;
2133 reason_code = (hdr->flags & 0x7f); 2113 reason_code = (hdr->flags & 0x7f);
2134 hdr->itt = be32_to_cpu(hdr->itt);
2135 hdr->cid = be16_to_cpu(hdr->cid);
2136 hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
2137 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
2138 2114
2139 if (tiqn) { 2115 if (tiqn) {
2140 spin_lock(&tiqn->logout_stats.lock); 2116 spin_lock(&tiqn->logout_stats.lock);
@@ -2166,9 +2142,9 @@ static int iscsit_handle_logout_cmd(
2166 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 2142 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2167 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 2143 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
2168 cmd->targ_xfer_tag = 0xFFFFFFFF; 2144 cmd->targ_xfer_tag = 0xFFFFFFFF;
2169 cmd->cmd_sn = hdr->cmdsn; 2145 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
2170 cmd->exp_stat_sn = hdr->exp_statsn; 2146 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
2171 cmd->logout_cid = hdr->cid; 2147 cmd->logout_cid = be16_to_cpu(hdr->cid);
2172 cmd->logout_reason = reason_code; 2148 cmd->logout_reason = reason_code;
2173 cmd->data_direction = DMA_NONE; 2149 cmd->data_direction = DMA_NONE;
2174 2150
@@ -2178,7 +2154,7 @@ static int iscsit_handle_logout_cmd(
2178 */ 2154 */
2179 if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) || 2155 if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) ||
2180 ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) && 2156 ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) &&
2181 (hdr->cid == conn->cid))) 2157 be16_to_cpu(hdr->cid) == conn->cid))
2182 logout_remove = 1; 2158 logout_remove = 1;
2183 2159
2184 spin_lock_bh(&conn->cmd_lock); 2160 spin_lock_bh(&conn->cmd_lock);
@@ -2186,7 +2162,7 @@ static int iscsit_handle_logout_cmd(
2186 spin_unlock_bh(&conn->cmd_lock); 2162 spin_unlock_bh(&conn->cmd_lock);
2187 2163
2188 if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY) 2164 if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY)
2189 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 2165 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2190 2166
2191 /* 2167 /*
2192 * Immediate commands are executed, well, immediately. 2168 * Immediate commands are executed, well, immediately.
@@ -2219,11 +2195,6 @@ static int iscsit_handle_snack(
2219 2195
2220 hdr = (struct iscsi_snack *) buf; 2196 hdr = (struct iscsi_snack *) buf;
2221 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL; 2197 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
2222 hdr->itt = be32_to_cpu(hdr->itt);
2223 hdr->ttt = be32_to_cpu(hdr->ttt);
2224 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
2225 hdr->begrun = be32_to_cpu(hdr->begrun);
2226 hdr->runlength = be32_to_cpu(hdr->runlength);
2227 2198
2228 pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:" 2199 pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:"
2229 " 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x," 2200 " 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x,"
@@ -2243,13 +2214,18 @@ static int iscsit_handle_snack(
2243 switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) { 2214 switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) {
2244 case 0: 2215 case 0:
2245 return iscsit_handle_recovery_datain_or_r2t(conn, buf, 2216 return iscsit_handle_recovery_datain_or_r2t(conn, buf,
2246 hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength); 2217 hdr->itt,
2218 be32_to_cpu(hdr->ttt),
2219 be32_to_cpu(hdr->begrun),
2220 be32_to_cpu(hdr->runlength));
2247 case ISCSI_FLAG_SNACK_TYPE_STATUS: 2221 case ISCSI_FLAG_SNACK_TYPE_STATUS:
2248 return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt, 2222 return iscsit_handle_status_snack(conn, hdr->itt,
2249 hdr->begrun, hdr->runlength); 2223 be32_to_cpu(hdr->ttt),
2224 be32_to_cpu(hdr->begrun), be32_to_cpu(hdr->runlength));
2250 case ISCSI_FLAG_SNACK_TYPE_DATA_ACK: 2225 case ISCSI_FLAG_SNACK_TYPE_DATA_ACK:
2251 return iscsit_handle_data_ack(conn, hdr->ttt, hdr->begrun, 2226 return iscsit_handle_data_ack(conn, be32_to_cpu(hdr->ttt),
2252 hdr->runlength); 2227 be32_to_cpu(hdr->begrun),
2228 be32_to_cpu(hdr->runlength));
2253 case ISCSI_FLAG_SNACK_TYPE_RDATA: 2229 case ISCSI_FLAG_SNACK_TYPE_RDATA:
2254 /* FIXME: Support R-Data SNACK */ 2230 /* FIXME: Support R-Data SNACK */
2255 pr_err("R-Data SNACK Not Supported.\n"); 2231 pr_err("R-Data SNACK Not Supported.\n");
@@ -2414,7 +2390,7 @@ static int iscsit_send_conn_drop_async_message(
2414 hdr = (struct iscsi_async *) cmd->pdu; 2390 hdr = (struct iscsi_async *) cmd->pdu;
2415 hdr->opcode = ISCSI_OP_ASYNC_EVENT; 2391 hdr->opcode = ISCSI_OP_ASYNC_EVENT;
2416 hdr->flags = ISCSI_FLAG_CMD_FINAL; 2392 hdr->flags = ISCSI_FLAG_CMD_FINAL;
2417 cmd->init_task_tag = 0xFFFFFFFF; 2393 cmd->init_task_tag = RESERVED_ITT;
2418 cmd->targ_xfer_tag = 0xFFFFFFFF; 2394 cmd->targ_xfer_tag = 0xFFFFFFFF;
2419 put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]); 2395 put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]);
2420 cmd->stat_sn = conn->stat_sn++; 2396 cmd->stat_sn = conn->stat_sn++;
@@ -2536,12 +2512,17 @@ static int iscsit_send_data_in(
2536 else 2512 else
2537 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); 2513 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
2538 2514
2539 hdr->itt = cpu_to_be32(cmd->init_task_tag); 2515 hdr->itt = cmd->init_task_tag;
2540 hdr->ttt = (hdr->flags & ISCSI_FLAG_DATA_ACK) ? 2516
2541 cpu_to_be32(cmd->targ_xfer_tag) : 2517 if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2542 0xFFFFFFFF; 2518 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
2543 hdr->statsn = (set_statsn) ? cpu_to_be32(cmd->stat_sn) : 2519 else
2544 0xFFFFFFFF; 2520 hdr->ttt = cpu_to_be32(0xFFFFFFFF);
2521 if (set_statsn)
2522 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2523 else
2524 hdr->statsn = cpu_to_be32(0xFFFFFFFF);
2525
2545 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2526 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2546 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2527 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2547 hdr->datasn = cpu_to_be32(datain.data_sn); 2528 hdr->datasn = cpu_to_be32(datain.data_sn);
@@ -2708,7 +2689,7 @@ static int iscsit_send_logout_response(
2708 hdr->opcode = ISCSI_OP_LOGOUT_RSP; 2689 hdr->opcode = ISCSI_OP_LOGOUT_RSP;
2709 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2690 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2710 hdr->response = cmd->logout_response; 2691 hdr->response = cmd->logout_response;
2711 hdr->itt = cpu_to_be32(cmd->init_task_tag); 2692 hdr->itt = cmd->init_task_tag;
2712 cmd->stat_sn = conn->stat_sn++; 2693 cmd->stat_sn = conn->stat_sn++;
2713 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2694 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2714 2695
@@ -2759,7 +2740,7 @@ static int iscsit_send_unsolicited_nopin(
2759 memset(hdr, 0, ISCSI_HDR_LEN); 2740 memset(hdr, 0, ISCSI_HDR_LEN);
2760 hdr->opcode = ISCSI_OP_NOOP_IN; 2741 hdr->opcode = ISCSI_OP_NOOP_IN;
2761 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2742 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2762 hdr->itt = cpu_to_be32(cmd->init_task_tag); 2743 hdr->itt = cmd->init_task_tag;
2763 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 2744 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
2764 cmd->stat_sn = conn->stat_sn; 2745 cmd->stat_sn = conn->stat_sn;
2765 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2746 hdr->statsn = cpu_to_be32(cmd->stat_sn);
@@ -2816,7 +2797,7 @@ static int iscsit_send_nopin_response(
2816 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2797 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2817 hton24(hdr->dlength, cmd->buf_ptr_size); 2798 hton24(hdr->dlength, cmd->buf_ptr_size);
2818 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); 2799 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
2819 hdr->itt = cpu_to_be32(cmd->init_task_tag); 2800 hdr->itt = cmd->init_task_tag;
2820 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 2801 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
2821 cmd->stat_sn = conn->stat_sn++; 2802 cmd->stat_sn = conn->stat_sn++;
2822 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2803 hdr->statsn = cpu_to_be32(cmd->stat_sn);
@@ -2906,7 +2887,7 @@ static int iscsit_send_r2t(
2906 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2887 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2907 int_to_scsilun(cmd->se_cmd.orig_fe_lun, 2888 int_to_scsilun(cmd->se_cmd.orig_fe_lun,
2908 (struct scsi_lun *)&hdr->lun); 2889 (struct scsi_lun *)&hdr->lun);
2909 hdr->itt = cpu_to_be32(cmd->init_task_tag); 2890 hdr->itt = cmd->init_task_tag;
2910 spin_lock_bh(&conn->sess->ttt_lock); 2891 spin_lock_bh(&conn->sess->ttt_lock);
2911 r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++; 2892 r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
2912 if (r2t->targ_xfer_tag == 0xFFFFFFFF) 2893 if (r2t->targ_xfer_tag == 0xFFFFFFFF)
@@ -3074,7 +3055,7 @@ static int iscsit_send_status(
3074 } 3055 }
3075 hdr->response = cmd->iscsi_response; 3056 hdr->response = cmd->iscsi_response;
3076 hdr->cmd_status = cmd->se_cmd.scsi_status; 3057 hdr->cmd_status = cmd->se_cmd.scsi_status;
3077 hdr->itt = cpu_to_be32(cmd->init_task_tag); 3058 hdr->itt = cmd->init_task_tag;
3078 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3059 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3079 3060
3080 iscsit_increment_maxcmdsn(cmd, conn->sess); 3061 iscsit_increment_maxcmdsn(cmd, conn->sess);
@@ -3092,15 +3073,18 @@ static int iscsit_send_status(
3092 if (cmd->se_cmd.sense_buffer && 3073 if (cmd->se_cmd.sense_buffer &&
3093 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || 3074 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
3094 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { 3075 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
3076 put_unaligned_be16(cmd->se_cmd.scsi_sense_length, cmd->sense_buffer);
3077 cmd->se_cmd.scsi_sense_length += sizeof (__be16);
3078
3095 padding = -(cmd->se_cmd.scsi_sense_length) & 3; 3079 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
3096 hton24(hdr->dlength, cmd->se_cmd.scsi_sense_length); 3080 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
3097 iov[iov_count].iov_base = cmd->se_cmd.sense_buffer; 3081 iov[iov_count].iov_base = cmd->sense_buffer;
3098 iov[iov_count++].iov_len = 3082 iov[iov_count++].iov_len =
3099 (cmd->se_cmd.scsi_sense_length + padding); 3083 (cmd->se_cmd.scsi_sense_length + padding);
3100 tx_size += cmd->se_cmd.scsi_sense_length; 3084 tx_size += cmd->se_cmd.scsi_sense_length;
3101 3085
3102 if (padding) { 3086 if (padding) {
3103 memset(cmd->se_cmd.sense_buffer + 3087 memset(cmd->sense_buffer +
3104 cmd->se_cmd.scsi_sense_length, 0, padding); 3088 cmd->se_cmd.scsi_sense_length, 0, padding);
3105 tx_size += padding; 3089 tx_size += padding;
3106 pr_debug("Adding %u bytes of padding to" 3090 pr_debug("Adding %u bytes of padding to"
@@ -3109,7 +3093,7 @@ static int iscsit_send_status(
3109 3093
3110 if (conn->conn_ops->DataDigest) { 3094 if (conn->conn_ops->DataDigest) {
3111 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3095 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3112 cmd->se_cmd.sense_buffer, 3096 cmd->sense_buffer,
3113 (cmd->se_cmd.scsi_sense_length + padding), 3097 (cmd->se_cmd.scsi_sense_length + padding),
3114 0, NULL, (u8 *)&cmd->data_crc); 3098 0, NULL, (u8 *)&cmd->data_crc);
3115 3099
@@ -3184,7 +3168,7 @@ static int iscsit_send_task_mgt_rsp(
3184 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; 3168 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
3185 hdr->flags = ISCSI_FLAG_CMD_FINAL; 3169 hdr->flags = ISCSI_FLAG_CMD_FINAL;
3186 hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr); 3170 hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
3187 hdr->itt = cpu_to_be32(cmd->init_task_tag); 3171 hdr->itt = cmd->init_task_tag;
3188 cmd->stat_sn = conn->stat_sn++; 3172 cmd->stat_sn = conn->stat_sn++;
3189 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3173 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3190 3174
@@ -3236,7 +3220,7 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np)
3236 struct sockaddr_in * sock_in = 3220 struct sockaddr_in * sock_in =
3237 (struct sockaddr_in *)&np->np_sockaddr; 3221 (struct sockaddr_in *)&np->np_sockaddr;
3238 3222
3239 if (sock_in->sin_addr.s_addr == INADDR_ANY) 3223 if (sock_in->sin_addr.s_addr == htonl(INADDR_ANY))
3240 ret = true; 3224 ret = true;
3241 } 3225 }
3242 3226
@@ -3271,7 +3255,6 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
3271 len += 1; 3255 len += 1;
3272 3256
3273 if ((len + payload_len) > buffer_len) { 3257 if ((len + payload_len) > buffer_len) {
3274 spin_unlock(&tiqn->tiqn_tpg_lock);
3275 end_of_buf = 1; 3258 end_of_buf = 1;
3276 goto eob; 3259 goto eob;
3277 } 3260 }
@@ -3358,7 +3341,7 @@ static int iscsit_send_text_rsp(
3358 hdr->opcode = ISCSI_OP_TEXT_RSP; 3341 hdr->opcode = ISCSI_OP_TEXT_RSP;
3359 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3342 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3360 hton24(hdr->dlength, text_length); 3343 hton24(hdr->dlength, text_length);
3361 hdr->itt = cpu_to_be32(cmd->init_task_tag); 3344 hdr->itt = cmd->init_task_tag;
3362 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 3345 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
3363 cmd->stat_sn = conn->stat_sn++; 3346 cmd->stat_sn = conn->stat_sn++;
3364 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3347 hdr->statsn = cpu_to_be32(cmd->stat_sn);
@@ -3424,6 +3407,7 @@ static int iscsit_send_reject(
3424 hdr->opcode = ISCSI_OP_REJECT; 3407 hdr->opcode = ISCSI_OP_REJECT;
3425 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3408 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3426 hton24(hdr->dlength, ISCSI_HDR_LEN); 3409 hton24(hdr->dlength, ISCSI_HDR_LEN);
3410 hdr->ffffffff = cpu_to_be32(0xffffffff);
3427 cmd->stat_sn = conn->stat_sn++; 3411 cmd->stat_sn = conn->stat_sn++;
3428 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3412 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3429 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3413 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
@@ -3735,7 +3719,9 @@ restart:
3735 */ 3719 */
3736 iscsit_thread_check_cpumask(conn, current, 1); 3720 iscsit_thread_check_cpumask(conn, current, 1);
3737 3721
3738 schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT); 3722 wait_event_interruptible(conn->queues_wq,
3723 !iscsit_conn_all_queues_empty(conn) ||
3724 ts->status == ISCSI_THREAD_SET_RESET);
3739 3725
3740 if ((ts->status == ISCSI_THREAD_SET_RESET) || 3726 if ((ts->status == ISCSI_THREAD_SET_RESET) ||
3741 signal_pending(current)) 3727 signal_pending(current))
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index 12abb4c9e34e..f1e4f3155bac 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -38,4 +38,9 @@ extern struct kmem_cache *lio_cmd_cache;
38extern struct kmem_cache *lio_qr_cache; 38extern struct kmem_cache *lio_qr_cache;
39extern struct kmem_cache *lio_r2t_cache; 39extern struct kmem_cache *lio_r2t_cache;
40 40
41extern struct idr sess_idr;
42extern struct mutex auth_id_lock;
43extern spinlock_t sess_idr_lock;
44
45
41#endif /*** ISCSI_TARGET_H ***/ 46#endif /*** ISCSI_TARGET_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index a7b25e783b58..ff6fd4fb624d 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -135,7 +135,7 @@ static struct configfs_attribute *lio_target_portal_attrs[] = {
135 135
136#define MAX_PORTAL_LEN 256 136#define MAX_PORTAL_LEN 256
137 137
138struct se_tpg_np *lio_target_call_addnptotpg( 138static struct se_tpg_np *lio_target_call_addnptotpg(
139 struct se_portal_group *se_tpg, 139 struct se_portal_group *se_tpg,
140 struct config_group *group, 140 struct config_group *group,
141 const char *name) 141 const char *name)
@@ -1034,6 +1034,9 @@ TPG_PARAM_ATTR(ImmediateData, S_IRUGO | S_IWUSR);
1034DEF_TPG_PARAM(MaxRecvDataSegmentLength); 1034DEF_TPG_PARAM(MaxRecvDataSegmentLength);
1035TPG_PARAM_ATTR(MaxRecvDataSegmentLength, S_IRUGO | S_IWUSR); 1035TPG_PARAM_ATTR(MaxRecvDataSegmentLength, S_IRUGO | S_IWUSR);
1036 1036
1037DEF_TPG_PARAM(MaxXmitDataSegmentLength);
1038TPG_PARAM_ATTR(MaxXmitDataSegmentLength, S_IRUGO | S_IWUSR);
1039
1037DEF_TPG_PARAM(MaxBurstLength); 1040DEF_TPG_PARAM(MaxBurstLength);
1038TPG_PARAM_ATTR(MaxBurstLength, S_IRUGO | S_IWUSR); 1041TPG_PARAM_ATTR(MaxBurstLength, S_IRUGO | S_IWUSR);
1039 1042
@@ -1079,6 +1082,7 @@ static struct configfs_attribute *lio_target_tpg_param_attrs[] = {
1079 &iscsi_tpg_param_InitialR2T.attr, 1082 &iscsi_tpg_param_InitialR2T.attr,
1080 &iscsi_tpg_param_ImmediateData.attr, 1083 &iscsi_tpg_param_ImmediateData.attr,
1081 &iscsi_tpg_param_MaxRecvDataSegmentLength.attr, 1084 &iscsi_tpg_param_MaxRecvDataSegmentLength.attr,
1085 &iscsi_tpg_param_MaxXmitDataSegmentLength.attr,
1082 &iscsi_tpg_param_MaxBurstLength.attr, 1086 &iscsi_tpg_param_MaxBurstLength.attr,
1083 &iscsi_tpg_param_FirstBurstLength.attr, 1087 &iscsi_tpg_param_FirstBurstLength.attr,
1084 &iscsi_tpg_param_DefaultTime2Wait.attr, 1088 &iscsi_tpg_param_DefaultTime2Wait.attr,
@@ -1166,7 +1170,7 @@ static struct configfs_attribute *lio_target_tpg_attrs[] = {
1166 1170
1167/* Start items for lio_target_tiqn_cit */ 1171/* Start items for lio_target_tiqn_cit */
1168 1172
1169struct se_portal_group *lio_target_tiqn_addtpg( 1173static struct se_portal_group *lio_target_tiqn_addtpg(
1170 struct se_wwn *wwn, 1174 struct se_wwn *wwn,
1171 struct config_group *group, 1175 struct config_group *group,
1172 const char *name) 1176 const char *name)
@@ -1216,7 +1220,7 @@ out:
1216 return NULL; 1220 return NULL;
1217} 1221}
1218 1222
1219void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg) 1223static void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
1220{ 1224{
1221 struct iscsi_portal_group *tpg; 1225 struct iscsi_portal_group *tpg;
1222 struct iscsi_tiqn *tiqn; 1226 struct iscsi_tiqn *tiqn;
@@ -1248,7 +1252,7 @@ static struct configfs_attribute *lio_target_wwn_attrs[] = {
1248 NULL, 1252 NULL,
1249}; 1253};
1250 1254
1251struct se_wwn *lio_target_call_coreaddtiqn( 1255static struct se_wwn *lio_target_call_coreaddtiqn(
1252 struct target_fabric_configfs *tf, 1256 struct target_fabric_configfs *tf,
1253 struct config_group *group, 1257 struct config_group *group,
1254 const char *name) 1258 const char *name)
@@ -1296,7 +1300,7 @@ struct se_wwn *lio_target_call_coreaddtiqn(
1296 return &tiqn->tiqn_wwn; 1300 return &tiqn->tiqn_wwn;
1297} 1301}
1298 1302
1299void lio_target_call_coredeltiqn( 1303static void lio_target_call_coredeltiqn(
1300 struct se_wwn *wwn) 1304 struct se_wwn *wwn)
1301{ 1305{
1302 struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn); 1306 struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
@@ -1471,7 +1475,8 @@ static u32 iscsi_get_task_tag(struct se_cmd *se_cmd)
1471{ 1475{
1472 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1476 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1473 1477
1474 return cmd->init_task_tag; 1478 /* only used for printks or comparism with ->ref_task_tag */
1479 return (__force u32)cmd->init_task_tag;
1475} 1480}
1476 1481
1477static int iscsi_get_cmd_state(struct se_cmd *se_cmd) 1482static int iscsi_get_cmd_state(struct se_cmd *se_cmd)
@@ -1542,29 +1547,6 @@ static int lio_queue_status(struct se_cmd *se_cmd)
1542 return 0; 1547 return 0;
1543} 1548}
1544 1549
1545static u16 lio_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
1546{
1547 unsigned char *buffer = se_cmd->sense_buffer;
1548 /*
1549 * From RFC-3720 10.4.7. Data Segment - Sense and Response Data Segment
1550 * 16-bit SenseLength.
1551 */
1552 buffer[0] = ((sense_length >> 8) & 0xff);
1553 buffer[1] = (sense_length & 0xff);
1554 /*
1555 * Return two byte offset into allocated sense_buffer.
1556 */
1557 return 2;
1558}
1559
1560static u16 lio_get_fabric_sense_len(void)
1561{
1562 /*
1563 * Return two byte offset into allocated sense_buffer.
1564 */
1565 return 2;
1566}
1567
1568static int lio_queue_tm_rsp(struct se_cmd *se_cmd) 1550static int lio_queue_tm_rsp(struct se_cmd *se_cmd)
1569{ 1551{
1570 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1552 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
@@ -1748,8 +1730,6 @@ int iscsi_target_register_configfs(void)
1748 fabric->tf_ops.queue_data_in = &lio_queue_data_in; 1730 fabric->tf_ops.queue_data_in = &lio_queue_data_in;
1749 fabric->tf_ops.queue_status = &lio_queue_status; 1731 fabric->tf_ops.queue_status = &lio_queue_status;
1750 fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp; 1732 fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
1751 fabric->tf_ops.set_fabric_sense_len = &lio_set_fabric_sense_len;
1752 fabric->tf_ops.get_fabric_sense_len = &lio_get_fabric_sense_len;
1753 /* 1733 /*
1754 * Setup function pointers for generic logic in target_core_fabric_configfs.c 1734 * Setup function pointers for generic logic in target_core_fabric_configfs.c
1755 */ 1735 */
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 8a908b28d8b2..21048dbf7d13 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -25,10 +25,10 @@
25#define NA_DATAOUT_TIMEOUT_RETRIES 5 25#define NA_DATAOUT_TIMEOUT_RETRIES 5
26#define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15 26#define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
27#define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1 27#define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
28#define NA_NOPIN_TIMEOUT 5 28#define NA_NOPIN_TIMEOUT 15
29#define NA_NOPIN_TIMEOUT_MAX 60 29#define NA_NOPIN_TIMEOUT_MAX 60
30#define NA_NOPIN_TIMEOUT_MIN 3 30#define NA_NOPIN_TIMEOUT_MIN 3
31#define NA_NOPIN_RESPONSE_TIMEOUT 5 31#define NA_NOPIN_RESPONSE_TIMEOUT 30
32#define NA_NOPIN_RESPONSE_TIMEOUT_MAX 60 32#define NA_NOPIN_RESPONSE_TIMEOUT_MAX 60
33#define NA_NOPIN_RESPONSE_TIMEOUT_MIN 3 33#define NA_NOPIN_RESPONSE_TIMEOUT_MIN 3
34#define NA_RANDOM_DATAIN_PDU_OFFSETS 0 34#define NA_RANDOM_DATAIN_PDU_OFFSETS 0
@@ -239,6 +239,7 @@ struct iscsi_conn_ops {
239 u8 HeaderDigest; /* [0,1] == [None,CRC32C] */ 239 u8 HeaderDigest; /* [0,1] == [None,CRC32C] */
240 u8 DataDigest; /* [0,1] == [None,CRC32C] */ 240 u8 DataDigest; /* [0,1] == [None,CRC32C] */
241 u32 MaxRecvDataSegmentLength; /* [512..2**24-1] */ 241 u32 MaxRecvDataSegmentLength; /* [512..2**24-1] */
242 u32 MaxXmitDataSegmentLength; /* [512..2**24-1] */
242 u8 OFMarker; /* [0,1] == [No,Yes] */ 243 u8 OFMarker; /* [0,1] == [No,Yes] */
243 u8 IFMarker; /* [0,1] == [No,Yes] */ 244 u8 IFMarker; /* [0,1] == [No,Yes] */
244 u32 OFMarkInt; /* [1..65535] */ 245 u32 OFMarkInt; /* [1..65535] */
@@ -360,7 +361,7 @@ struct iscsi_cmd {
360 /* Command flags */ 361 /* Command flags */
361 enum cmd_flags_table cmd_flags; 362 enum cmd_flags_table cmd_flags;
362 /* Initiator Task Tag assigned from Initiator */ 363 /* Initiator Task Tag assigned from Initiator */
363 u32 init_task_tag; 364 itt_t init_task_tag;
364 /* Target Transfer Tag assigned from Target */ 365 /* Target Transfer Tag assigned from Target */
365 u32 targ_xfer_tag; 366 u32 targ_xfer_tag;
366 /* CmdSN assigned from Initiator */ 367 /* CmdSN assigned from Initiator */
@@ -478,7 +479,6 @@ struct iscsi_cmd {
478 479
479struct iscsi_tmr_req { 480struct iscsi_tmr_req {
480 bool task_reassign:1; 481 bool task_reassign:1;
481 u32 ref_cmd_sn;
482 u32 exp_data_sn; 482 u32 exp_data_sn;
483 struct iscsi_cmd *ref_cmd; 483 struct iscsi_cmd *ref_cmd;
484 struct iscsi_conn_recovery *conn_recovery; 484 struct iscsi_conn_recovery *conn_recovery;
@@ -486,6 +486,7 @@ struct iscsi_tmr_req {
486}; 486};
487 487
488struct iscsi_conn { 488struct iscsi_conn {
489 wait_queue_head_t queues_wq;
489 /* Authentication Successful for this connection */ 490 /* Authentication Successful for this connection */
490 u8 auth_complete; 491 u8 auth_complete;
491 /* State connection is currently in */ 492 /* State connection is currently in */
@@ -505,7 +506,7 @@ struct iscsi_conn {
505 u32 auth_id; 506 u32 auth_id;
506 u32 conn_flags; 507 u32 conn_flags;
507 /* Used for iscsi_tx_login_rsp() */ 508 /* Used for iscsi_tx_login_rsp() */
508 u32 login_itt; 509 itt_t login_itt;
509 u32 exp_statsn; 510 u32 exp_statsn;
510 /* Per connection status sequence number */ 511 /* Per connection status sequence number */
511 u32 stat_sn; 512 u32 stat_sn;
@@ -578,6 +579,7 @@ struct iscsi_conn_recovery {
578 u16 cid; 579 u16 cid;
579 u32 cmd_count; 580 u32 cmd_count;
580 u32 maxrecvdatasegmentlength; 581 u32 maxrecvdatasegmentlength;
582 u32 maxxmitdatasegmentlength;
581 int ready_for_reallegiance; 583 int ready_for_reallegiance;
582 struct list_head conn_recovery_cmd_list; 584 struct list_head conn_recovery_cmd_list;
583 spinlock_t conn_recovery_cmd_lock; 585 spinlock_t conn_recovery_cmd_lock;
@@ -597,7 +599,7 @@ struct iscsi_session {
597 /* state session is currently in */ 599 /* state session is currently in */
598 u32 session_state; 600 u32 session_state;
599 /* session wide counter: initiator assigned task tag */ 601 /* session wide counter: initiator assigned task tag */
600 u32 init_task_tag; 602 itt_t init_task_tag;
601 /* session wide counter: target assigned task tag */ 603 /* session wide counter: target assigned task tag */
602 u32 targ_xfer_tag; 604 u32 targ_xfer_tag;
603 u32 cmdsn_window; 605 u32 cmdsn_window;
@@ -663,7 +665,7 @@ struct iscsi_login {
663 u8 version_max; 665 u8 version_max;
664 char isid[6]; 666 char isid[6];
665 u32 cmd_sn; 667 u32 cmd_sn;
666 u32 init_task_tag; 668 itt_t init_task_tag;
667 u32 initial_exp_statsn; 669 u32 initial_exp_statsn;
668 u32 rsp_length; 670 u32 rsp_length;
669 u16 cid; 671 u16 cid;
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 1a02016ecdab..8aacf611b86d 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -48,9 +48,9 @@ void iscsit_set_dataout_sequence_values(
48 if (cmd->unsolicited_data) { 48 if (cmd->unsolicited_data) {
49 cmd->seq_start_offset = cmd->write_data_done; 49 cmd->seq_start_offset = cmd->write_data_done;
50 cmd->seq_end_offset = (cmd->write_data_done + 50 cmd->seq_end_offset = (cmd->write_data_done +
51 (cmd->se_cmd.data_length > 51 ((cmd->se_cmd.data_length >
52 conn->sess->sess_ops->FirstBurstLength) ? 52 conn->sess->sess_ops->FirstBurstLength) ?
53 conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length); 53 conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length));
54 return; 54 return;
55 } 55 }
56 56
@@ -95,14 +95,15 @@ static int iscsit_dataout_within_command_recovery_check(
95 */ 95 */
96 if (conn->sess->sess_ops->DataSequenceInOrder) { 96 if (conn->sess->sess_ops->DataSequenceInOrder) {
97 if ((cmd->cmd_flags & ICF_WITHIN_COMMAND_RECOVERY) && 97 if ((cmd->cmd_flags & ICF_WITHIN_COMMAND_RECOVERY) &&
98 (cmd->write_data_done != hdr->offset)) 98 cmd->write_data_done != be32_to_cpu(hdr->offset))
99 goto dump; 99 goto dump;
100 100
101 cmd->cmd_flags &= ~ICF_WITHIN_COMMAND_RECOVERY; 101 cmd->cmd_flags &= ~ICF_WITHIN_COMMAND_RECOVERY;
102 } else { 102 } else {
103 struct iscsi_seq *seq; 103 struct iscsi_seq *seq;
104 104
105 seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length); 105 seq = iscsit_get_seq_holder(cmd, be32_to_cpu(hdr->offset),
106 payload_length);
106 if (!seq) 107 if (!seq)
107 return DATAOUT_CANNOT_RECOVER; 108 return DATAOUT_CANNOT_RECOVER;
108 /* 109 /*
@@ -111,15 +112,15 @@ static int iscsit_dataout_within_command_recovery_check(
111 cmd->seq_ptr = seq; 112 cmd->seq_ptr = seq;
112 113
113 if (conn->sess->sess_ops->DataPDUInOrder) { 114 if (conn->sess->sess_ops->DataPDUInOrder) {
114 if ((seq->status == 115 if (seq->status ==
115 DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) && 116 DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY &&
116 ((seq->offset != hdr->offset) || 117 (seq->offset != be32_to_cpu(hdr->offset) ||
117 (seq->data_sn != hdr->datasn))) 118 seq->data_sn != be32_to_cpu(hdr->datasn)))
118 goto dump; 119 goto dump;
119 } else { 120 } else {
120 if ((seq->status == 121 if (seq->status ==
121 DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) && 122 DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY &&
122 (seq->data_sn != hdr->datasn)) 123 seq->data_sn != be32_to_cpu(hdr->datasn))
123 goto dump; 124 goto dump;
124 } 125 }
125 126
@@ -148,12 +149,12 @@ static int iscsit_dataout_check_unsolicited_sequence(
148 u32 payload_length = ntoh24(hdr->dlength); 149 u32 payload_length = ntoh24(hdr->dlength);
149 150
150 151
151 if ((hdr->offset < cmd->seq_start_offset) || 152 if ((be32_to_cpu(hdr->offset) < cmd->seq_start_offset) ||
152 ((hdr->offset + payload_length) > cmd->seq_end_offset)) { 153 ((be32_to_cpu(hdr->offset) + payload_length) > cmd->seq_end_offset)) {
153 pr_err("Command ITT: 0x%08x with Offset: %u," 154 pr_err("Command ITT: 0x%08x with Offset: %u,"
154 " Length: %u outside of Unsolicited Sequence %u:%u while" 155 " Length: %u outside of Unsolicited Sequence %u:%u while"
155 " DataSequenceInOrder=Yes.\n", cmd->init_task_tag, 156 " DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
156 hdr->offset, payload_length, cmd->seq_start_offset, 157 be32_to_cpu(hdr->offset), payload_length, cmd->seq_start_offset,
157 cmd->seq_end_offset); 158 cmd->seq_end_offset);
158 return DATAOUT_CANNOT_RECOVER; 159 return DATAOUT_CANNOT_RECOVER;
159 } 160 }
@@ -236,12 +237,12 @@ static int iscsit_dataout_check_sequence(
236 * fullfilling an Recovery R2T, it's best to just dump the 237 * fullfilling an Recovery R2T, it's best to just dump the
237 * payload here, instead of erroring out. 238 * payload here, instead of erroring out.
238 */ 239 */
239 if ((hdr->offset < cmd->seq_start_offset) || 240 if ((be32_to_cpu(hdr->offset) < cmd->seq_start_offset) ||
240 ((hdr->offset + payload_length) > cmd->seq_end_offset)) { 241 ((be32_to_cpu(hdr->offset) + payload_length) > cmd->seq_end_offset)) {
241 pr_err("Command ITT: 0x%08x with Offset: %u," 242 pr_err("Command ITT: 0x%08x with Offset: %u,"
242 " Length: %u outside of Sequence %u:%u while" 243 " Length: %u outside of Sequence %u:%u while"
243 " DataSequenceInOrder=Yes.\n", cmd->init_task_tag, 244 " DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
244 hdr->offset, payload_length, cmd->seq_start_offset, 245 be32_to_cpu(hdr->offset), payload_length, cmd->seq_start_offset,
245 cmd->seq_end_offset); 246 cmd->seq_end_offset);
246 247
247 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) 248 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
@@ -251,7 +252,8 @@ static int iscsit_dataout_check_sequence(
251 252
252 next_burst_len = (cmd->next_burst_len + payload_length); 253 next_burst_len = (cmd->next_burst_len + payload_length);
253 } else { 254 } else {
254 seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length); 255 seq = iscsit_get_seq_holder(cmd, be32_to_cpu(hdr->offset),
256 payload_length);
255 if (!seq) 257 if (!seq)
256 return DATAOUT_CANNOT_RECOVER; 258 return DATAOUT_CANNOT_RECOVER;
257 /* 259 /*
@@ -366,16 +368,16 @@ static int iscsit_dataout_check_datasn(
366 data_sn = seq->data_sn; 368 data_sn = seq->data_sn;
367 } 369 }
368 370
369 if (hdr->datasn > data_sn) { 371 if (be32_to_cpu(hdr->datasn) > data_sn) {
370 pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x" 372 pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
371 " higher than expected 0x%08x.\n", cmd->init_task_tag, 373 " higher than expected 0x%08x.\n", cmd->init_task_tag,
372 hdr->datasn, data_sn); 374 be32_to_cpu(hdr->datasn), data_sn);
373 recovery = 1; 375 recovery = 1;
374 goto recover; 376 goto recover;
375 } else if (hdr->datasn < data_sn) { 377 } else if (be32_to_cpu(hdr->datasn) < data_sn) {
376 pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x" 378 pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
377 " lower than expected 0x%08x, discarding payload.\n", 379 " lower than expected 0x%08x, discarding payload.\n",
378 cmd->init_task_tag, hdr->datasn, data_sn); 380 cmd->init_task_tag, be32_to_cpu(hdr->datasn), data_sn);
379 dump = 1; 381 dump = 1;
380 goto dump; 382 goto dump;
381 } 383 }
@@ -415,26 +417,27 @@ static int iscsit_dataout_pre_datapduinorder_yes(
415 * error has occured and fail the connection. 417 * error has occured and fail the connection.
416 */ 418 */
417 if (conn->sess->sess_ops->DataSequenceInOrder) { 419 if (conn->sess->sess_ops->DataSequenceInOrder) {
418 if (hdr->offset != cmd->write_data_done) { 420 if (be32_to_cpu(hdr->offset) != cmd->write_data_done) {
419 pr_err("Command ITT: 0x%08x, received offset" 421 pr_err("Command ITT: 0x%08x, received offset"
420 " %u different than expected %u.\n", cmd->init_task_tag, 422 " %u different than expected %u.\n", cmd->init_task_tag,
421 hdr->offset, cmd->write_data_done); 423 be32_to_cpu(hdr->offset), cmd->write_data_done);
422 recovery = 1; 424 recovery = 1;
423 goto recover; 425 goto recover;
424 } 426 }
425 } else { 427 } else {
426 struct iscsi_seq *seq = cmd->seq_ptr; 428 struct iscsi_seq *seq = cmd->seq_ptr;
427 429
428 if (hdr->offset > seq->offset) { 430 if (be32_to_cpu(hdr->offset) > seq->offset) {
429 pr_err("Command ITT: 0x%08x, received offset" 431 pr_err("Command ITT: 0x%08x, received offset"
430 " %u greater than expected %u.\n", cmd->init_task_tag, 432 " %u greater than expected %u.\n", cmd->init_task_tag,
431 hdr->offset, seq->offset); 433 be32_to_cpu(hdr->offset), seq->offset);
432 recovery = 1; 434 recovery = 1;
433 goto recover; 435 goto recover;
434 } else if (hdr->offset < seq->offset) { 436 } else if (be32_to_cpu(hdr->offset) < seq->offset) {
435 pr_err("Command ITT: 0x%08x, received offset" 437 pr_err("Command ITT: 0x%08x, received offset"
436 " %u less than expected %u, discarding payload.\n", 438 " %u less than expected %u, discarding payload.\n",
437 cmd->init_task_tag, hdr->offset, seq->offset); 439 cmd->init_task_tag, be32_to_cpu(hdr->offset),
440 seq->offset);
438 dump = 1; 441 dump = 1;
439 goto dump; 442 goto dump;
440 } 443 }
@@ -453,7 +456,7 @@ dump:
453 return DATAOUT_CANNOT_RECOVER; 456 return DATAOUT_CANNOT_RECOVER;
454 457
455 return (recovery) ? iscsit_recover_dataout_sequence(cmd, 458 return (recovery) ? iscsit_recover_dataout_sequence(cmd,
456 hdr->offset, payload_length) : 459 be32_to_cpu(hdr->offset), payload_length) :
457 (dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY : DATAOUT_NORMAL; 460 (dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY : DATAOUT_NORMAL;
458} 461}
459 462
@@ -465,7 +468,8 @@ static int iscsit_dataout_pre_datapduinorder_no(
465 struct iscsi_data *hdr = (struct iscsi_data *) buf; 468 struct iscsi_data *hdr = (struct iscsi_data *) buf;
466 u32 payload_length = ntoh24(hdr->dlength); 469 u32 payload_length = ntoh24(hdr->dlength);
467 470
468 pdu = iscsit_get_pdu_holder(cmd, hdr->offset, payload_length); 471 pdu = iscsit_get_pdu_holder(cmd, be32_to_cpu(hdr->offset),
472 payload_length);
469 if (!pdu) 473 if (!pdu)
470 return DATAOUT_CANNOT_RECOVER; 474 return DATAOUT_CANNOT_RECOVER;
471 475
@@ -479,7 +483,7 @@ static int iscsit_dataout_pre_datapduinorder_no(
479 case ISCSI_PDU_RECEIVED_OK: 483 case ISCSI_PDU_RECEIVED_OK:
480 pr_err("Command ITT: 0x%08x received already gotten" 484 pr_err("Command ITT: 0x%08x received already gotten"
481 " Offset: %u, Length: %u\n", cmd->init_task_tag, 485 " Offset: %u, Length: %u\n", cmd->init_task_tag,
482 hdr->offset, payload_length); 486 be32_to_cpu(hdr->offset), payload_length);
483 return iscsit_dump_data_payload(cmd->conn, payload_length, 1); 487 return iscsit_dump_data_payload(cmd->conn, payload_length, 1);
484 default: 488 default:
485 return DATAOUT_CANNOT_RECOVER; 489 return DATAOUT_CANNOT_RECOVER;
@@ -553,7 +557,7 @@ static int iscsit_dataout_post_crc_passed(
553 if (cmd->unsolicited_data) { 557 if (cmd->unsolicited_data) {
554 if ((cmd->first_burst_len + payload_length) == 558 if ((cmd->first_burst_len + payload_length) ==
555 conn->sess->sess_ops->FirstBurstLength) { 559 conn->sess->sess_ops->FirstBurstLength) {
556 if (iscsit_dataout_update_r2t(cmd, hdr->offset, 560 if (iscsit_dataout_update_r2t(cmd, be32_to_cpu(hdr->offset),
557 payload_length) < 0) 561 payload_length) < 0)
558 return DATAOUT_CANNOT_RECOVER; 562 return DATAOUT_CANNOT_RECOVER;
559 send_r2t = 1; 563 send_r2t = 1;
@@ -561,7 +565,8 @@ static int iscsit_dataout_post_crc_passed(
561 565
562 if (!conn->sess->sess_ops->DataPDUInOrder) { 566 if (!conn->sess->sess_ops->DataPDUInOrder) {
563 ret = iscsit_dataout_update_datapduinorder_no(cmd, 567 ret = iscsit_dataout_update_datapduinorder_no(cmd,
564 hdr->datasn, (hdr->flags & ISCSI_FLAG_CMD_FINAL)); 568 be32_to_cpu(hdr->datasn),
569 (hdr->flags & ISCSI_FLAG_CMD_FINAL));
565 if (ret == DATAOUT_CANNOT_RECOVER) 570 if (ret == DATAOUT_CANNOT_RECOVER)
566 return ret; 571 return ret;
567 } 572 }
@@ -586,7 +591,8 @@ static int iscsit_dataout_post_crc_passed(
586 if (conn->sess->sess_ops->DataSequenceInOrder) { 591 if (conn->sess->sess_ops->DataSequenceInOrder) {
587 if ((cmd->next_burst_len + payload_length) == 592 if ((cmd->next_burst_len + payload_length) ==
588 conn->sess->sess_ops->MaxBurstLength) { 593 conn->sess->sess_ops->MaxBurstLength) {
589 if (iscsit_dataout_update_r2t(cmd, hdr->offset, 594 if (iscsit_dataout_update_r2t(cmd,
595 be32_to_cpu(hdr->offset),
590 payload_length) < 0) 596 payload_length) < 0)
591 return DATAOUT_CANNOT_RECOVER; 597 return DATAOUT_CANNOT_RECOVER;
592 send_r2t = 1; 598 send_r2t = 1;
@@ -594,7 +600,7 @@ static int iscsit_dataout_post_crc_passed(
594 600
595 if (!conn->sess->sess_ops->DataPDUInOrder) { 601 if (!conn->sess->sess_ops->DataPDUInOrder) {
596 ret = iscsit_dataout_update_datapduinorder_no( 602 ret = iscsit_dataout_update_datapduinorder_no(
597 cmd, hdr->datasn, 603 cmd, be32_to_cpu(hdr->datasn),
598 (hdr->flags & ISCSI_FLAG_CMD_FINAL)); 604 (hdr->flags & ISCSI_FLAG_CMD_FINAL));
599 if (ret == DATAOUT_CANNOT_RECOVER) 605 if (ret == DATAOUT_CANNOT_RECOVER)
600 return ret; 606 return ret;
@@ -610,7 +616,8 @@ static int iscsit_dataout_post_crc_passed(
610 616
611 if ((seq->next_burst_len + payload_length) == 617 if ((seq->next_burst_len + payload_length) ==
612 seq->xfer_len) { 618 seq->xfer_len) {
613 if (iscsit_dataout_update_r2t(cmd, hdr->offset, 619 if (iscsit_dataout_update_r2t(cmd,
620 be32_to_cpu(hdr->offset),
614 payload_length) < 0) 621 payload_length) < 0)
615 return DATAOUT_CANNOT_RECOVER; 622 return DATAOUT_CANNOT_RECOVER;
616 send_r2t = 1; 623 send_r2t = 1;
@@ -618,7 +625,7 @@ static int iscsit_dataout_post_crc_passed(
618 625
619 if (!conn->sess->sess_ops->DataPDUInOrder) { 626 if (!conn->sess->sess_ops->DataPDUInOrder) {
620 ret = iscsit_dataout_update_datapduinorder_no( 627 ret = iscsit_dataout_update_datapduinorder_no(
621 cmd, hdr->datasn, 628 cmd, be32_to_cpu(hdr->datasn),
622 (hdr->flags & ISCSI_FLAG_CMD_FINAL)); 629 (hdr->flags & ISCSI_FLAG_CMD_FINAL));
623 if (ret == DATAOUT_CANNOT_RECOVER) 630 if (ret == DATAOUT_CANNOT_RECOVER)
624 return ret; 631 return ret;
@@ -678,14 +685,15 @@ static int iscsit_dataout_post_crc_failed(
678 } 685 }
679 686
680recover: 687recover:
681 return iscsit_recover_dataout_sequence(cmd, hdr->offset, payload_length); 688 return iscsit_recover_dataout_sequence(cmd, be32_to_cpu(hdr->offset),
689 payload_length);
682} 690}
683 691
684/* 692/*
685 * Called from iscsit_handle_data_out() before DataOUT Payload is received 693 * Called from iscsit_handle_data_out() before DataOUT Payload is received
686 * and CRC computed. 694 * and CRC computed.
687 */ 695 */
688extern int iscsit_check_pre_dataout( 696int iscsit_check_pre_dataout(
689 struct iscsi_cmd *cmd, 697 struct iscsi_cmd *cmd,
690 unsigned char *buf) 698 unsigned char *buf)
691{ 699{
@@ -789,7 +797,7 @@ static void iscsit_handle_time2retain_timeout(unsigned long data)
789 target_put_session(sess->se_sess); 797 target_put_session(sess->se_sess);
790} 798}
791 799
792extern void iscsit_start_time2retain_handler(struct iscsi_session *sess) 800void iscsit_start_time2retain_handler(struct iscsi_session *sess)
793{ 801{
794 int tpg_active; 802 int tpg_active;
795 /* 803 /*
@@ -822,7 +830,7 @@ extern void iscsit_start_time2retain_handler(struct iscsi_session *sess)
822/* 830/*
823 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held 831 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held
824 */ 832 */
825extern int iscsit_stop_time2retain_timer(struct iscsi_session *sess) 833int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
826{ 834{
827 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); 835 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
828 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 836 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
@@ -926,7 +934,7 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
926 } 934 }
927} 935}
928 936
929extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) 937void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
930{ 938{
931 spin_lock_bh(&conn->state_lock); 939 spin_lock_bh(&conn->state_lock);
932 if (atomic_read(&conn->connection_exit)) { 940 if (atomic_read(&conn->connection_exit)) {
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 3df8a2cef86f..21f29d91a8cb 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -466,7 +466,7 @@ static int iscsit_handle_recovery_datain(
466int iscsit_handle_recovery_datain_or_r2t( 466int iscsit_handle_recovery_datain_or_r2t(
467 struct iscsi_conn *conn, 467 struct iscsi_conn *conn,
468 unsigned char *buf, 468 unsigned char *buf,
469 u32 init_task_tag, 469 itt_t init_task_tag,
470 u32 targ_xfer_tag, 470 u32 targ_xfer_tag,
471 u32 begrun, 471 u32 begrun,
472 u32 runlength) 472 u32 runlength)
@@ -498,7 +498,7 @@ int iscsit_handle_recovery_datain_or_r2t(
498/* #warning FIXME: Status SNACK needs to be dependent on OPCODE!!! */ 498/* #warning FIXME: Status SNACK needs to be dependent on OPCODE!!! */
499int iscsit_handle_status_snack( 499int iscsit_handle_status_snack(
500 struct iscsi_conn *conn, 500 struct iscsi_conn *conn,
501 u32 init_task_tag, 501 itt_t init_task_tag,
502 u32 targ_xfer_tag, 502 u32 targ_xfer_tag,
503 u32 begrun, 503 u32 begrun,
504 u32 runlength) 504 u32 runlength)
diff --git a/drivers/target/iscsi/iscsi_target_erl1.h b/drivers/target/iscsi/iscsi_target_erl1.h
index 85e67e29de6b..2a3ebf118a34 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.h
+++ b/drivers/target/iscsi/iscsi_target_erl1.h
@@ -7,8 +7,8 @@ extern int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
7extern int iscsit_create_recovery_datain_values_datasequenceinorder_no( 7extern int iscsit_create_recovery_datain_values_datasequenceinorder_no(
8 struct iscsi_cmd *, struct iscsi_datain_req *); 8 struct iscsi_cmd *, struct iscsi_datain_req *);
9extern int iscsit_handle_recovery_datain_or_r2t(struct iscsi_conn *, unsigned char *, 9extern int iscsit_handle_recovery_datain_or_r2t(struct iscsi_conn *, unsigned char *,
10 u32, u32, u32, u32); 10 itt_t, u32, u32, u32);
11extern int iscsit_handle_status_snack(struct iscsi_conn *, u32, u32, 11extern int iscsit_handle_status_snack(struct iscsi_conn *, itt_t, u32,
12 u32, u32); 12 u32, u32);
13extern int iscsit_handle_data_ack(struct iscsi_conn *, u32, u32, u32); 13extern int iscsit_handle_data_ack(struct iscsi_conn *, u32, u32, u32);
14extern int iscsit_dataout_datapduinorder_no_fbit(struct iscsi_cmd *, struct iscsi_pdu *); 14extern int iscsit_dataout_datapduinorder_no_fbit(struct iscsi_cmd *, struct iscsi_pdu *);
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 65aac14fd831..17d8c20094fd 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -36,7 +36,7 @@
36 */ 36 */
37void iscsit_create_conn_recovery_datain_values( 37void iscsit_create_conn_recovery_datain_values(
38 struct iscsi_cmd *cmd, 38 struct iscsi_cmd *cmd,
39 u32 exp_data_sn) 39 __be32 exp_data_sn)
40{ 40{
41 u32 data_sn = 0; 41 u32 data_sn = 0;
42 struct iscsi_conn *conn = cmd->conn; 42 struct iscsi_conn *conn = cmd->conn;
@@ -44,7 +44,7 @@ void iscsit_create_conn_recovery_datain_values(
44 cmd->next_burst_len = 0; 44 cmd->next_burst_len = 0;
45 cmd->read_data_done = 0; 45 cmd->read_data_done = 0;
46 46
47 while (exp_data_sn > data_sn) { 47 while (be32_to_cpu(exp_data_sn) > data_sn) {
48 if ((cmd->next_burst_len + 48 if ((cmd->next_burst_len +
49 conn->conn_ops->MaxRecvDataSegmentLength) < 49 conn->conn_ops->MaxRecvDataSegmentLength) <
50 conn->sess->sess_ops->MaxBurstLength) { 50 conn->sess->sess_ops->MaxBurstLength) {
@@ -193,15 +193,13 @@ int iscsit_remove_active_connection_recovery_entry(
193 return 0; 193 return 0;
194} 194}
195 195
196int iscsit_remove_inactive_connection_recovery_entry( 196static void iscsit_remove_inactive_connection_recovery_entry(
197 struct iscsi_conn_recovery *cr, 197 struct iscsi_conn_recovery *cr,
198 struct iscsi_session *sess) 198 struct iscsi_session *sess)
199{ 199{
200 spin_lock(&sess->cr_i_lock); 200 spin_lock(&sess->cr_i_lock);
201 list_del(&cr->cr_list); 201 list_del(&cr->cr_list);
202 spin_unlock(&sess->cr_i_lock); 202 spin_unlock(&sess->cr_i_lock);
203
204 return 0;
205} 203}
206 204
207/* 205/*
@@ -421,6 +419,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
421 cr->cid = conn->cid; 419 cr->cid = conn->cid;
422 cr->cmd_count = cmd_count; 420 cr->cmd_count = cmd_count;
423 cr->maxrecvdatasegmentlength = conn->conn_ops->MaxRecvDataSegmentLength; 421 cr->maxrecvdatasegmentlength = conn->conn_ops->MaxRecvDataSegmentLength;
422 cr->maxxmitdatasegmentlength = conn->conn_ops->MaxXmitDataSegmentLength;
424 cr->sess = conn->sess; 423 cr->sess = conn->sess;
425 424
426 iscsit_attach_inactive_connection_recovery_entry(conn->sess, cr); 425 iscsit_attach_inactive_connection_recovery_entry(conn->sess, cr);
diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h
index 22f8d24780a6..63f2501f3fe0 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.h
+++ b/drivers/target/iscsi/iscsi_target_erl2.h
@@ -1,7 +1,7 @@
1#ifndef ISCSI_TARGET_ERL2_H 1#ifndef ISCSI_TARGET_ERL2_H
2#define ISCSI_TARGET_ERL2_H 2#define ISCSI_TARGET_ERL2_H
3 3
4extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, u32); 4extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, __be32);
5extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *); 5extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
6extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry( 6extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
7 struct iscsi_session *, u16); 7 struct iscsi_session *, u16);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 6aba4395e8d8..f8dbec05d5e5 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -39,12 +39,9 @@
39#include "iscsi_target.h" 39#include "iscsi_target.h"
40#include "iscsi_target_parameters.h" 40#include "iscsi_target_parameters.h"
41 41
42extern struct idr sess_idr;
43extern struct mutex auth_id_lock;
44extern spinlock_t sess_idr_lock;
45
46static int iscsi_login_init_conn(struct iscsi_conn *conn) 42static int iscsi_login_init_conn(struct iscsi_conn *conn)
47{ 43{
44 init_waitqueue_head(&conn->queues_wq);
48 INIT_LIST_HEAD(&conn->conn_list); 45 INIT_LIST_HEAD(&conn->conn_list);
49 INIT_LIST_HEAD(&conn->conn_cmd_list); 46 INIT_LIST_HEAD(&conn->conn_cmd_list);
50 INIT_LIST_HEAD(&conn->immed_queue_list); 47 INIT_LIST_HEAD(&conn->immed_queue_list);
@@ -196,10 +193,10 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
196static void iscsi_login_set_conn_values( 193static void iscsi_login_set_conn_values(
197 struct iscsi_session *sess, 194 struct iscsi_session *sess,
198 struct iscsi_conn *conn, 195 struct iscsi_conn *conn,
199 u16 cid) 196 __be16 cid)
200{ 197{
201 conn->sess = sess; 198 conn->sess = sess;
202 conn->cid = cid; 199 conn->cid = be16_to_cpu(cid);
203 /* 200 /*
204 * Generate a random Status sequence number (statsn) for the new 201 * Generate a random Status sequence number (statsn) for the new
205 * iSCSI connection. 202 * iSCSI connection.
@@ -234,7 +231,7 @@ static int iscsi_login_zero_tsih_s1(
234 iscsi_login_set_conn_values(sess, conn, pdu->cid); 231 iscsi_login_set_conn_values(sess, conn, pdu->cid);
235 sess->init_task_tag = pdu->itt; 232 sess->init_task_tag = pdu->itt;
236 memcpy(&sess->isid, pdu->isid, 6); 233 memcpy(&sess->isid, pdu->isid, 6);
237 sess->exp_cmd_sn = pdu->cmdsn; 234 sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn);
238 INIT_LIST_HEAD(&sess->sess_conn_list); 235 INIT_LIST_HEAD(&sess->sess_conn_list);
239 INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list); 236 INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list);
240 INIT_LIST_HEAD(&sess->cr_active_list); 237 INIT_LIST_HEAD(&sess->cr_active_list);
@@ -275,7 +272,7 @@ static int iscsi_login_zero_tsih_s1(
275 * The FFP CmdSN window values will be allocated from the TPG's 272 * The FFP CmdSN window values will be allocated from the TPG's
276 * Initiator Node's ACL once the login has been successfully completed. 273 * Initiator Node's ACL once the login has been successfully completed.
277 */ 274 */
278 sess->max_cmd_sn = pdu->cmdsn; 275 sess->max_cmd_sn = be32_to_cpu(pdu->cmdsn);
279 276
280 sess->sess_ops = kzalloc(sizeof(struct iscsi_sess_ops), GFP_KERNEL); 277 sess->sess_ops = kzalloc(sizeof(struct iscsi_sess_ops), GFP_KERNEL);
281 if (!sess->sess_ops) { 278 if (!sess->sess_ops) {
@@ -453,7 +450,7 @@ static int iscsi_login_non_zero_tsih_s2(
453 (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) 450 (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
454 continue; 451 continue;
455 if (!memcmp(sess_p->isid, pdu->isid, 6) && 452 if (!memcmp(sess_p->isid, pdu->isid, 6) &&
456 (sess_p->tsih == pdu->tsih)) { 453 (sess_p->tsih == be16_to_cpu(pdu->tsih))) {
457 iscsit_inc_session_usage_count(sess_p); 454 iscsit_inc_session_usage_count(sess_p);
458 iscsit_stop_time2retain_timer(sess_p); 455 iscsit_stop_time2retain_timer(sess_p);
459 sess = sess_p; 456 sess = sess_p;
@@ -955,11 +952,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
955 } 952 }
956 953
957 pdu = (struct iscsi_login_req *) buffer; 954 pdu = (struct iscsi_login_req *) buffer;
958 pdu->cid = be16_to_cpu(pdu->cid); 955
959 pdu->tsih = be16_to_cpu(pdu->tsih);
960 pdu->itt = be32_to_cpu(pdu->itt);
961 pdu->cmdsn = be32_to_cpu(pdu->cmdsn);
962 pdu->exp_statsn = be32_to_cpu(pdu->exp_statsn);
963 /* 956 /*
964 * Used by iscsit_tx_login_rsp() for Login Resonses PDUs 957 * Used by iscsit_tx_login_rsp() for Login Resonses PDUs
965 * when Status-Class != 0. 958 * when Status-Class != 0.
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 2dba448cac19..e9053a04f24c 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -44,7 +44,7 @@ void convert_null_to_semi(char *buf, int len)
44 buf[i] = ';'; 44 buf[i] = ';';
45} 45}
46 46
47int strlen_semi(char *buf) 47static int strlen_semi(char *buf)
48{ 48{
49 int i = 0; 49 int i = 0;
50 50
@@ -339,14 +339,14 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
339 hton24(login_rsp->dlength, login->rsp_length); 339 hton24(login_rsp->dlength, login->rsp_length);
340 memcpy(login_rsp->isid, login->isid, 6); 340 memcpy(login_rsp->isid, login->isid, 6);
341 login_rsp->tsih = cpu_to_be16(login->tsih); 341 login_rsp->tsih = cpu_to_be16(login->tsih);
342 login_rsp->itt = cpu_to_be32(login->init_task_tag); 342 login_rsp->itt = login->init_task_tag;
343 login_rsp->statsn = cpu_to_be32(conn->stat_sn++); 343 login_rsp->statsn = cpu_to_be32(conn->stat_sn++);
344 login_rsp->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 344 login_rsp->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
345 login_rsp->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 345 login_rsp->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
346 346
347 pr_debug("Sending Login Response, Flags: 0x%02x, ITT: 0x%08x," 347 pr_debug("Sending Login Response, Flags: 0x%02x, ITT: 0x%08x,"
348 " ExpCmdSN; 0x%08x, MaxCmdSN: 0x%08x, StatSN: 0x%08x, Length:" 348 " ExpCmdSN; 0x%08x, MaxCmdSN: 0x%08x, StatSN: 0x%08x, Length:"
349 " %u\n", login_rsp->flags, ntohl(login_rsp->itt), 349 " %u\n", login_rsp->flags, (__force u32)login_rsp->itt,
350 ntohl(login_rsp->exp_cmdsn), ntohl(login_rsp->max_cmdsn), 350 ntohl(login_rsp->exp_cmdsn), ntohl(login_rsp->max_cmdsn),
351 ntohl(login_rsp->statsn), login->rsp_length); 351 ntohl(login_rsp->statsn), login->rsp_length);
352 352
@@ -360,12 +360,9 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
360 return -1; 360 return -1;
361 361
362 login->rsp_length = 0; 362 login->rsp_length = 0;
363 login_rsp->tsih = be16_to_cpu(login_rsp->tsih);
364 login_rsp->itt = be32_to_cpu(login_rsp->itt);
365 login_rsp->statsn = be32_to_cpu(login_rsp->statsn);
366 mutex_lock(&sess->cmdsn_mutex); 363 mutex_lock(&sess->cmdsn_mutex);
367 login_rsp->exp_cmdsn = be32_to_cpu(sess->exp_cmd_sn); 364 login_rsp->exp_cmdsn = cpu_to_be32(sess->exp_cmd_sn);
368 login_rsp->max_cmdsn = be32_to_cpu(sess->max_cmd_sn); 365 login_rsp->max_cmdsn = cpu_to_be32(sess->max_cmd_sn);
369 mutex_unlock(&sess->cmdsn_mutex); 366 mutex_unlock(&sess->cmdsn_mutex);
370 367
371 return 0; 368 return 0;
@@ -381,11 +378,6 @@ static int iscsi_target_do_rx_login_io(struct iscsi_conn *conn, struct iscsi_log
381 378
382 login_req = (struct iscsi_login_req *) login->req; 379 login_req = (struct iscsi_login_req *) login->req;
383 payload_length = ntoh24(login_req->dlength); 380 payload_length = ntoh24(login_req->dlength);
384 login_req->tsih = be16_to_cpu(login_req->tsih);
385 login_req->itt = be32_to_cpu(login_req->itt);
386 login_req->cid = be16_to_cpu(login_req->cid);
387 login_req->cmdsn = be32_to_cpu(login_req->cmdsn);
388 login_req->exp_statsn = be32_to_cpu(login_req->exp_statsn);
389 381
390 pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x," 382 pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
391 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n", 383 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
@@ -550,7 +542,7 @@ static int iscsi_target_handle_csg_zero(
550 SENDER_INITIATOR|SENDER_RECEIVER, 542 SENDER_INITIATOR|SENDER_RECEIVER,
551 login->req_buf, 543 login->req_buf,
552 payload_length, 544 payload_length,
553 conn->param_list); 545 conn);
554 if (ret < 0) 546 if (ret < 0)
555 return -1; 547 return -1;
556 548
@@ -627,7 +619,7 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
627 SENDER_INITIATOR|SENDER_RECEIVER, 619 SENDER_INITIATOR|SENDER_RECEIVER,
628 login->req_buf, 620 login->req_buf,
629 payload_length, 621 payload_length,
630 conn->param_list); 622 conn);
631 if (ret < 0) 623 if (ret < 0)
632 return -1; 624 return -1;
633 625
@@ -762,11 +754,11 @@ static int iscsi_target_locate_portal(
762 login->version_min = login_req->min_version; 754 login->version_min = login_req->min_version;
763 login->version_max = login_req->max_version; 755 login->version_max = login_req->max_version;
764 memcpy(login->isid, login_req->isid, 6); 756 memcpy(login->isid, login_req->isid, 6);
765 login->cmd_sn = login_req->cmdsn; 757 login->cmd_sn = be32_to_cpu(login_req->cmdsn);
766 login->init_task_tag = login_req->itt; 758 login->init_task_tag = login_req->itt;
767 login->initial_exp_statsn = login_req->exp_statsn; 759 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
768 login->cid = login_req->cid; 760 login->cid = be16_to_cpu(login_req->cid);
769 login->tsih = login_req->tsih; 761 login->tsih = be16_to_cpu(login_req->tsih);
770 762
771 if (iscsi_target_get_initial_payload(conn, login) < 0) 763 if (iscsi_target_get_initial_payload(conn, login) < 0)
772 return -1; 764 return -1;
@@ -1000,7 +992,6 @@ struct iscsi_login *iscsi_target_init_negotiation(
1000 * Locates Target Portal from NP -> Target IQN 992 * Locates Target Portal from NP -> Target IQN
1001 */ 993 */
1002 if (iscsi_target_locate_portal(np, conn, login) < 0) { 994 if (iscsi_target_locate_portal(np, conn, login) < 0) {
1003 pr_err("iSCSI Login negotiation failed.\n");
1004 goto out; 995 goto out;
1005 } 996 }
1006 997
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 240f7aa76ed1..90b740048f26 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -334,6 +334,13 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
334 if (!param) 334 if (!param)
335 goto out; 335 goto out;
336 336
337 param = iscsi_set_default_param(pl, MAXXMITDATASEGMENTLENGTH,
338 INITIAL_MAXXMITDATASEGMENTLENGTH,
339 PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
340 TYPERANGE_512_TO_16777215, USE_ALL);
341 if (!param)
342 goto out;
343
337 param = iscsi_set_default_param(pl, MAXRECVDATASEGMENTLENGTH, 344 param = iscsi_set_default_param(pl, MAXRECVDATASEGMENTLENGTH,
338 INITIAL_MAXRECVDATASEGMENTLENGTH, 345 INITIAL_MAXRECVDATASEGMENTLENGTH,
339 PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH, 346 PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
@@ -467,6 +474,8 @@ int iscsi_set_keys_to_negotiate(
467 SET_PSTATE_NEGOTIATE(param); 474 SET_PSTATE_NEGOTIATE(param);
468 } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) { 475 } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
469 SET_PSTATE_NEGOTIATE(param); 476 SET_PSTATE_NEGOTIATE(param);
477 } else if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) {
478 continue;
470 } else if (!strcmp(param->name, MAXBURSTLENGTH)) { 479 } else if (!strcmp(param->name, MAXBURSTLENGTH)) {
471 SET_PSTATE_NEGOTIATE(param); 480 SET_PSTATE_NEGOTIATE(param);
472 } else if (!strcmp(param->name, FIRSTBURSTLENGTH)) { 481 } else if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
@@ -1056,7 +1065,8 @@ out:
1056 return proposer_values; 1065 return proposer_values;
1057} 1066}
1058 1067
1059static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value) 1068static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value,
1069 struct iscsi_conn *conn)
1060{ 1070{
1061 u8 acceptor_boolean_value = 0, proposer_boolean_value = 0; 1071 u8 acceptor_boolean_value = 0, proposer_boolean_value = 0;
1062 char *negoitated_value = NULL; 1072 char *negoitated_value = NULL;
@@ -1131,8 +1141,35 @@ static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value)
1131 return -1; 1141 return -1;
1132 } 1142 }
1133 1143
1134 if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) 1144 if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
1135 SET_PSTATE_REPLY_OPTIONAL(param); 1145 struct iscsi_param *param_mxdsl;
1146 unsigned long long tmp;
1147 int rc;
1148
1149 rc = strict_strtoull(param->value, 0, &tmp);
1150 if (rc < 0)
1151 return -1;
1152
1153 conn->conn_ops->MaxRecvDataSegmentLength = tmp;
1154 pr_debug("Saving op->MaxRecvDataSegmentLength from"
1155 " original initiator received value: %u\n",
1156 conn->conn_ops->MaxRecvDataSegmentLength);
1157
1158 param_mxdsl = iscsi_find_param_from_key(
1159 MAXXMITDATASEGMENTLENGTH,
1160 conn->param_list);
1161 if (!param_mxdsl)
1162 return -1;
1163
1164 rc = iscsi_update_param_value(param,
1165 param_mxdsl->value);
1166 if (rc < 0)
1167 return -1;
1168
1169 pr_debug("Updated %s to target MXDSL value: %s\n",
1170 param->name, param->value);
1171 }
1172
1136 } else if (IS_TYPE_NUMBER_RANGE(param)) { 1173 } else if (IS_TYPE_NUMBER_RANGE(param)) {
1137 negoitated_value = iscsi_get_value_from_number_range( 1174 negoitated_value = iscsi_get_value_from_number_range(
1138 param, value); 1175 param, value);
@@ -1526,8 +1563,9 @@ int iscsi_decode_text_input(
1526 u8 sender, 1563 u8 sender,
1527 char *textbuf, 1564 char *textbuf,
1528 u32 length, 1565 u32 length,
1529 struct iscsi_param_list *param_list) 1566 struct iscsi_conn *conn)
1530{ 1567{
1568 struct iscsi_param_list *param_list = conn->param_list;
1531 char *tmpbuf, *start = NULL, *end = NULL; 1569 char *tmpbuf, *start = NULL, *end = NULL;
1532 1570
1533 tmpbuf = kzalloc(length + 1, GFP_KERNEL); 1571 tmpbuf = kzalloc(length + 1, GFP_KERNEL);
@@ -1585,7 +1623,7 @@ int iscsi_decode_text_input(
1585 } 1623 }
1586 SET_PSTATE_RESPONSE_GOT(param); 1624 SET_PSTATE_RESPONSE_GOT(param);
1587 } else { 1625 } else {
1588 if (iscsi_check_acceptor_state(param, value) < 0) { 1626 if (iscsi_check_acceptor_state(param, value, conn) < 0) {
1589 kfree(tmpbuf); 1627 kfree(tmpbuf);
1590 return -1; 1628 return -1;
1591 } 1629 }
@@ -1720,6 +1758,18 @@ void iscsi_set_connection_parameters(
1720 pr_debug("---------------------------------------------------" 1758 pr_debug("---------------------------------------------------"
1721 "---------------\n"); 1759 "---------------\n");
1722 list_for_each_entry(param, &param_list->param_list, p_list) { 1760 list_for_each_entry(param, &param_list->param_list, p_list) {
1761 /*
1762 * Special case to set MAXXMITDATASEGMENTLENGTH from the
1763 * target requested MaxRecvDataSegmentLength, even though
1764 * this key is not sent over the wire.
1765 */
1766 if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) {
1767 ops->MaxXmitDataSegmentLength =
1768 simple_strtoul(param->value, &tmpptr, 0);
1769 pr_debug("MaxXmitDataSegmentLength: %s\n",
1770 param->value);
1771 }
1772
1723 if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param)) 1773 if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param))
1724 continue; 1774 continue;
1725 if (!strcmp(param->name, AUTHMETHOD)) { 1775 if (!strcmp(param->name, AUTHMETHOD)) {
@@ -1734,10 +1784,13 @@ void iscsi_set_connection_parameters(
1734 pr_debug("DataDigest: %s\n", 1784 pr_debug("DataDigest: %s\n",
1735 param->value); 1785 param->value);
1736 } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) { 1786 } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
1737 ops->MaxRecvDataSegmentLength = 1787 /*
1738 simple_strtoul(param->value, &tmpptr, 0); 1788 * At this point iscsi_check_acceptor_state() will have
1739 pr_debug("MaxRecvDataSegmentLength: %s\n", 1789 * set ops->MaxRecvDataSegmentLength from the original
1740 param->value); 1790 * initiator provided value.
1791 */
1792 pr_debug("MaxRecvDataSegmentLength: %u\n",
1793 ops->MaxRecvDataSegmentLength);
1741 } else if (!strcmp(param->name, OFMARKER)) { 1794 } else if (!strcmp(param->name, OFMARKER)) {
1742 ops->OFMarker = !strcmp(param->value, YES); 1795 ops->OFMarker = !strcmp(param->value, YES);
1743 pr_debug("OFMarker: %s\n", 1796 pr_debug("OFMarker: %s\n",
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index 6a37fd6f1285..1e1b7504a76b 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -36,7 +36,7 @@ extern void iscsi_release_param_list(struct iscsi_param_list *);
36extern struct iscsi_param *iscsi_find_param_from_key(char *, struct iscsi_param_list *); 36extern struct iscsi_param *iscsi_find_param_from_key(char *, struct iscsi_param_list *);
37extern int iscsi_extract_key_value(char *, char **, char **); 37extern int iscsi_extract_key_value(char *, char **, char **);
38extern int iscsi_update_param_value(struct iscsi_param *, char *); 38extern int iscsi_update_param_value(struct iscsi_param *, char *);
39extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_param_list *); 39extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_conn *);
40extern int iscsi_encode_text_output(u8, u8, char *, u32 *, 40extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
41 struct iscsi_param_list *); 41 struct iscsi_param_list *);
42extern int iscsi_check_negotiated_keys(struct iscsi_param_list *); 42extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
@@ -70,6 +70,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
70#define INITIALR2T "InitialR2T" 70#define INITIALR2T "InitialR2T"
71#define IMMEDIATEDATA "ImmediateData" 71#define IMMEDIATEDATA "ImmediateData"
72#define MAXRECVDATASEGMENTLENGTH "MaxRecvDataSegmentLength" 72#define MAXRECVDATASEGMENTLENGTH "MaxRecvDataSegmentLength"
73#define MAXXMITDATASEGMENTLENGTH "MaxXmitDataSegmentLength"
73#define MAXBURSTLENGTH "MaxBurstLength" 74#define MAXBURSTLENGTH "MaxBurstLength"
74#define FIRSTBURSTLENGTH "FirstBurstLength" 75#define FIRSTBURSTLENGTH "FirstBurstLength"
75#define DEFAULTTIME2WAIT "DefaultTime2Wait" 76#define DEFAULTTIME2WAIT "DefaultTime2Wait"
@@ -113,6 +114,10 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
113#define INITIAL_INITIALR2T YES 114#define INITIAL_INITIALR2T YES
114#define INITIAL_IMMEDIATEDATA YES 115#define INITIAL_IMMEDIATEDATA YES
115#define INITIAL_MAXRECVDATASEGMENTLENGTH "8192" 116#define INITIAL_MAXRECVDATASEGMENTLENGTH "8192"
117/*
118 * Match outgoing MXDSL default to incoming Open-iSCSI default
119 */
120#define INITIAL_MAXXMITDATASEGMENTLENGTH "262144"
116#define INITIAL_MAXBURSTLENGTH "262144" 121#define INITIAL_MAXBURSTLENGTH "262144"
117#define INITIAL_FIRSTBURSTLENGTH "65536" 122#define INITIAL_FIRSTBURSTLENGTH "65536"
118#define INITIAL_DEFAULTTIME2WAIT "2" 123#define INITIAL_DEFAULTTIME2WAIT "2"
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
index 85a306e067ba..edb592a368ef 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -219,8 +219,14 @@ static void iscsit_determine_counts_for_list(
219 int check_immediate = 0; 219 int check_immediate = 0;
220 u32 burstlength = 0, offset = 0; 220 u32 burstlength = 0, offset = 0;
221 u32 unsolicited_data_length = 0; 221 u32 unsolicited_data_length = 0;
222 u32 mdsl;
222 struct iscsi_conn *conn = cmd->conn; 223 struct iscsi_conn *conn = cmd->conn;
223 224
225 if (cmd->se_cmd.data_direction == DMA_TO_DEVICE)
226 mdsl = cmd->conn->conn_ops->MaxXmitDataSegmentLength;
227 else
228 mdsl = cmd->conn->conn_ops->MaxRecvDataSegmentLength;
229
224 if ((bl->type == PDULIST_IMMEDIATE) || 230 if ((bl->type == PDULIST_IMMEDIATE) ||
225 (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED)) 231 (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
226 check_immediate = 1; 232 check_immediate = 1;
@@ -243,14 +249,13 @@ static void iscsit_determine_counts_for_list(
243 continue; 249 continue;
244 } 250 }
245 if (unsolicited_data_length > 0) { 251 if (unsolicited_data_length > 0) {
246 if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) 252 if ((offset + mdsl) >= cmd->se_cmd.data_length) {
247 >= cmd->se_cmd.data_length) {
248 unsolicited_data_length -= 253 unsolicited_data_length -=
249 (cmd->se_cmd.data_length - offset); 254 (cmd->se_cmd.data_length - offset);
250 offset += (cmd->se_cmd.data_length - offset); 255 offset += (cmd->se_cmd.data_length - offset);
251 continue; 256 continue;
252 } 257 }
253 if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) 258 if ((offset + mdsl)
254 >= conn->sess->sess_ops->FirstBurstLength) { 259 >= conn->sess->sess_ops->FirstBurstLength) {
255 unsolicited_data_length -= 260 unsolicited_data_length -=
256 (conn->sess->sess_ops->FirstBurstLength - 261 (conn->sess->sess_ops->FirstBurstLength -
@@ -262,17 +267,15 @@ static void iscsit_determine_counts_for_list(
262 continue; 267 continue;
263 } 268 }
264 269
265 offset += conn->conn_ops->MaxRecvDataSegmentLength; 270 offset += mdsl;
266 unsolicited_data_length -= 271 unsolicited_data_length -= mdsl;
267 conn->conn_ops->MaxRecvDataSegmentLength;
268 continue; 272 continue;
269 } 273 }
270 if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >= 274 if ((offset + mdsl) >= cmd->se_cmd.data_length) {
271 cmd->se_cmd.data_length) {
272 offset += (cmd->se_cmd.data_length - offset); 275 offset += (cmd->se_cmd.data_length - offset);
273 continue; 276 continue;
274 } 277 }
275 if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >= 278 if ((burstlength + mdsl) >=
276 conn->sess->sess_ops->MaxBurstLength) { 279 conn->sess->sess_ops->MaxBurstLength) {
277 offset += (conn->sess->sess_ops->MaxBurstLength - 280 offset += (conn->sess->sess_ops->MaxBurstLength -
278 burstlength); 281 burstlength);
@@ -281,8 +284,8 @@ static void iscsit_determine_counts_for_list(
281 continue; 284 continue;
282 } 285 }
283 286
284 burstlength += conn->conn_ops->MaxRecvDataSegmentLength; 287 burstlength += mdsl;
285 offset += conn->conn_ops->MaxRecvDataSegmentLength; 288 offset += mdsl;
286 } 289 }
287} 290}
288 291
@@ -296,12 +299,17 @@ static int iscsit_do_build_pdu_and_seq_lists(
296 struct iscsi_build_list *bl) 299 struct iscsi_build_list *bl)
297{ 300{
298 int check_immediate = 0, datapduinorder, datasequenceinorder; 301 int check_immediate = 0, datapduinorder, datasequenceinorder;
299 u32 burstlength = 0, offset = 0, i = 0; 302 u32 burstlength = 0, offset = 0, i = 0, mdsl;
300 u32 pdu_count = 0, seq_no = 0, unsolicited_data_length = 0; 303 u32 pdu_count = 0, seq_no = 0, unsolicited_data_length = 0;
301 struct iscsi_conn *conn = cmd->conn; 304 struct iscsi_conn *conn = cmd->conn;
302 struct iscsi_pdu *pdu = cmd->pdu_list; 305 struct iscsi_pdu *pdu = cmd->pdu_list;
303 struct iscsi_seq *seq = cmd->seq_list; 306 struct iscsi_seq *seq = cmd->seq_list;
304 307
308 if (cmd->se_cmd.data_direction == DMA_TO_DEVICE)
309 mdsl = cmd->conn->conn_ops->MaxXmitDataSegmentLength;
310 else
311 mdsl = cmd->conn->conn_ops->MaxRecvDataSegmentLength;
312
305 datapduinorder = conn->sess->sess_ops->DataPDUInOrder; 313 datapduinorder = conn->sess->sess_ops->DataPDUInOrder;
306 datasequenceinorder = conn->sess->sess_ops->DataSequenceInOrder; 314 datasequenceinorder = conn->sess->sess_ops->DataSequenceInOrder;
307 315
@@ -348,9 +356,7 @@ static int iscsit_do_build_pdu_and_seq_lists(
348 continue; 356 continue;
349 } 357 }
350 if (unsolicited_data_length > 0) { 358 if (unsolicited_data_length > 0) {
351 if ((offset + 359 if ((offset + mdsl) >= cmd->se_cmd.data_length) {
352 conn->conn_ops->MaxRecvDataSegmentLength) >=
353 cmd->se_cmd.data_length) {
354 if (!datapduinorder) { 360 if (!datapduinorder) {
355 pdu[i].type = PDUTYPE_UNSOLICITED; 361 pdu[i].type = PDUTYPE_UNSOLICITED;
356 pdu[i].length = 362 pdu[i].length =
@@ -367,8 +373,7 @@ static int iscsit_do_build_pdu_and_seq_lists(
367 offset += (cmd->se_cmd.data_length - offset); 373 offset += (cmd->se_cmd.data_length - offset);
368 continue; 374 continue;
369 } 375 }
370 if ((offset + 376 if ((offset + mdsl) >=
371 conn->conn_ops->MaxRecvDataSegmentLength) >=
372 conn->sess->sess_ops->FirstBurstLength) { 377 conn->sess->sess_ops->FirstBurstLength) {
373 if (!datapduinorder) { 378 if (!datapduinorder) {
374 pdu[i].type = PDUTYPE_UNSOLICITED; 379 pdu[i].type = PDUTYPE_UNSOLICITED;
@@ -396,17 +401,14 @@ static int iscsit_do_build_pdu_and_seq_lists(
396 401
397 if (!datapduinorder) { 402 if (!datapduinorder) {
398 pdu[i].type = PDUTYPE_UNSOLICITED; 403 pdu[i].type = PDUTYPE_UNSOLICITED;
399 pdu[i++].length = 404 pdu[i++].length = mdsl;
400 conn->conn_ops->MaxRecvDataSegmentLength;
401 } 405 }
402 burstlength += conn->conn_ops->MaxRecvDataSegmentLength; 406 burstlength += mdsl;
403 offset += conn->conn_ops->MaxRecvDataSegmentLength; 407 offset += mdsl;
404 unsolicited_data_length -= 408 unsolicited_data_length -= mdsl;
405 conn->conn_ops->MaxRecvDataSegmentLength;
406 continue; 409 continue;
407 } 410 }
408 if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >= 411 if ((offset + mdsl) >= cmd->se_cmd.data_length) {
409 cmd->se_cmd.data_length) {
410 if (!datapduinorder) { 412 if (!datapduinorder) {
411 pdu[i].type = PDUTYPE_NORMAL; 413 pdu[i].type = PDUTYPE_NORMAL;
412 pdu[i].length = (cmd->se_cmd.data_length - offset); 414 pdu[i].length = (cmd->se_cmd.data_length - offset);
@@ -420,7 +422,7 @@ static int iscsit_do_build_pdu_and_seq_lists(
420 offset += (cmd->se_cmd.data_length - offset); 422 offset += (cmd->se_cmd.data_length - offset);
421 continue; 423 continue;
422 } 424 }
423 if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >= 425 if ((burstlength + mdsl) >=
424 conn->sess->sess_ops->MaxBurstLength) { 426 conn->sess->sess_ops->MaxBurstLength) {
425 if (!datapduinorder) { 427 if (!datapduinorder) {
426 pdu[i].type = PDUTYPE_NORMAL; 428 pdu[i].type = PDUTYPE_NORMAL;
@@ -445,11 +447,10 @@ static int iscsit_do_build_pdu_and_seq_lists(
445 447
446 if (!datapduinorder) { 448 if (!datapduinorder) {
447 pdu[i].type = PDUTYPE_NORMAL; 449 pdu[i].type = PDUTYPE_NORMAL;
448 pdu[i++].length = 450 pdu[i++].length = mdsl;
449 conn->conn_ops->MaxRecvDataSegmentLength;
450 } 451 }
451 burstlength += conn->conn_ops->MaxRecvDataSegmentLength; 452 burstlength += mdsl;
452 offset += conn->conn_ops->MaxRecvDataSegmentLength; 453 offset += mdsl;
453 } 454 }
454 455
455 if (!datasequenceinorder) { 456 if (!datasequenceinorder) {
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index f62fe123d902..4a99820d063b 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -50,21 +50,20 @@ u8 iscsit_tmr_abort_task(
50 if (!ref_cmd) { 50 if (!ref_cmd) {
51 pr_err("Unable to locate RefTaskTag: 0x%08x on CID:" 51 pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
52 " %hu.\n", hdr->rtt, conn->cid); 52 " %hu.\n", hdr->rtt, conn->cid);
53 return ((hdr->refcmdsn >= conn->sess->exp_cmd_sn) && 53 return (be32_to_cpu(hdr->refcmdsn) >= conn->sess->exp_cmd_sn &&
54 (hdr->refcmdsn <= conn->sess->max_cmd_sn)) ? 54 be32_to_cpu(hdr->refcmdsn) <= conn->sess->max_cmd_sn) ?
55 ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK; 55 ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
56 } 56 }
57 if (ref_cmd->cmd_sn != hdr->refcmdsn) { 57 if (ref_cmd->cmd_sn != be32_to_cpu(hdr->refcmdsn)) {
58 pr_err("RefCmdSN 0x%08x does not equal" 58 pr_err("RefCmdSN 0x%08x does not equal"
59 " task's CmdSN 0x%08x. Rejecting ABORT_TASK.\n", 59 " task's CmdSN 0x%08x. Rejecting ABORT_TASK.\n",
60 hdr->refcmdsn, ref_cmd->cmd_sn); 60 hdr->refcmdsn, ref_cmd->cmd_sn);
61 return ISCSI_TMF_RSP_REJECTED; 61 return ISCSI_TMF_RSP_REJECTED;
62 } 62 }
63 63
64 se_tmr->ref_task_tag = hdr->rtt; 64 se_tmr->ref_task_tag = (__force u32)hdr->rtt;
65 tmr_req->ref_cmd = ref_cmd; 65 tmr_req->ref_cmd = ref_cmd;
66 tmr_req->ref_cmd_sn = hdr->refcmdsn; 66 tmr_req->exp_data_sn = be32_to_cpu(hdr->exp_datasn);
67 tmr_req->exp_data_sn = hdr->exp_datasn;
68 67
69 return ISCSI_TMF_RSP_COMPLETE; 68 return ISCSI_TMF_RSP_COMPLETE;
70} 69}
@@ -146,7 +145,7 @@ u8 iscsit_tmr_task_reassign(
146 } 145 }
147 /* 146 /*
148 * Temporary check to prevent connection recovery for 147 * Temporary check to prevent connection recovery for
149 * connections with a differing MaxRecvDataSegmentLength. 148 * connections with a differing Max*DataSegmentLength.
150 */ 149 */
151 if (cr->maxrecvdatasegmentlength != 150 if (cr->maxrecvdatasegmentlength !=
152 conn->conn_ops->MaxRecvDataSegmentLength) { 151 conn->conn_ops->MaxRecvDataSegmentLength) {
@@ -155,6 +154,13 @@ u8 iscsit_tmr_task_reassign(
155 " TMR TASK_REASSIGN.\n"); 154 " TMR TASK_REASSIGN.\n");
156 return ISCSI_TMF_RSP_REJECTED; 155 return ISCSI_TMF_RSP_REJECTED;
157 } 156 }
157 if (cr->maxxmitdatasegmentlength !=
158 conn->conn_ops->MaxXmitDataSegmentLength) {
159 pr_err("Unable to perform connection recovery for"
160 " differing MaxXmitDataSegmentLength, rejecting"
161 " TMR TASK_REASSIGN.\n");
162 return ISCSI_TMF_RSP_REJECTED;
163 }
158 164
159 ref_lun = scsilun_to_int(&hdr->lun); 165 ref_lun = scsilun_to_int(&hdr->lun);
160 if (ref_lun != ref_cmd->se_cmd.orig_fe_lun) { 166 if (ref_lun != ref_cmd->se_cmd.orig_fe_lun) {
@@ -164,10 +170,9 @@ u8 iscsit_tmr_task_reassign(
164 return ISCSI_TMF_RSP_REJECTED; 170 return ISCSI_TMF_RSP_REJECTED;
165 } 171 }
166 172
167 se_tmr->ref_task_tag = hdr->rtt; 173 se_tmr->ref_task_tag = (__force u32)hdr->rtt;
168 tmr_req->ref_cmd = ref_cmd; 174 tmr_req->ref_cmd = ref_cmd;
169 tmr_req->ref_cmd_sn = hdr->refcmdsn; 175 tmr_req->exp_data_sn = be32_to_cpu(hdr->exp_datasn);
170 tmr_req->exp_data_sn = hdr->exp_datasn;
171 tmr_req->conn_recovery = cr; 176 tmr_req->conn_recovery = cr;
172 tmr_req->task_reassign = 1; 177 tmr_req->task_reassign = 1;
173 /* 178 /*
@@ -455,7 +460,7 @@ static int iscsit_task_reassign_complete(
455 * Right now the only one that its really needed for is 460 * Right now the only one that its really needed for is
456 * connection recovery releated TASK_REASSIGN. 461 * connection recovery releated TASK_REASSIGN.
457 */ 462 */
458extern int iscsit_tmr_post_handler(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 463int iscsit_tmr_post_handler(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
459{ 464{
460 struct iscsi_tmr_req *tmr_req = cmd->tmr_req; 465 struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
461 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req; 466 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
@@ -470,7 +475,7 @@ extern int iscsit_tmr_post_handler(struct iscsi_cmd *cmd, struct iscsi_conn *con
470/* 475/*
471 * Nothing to do here, but leave it for good measure. :-) 476 * Nothing to do here, but leave it for good measure. :-)
472 */ 477 */
473int iscsit_task_reassign_prepare_read( 478static int iscsit_task_reassign_prepare_read(
474 struct iscsi_tmr_req *tmr_req, 479 struct iscsi_tmr_req *tmr_req,
475 struct iscsi_conn *conn) 480 struct iscsi_conn *conn)
476{ 481{
@@ -545,7 +550,7 @@ static void iscsit_task_reassign_prepare_unsolicited_dataout(
545 } 550 }
546} 551}
547 552
548int iscsit_task_reassign_prepare_write( 553static int iscsit_task_reassign_prepare_write(
549 struct iscsi_tmr_req *tmr_req, 554 struct iscsi_tmr_req *tmr_req,
550 struct iscsi_conn *conn) 555 struct iscsi_conn *conn)
551{ 556{
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index a38a3f8ab0d9..de9ea32b6104 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -677,6 +677,12 @@ int iscsit_ta_generate_node_acls(
677 pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n", 677 pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n",
678 tpg->tpgt, (a->generate_node_acls) ? "Enabled" : "Disabled"); 678 tpg->tpgt, (a->generate_node_acls) ? "Enabled" : "Disabled");
679 679
680 if (flag == 1 && a->cache_dynamic_acls == 0) {
681 pr_debug("Explicitly setting cache_dynamic_acls=1 when "
682 "generate_node_acls=1\n");
683 a->cache_dynamic_acls = 1;
684 }
685
680 return 0; 686 return 0;
681} 687}
682 688
@@ -716,6 +722,12 @@ int iscsit_ta_cache_dynamic_acls(
716 return -EINVAL; 722 return -EINVAL;
717 } 723 }
718 724
725 if (a->generate_node_acls == 1 && flag == 0) {
726 pr_debug("Skipping cache_dynamic_acls=0 when"
727 " generate_node_acls=1\n");
728 return 0;
729 }
730
719 a->cache_dynamic_acls = flag; 731 a->cache_dynamic_acls = flag;
720 pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group" 732 pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group"
721 " ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ? 733 " ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ?
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
index 977e1cf90e83..9d881a000e42 100644
--- a/drivers/target/iscsi/iscsi_target_tq.c
+++ b/drivers/target/iscsi/iscsi_target_tq.c
@@ -40,7 +40,7 @@ static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts)
40 spin_unlock(&active_ts_lock); 40 spin_unlock(&active_ts_lock);
41} 41}
42 42
43extern void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts) 43static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
44{ 44{
45 spin_lock(&inactive_ts_lock); 45 spin_lock(&inactive_ts_lock);
46 list_add_tail(&ts->ts_list, &inactive_ts_list); 46 list_add_tail(&ts->ts_list, &inactive_ts_list);
@@ -76,7 +76,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
76 return ts; 76 return ts;
77} 77}
78 78
79extern int iscsi_allocate_thread_sets(u32 thread_pair_count) 79int iscsi_allocate_thread_sets(u32 thread_pair_count)
80{ 80{
81 int allocated_thread_pair_count = 0, i, thread_id; 81 int allocated_thread_pair_count = 0, i, thread_id;
82 struct iscsi_thread_set *ts = NULL; 82 struct iscsi_thread_set *ts = NULL;
@@ -140,7 +140,7 @@ extern int iscsi_allocate_thread_sets(u32 thread_pair_count)
140 return allocated_thread_pair_count; 140 return allocated_thread_pair_count;
141} 141}
142 142
143extern void iscsi_deallocate_thread_sets(void) 143void iscsi_deallocate_thread_sets(void)
144{ 144{
145 u32 released_count = 0; 145 u32 released_count = 0;
146 struct iscsi_thread_set *ts = NULL; 146 struct iscsi_thread_set *ts = NULL;
diff --git a/drivers/target/iscsi/iscsi_target_tq.h b/drivers/target/iscsi/iscsi_target_tq.h
index 26e6a95ec203..547d11831282 100644
--- a/drivers/target/iscsi/iscsi_target_tq.h
+++ b/drivers/target/iscsi/iscsi_target_tq.h
@@ -5,7 +5,6 @@
5 * Defines for thread sets. 5 * Defines for thread sets.
6 */ 6 */
7extern int iscsi_thread_set_force_reinstatement(struct iscsi_conn *); 7extern int iscsi_thread_set_force_reinstatement(struct iscsi_conn *);
8extern void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *);
9extern int iscsi_allocate_thread_sets(u32); 8extern int iscsi_allocate_thread_sets(u32);
10extern void iscsi_deallocate_thread_sets(void); 9extern void iscsi_deallocate_thread_sets(void);
11extern void iscsi_activate_thread_set(struct iscsi_conn *, struct iscsi_thread_set *); 10extern void iscsi_activate_thread_set(struct iscsi_conn *, struct iscsi_thread_set *);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index b42cdeb153df..1a91195ab619 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -274,14 +274,14 @@ static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cm
274int iscsit_sequence_cmd( 274int iscsit_sequence_cmd(
275 struct iscsi_conn *conn, 275 struct iscsi_conn *conn,
276 struct iscsi_cmd *cmd, 276 struct iscsi_cmd *cmd,
277 u32 cmdsn) 277 __be32 cmdsn)
278{ 278{
279 int ret; 279 int ret;
280 int cmdsn_ret; 280 int cmdsn_ret;
281 281
282 mutex_lock(&conn->sess->cmdsn_mutex); 282 mutex_lock(&conn->sess->cmdsn_mutex);
283 283
284 cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, cmdsn); 284 cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, be32_to_cpu(cmdsn));
285 switch (cmdsn_ret) { 285 switch (cmdsn_ret) {
286 case CMDSN_NORMAL_OPERATION: 286 case CMDSN_NORMAL_OPERATION:
287 ret = iscsit_execute_cmd(cmd, 0); 287 ret = iscsit_execute_cmd(cmd, 0);
@@ -289,7 +289,7 @@ int iscsit_sequence_cmd(
289 iscsit_execute_ooo_cmdsns(conn->sess); 289 iscsit_execute_ooo_cmdsns(conn->sess);
290 break; 290 break;
291 case CMDSN_HIGHER_THAN_EXP: 291 case CMDSN_HIGHER_THAN_EXP:
292 ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, cmdsn); 292 ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn));
293 break; 293 break;
294 case CMDSN_LOWER_THAN_EXP: 294 case CMDSN_LOWER_THAN_EXP:
295 cmd->i_state = ISTATE_REMOVE; 295 cmd->i_state = ISTATE_REMOVE;
@@ -351,7 +351,7 @@ int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
351 351
352struct iscsi_cmd *iscsit_find_cmd_from_itt( 352struct iscsi_cmd *iscsit_find_cmd_from_itt(
353 struct iscsi_conn *conn, 353 struct iscsi_conn *conn,
354 u32 init_task_tag) 354 itt_t init_task_tag)
355{ 355{
356 struct iscsi_cmd *cmd; 356 struct iscsi_cmd *cmd;
357 357
@@ -371,7 +371,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt(
371 371
372struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump( 372struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
373 struct iscsi_conn *conn, 373 struct iscsi_conn *conn,
374 u32 init_task_tag, 374 itt_t init_task_tag,
375 u32 length) 375 u32 length)
376{ 376{
377 struct iscsi_cmd *cmd; 377 struct iscsi_cmd *cmd;
@@ -417,7 +417,7 @@ int iscsit_find_cmd_for_recovery(
417 struct iscsi_session *sess, 417 struct iscsi_session *sess,
418 struct iscsi_cmd **cmd_ptr, 418 struct iscsi_cmd **cmd_ptr,
419 struct iscsi_conn_recovery **cr_ptr, 419 struct iscsi_conn_recovery **cr_ptr,
420 u32 init_task_tag) 420 itt_t init_task_tag)
421{ 421{
422 struct iscsi_cmd *cmd = NULL; 422 struct iscsi_cmd *cmd = NULL;
423 struct iscsi_conn_recovery *cr; 423 struct iscsi_conn_recovery *cr;
@@ -488,7 +488,7 @@ void iscsit_add_cmd_to_immediate_queue(
488 atomic_set(&conn->check_immediate_queue, 1); 488 atomic_set(&conn->check_immediate_queue, 1);
489 spin_unlock_bh(&conn->immed_queue_lock); 489 spin_unlock_bh(&conn->immed_queue_lock);
490 490
491 wake_up_process(conn->thread_set->tx_thread); 491 wake_up(&conn->queues_wq);
492} 492}
493 493
494struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn) 494struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
@@ -562,7 +562,7 @@ void iscsit_add_cmd_to_response_queue(
562 atomic_inc(&cmd->response_queue_count); 562 atomic_inc(&cmd->response_queue_count);
563 spin_unlock_bh(&conn->response_queue_lock); 563 spin_unlock_bh(&conn->response_queue_lock);
564 564
565 wake_up_process(conn->thread_set->tx_thread); 565 wake_up(&conn->queues_wq);
566} 566}
567 567
568struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn) 568struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
@@ -616,6 +616,24 @@ static void iscsit_remove_cmd_from_response_queue(
616 } 616 }
617} 617}
618 618
619bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
620{
621 bool empty;
622
623 spin_lock_bh(&conn->immed_queue_lock);
624 empty = list_empty(&conn->immed_queue_list);
625 spin_unlock_bh(&conn->immed_queue_lock);
626
627 if (!empty)
628 return empty;
629
630 spin_lock_bh(&conn->response_queue_lock);
631 empty = list_empty(&conn->response_queue_list);
632 spin_unlock_bh(&conn->response_queue_lock);
633
634 return empty;
635}
636
619void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn) 637void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
620{ 638{
621 struct iscsi_queue_req *qr, *qr_tmp; 639 struct iscsi_queue_req *qr, *qr_tmp;
@@ -855,7 +873,7 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
855 cmd->iscsi_opcode = ISCSI_OP_NOOP_IN; 873 cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
856 state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE : 874 state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
857 ISTATE_SEND_NOPIN_NO_RESPONSE; 875 ISTATE_SEND_NOPIN_NO_RESPONSE;
858 cmd->init_task_tag = 0xFFFFFFFF; 876 cmd->init_task_tag = RESERVED_ITT;
859 spin_lock_bh(&conn->sess->ttt_lock); 877 spin_lock_bh(&conn->sess->ttt_lock);
860 cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ : 878 cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
861 0xFFFFFFFF; 879 0xFFFFFFFF;
@@ -1222,7 +1240,7 @@ int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_deta
1222 hdr->opcode = ISCSI_OP_LOGIN_RSP; 1240 hdr->opcode = ISCSI_OP_LOGIN_RSP;
1223 hdr->status_class = status_class; 1241 hdr->status_class = status_class;
1224 hdr->status_detail = status_detail; 1242 hdr->status_detail = status_detail;
1225 hdr->itt = cpu_to_be32(conn->login_itt); 1243 hdr->itt = conn->login_itt;
1226 1244
1227 iov.iov_base = &iscsi_hdr; 1245 iov.iov_base = &iscsi_hdr;
1228 iov.iov_len = ISCSI_HDR_LEN; 1246 iov.iov_len = ISCSI_HDR_LEN;
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index e1c729b8a1c5..894d0f837924 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -12,19 +12,20 @@ extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
12extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32); 12extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
13extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *); 13extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
14extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32); 14extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
15int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, u32 cmdsn); 15int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, __be32 cmdsn);
16extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *); 16extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
17extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, u32); 17extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
18extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *, 18extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
19 u32, u32); 19 itt_t, u32);
20extern struct iscsi_cmd *iscsit_find_cmd_from_ttt(struct iscsi_conn *, u32); 20extern struct iscsi_cmd *iscsit_find_cmd_from_ttt(struct iscsi_conn *, u32);
21extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd **, 21extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd **,
22 struct iscsi_conn_recovery **, u32); 22 struct iscsi_conn_recovery **, itt_t);
23extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8); 23extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
24extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *); 24extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
25extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8); 25extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
26extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *); 26extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
27extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *); 27extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
28extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
28extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *); 29extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
29extern void iscsit_release_cmd(struct iscsi_cmd *); 30extern void iscsit_release_cmd(struct iscsi_cmd *);
30extern void iscsit_free_cmd(struct iscsi_cmd *); 31extern void iscsit_free_cmd(struct iscsi_cmd *);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 5491c632a15e..2d444b1ccd33 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -166,7 +166,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
166 struct tcm_loop_tpg *tl_tpg; 166 struct tcm_loop_tpg *tl_tpg;
167 struct scatterlist *sgl_bidi = NULL; 167 struct scatterlist *sgl_bidi = NULL;
168 u32 sgl_bidi_count = 0; 168 u32 sgl_bidi_count = 0;
169 int ret; 169 int rc;
170 170
171 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); 171 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
172 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; 172 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
@@ -187,12 +187,6 @@ static void tcm_loop_submission_work(struct work_struct *work)
187 set_host_byte(sc, DID_ERROR); 187 set_host_byte(sc, DID_ERROR);
188 goto out_done; 188 goto out_done;
189 } 189 }
190
191 transport_init_se_cmd(se_cmd, tl_tpg->tl_se_tpg.se_tpg_tfo,
192 tl_nexus->se_sess,
193 scsi_bufflen(sc), sc->sc_data_direction,
194 tcm_loop_sam_attr(sc), &tl_cmd->tl_sense_buf[0]);
195
196 if (scsi_bidi_cmnd(sc)) { 190 if (scsi_bidi_cmnd(sc)) {
197 struct scsi_data_buffer *sdb = scsi_in(sc); 191 struct scsi_data_buffer *sdb = scsi_in(sc);
198 192
@@ -201,56 +195,16 @@ static void tcm_loop_submission_work(struct work_struct *work)
201 se_cmd->se_cmd_flags |= SCF_BIDI; 195 se_cmd->se_cmd_flags |= SCF_BIDI;
202 196
203 } 197 }
204 198 rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
205 if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) { 199 &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
206 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); 200 scsi_bufflen(sc), tcm_loop_sam_attr(sc),
201 sc->sc_data_direction, 0,
202 scsi_sglist(sc), scsi_sg_count(sc),
203 sgl_bidi, sgl_bidi_count);
204 if (rc < 0) {
207 set_host_byte(sc, DID_NO_CONNECT); 205 set_host_byte(sc, DID_NO_CONNECT);
208 goto out_done; 206 goto out_done;
209 } 207 }
210
211 /*
212 * Because some userspace code via scsi-generic do not memset their
213 * associated read buffers, go ahead and do that here for type
214 * non-data CDBs. Also note that this is currently guaranteed to be a
215 * single SGL for this case by target core in
216 * target_setup_cmd_from_cdb() -> transport_generic_cmd_sequencer().
217 */
218 if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
219 se_cmd->data_direction == DMA_FROM_DEVICE) {
220 struct scatterlist *sg = scsi_sglist(sc);
221 unsigned char *buf = kmap(sg_page(sg)) + sg->offset;
222
223 if (buf != NULL) {
224 memset(buf, 0, sg->length);
225 kunmap(sg_page(sg));
226 }
227 }
228
229 ret = target_setup_cmd_from_cdb(se_cmd, sc->cmnd);
230 if (ret == -ENOMEM) {
231 transport_send_check_condition_and_sense(se_cmd,
232 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
233 transport_generic_free_cmd(se_cmd, 0);
234 return;
235 } else if (ret < 0) {
236 if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
237 tcm_loop_queue_status(se_cmd);
238 else
239 transport_send_check_condition_and_sense(se_cmd,
240 se_cmd->scsi_sense_reason, 0);
241 transport_generic_free_cmd(se_cmd, 0);
242 return;
243 }
244
245 ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
246 scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
247 if (ret) {
248 transport_send_check_condition_and_sense(se_cmd,
249 se_cmd->scsi_sense_reason, 0);
250 transport_generic_free_cmd(se_cmd, 0);
251 return;
252 }
253 transport_handle_cdb_direct(se_cmd);
254 return; 208 return;
255 209
256out_done: 210out_done:
@@ -846,16 +800,6 @@ static int tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
846 return 0; 800 return 0;
847} 801}
848 802
849static u16 tcm_loop_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
850{
851 return 0;
852}
853
854static u16 tcm_loop_get_fabric_sense_len(void)
855{
856 return 0;
857}
858
859static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba) 803static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
860{ 804{
861 switch (tl_hba->tl_proto_id) { 805 switch (tl_hba->tl_proto_id) {
@@ -1373,8 +1317,6 @@ static int tcm_loop_register_configfs(void)
1373 fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in; 1317 fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
1374 fabric->tf_ops.queue_status = &tcm_loop_queue_status; 1318 fabric->tf_ops.queue_status = &tcm_loop_queue_status;
1375 fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp; 1319 fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
1376 fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len;
1377 fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len;
1378 1320
1379 /* 1321 /*
1380 * Setup function pointers for generic logic in target_core_fabric_configfs.c 1322 * Setup function pointers for generic logic in target_core_fabric_configfs.c
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 39ddba584b30..0d6d7c1f025e 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -660,8 +660,7 @@ static void session_reconnect_expired(struct sbp_session *sess)
660 spin_lock_bh(&sess->lock); 660 spin_lock_bh(&sess->lock);
661 list_for_each_entry_safe(login, temp, &sess->login_list, link) { 661 list_for_each_entry_safe(login, temp, &sess->login_list, link) {
662 login->sess = NULL; 662 login->sess = NULL;
663 list_del(&login->link); 663 list_move_tail(&login->link, &login_list);
664 list_add_tail(&login->link, &login_list);
665 } 664 }
666 spin_unlock_bh(&sess->lock); 665 spin_unlock_bh(&sess->lock);
667 666
@@ -1847,16 +1846,6 @@ static int sbp_queue_tm_rsp(struct se_cmd *se_cmd)
1847 return 0; 1846 return 0;
1848} 1847}
1849 1848
1850static u16 sbp_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
1851{
1852 return 0;
1853}
1854
1855static u16 sbp_get_fabric_sense_len(void)
1856{
1857 return 0;
1858}
1859
1860static int sbp_check_stop_free(struct se_cmd *se_cmd) 1849static int sbp_check_stop_free(struct se_cmd *se_cmd)
1861{ 1850{
1862 struct sbp_target_request *req = container_of(se_cmd, 1851 struct sbp_target_request *req = container_of(se_cmd,
@@ -2068,7 +2057,7 @@ static int sbp_update_unit_directory(struct sbp_tport *tport)
2068 return ret; 2057 return ret;
2069} 2058}
2070 2059
2071static ssize_t sbp_parse_wwn(const char *name, u64 *wwn, int strict) 2060static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
2072{ 2061{
2073 const char *cp; 2062 const char *cp;
2074 char c, nibble; 2063 char c, nibble;
@@ -2088,7 +2077,7 @@ static ssize_t sbp_parse_wwn(const char *name, u64 *wwn, int strict)
2088 err = 3; 2077 err = 3;
2089 if (isdigit(c)) 2078 if (isdigit(c))
2090 nibble = c - '0'; 2079 nibble = c - '0';
2091 else if (isxdigit(c) && (islower(c) || !strict)) 2080 else if (isxdigit(c))
2092 nibble = tolower(c) - 'a' + 10; 2081 nibble = tolower(c) - 'a' + 10;
2093 else 2082 else
2094 goto fail; 2083 goto fail;
@@ -2117,7 +2106,7 @@ static struct se_node_acl *sbp_make_nodeacl(
2117 u64 guid = 0; 2106 u64 guid = 0;
2118 u32 nexus_depth = 1; 2107 u32 nexus_depth = 1;
2119 2108
2120 if (sbp_parse_wwn(name, &guid, 1) < 0) 2109 if (sbp_parse_wwn(name, &guid) < 0)
2121 return ERR_PTR(-EINVAL); 2110 return ERR_PTR(-EINVAL);
2122 2111
2123 se_nacl_new = sbp_alloc_fabric_acl(se_tpg); 2112 se_nacl_new = sbp_alloc_fabric_acl(se_tpg);
@@ -2253,7 +2242,7 @@ static struct se_wwn *sbp_make_tport(
2253 struct sbp_tport *tport; 2242 struct sbp_tport *tport;
2254 u64 guid = 0; 2243 u64 guid = 0;
2255 2244
2256 if (sbp_parse_wwn(name, &guid, 1) < 0) 2245 if (sbp_parse_wwn(name, &guid) < 0)
2257 return ERR_PTR(-EINVAL); 2246 return ERR_PTR(-EINVAL);
2258 2247
2259 tport = kzalloc(sizeof(*tport), GFP_KERNEL); 2248 tport = kzalloc(sizeof(*tport), GFP_KERNEL);
@@ -2534,8 +2523,6 @@ static struct target_core_fabric_ops sbp_ops = {
2534 .queue_data_in = sbp_queue_data_in, 2523 .queue_data_in = sbp_queue_data_in,
2535 .queue_status = sbp_queue_status, 2524 .queue_status = sbp_queue_status,
2536 .queue_tm_rsp = sbp_queue_tm_rsp, 2525 .queue_tm_rsp = sbp_queue_tm_rsp,
2537 .get_fabric_sense_len = sbp_get_fabric_sense_len,
2538 .set_fabric_sense_len = sbp_set_fabric_sense_len,
2539 .check_stop_free = sbp_check_stop_free, 2526 .check_stop_free = sbp_check_stop_free,
2540 2527
2541 .fabric_make_wwn = sbp_make_tport, 2528 .fabric_make_wwn = sbp_make_tport,
@@ -2556,9 +2543,9 @@ static int sbp_register_configfs(void)
2556 int ret; 2543 int ret;
2557 2544
2558 fabric = target_fabric_configfs_init(THIS_MODULE, "sbp"); 2545 fabric = target_fabric_configfs_init(THIS_MODULE, "sbp");
2559 if (!fabric) { 2546 if (IS_ERR(fabric)) {
2560 pr_err("target_fabric_configfs_init() failed\n"); 2547 pr_err("target_fabric_configfs_init() failed\n");
2561 return -ENOMEM; 2548 return PTR_ERR(fabric);
2562 } 2549 }
2563 2550
2564 fabric->tf_ops = sbp_ops; 2551 fabric->tf_ops = sbp_ops;
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 41641ba54828..9a5f9a7aecd2 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -344,7 +344,7 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
344 */ 344 */
345 rtpi = get_unaligned_be16(ptr + 2); 345 rtpi = get_unaligned_be16(ptr + 2);
346 /* 346 /*
347 * Locate the matching relative target port identifer 347 * Locate the matching relative target port identifier
348 * for the struct se_device storage object. 348 * for the struct se_device storage object.
349 */ 349 */
350 spin_lock(&dev->se_port_lock); 350 spin_lock(&dev->se_port_lock);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 801efa892046..c123327499a3 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -457,14 +457,6 @@ static int target_fabric_tf_ops_check(
457 pr_err("Missing tfo->queue_tm_rsp()\n"); 457 pr_err("Missing tfo->queue_tm_rsp()\n");
458 return -EINVAL; 458 return -EINVAL;
459 } 459 }
460 if (!tfo->set_fabric_sense_len) {
461 pr_err("Missing tfo->set_fabric_sense_len()\n");
462 return -EINVAL;
463 }
464 if (!tfo->get_fabric_sense_len) {
465 pr_err("Missing tfo->get_fabric_sense_len()\n");
466 return -EINVAL;
467 }
468 /* 460 /*
469 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() 461 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
470 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in 462 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
@@ -1208,7 +1200,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
1208 " Target Node Endpoint: %s\n", tfo->get_fabric_name(), 1200 " Target Node Endpoint: %s\n", tfo->get_fabric_name(),
1209 tfo->tpg_get_wwn(se_tpg)); 1201 tfo->tpg_get_wwn(se_tpg));
1210 len += sprintf(page+len, "SPC-3 Reservation: Relative Port" 1202 len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
1211 " Identifer Tag: %hu %s Portal Group Tag: %hu" 1203 " Identifier Tag: %hu %s Portal Group Tag: %hu"
1212 " %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi, 1204 " %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi,
1213 tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg), 1205 tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
1214 tfo->get_fabric_name(), lun->unpacked_lun); 1206 tfo->get_fabric_name(), lun->unpacked_lun);
@@ -3132,6 +3124,7 @@ static int __init target_core_init_configfs(void)
3132 GFP_KERNEL); 3124 GFP_KERNEL);
3133 if (!target_cg->default_groups) { 3125 if (!target_cg->default_groups) {
3134 pr_err("Unable to allocate target_cg->default_groups\n"); 3126 pr_err("Unable to allocate target_cg->default_groups\n");
3127 ret = -ENOMEM;
3135 goto out_global; 3128 goto out_global;
3136 } 3129 }
3137 3130
@@ -3147,6 +3140,7 @@ static int __init target_core_init_configfs(void)
3147 GFP_KERNEL); 3140 GFP_KERNEL);
3148 if (!hba_cg->default_groups) { 3141 if (!hba_cg->default_groups) {
3149 pr_err("Unable to allocate hba_cg->default_groups\n"); 3142 pr_err("Unable to allocate hba_cg->default_groups\n");
3143 ret = -ENOMEM;
3150 goto out_global; 3144 goto out_global;
3151 } 3145 }
3152 config_group_init_type_name(&alua_group, 3146 config_group_init_type_name(&alua_group,
@@ -3162,6 +3156,7 @@ static int __init target_core_init_configfs(void)
3162 GFP_KERNEL); 3156 GFP_KERNEL);
3163 if (!alua_cg->default_groups) { 3157 if (!alua_cg->default_groups) {
3164 pr_err("Unable to allocate alua_cg->default_groups\n"); 3158 pr_err("Unable to allocate alua_cg->default_groups\n");
3159 ret = -ENOMEM;
3165 goto out_global; 3160 goto out_global;
3166 } 3161 }
3167 3162
@@ -3173,14 +3168,17 @@ static int __init target_core_init_configfs(void)
3173 * Add core/alua/lu_gps/default_lu_gp 3168 * Add core/alua/lu_gps/default_lu_gp
3174 */ 3169 */
3175 lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1); 3170 lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
3176 if (IS_ERR(lu_gp)) 3171 if (IS_ERR(lu_gp)) {
3172 ret = -ENOMEM;
3177 goto out_global; 3173 goto out_global;
3174 }
3178 3175
3179 lu_gp_cg = &alua_lu_gps_group; 3176 lu_gp_cg = &alua_lu_gps_group;
3180 lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 3177 lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
3181 GFP_KERNEL); 3178 GFP_KERNEL);
3182 if (!lu_gp_cg->default_groups) { 3179 if (!lu_gp_cg->default_groups) {
3183 pr_err("Unable to allocate lu_gp_cg->default_groups\n"); 3180 pr_err("Unable to allocate lu_gp_cg->default_groups\n");
3181 ret = -ENOMEM;
3184 goto out_global; 3182 goto out_global;
3185 } 3183 }
3186 3184
@@ -3208,7 +3206,8 @@ static int __init target_core_init_configfs(void)
3208 if (ret < 0) 3206 if (ret < 0)
3209 goto out; 3207 goto out;
3210 3208
3211 if (core_dev_setup_virtual_lun0() < 0) 3209 ret = core_dev_setup_virtual_lun0();
3210 if (ret < 0)
3212 goto out; 3211 goto out;
3213 3212
3214 return 0; 3213 return 0;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 9fc9a6006ca0..9abef9f8eb76 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -531,7 +531,7 @@ static struct se_port *core_alloc_port(struct se_device *dev)
531 } 531 }
532again: 532again:
533 /* 533 /*
534 * Allocate the next RELATIVE TARGET PORT IDENTIFER for this struct se_device 534 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
535 * Here is the table from spc4r17 section 7.7.3.8. 535 * Here is the table from spc4r17 section 7.7.3.8.
536 * 536 *
537 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field 537 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
@@ -548,7 +548,7 @@ again:
548 548
549 list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) { 549 list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
550 /* 550 /*
551 * Make sure RELATIVE TARGET PORT IDENTIFER is unique 551 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
552 * for 16-bit wrap.. 552 * for 16-bit wrap..
553 */ 553 */
554 if (port->sep_rtpi == port_tmp->sep_rtpi) 554 if (port->sep_rtpi == port_tmp->sep_rtpi)
@@ -595,7 +595,7 @@ static void core_export_port(
595 } 595 }
596 596
597 dev->dev_port_count++; 597 dev->dev_port_count++;
598 port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFER */ 598 port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
599} 599}
600 600
601/* 601/*
@@ -850,20 +850,20 @@ int se_dev_check_shutdown(struct se_device *dev)
850 850
851static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) 851static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
852{ 852{
853 u32 tmp, aligned_max_sectors; 853 u32 aligned_max_sectors;
854 u32 alignment;
854 /* 855 /*
855 * Limit max_sectors to a PAGE_SIZE aligned value for modern 856 * Limit max_sectors to a PAGE_SIZE aligned value for modern
856 * transport_allocate_data_tasks() operation. 857 * transport_allocate_data_tasks() operation.
857 */ 858 */
858 tmp = rounddown((max_sectors * block_size), PAGE_SIZE); 859 alignment = max(1ul, PAGE_SIZE / block_size);
859 aligned_max_sectors = (tmp / block_size); 860 aligned_max_sectors = rounddown(max_sectors, alignment);
860 if (max_sectors != aligned_max_sectors) { 861
861 printk(KERN_INFO "Rounding down aligned max_sectors from %u" 862 if (max_sectors != aligned_max_sectors)
862 " to %u\n", max_sectors, aligned_max_sectors); 863 pr_info("Rounding down aligned max_sectors from %u to %u\n",
863 return aligned_max_sectors; 864 max_sectors, aligned_max_sectors);
864 }
865 865
866 return max_sectors; 866 return aligned_max_sectors;
867} 867}
868 868
869void se_dev_set_default_attribs( 869void se_dev_set_default_attribs(
@@ -988,8 +988,9 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
988 return -EINVAL; 988 return -EINVAL;
989 } 989 }
990 990
991 if (flag && dev->transport->fua_write_emulated == 0) { 991 if (flag &&
992 pr_err("fua_write_emulated not supported\n"); 992 dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
993 pr_err("emulate_fua_write not supported for pSCSI\n");
993 return -EINVAL; 994 return -EINVAL;
994 } 995 }
995 dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag; 996 dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
@@ -1019,8 +1020,9 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
1019 pr_err("Illegal value %d\n", flag); 1020 pr_err("Illegal value %d\n", flag);
1020 return -EINVAL; 1021 return -EINVAL;
1021 } 1022 }
1022 if (flag && dev->transport->write_cache_emulated == 0) { 1023 if (flag &&
1023 pr_err("write_cache_emulated not supported\n"); 1024 dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1025 pr_err("emulate_write_cache not supported for pSCSI\n");
1024 return -EINVAL; 1026 return -EINVAL;
1025 } 1027 }
1026 dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag; 1028 dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index ea479e54f5fd..bca737bb813d 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -22,7 +22,6 @@
22 22
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/moduleparam.h> 24#include <linux/moduleparam.h>
25#include <generated/utsrelease.h>
26#include <linux/utsname.h> 25#include <linux/utsname.h>
27#include <linux/init.h> 26#include <linux/init.h>
28#include <linux/fs.h> 27#include <linux/fs.h>
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 283a36e464e6..e460d6233a0a 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -338,7 +338,7 @@ u32 iscsi_get_pr_transport_id_len(
338 * 00b: iSCSI Initiator device TransportID format 338 * 00b: iSCSI Initiator device TransportID format
339 */ 339 */
340 if (pr_reg->isid_present_at_reg) { 340 if (pr_reg->isid_present_at_reg) {
341 len += 5; /* For ",i,0x" ASCII seperator */ 341 len += 5; /* For ",i,0x" ASCII separator */
342 len += 7; /* For iSCSI Initiator Session ID + Null terminator */ 342 len += 7; /* For iSCSI Initiator Session ID + Null terminator */
343 *format_code = 1; 343 *format_code = 1;
344 } else 344 } else
@@ -415,20 +415,20 @@ char *iscsi_parse_pr_out_transport_id(
415 *out_tid_len = (add_len + 4); 415 *out_tid_len = (add_len + 4);
416 } 416 }
417 /* 417 /*
418 * Check for ',i,0x' seperator between iSCSI Name and iSCSI Initiator 418 * Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator
419 * Session ID as defined in Table 390 - iSCSI initiator port TransportID 419 * Session ID as defined in Table 390 - iSCSI initiator port TransportID
420 * format. 420 * format.
421 */ 421 */
422 if (format_code == 0x40) { 422 if (format_code == 0x40) {
423 p = strstr(&buf[4], ",i,0x"); 423 p = strstr(&buf[4], ",i,0x");
424 if (!p) { 424 if (!p) {
425 pr_err("Unable to locate \",i,0x\" seperator" 425 pr_err("Unable to locate \",i,0x\" separator"
426 " for Initiator port identifier: %s\n", 426 " for Initiator port identifier: %s\n",
427 &buf[4]); 427 &buf[4]);
428 return NULL; 428 return NULL;
429 } 429 }
430 *p = '\0'; /* Terminate iSCSI Name */ 430 *p = '\0'; /* Terminate iSCSI Name */
431 p += 5; /* Skip over ",i,0x" seperator */ 431 p += 5; /* Skip over ",i,0x" separator */
432 432
433 *port_nexus_ptr = p; 433 *port_nexus_ptr = p;
434 /* 434 /*
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index cbb5aaf3e567..0360383dfb94 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -125,6 +125,19 @@ static struct se_device *fd_create_virtdevice(
125 * of pure timestamp updates. 125 * of pure timestamp updates.
126 */ 126 */
127 flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC; 127 flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
128 /*
129 * Optionally allow fd_buffered_io=1 to be enabled for people
130 * who want use the fs buffer cache as an WriteCache mechanism.
131 *
132 * This means that in event of a hard failure, there is a risk
133 * of silent data-loss if the SCSI client has *not* performed a
134 * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
135 * to write-out the entire device cache.
136 */
137 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
138 pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
139 flags &= ~O_DSYNC;
140 }
128 141
129 file = filp_open(fd_dev->fd_dev_name, flags, 0600); 142 file = filp_open(fd_dev->fd_dev_name, flags, 0600);
130 if (IS_ERR(file)) { 143 if (IS_ERR(file)) {
@@ -188,6 +201,12 @@ static struct se_device *fd_create_virtdevice(
188 if (!dev) 201 if (!dev)
189 goto fail; 202 goto fail;
190 203
204 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
205 pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
206 " with FDBD_HAS_BUFFERED_IO_WCE\n");
207 dev->se_sub_dev->se_dev_attrib.emulate_write_cache = 1;
208 }
209
191 fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++; 210 fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
192 fd_dev->fd_queue_depth = dev->queue_depth; 211 fd_dev->fd_queue_depth = dev->queue_depth;
193 212
@@ -407,6 +426,7 @@ enum {
407static match_table_t tokens = { 426static match_table_t tokens = {
408 {Opt_fd_dev_name, "fd_dev_name=%s"}, 427 {Opt_fd_dev_name, "fd_dev_name=%s"},
409 {Opt_fd_dev_size, "fd_dev_size=%s"}, 428 {Opt_fd_dev_size, "fd_dev_size=%s"},
429 {Opt_fd_buffered_io, "fd_buffered_io=%d"},
410 {Opt_err, NULL} 430 {Opt_err, NULL}
411}; 431};
412 432
@@ -418,7 +438,7 @@ static ssize_t fd_set_configfs_dev_params(
418 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; 438 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
419 char *orig, *ptr, *arg_p, *opts; 439 char *orig, *ptr, *arg_p, *opts;
420 substring_t args[MAX_OPT_ARGS]; 440 substring_t args[MAX_OPT_ARGS];
421 int ret = 0, token; 441 int ret = 0, arg, token;
422 442
423 opts = kstrdup(page, GFP_KERNEL); 443 opts = kstrdup(page, GFP_KERNEL);
424 if (!opts) 444 if (!opts)
@@ -459,6 +479,19 @@ static ssize_t fd_set_configfs_dev_params(
459 " bytes\n", fd_dev->fd_dev_size); 479 " bytes\n", fd_dev->fd_dev_size);
460 fd_dev->fbd_flags |= FBDF_HAS_SIZE; 480 fd_dev->fbd_flags |= FBDF_HAS_SIZE;
461 break; 481 break;
482 case Opt_fd_buffered_io:
483 match_int(args, &arg);
484 if (arg != 1) {
485 pr_err("bogus fd_buffered_io=%d value\n", arg);
486 ret = -EINVAL;
487 goto out;
488 }
489
490 pr_debug("FILEIO: Using buffered I/O"
491 " operations for struct fd_dev\n");
492
493 fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
494 break;
462 default: 495 default:
463 break; 496 break;
464 } 497 }
@@ -490,8 +523,10 @@ static ssize_t fd_show_configfs_dev_params(
490 ssize_t bl = 0; 523 ssize_t bl = 0;
491 524
492 bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); 525 bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
493 bl += sprintf(b + bl, " File: %s Size: %llu Mode: O_DSYNC\n", 526 bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
494 fd_dev->fd_dev_name, fd_dev->fd_dev_size); 527 fd_dev->fd_dev_name, fd_dev->fd_dev_size,
528 (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
529 "Buffered-WCE" : "O_DSYNC");
495 return bl; 530 return bl;
496} 531}
497 532
@@ -546,8 +581,6 @@ static struct se_subsystem_api fileio_template = {
546 .name = "fileio", 581 .name = "fileio",
547 .owner = THIS_MODULE, 582 .owner = THIS_MODULE,
548 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, 583 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
549 .write_cache_emulated = 1,
550 .fua_write_emulated = 1,
551 .attach_hba = fd_attach_hba, 584 .attach_hba = fd_attach_hba,
552 .detach_hba = fd_detach_hba, 585 .detach_hba = fd_detach_hba,
553 .allocate_virtdevice = fd_allocate_virtdevice, 586 .allocate_virtdevice = fd_allocate_virtdevice,
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 70ce7fd7111d..876ae53ef5b8 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -14,6 +14,7 @@
14 14
15#define FBDF_HAS_PATH 0x01 15#define FBDF_HAS_PATH 0x01
16#define FBDF_HAS_SIZE 0x02 16#define FBDF_HAS_SIZE 0x02
17#define FDBD_HAS_BUFFERED_IO_WCE 0x04
17 18
18struct fd_dev { 19struct fd_dev {
19 u32 fbd_flags; 20 u32 fbd_flags;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 9ba495477fd2..57d7674c5013 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -454,14 +454,11 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
454 ret = -EEXIST; 454 ret = -EEXIST;
455 goto out; 455 goto out;
456 } 456 }
457 arg_p = match_strdup(&args[0]); 457 if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
458 if (!arg_p) { 458 SE_UDEV_PATH_LEN) == 0) {
459 ret = -ENOMEM; 459 ret = -EINVAL;
460 break; 460 break;
461 } 461 }
462 snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
463 "%s", arg_p);
464 kfree(arg_p);
465 pr_debug("IBLOCK: Referencing UDEV path: %s\n", 462 pr_debug("IBLOCK: Referencing UDEV path: %s\n",
466 ib_dev->ibd_udev_path); 463 ib_dev->ibd_udev_path);
467 ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; 464 ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
@@ -556,14 +553,6 @@ static void iblock_complete_cmd(struct se_cmd *cmd)
556 kfree(ibr); 553 kfree(ibr);
557} 554}
558 555
559static void iblock_bio_destructor(struct bio *bio)
560{
561 struct se_cmd *cmd = bio->bi_private;
562 struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
563
564 bio_free(bio, ib_dev->ibd_bio_set);
565}
566
567static struct bio * 556static struct bio *
568iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num) 557iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
569{ 558{
@@ -585,7 +574,6 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
585 574
586 bio->bi_bdev = ib_dev->ibd_bd; 575 bio->bi_bdev = ib_dev->ibd_bd;
587 bio->bi_private = cmd; 576 bio->bi_private = cmd;
588 bio->bi_destructor = iblock_bio_destructor;
589 bio->bi_end_io = &iblock_bio_done; 577 bio->bi_end_io = &iblock_bio_done;
590 bio->bi_sector = lba; 578 bio->bi_sector = lba;
591 return bio; 579 return bio;
@@ -657,6 +645,12 @@ static int iblock_execute_rw(struct se_cmd *cmd)
657 goto fail; 645 goto fail;
658 cmd->priv = ibr; 646 cmd->priv = ibr;
659 647
648 if (!sgl_nents) {
649 atomic_set(&ibr->pending, 1);
650 iblock_complete_cmd(cmd);
651 return 0;
652 }
653
660 bio = iblock_get_bio(cmd, block_lba, sgl_nents); 654 bio = iblock_get_bio(cmd, block_lba, sgl_nents);
661 if (!bio) 655 if (!bio)
662 goto fail_free_ibr; 656 goto fail_free_ibr;
@@ -769,8 +763,6 @@ static struct se_subsystem_api iblock_template = {
769 .name = "iblock", 763 .name = "iblock",
770 .owner = THIS_MODULE, 764 .owner = THIS_MODULE,
771 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, 765 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
772 .write_cache_emulated = 1,
773 .fua_write_emulated = 1,
774 .attach_hba = iblock_attach_hba, 766 .attach_hba = iblock_attach_hba,
775 .detach_hba = iblock_detach_hba, 767 .detach_hba = iblock_detach_hba,
776 .allocate_virtdevice = iblock_allocate_virtdevice, 768 .allocate_virtdevice = iblock_allocate_virtdevice,
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 956c84c6b666..8c323a98c4a0 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -197,10 +197,10 @@ int target_scsi2_reservation_release(struct se_cmd *cmd)
197{ 197{
198 struct se_device *dev = cmd->se_dev; 198 struct se_device *dev = cmd->se_dev;
199 struct se_session *sess = cmd->se_sess; 199 struct se_session *sess = cmd->se_sess;
200 struct se_portal_group *tpg = sess->se_tpg; 200 struct se_portal_group *tpg;
201 int ret = 0, rc; 201 int ret = 0, rc;
202 202
203 if (!sess || !tpg) 203 if (!sess || !sess->se_tpg)
204 goto out; 204 goto out;
205 rc = target_check_scsi2_reservation_conflict(cmd); 205 rc = target_check_scsi2_reservation_conflict(cmd);
206 if (rc == 1) 206 if (rc == 1)
@@ -228,6 +228,7 @@ int target_scsi2_reservation_release(struct se_cmd *cmd)
228 dev->dev_res_bin_isid = 0; 228 dev->dev_res_bin_isid = 0;
229 dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID; 229 dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
230 } 230 }
231 tpg = sess->se_tpg;
231 pr_debug("SCSI-2 Released reservation for %s LUN: %u ->" 232 pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
232 " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(), 233 " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
233 cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, 234 cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
@@ -245,7 +246,7 @@ int target_scsi2_reservation_reserve(struct se_cmd *cmd)
245{ 246{
246 struct se_device *dev = cmd->se_dev; 247 struct se_device *dev = cmd->se_dev;
247 struct se_session *sess = cmd->se_sess; 248 struct se_session *sess = cmd->se_sess;
248 struct se_portal_group *tpg = sess->se_tpg; 249 struct se_portal_group *tpg;
249 int ret = 0, rc; 250 int ret = 0, rc;
250 251
251 if ((cmd->t_task_cdb[1] & 0x01) && 252 if ((cmd->t_task_cdb[1] & 0x01) &&
@@ -260,7 +261,7 @@ int target_scsi2_reservation_reserve(struct se_cmd *cmd)
260 * This is currently the case for target_core_mod passthrough struct se_cmd 261 * This is currently the case for target_core_mod passthrough struct se_cmd
261 * ops 262 * ops
262 */ 263 */
263 if (!sess || !tpg) 264 if (!sess || !sess->se_tpg)
264 goto out; 265 goto out;
265 rc = target_check_scsi2_reservation_conflict(cmd); 266 rc = target_check_scsi2_reservation_conflict(cmd);
266 if (rc == 1) 267 if (rc == 1)
@@ -272,6 +273,7 @@ int target_scsi2_reservation_reserve(struct se_cmd *cmd)
272 } 273 }
273 274
274 ret = 0; 275 ret = 0;
276 tpg = sess->se_tpg;
275 spin_lock(&dev->dev_reservation_lock); 277 spin_lock(&dev->dev_reservation_lock);
276 if (dev->dev_reserved_node_acl && 278 if (dev->dev_reserved_node_acl &&
277 (dev->dev_reserved_node_acl != sess->se_node_acl)) { 279 (dev->dev_reserved_node_acl != sess->se_node_acl)) {
@@ -1620,7 +1622,7 @@ static int core_scsi3_decode_spec_i_port(
1620 goto out; 1622 goto out;
1621 } 1623 }
1622 /* 1624 /*
1623 * Locate the desination initiator ACL to be registered 1625 * Locate the destination initiator ACL to be registered
1624 * from the decoded fabric module specific TransportID 1626 * from the decoded fabric module specific TransportID
1625 * at *i_str. 1627 * at *i_str.
1626 */ 1628 */
@@ -4257,7 +4259,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
4257 buf[off++] = ((port->sep_rtpi >> 8) & 0xff); 4259 buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
4258 buf[off++] = (port->sep_rtpi & 0xff); 4260 buf[off++] = (port->sep_rtpi & 0xff);
4259 } else 4261 } else
4260 off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFER */ 4262 off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */
4261 4263
4262 /* 4264 /*
4263 * Now, have the $FABRIC_MOD fill in the protocol identifier 4265 * Now, have the $FABRIC_MOD fill in the protocol identifier
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 9d7ce3daa262..617c086a8a02 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -264,7 +264,7 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
264 " length zero!\n"); 264 " length zero!\n");
265 break; 265 break;
266 } 266 }
267 pr_debug("T10 VPD Identifer Length: %d\n", ident_len); 267 pr_debug("T10 VPD Identifier Length: %d\n", ident_len);
268 268
269 vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL); 269 vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
270 if (!vpd) { 270 if (!vpd) {
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index a9dd9469e3bd..a6e27d967c7b 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -40,8 +40,9 @@
40static int sbc_emulate_readcapacity(struct se_cmd *cmd) 40static int sbc_emulate_readcapacity(struct se_cmd *cmd)
41{ 41{
42 struct se_device *dev = cmd->se_dev; 42 struct se_device *dev = cmd->se_dev;
43 unsigned char *buf;
44 unsigned long long blocks_long = dev->transport->get_blocks(dev); 43 unsigned long long blocks_long = dev->transport->get_blocks(dev);
44 unsigned char *rbuf;
45 unsigned char buf[8];
45 u32 blocks; 46 u32 blocks;
46 47
47 if (blocks_long >= 0x00000000ffffffff) 48 if (blocks_long >= 0x00000000ffffffff)
@@ -49,8 +50,6 @@ static int sbc_emulate_readcapacity(struct se_cmd *cmd)
49 else 50 else
50 blocks = (u32)blocks_long; 51 blocks = (u32)blocks_long;
51 52
52 buf = transport_kmap_data_sg(cmd);
53
54 buf[0] = (blocks >> 24) & 0xff; 53 buf[0] = (blocks >> 24) & 0xff;
55 buf[1] = (blocks >> 16) & 0xff; 54 buf[1] = (blocks >> 16) & 0xff;
56 buf[2] = (blocks >> 8) & 0xff; 55 buf[2] = (blocks >> 8) & 0xff;
@@ -60,7 +59,11 @@ static int sbc_emulate_readcapacity(struct se_cmd *cmd)
60 buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; 59 buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
61 buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; 60 buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
62 61
63 transport_kunmap_data_sg(cmd); 62 rbuf = transport_kmap_data_sg(cmd);
63 if (rbuf) {
64 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
65 transport_kunmap_data_sg(cmd);
66 }
64 67
65 target_complete_cmd(cmd, GOOD); 68 target_complete_cmd(cmd, GOOD);
66 return 0; 69 return 0;
@@ -69,11 +72,11 @@ static int sbc_emulate_readcapacity(struct se_cmd *cmd)
69static int sbc_emulate_readcapacity_16(struct se_cmd *cmd) 72static int sbc_emulate_readcapacity_16(struct se_cmd *cmd)
70{ 73{
71 struct se_device *dev = cmd->se_dev; 74 struct se_device *dev = cmd->se_dev;
72 unsigned char *buf; 75 unsigned char *rbuf;
76 unsigned char buf[32];
73 unsigned long long blocks = dev->transport->get_blocks(dev); 77 unsigned long long blocks = dev->transport->get_blocks(dev);
74 78
75 buf = transport_kmap_data_sg(cmd); 79 memset(buf, 0, sizeof(buf));
76
77 buf[0] = (blocks >> 56) & 0xff; 80 buf[0] = (blocks >> 56) & 0xff;
78 buf[1] = (blocks >> 48) & 0xff; 81 buf[1] = (blocks >> 48) & 0xff;
79 buf[2] = (blocks >> 40) & 0xff; 82 buf[2] = (blocks >> 40) & 0xff;
@@ -93,7 +96,11 @@ static int sbc_emulate_readcapacity_16(struct se_cmd *cmd)
93 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) 96 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
94 buf[14] = 0x80; 97 buf[14] = 0x80;
95 98
96 transport_kunmap_data_sg(cmd); 99 rbuf = transport_kmap_data_sg(cmd);
100 if (rbuf) {
101 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
102 transport_kunmap_data_sg(cmd);
103 }
97 104
98 target_complete_cmd(cmd, GOOD); 105 target_complete_cmd(cmd, GOOD);
99 return 0; 106 return 0;
@@ -128,6 +135,12 @@ static int sbc_emulate_verify(struct se_cmd *cmd)
128 return 0; 135 return 0;
129} 136}
130 137
138static int sbc_emulate_noop(struct se_cmd *cmd)
139{
140 target_complete_cmd(cmd, GOOD);
141 return 0;
142}
143
131static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) 144static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
132{ 145{
133 return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors; 146 return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors;
@@ -524,6 +537,18 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
524 size = 0; 537 size = 0;
525 cmd->execute_cmd = sbc_emulate_verify; 538 cmd->execute_cmd = sbc_emulate_verify;
526 break; 539 break;
540 case REZERO_UNIT:
541 case SEEK_6:
542 case SEEK_10:
543 /*
544 * There are still clients out there which use these old SCSI-2
545 * commands. This mainly happens when running VMs with legacy
546 * guest systems, connected via SCSI command pass-through to
547 * iSCSI targets. Make them happy and return status GOOD.
548 */
549 size = 0;
550 cmd->execute_cmd = sbc_emulate_noop;
551 break;
527 default: 552 default:
528 ret = spc_parse_cdb(cmd, &size); 553 ret = spc_parse_cdb(cmd, &size);
529 if (ret) 554 if (ret)
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 388a922c8f6d..6fd434d3d7e4 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -600,29 +600,12 @@ static int spc_emulate_inquiry(struct se_cmd *cmd)
600{ 600{
601 struct se_device *dev = cmd->se_dev; 601 struct se_device *dev = cmd->se_dev;
602 struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg; 602 struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
603 unsigned char *buf, *map_buf; 603 unsigned char *rbuf;
604 unsigned char *cdb = cmd->t_task_cdb; 604 unsigned char *cdb = cmd->t_task_cdb;
605 unsigned char buf[SE_INQUIRY_BUF];
605 int p, ret; 606 int p, ret;
606 607
607 map_buf = transport_kmap_data_sg(cmd); 608 memset(buf, 0, SE_INQUIRY_BUF);
608 /*
609 * If SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is not set, then we
610 * know we actually allocated a full page. Otherwise, if the
611 * data buffer is too small, allocate a temporary buffer so we
612 * don't have to worry about overruns in all our INQUIRY
613 * emulation handling.
614 */
615 if (cmd->data_length < SE_INQUIRY_BUF &&
616 (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
617 buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
618 if (!buf) {
619 transport_kunmap_data_sg(cmd);
620 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
621 return -ENOMEM;
622 }
623 } else {
624 buf = map_buf;
625 }
626 609
627 if (dev == tpg->tpg_virt_lun0.lun_se_dev) 610 if (dev == tpg->tpg_virt_lun0.lun_se_dev)
628 buf[0] = 0x3f; /* Not connected */ 611 buf[0] = 0x3f; /* Not connected */
@@ -655,11 +638,11 @@ static int spc_emulate_inquiry(struct se_cmd *cmd)
655 ret = -EINVAL; 638 ret = -EINVAL;
656 639
657out: 640out:
658 if (buf != map_buf) { 641 rbuf = transport_kmap_data_sg(cmd);
659 memcpy(map_buf, buf, cmd->data_length); 642 if (rbuf) {
660 kfree(buf); 643 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
644 transport_kunmap_data_sg(cmd);
661 } 645 }
662 transport_kunmap_data_sg(cmd);
663 646
664 if (!ret) 647 if (!ret)
665 target_complete_cmd(cmd, GOOD); 648 target_complete_cmd(cmd, GOOD);
@@ -803,7 +786,7 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
803 unsigned char *rbuf; 786 unsigned char *rbuf;
804 int type = dev->transport->get_device_type(dev); 787 int type = dev->transport->get_device_type(dev);
805 int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10); 788 int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
806 int offset = ten ? 8 : 4; 789 u32 offset = ten ? 8 : 4;
807 int length = 0; 790 int length = 0;
808 unsigned char buf[SE_MODE_PAGE_BUF]; 791 unsigned char buf[SE_MODE_PAGE_BUF];
809 792
@@ -836,6 +819,7 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
836 offset -= 2; 819 offset -= 2;
837 buf[0] = (offset >> 8) & 0xff; 820 buf[0] = (offset >> 8) & 0xff;
838 buf[1] = offset & 0xff; 821 buf[1] = offset & 0xff;
822 offset += 2;
839 823
840 if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || 824 if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
841 (cmd->se_deve && 825 (cmd->se_deve &&
@@ -845,13 +829,10 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
845 if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && 829 if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
846 (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) 830 (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
847 spc_modesense_dpofua(&buf[3], type); 831 spc_modesense_dpofua(&buf[3], type);
848
849 if ((offset + 2) > cmd->data_length)
850 offset = cmd->data_length;
851
852 } else { 832 } else {
853 offset -= 1; 833 offset -= 1;
854 buf[0] = offset & 0xff; 834 buf[0] = offset & 0xff;
835 offset += 1;
855 836
856 if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || 837 if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
857 (cmd->se_deve && 838 (cmd->se_deve &&
@@ -861,14 +842,13 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
861 if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && 842 if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
862 (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) 843 (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
863 spc_modesense_dpofua(&buf[2], type); 844 spc_modesense_dpofua(&buf[2], type);
864
865 if ((offset + 1) > cmd->data_length)
866 offset = cmd->data_length;
867 } 845 }
868 846
869 rbuf = transport_kmap_data_sg(cmd); 847 rbuf = transport_kmap_data_sg(cmd);
870 memcpy(rbuf, buf, offset); 848 if (rbuf) {
871 transport_kunmap_data_sg(cmd); 849 memcpy(rbuf, buf, min(offset, cmd->data_length));
850 transport_kunmap_data_sg(cmd);
851 }
872 852
873 target_complete_cmd(cmd, GOOD); 853 target_complete_cmd(cmd, GOOD);
874 return 0; 854 return 0;
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 3d44beb0cf1f..cb6b0036ae95 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -32,7 +32,6 @@
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/timer.h> 33#include <linux/timer.h>
34#include <linux/string.h> 34#include <linux/string.h>
35#include <generated/utsrelease.h>
36#include <linux/utsname.h> 35#include <linux/utsname.h>
37#include <linux/proc_fs.h> 36#include <linux/proc_fs.h>
38#include <linux/seq_file.h> 37#include <linux/seq_file.h>
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 1c59a3c23b2c..be75c4331a92 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -140,15 +140,15 @@ void core_tmr_abort_task(
140 printk("ABORT_TASK: Found referenced %s task_tag: %u\n", 140 printk("ABORT_TASK: Found referenced %s task_tag: %u\n",
141 se_cmd->se_tfo->get_fabric_name(), ref_tag); 141 se_cmd->se_tfo->get_fabric_name(), ref_tag);
142 142
143 spin_lock_irq(&se_cmd->t_state_lock); 143 spin_lock(&se_cmd->t_state_lock);
144 if (se_cmd->transport_state & CMD_T_COMPLETE) { 144 if (se_cmd->transport_state & CMD_T_COMPLETE) {
145 printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag); 145 printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag);
146 spin_unlock_irq(&se_cmd->t_state_lock); 146 spin_unlock(&se_cmd->t_state_lock);
147 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 147 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
148 goto out; 148 goto out;
149 } 149 }
150 se_cmd->transport_state |= CMD_T_ABORTED; 150 se_cmd->transport_state |= CMD_T_ABORTED;
151 spin_unlock_irq(&se_cmd->t_state_lock); 151 spin_unlock(&se_cmd->t_state_lock);
152 152
153 list_del_init(&se_cmd->se_cmd_list); 153 list_del_init(&se_cmd->se_cmd_list);
154 kref_get(&se_cmd->cmd_kref); 154 kref_get(&se_cmd->cmd_kref);
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index b8628a5014b9..a531fe282b1e 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -303,7 +303,7 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
303 } 303 }
304 /* 304 /*
305 * Here we only create demo-mode MappedLUNs from the active 305 * Here we only create demo-mode MappedLUNs from the active
306 * TPG LUNs if the fabric is not explictly asking for 306 * TPG LUNs if the fabric is not explicitly asking for
307 * tpg_check_demo_mode_login_only() == 1. 307 * tpg_check_demo_mode_login_only() == 1.
308 */ 308 */
309 if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) || 309 if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 269f54488397..9097155e9ebe 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -55,8 +55,6 @@
55#include "target_core_pr.h" 55#include "target_core_pr.h"
56#include "target_core_ua.h" 56#include "target_core_ua.h"
57 57
58static int sub_api_initialized;
59
60static struct workqueue_struct *target_completion_wq; 58static struct workqueue_struct *target_completion_wq;
61static struct kmem_cache *se_sess_cache; 59static struct kmem_cache *se_sess_cache;
62struct kmem_cache *se_ua_cache; 60struct kmem_cache *se_ua_cache;
@@ -195,6 +193,7 @@ u32 scsi_get_new_index(scsi_index_t type)
195void transport_subsystem_check_init(void) 193void transport_subsystem_check_init(void)
196{ 194{
197 int ret; 195 int ret;
196 static int sub_api_initialized;
198 197
199 if (sub_api_initialized) 198 if (sub_api_initialized)
200 return; 199 return;
@@ -211,12 +210,7 @@ void transport_subsystem_check_init(void)
211 if (ret != 0) 210 if (ret != 0)
212 pr_err("Unable to load target_core_pscsi\n"); 211 pr_err("Unable to load target_core_pscsi\n");
213 212
214 ret = request_module("target_core_stgt");
215 if (ret != 0)
216 pr_err("Unable to load target_core_stgt\n");
217
218 sub_api_initialized = 1; 213 sub_api_initialized = 1;
219 return;
220} 214}
221 215
222struct se_session *transport_init_session(void) 216struct se_session *transport_init_session(void)
@@ -573,9 +567,7 @@ static void target_complete_failure_work(struct work_struct *work)
573 */ 567 */
574static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) 568static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
575{ 569{
576 unsigned char *buffer = cmd->sense_buffer;
577 struct se_device *dev = cmd->se_dev; 570 struct se_device *dev = cmd->se_dev;
578 u32 offset = 0;
579 571
580 WARN_ON(!cmd->se_lun); 572 WARN_ON(!cmd->se_lun);
581 573
@@ -585,14 +577,11 @@ static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
585 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) 577 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
586 return NULL; 578 return NULL;
587 579
588 offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER); 580 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
589
590 /* Automatically padded */
591 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
592 581
593 pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", 582 pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
594 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); 583 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
595 return &buffer[offset]; 584 return cmd->sense_buffer;
596} 585}
597 586
598void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) 587void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
@@ -969,7 +958,7 @@ int
969transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) 958transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
970{ 959{
971 static const char hex_str[] = "0123456789abcdef"; 960 static const char hex_str[] = "0123456789abcdef";
972 int j = 0, i = 4; /* offset to start of the identifer */ 961 int j = 0, i = 4; /* offset to start of the identifier */
973 962
974 /* 963 /*
975 * The VPD Code Set (encoding) 964 * The VPD Code Set (encoding)
@@ -1466,8 +1455,9 @@ int transport_handle_cdb_direct(
1466} 1455}
1467EXPORT_SYMBOL(transport_handle_cdb_direct); 1456EXPORT_SYMBOL(transport_handle_cdb_direct);
1468 1457
1469/** 1458/*
1470 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd 1459 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
1460 * se_cmd + use pre-allocated SGL memory.
1471 * 1461 *
1472 * @se_cmd: command descriptor to submit 1462 * @se_cmd: command descriptor to submit
1473 * @se_sess: associated se_sess for endpoint 1463 * @se_sess: associated se_sess for endpoint
@@ -1478,6 +1468,10 @@ EXPORT_SYMBOL(transport_handle_cdb_direct);
1478 * @task_addr: SAM task attribute 1468 * @task_addr: SAM task attribute
1479 * @data_dir: DMA data direction 1469 * @data_dir: DMA data direction
1480 * @flags: flags for command submission from target_sc_flags_tables 1470 * @flags: flags for command submission from target_sc_flags_tables
1471 * @sgl: struct scatterlist memory for unidirectional mapping
1472 * @sgl_count: scatterlist count for unidirectional mapping
1473 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
1474 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
1481 * 1475 *
1482 * Returns non zero to signal active I/O shutdown failure. All other 1476 * Returns non zero to signal active I/O shutdown failure. All other
1483 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1477 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
@@ -1485,10 +1479,12 @@ EXPORT_SYMBOL(transport_handle_cdb_direct);
1485 * 1479 *
1486 * This may only be called from process context, and also currently 1480 * This may only be called from process context, and also currently
1487 * assumes internal allocation of fabric payload buffer by target-core. 1481 * assumes internal allocation of fabric payload buffer by target-core.
1488 **/ 1482 */
1489int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, 1483int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
1490 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, 1484 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
1491 u32 data_length, int task_attr, int data_dir, int flags) 1485 u32 data_length, int task_attr, int data_dir, int flags,
1486 struct scatterlist *sgl, u32 sgl_count,
1487 struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
1492{ 1488{
1493 struct se_portal_group *se_tpg; 1489 struct se_portal_group *se_tpg;
1494 int rc; 1490 int rc;
@@ -1535,7 +1531,42 @@ int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1535 transport_generic_request_failure(se_cmd); 1531 transport_generic_request_failure(se_cmd);
1536 return 0; 1532 return 0;
1537 } 1533 }
1534 /*
1535 * When a non zero sgl_count has been passed perform SGL passthrough
1536 * mapping for pre-allocated fabric memory instead of having target
1537 * core perform an internal SGL allocation..
1538 */
1539 if (sgl_count != 0) {
1540 BUG_ON(!sgl);
1541
1542 /*
1543 * A work-around for tcm_loop as some userspace code via
1544 * scsi-generic do not memset their associated read buffers,
1545 * so go ahead and do that here for type non-data CDBs. Also
1546 * note that this is currently guaranteed to be a single SGL
1547 * for this case by target core in target_setup_cmd_from_cdb()
1548 * -> transport_generic_cmd_sequencer().
1549 */
1550 if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
1551 se_cmd->data_direction == DMA_FROM_DEVICE) {
1552 unsigned char *buf = NULL;
1553
1554 if (sgl)
1555 buf = kmap(sg_page(sgl)) + sgl->offset;
1556
1557 if (buf) {
1558 memset(buf, 0, sgl->length);
1559 kunmap(sg_page(sgl));
1560 }
1561 }
1538 1562
1563 rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
1564 sgl_bidi, sgl_bidi_count);
1565 if (rc != 0) {
1566 transport_generic_request_failure(se_cmd);
1567 return 0;
1568 }
1569 }
1539 /* 1570 /*
1540 * Check if we need to delay processing because of ALUA 1571 * Check if we need to delay processing because of ALUA
1541 * Active/NonOptimized primary access state.. 1572 * Active/NonOptimized primary access state..
@@ -1545,6 +1576,38 @@ int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1545 transport_handle_cdb_direct(se_cmd); 1576 transport_handle_cdb_direct(se_cmd);
1546 return 0; 1577 return 0;
1547} 1578}
1579EXPORT_SYMBOL(target_submit_cmd_map_sgls);
1580
1581/*
1582 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
1583 *
1584 * @se_cmd: command descriptor to submit
1585 * @se_sess: associated se_sess for endpoint
1586 * @cdb: pointer to SCSI CDB
1587 * @sense: pointer to SCSI sense buffer
1588 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1589 * @data_length: fabric expected data transfer length
1590 * @task_addr: SAM task attribute
1591 * @data_dir: DMA data direction
1592 * @flags: flags for command submission from target_sc_flags_tables
1593 *
1594 * Returns non zero to signal active I/O shutdown failure. All other
1595 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
1596 * but still return zero here.
1597 *
1598 * This may only be called from process context, and also currently
1599 * assumes internal allocation of fabric payload buffer by target-core.
1600 *
1601 * It also assumes interal target core SGL memory allocation.
1602 */
1603int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1604 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
1605 u32 data_length, int task_attr, int data_dir, int flags)
1606{
1607 return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
1608 unpacked_lun, data_length, task_attr, data_dir,
1609 flags, NULL, 0, NULL, 0);
1610}
1548EXPORT_SYMBOL(target_submit_cmd); 1611EXPORT_SYMBOL(target_submit_cmd);
1549 1612
1550static void target_complete_tmr_failure(struct work_struct *work) 1613static void target_complete_tmr_failure(struct work_struct *work)
@@ -1553,7 +1616,6 @@ static void target_complete_tmr_failure(struct work_struct *work)
1553 1616
1554 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; 1617 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
1555 se_cmd->se_tfo->queue_tm_rsp(se_cmd); 1618 se_cmd->se_tfo->queue_tm_rsp(se_cmd);
1556 transport_generic_free_cmd(se_cmd, 0);
1557} 1619}
1558 1620
1559/** 1621/**
@@ -2300,23 +2362,6 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
2300 if (ret < 0) 2362 if (ret < 0)
2301 goto out_fail; 2363 goto out_fail;
2302 } 2364 }
2303 /*
2304 * If this command doesn't have any payload and we don't have to call
2305 * into the fabric for data transfers, go ahead and complete it right
2306 * away.
2307 */
2308 if (!cmd->data_length &&
2309 cmd->t_task_cdb[0] != REQUEST_SENSE &&
2310 cmd->se_dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
2311 spin_lock_irq(&cmd->t_state_lock);
2312 cmd->t_state = TRANSPORT_COMPLETE;
2313 cmd->transport_state |= CMD_T_ACTIVE;
2314 spin_unlock_irq(&cmd->t_state_lock);
2315
2316 INIT_WORK(&cmd->work, target_complete_ok_work);
2317 queue_work(target_completion_wq, &cmd->work);
2318 return 0;
2319 }
2320 2365
2321 atomic_inc(&cmd->t_fe_count); 2366 atomic_inc(&cmd->t_fe_count);
2322 2367
@@ -2771,7 +2816,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
2771 spin_lock_irqsave(&cmd->t_state_lock, flags); 2816 spin_lock_irqsave(&cmd->t_state_lock, flags);
2772 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 2817 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
2773 2818
2774 pr_debug("wait_for_tasks: Stopped wait_for_compltion(" 2819 pr_debug("wait_for_tasks: Stopped wait_for_completion("
2775 "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", 2820 "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
2776 cmd->se_tfo->get_task_tag(cmd)); 2821 cmd->se_tfo->get_task_tag(cmd));
2777 2822
@@ -2810,7 +2855,6 @@ int transport_send_check_condition_and_sense(
2810{ 2855{
2811 unsigned char *buffer = cmd->sense_buffer; 2856 unsigned char *buffer = cmd->sense_buffer;
2812 unsigned long flags; 2857 unsigned long flags;
2813 int offset;
2814 u8 asc = 0, ascq = 0; 2858 u8 asc = 0, ascq = 0;
2815 2859
2816 spin_lock_irqsave(&cmd->t_state_lock, flags); 2860 spin_lock_irqsave(&cmd->t_state_lock, flags);
@@ -2826,14 +2870,7 @@ int transport_send_check_condition_and_sense(
2826 2870
2827 if (!from_transport) 2871 if (!from_transport)
2828 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; 2872 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
2829 /* 2873
2830 * Data Segment and SenseLength of the fabric response PDU.
2831 *
2832 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
2833 * from include/scsi/scsi_cmnd.h
2834 */
2835 offset = cmd->se_tfo->set_fabric_sense_len(cmd,
2836 TRANSPORT_SENSE_BUFFER);
2837 /* 2874 /*
2838 * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses 2875 * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses
2839 * SENSE KEY values from include/scsi/scsi.h 2876 * SENSE KEY values from include/scsi/scsi.h
@@ -2841,151 +2878,151 @@ int transport_send_check_condition_and_sense(
2841 switch (reason) { 2878 switch (reason) {
2842 case TCM_NON_EXISTENT_LUN: 2879 case TCM_NON_EXISTENT_LUN:
2843 /* CURRENT ERROR */ 2880 /* CURRENT ERROR */
2844 buffer[offset] = 0x70; 2881 buffer[0] = 0x70;
2845 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 2882 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2846 /* ILLEGAL REQUEST */ 2883 /* ILLEGAL REQUEST */
2847 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2884 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2848 /* LOGICAL UNIT NOT SUPPORTED */ 2885 /* LOGICAL UNIT NOT SUPPORTED */
2849 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25; 2886 buffer[SPC_ASC_KEY_OFFSET] = 0x25;
2850 break; 2887 break;
2851 case TCM_UNSUPPORTED_SCSI_OPCODE: 2888 case TCM_UNSUPPORTED_SCSI_OPCODE:
2852 case TCM_SECTOR_COUNT_TOO_MANY: 2889 case TCM_SECTOR_COUNT_TOO_MANY:
2853 /* CURRENT ERROR */ 2890 /* CURRENT ERROR */
2854 buffer[offset] = 0x70; 2891 buffer[0] = 0x70;
2855 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 2892 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2856 /* ILLEGAL REQUEST */ 2893 /* ILLEGAL REQUEST */
2857 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2894 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2858 /* INVALID COMMAND OPERATION CODE */ 2895 /* INVALID COMMAND OPERATION CODE */
2859 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20; 2896 buffer[SPC_ASC_KEY_OFFSET] = 0x20;
2860 break; 2897 break;
2861 case TCM_UNKNOWN_MODE_PAGE: 2898 case TCM_UNKNOWN_MODE_PAGE:
2862 /* CURRENT ERROR */ 2899 /* CURRENT ERROR */
2863 buffer[offset] = 0x70; 2900 buffer[0] = 0x70;
2864 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 2901 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2865 /* ILLEGAL REQUEST */ 2902 /* ILLEGAL REQUEST */
2866 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2903 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2867 /* INVALID FIELD IN CDB */ 2904 /* INVALID FIELD IN CDB */
2868 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; 2905 buffer[SPC_ASC_KEY_OFFSET] = 0x24;
2869 break; 2906 break;
2870 case TCM_CHECK_CONDITION_ABORT_CMD: 2907 case TCM_CHECK_CONDITION_ABORT_CMD:
2871 /* CURRENT ERROR */ 2908 /* CURRENT ERROR */
2872 buffer[offset] = 0x70; 2909 buffer[0] = 0x70;
2873 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 2910 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2874 /* ABORTED COMMAND */ 2911 /* ABORTED COMMAND */
2875 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2912 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
2876 /* BUS DEVICE RESET FUNCTION OCCURRED */ 2913 /* BUS DEVICE RESET FUNCTION OCCURRED */
2877 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29; 2914 buffer[SPC_ASC_KEY_OFFSET] = 0x29;
2878 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03; 2915 buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
2879 break; 2916 break;
2880 case TCM_INCORRECT_AMOUNT_OF_DATA: 2917 case TCM_INCORRECT_AMOUNT_OF_DATA:
2881 /* CURRENT ERROR */ 2918 /* CURRENT ERROR */
2882 buffer[offset] = 0x70; 2919 buffer[0] = 0x70;
2883 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 2920 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2884 /* ABORTED COMMAND */ 2921 /* ABORTED COMMAND */
2885 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2922 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
2886 /* WRITE ERROR */ 2923 /* WRITE ERROR */
2887 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; 2924 buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
2888 /* NOT ENOUGH UNSOLICITED DATA */ 2925 /* NOT ENOUGH UNSOLICITED DATA */
2889 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d; 2926 buffer[SPC_ASCQ_KEY_OFFSET] = 0x0d;
2890 break; 2927 break;
2891 case TCM_INVALID_CDB_FIELD: 2928 case TCM_INVALID_CDB_FIELD:
2892 /* CURRENT ERROR */ 2929 /* CURRENT ERROR */
2893 buffer[offset] = 0x70; 2930 buffer[0] = 0x70;
2894 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 2931 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2895 /* ILLEGAL REQUEST */ 2932 /* ILLEGAL REQUEST */
2896 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2933 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2897 /* INVALID FIELD IN CDB */ 2934 /* INVALID FIELD IN CDB */
2898 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; 2935 buffer[SPC_ASC_KEY_OFFSET] = 0x24;
2899 break; 2936 break;
2900 case TCM_INVALID_PARAMETER_LIST: 2937 case TCM_INVALID_PARAMETER_LIST:
2901 /* CURRENT ERROR */ 2938 /* CURRENT ERROR */
2902 buffer[offset] = 0x70; 2939 buffer[0] = 0x70;
2903 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 2940 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2904 /* ILLEGAL REQUEST */ 2941 /* ILLEGAL REQUEST */
2905 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2942 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2906 /* INVALID FIELD IN PARAMETER LIST */ 2943 /* INVALID FIELD IN PARAMETER LIST */
2907 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26; 2944 buffer[SPC_ASC_KEY_OFFSET] = 0x26;
2908 break; 2945 break;
2909 case TCM_UNEXPECTED_UNSOLICITED_DATA: 2946 case TCM_UNEXPECTED_UNSOLICITED_DATA:
2910 /* CURRENT ERROR */ 2947 /* CURRENT ERROR */
2911 buffer[offset] = 0x70; 2948 buffer[0] = 0x70;
2912 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 2949 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2913 /* ABORTED COMMAND */ 2950 /* ABORTED COMMAND */
2914 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2951 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
2915 /* WRITE ERROR */ 2952 /* WRITE ERROR */
2916 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; 2953 buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
2917 /* UNEXPECTED_UNSOLICITED_DATA */ 2954 /* UNEXPECTED_UNSOLICITED_DATA */
2918 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c; 2955 buffer[SPC_ASCQ_KEY_OFFSET] = 0x0c;
2919 break; 2956 break;
2920 case TCM_SERVICE_CRC_ERROR: 2957 case TCM_SERVICE_CRC_ERROR:
2921 /* CURRENT ERROR */ 2958 /* CURRENT ERROR */
2922 buffer[offset] = 0x70; 2959 buffer[0] = 0x70;
2923 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 2960 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2924 /* ABORTED COMMAND */ 2961 /* ABORTED COMMAND */
2925 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2962 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
2926 /* PROTOCOL SERVICE CRC ERROR */ 2963 /* PROTOCOL SERVICE CRC ERROR */
2927 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47; 2964 buffer[SPC_ASC_KEY_OFFSET] = 0x47;
2928 /* N/A */ 2965 /* N/A */
2929 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05; 2966 buffer[SPC_ASCQ_KEY_OFFSET] = 0x05;
2930 break; 2967 break;
2931 case TCM_SNACK_REJECTED: 2968 case TCM_SNACK_REJECTED:
2932 /* CURRENT ERROR */ 2969 /* CURRENT ERROR */
2933 buffer[offset] = 0x70; 2970 buffer[0] = 0x70;
2934 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 2971 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2935 /* ABORTED COMMAND */ 2972 /* ABORTED COMMAND */
2936 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2973 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
2937 /* READ ERROR */ 2974 /* READ ERROR */
2938 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11; 2975 buffer[SPC_ASC_KEY_OFFSET] = 0x11;
2939 /* FAILED RETRANSMISSION REQUEST */ 2976 /* FAILED RETRANSMISSION REQUEST */
2940 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13; 2977 buffer[SPC_ASCQ_KEY_OFFSET] = 0x13;
2941 break; 2978 break;
2942 case TCM_WRITE_PROTECTED: 2979 case TCM_WRITE_PROTECTED:
2943 /* CURRENT ERROR */ 2980 /* CURRENT ERROR */
2944 buffer[offset] = 0x70; 2981 buffer[0] = 0x70;
2945 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 2982 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2946 /* DATA PROTECT */ 2983 /* DATA PROTECT */
2947 buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; 2984 buffer[SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
2948 /* WRITE PROTECTED */ 2985 /* WRITE PROTECTED */
2949 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27; 2986 buffer[SPC_ASC_KEY_OFFSET] = 0x27;
2950 break; 2987 break;
2951 case TCM_ADDRESS_OUT_OF_RANGE: 2988 case TCM_ADDRESS_OUT_OF_RANGE:
2952 /* CURRENT ERROR */ 2989 /* CURRENT ERROR */
2953 buffer[offset] = 0x70; 2990 buffer[0] = 0x70;
2954 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 2991 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2955 /* ILLEGAL REQUEST */ 2992 /* ILLEGAL REQUEST */
2956 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2993 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2957 /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ 2994 /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
2958 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x21; 2995 buffer[SPC_ASC_KEY_OFFSET] = 0x21;
2959 break; 2996 break;
2960 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 2997 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
2961 /* CURRENT ERROR */ 2998 /* CURRENT ERROR */
2962 buffer[offset] = 0x70; 2999 buffer[0] = 0x70;
2963 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 3000 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2964 /* UNIT ATTENTION */ 3001 /* UNIT ATTENTION */
2965 buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; 3002 buffer[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
2966 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); 3003 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
2967 buffer[offset+SPC_ASC_KEY_OFFSET] = asc; 3004 buffer[SPC_ASC_KEY_OFFSET] = asc;
2968 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; 3005 buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
2969 break; 3006 break;
2970 case TCM_CHECK_CONDITION_NOT_READY: 3007 case TCM_CHECK_CONDITION_NOT_READY:
2971 /* CURRENT ERROR */ 3008 /* CURRENT ERROR */
2972 buffer[offset] = 0x70; 3009 buffer[0] = 0x70;
2973 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 3010 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2974 /* Not Ready */ 3011 /* Not Ready */
2975 buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY; 3012 buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
2976 transport_get_sense_codes(cmd, &asc, &ascq); 3013 transport_get_sense_codes(cmd, &asc, &ascq);
2977 buffer[offset+SPC_ASC_KEY_OFFSET] = asc; 3014 buffer[SPC_ASC_KEY_OFFSET] = asc;
2978 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; 3015 buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
2979 break; 3016 break;
2980 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 3017 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
2981 default: 3018 default:
2982 /* CURRENT ERROR */ 3019 /* CURRENT ERROR */
2983 buffer[offset] = 0x70; 3020 buffer[0] = 0x70;
2984 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 3021 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2985 /* ILLEGAL REQUEST */ 3022 /* ILLEGAL REQUEST */
2986 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 3023 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2987 /* LOGICAL UNIT COMMUNICATION FAILURE */ 3024 /* LOGICAL UNIT COMMUNICATION FAILURE */
2988 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80; 3025 buffer[SPC_ASC_KEY_OFFSET] = 0x80;
2989 break; 3026 break;
2990 } 3027 }
2991 /* 3028 /*
@@ -2996,7 +3033,7 @@ int transport_send_check_condition_and_sense(
2996 * Automatically padded, this value is encoded in the fabric's 3033 * Automatically padded, this value is encoded in the fabric's
2997 * data_length response PDU containing the SCSI defined sense data. 3034 * data_length response PDU containing the SCSI defined sense data.
2998 */ 3035 */
2999 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; 3036 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
3000 3037
3001after_reason: 3038after_reason:
3002 return cmd->se_tfo->queue_status(cmd); 3039 return cmd->se_tfo->queue_status(cmd);
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 823e6922249d..b406f178ff39 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -19,7 +19,6 @@
19 19
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/moduleparam.h> 21#include <linux/moduleparam.h>
22#include <generated/utsrelease.h>
23#include <linux/utsname.h> 22#include <linux/utsname.h>
24#include <linux/init.h> 23#include <linux/init.h>
25#include <linux/slab.h> 24#include <linux/slab.h>
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 9501844fae2d..b74feb0d5133 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -495,16 +495,6 @@ static void ft_set_default_node_attr(struct se_node_acl *se_nacl)
495{ 495{
496} 496}
497 497
498static u16 ft_get_fabric_sense_len(void)
499{
500 return 0;
501}
502
503static u16 ft_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_len)
504{
505 return 0;
506}
507
508static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg) 498static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
509{ 499{
510 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr; 500 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
@@ -542,8 +532,6 @@ static struct target_core_fabric_ops ft_fabric_ops = {
542 .queue_data_in = ft_queue_data_in, 532 .queue_data_in = ft_queue_data_in,
543 .queue_status = ft_queue_status, 533 .queue_status = ft_queue_status,
544 .queue_tm_rsp = ft_queue_tm_resp, 534 .queue_tm_rsp = ft_queue_tm_resp,
545 .get_fabric_sense_len = ft_get_fabric_sense_len,
546 .set_fabric_sense_len = ft_set_fabric_sense_len,
547 /* 535 /*
548 * Setup function pointers for generic logic in 536 * Setup function pointers for generic logic in
549 * target_core_fabric_configfs.c 537 * target_core_fabric_configfs.c
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index ad36ede1a1ea..b6fd4cf42840 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -28,7 +28,6 @@
28 28
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/moduleparam.h> 30#include <linux/moduleparam.h>
31#include <generated/utsrelease.h>
32#include <linux/utsname.h> 31#include <linux/utsname.h>
33#include <linux/init.h> 32#include <linux/init.h>
34#include <linux/slab.h> 33#include <linux/slab.h>
@@ -328,11 +327,12 @@ drop:
328 */ 327 */
329void ft_invl_hw_context(struct ft_cmd *cmd) 328void ft_invl_hw_context(struct ft_cmd *cmd)
330{ 329{
331 struct fc_seq *seq = cmd->seq; 330 struct fc_seq *seq;
332 struct fc_exch *ep = NULL; 331 struct fc_exch *ep = NULL;
333 struct fc_lport *lport = NULL; 332 struct fc_lport *lport = NULL;
334 333
335 BUG_ON(!cmd); 334 BUG_ON(!cmd);
335 seq = cmd->seq;
336 336
337 /* Cleanup the DDP context in HW if DDP was setup */ 337 /* Cleanup the DDP context in HW if DDP was setup */
338 if (cmd->was_ddp_setup && seq) { 338 if (cmd->was_ddp_setup && seq) {
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 3c9e5b57caab..9585010964ec 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -19,7 +19,6 @@
19 19
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/moduleparam.h> 21#include <linux/moduleparam.h>
22#include <generated/utsrelease.h>
23#include <linux/utsname.h> 22#include <linux/utsname.h>
24#include <linux/init.h> 23#include <linux/init.h>
25#include <linux/slab.h> 24#include <linux/slab.h>
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 3ab2bd540b54..e1cb6bd75f60 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -19,6 +19,18 @@ config THERMAL_HWMON
19 depends on HWMON=y || HWMON=THERMAL 19 depends on HWMON=y || HWMON=THERMAL
20 default y 20 default y
21 21
22config CPU_THERMAL
23 bool "generic cpu cooling support"
24 depends on THERMAL && CPU_FREQ
25 select CPU_FREQ_TABLE
26 help
27 This implements the generic cpu cooling mechanism through frequency
28 reduction, cpu hotplug and any other ways of reducing temperature. An
29 ACPI version of this already exists(drivers/acpi/processor_thermal.c).
30 This will be useful for platforms using the generic thermal interface
31 and not the ACPI interface.
32 If you want this support, you should say Y here.
33
22config SPEAR_THERMAL 34config SPEAR_THERMAL
23 bool "SPEAr thermal sensor driver" 35 bool "SPEAr thermal sensor driver"
24 depends on THERMAL 36 depends on THERMAL
@@ -27,3 +39,19 @@ config SPEAR_THERMAL
27 help 39 help
28 Enable this to plug the SPEAr thermal sensor driver into the Linux 40 Enable this to plug the SPEAr thermal sensor driver into the Linux
29 thermal framework 41 thermal framework
42
43config RCAR_THERMAL
44 tristate "Renesas R-Car thermal driver"
45 depends on THERMAL
46 depends on ARCH_SHMOBILE
47 help
48 Enable this to plug the R-Car thermal sensor driver into the Linux
49 thermal framework
50
51config EXYNOS_THERMAL
52 tristate "Temperature sensor on Samsung EXYNOS"
53 depends on (ARCH_EXYNOS4 || ARCH_EXYNOS5) && THERMAL
54 select CPU_FREQ_TABLE
55 help
56 If you say yes here you get support for TMU (Thermal Managment
57 Unit) on SAMSUNG EXYNOS series of SoC.
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index a9fff0bf4b14..885550dc64b7 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -3,4 +3,7 @@
3# 3#
4 4
5obj-$(CONFIG_THERMAL) += thermal_sys.o 5obj-$(CONFIG_THERMAL) += thermal_sys.o
6obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o \ No newline at end of file 6obj-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
7obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
8obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o
9obj-$(CONFIG_EXYNOS_THERMAL) += exynos_thermal.o
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
new file mode 100644
index 000000000000..cc1c930a90e4
--- /dev/null
+++ b/drivers/thermal/cpu_cooling.c
@@ -0,0 +1,449 @@
1/*
2 * linux/drivers/thermal/cpu_cooling.c
3 *
4 * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
5 * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
6 *
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
20 *
21 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
22 */
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/thermal.h>
26#include <linux/platform_device.h>
27#include <linux/cpufreq.h>
28#include <linux/err.h>
29#include <linux/slab.h>
30#include <linux/cpu.h>
31#include <linux/cpu_cooling.h>
32
33/**
34 * struct cpufreq_cooling_device
35 * @id: unique integer value corresponding to each cpufreq_cooling_device
36 * registered.
37 * @cool_dev: thermal_cooling_device pointer to keep track of the the
38 * egistered cooling device.
39 * @cpufreq_state: integer value representing the current state of cpufreq
40 * cooling devices.
41 * @cpufreq_val: integer value representing the absolute value of the clipped
42 * frequency.
43 * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
44 * @node: list_head to link all cpufreq_cooling_device together.
45 *
46 * This structure is required for keeping information of each
47 * cpufreq_cooling_device registered as a list whose head is represented by
48 * cooling_cpufreq_list. In order to prevent corruption of this list a
49 * mutex lock cooling_cpufreq_lock is used.
50 */
51struct cpufreq_cooling_device {
52 int id;
53 struct thermal_cooling_device *cool_dev;
54 unsigned int cpufreq_state;
55 unsigned int cpufreq_val;
56 struct cpumask allowed_cpus;
57 struct list_head node;
58};
59static LIST_HEAD(cooling_cpufreq_list);
60static DEFINE_IDR(cpufreq_idr);
61
62static struct mutex cooling_cpufreq_lock;
63
64/* notify_table passes value to the CPUFREQ_ADJUST callback function. */
65#define NOTIFY_INVALID NULL
66struct cpufreq_cooling_device *notify_device;
67
68/**
69 * get_idr - function to get a unique id.
70 * @idr: struct idr * handle used to create a id.
71 * @id: int * value generated by this function.
72 */
73static int get_idr(struct idr *idr, int *id)
74{
75 int err;
76again:
77 if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
78 return -ENOMEM;
79
80 mutex_lock(&cooling_cpufreq_lock);
81 err = idr_get_new(idr, NULL, id);
82 mutex_unlock(&cooling_cpufreq_lock);
83
84 if (unlikely(err == -EAGAIN))
85 goto again;
86 else if (unlikely(err))
87 return err;
88
89 *id = *id & MAX_IDR_MASK;
90 return 0;
91}
92
93/**
94 * release_idr - function to free the unique id.
95 * @idr: struct idr * handle used for creating the id.
96 * @id: int value representing the unique id.
97 */
98static void release_idr(struct idr *idr, int id)
99{
100 mutex_lock(&cooling_cpufreq_lock);
101 idr_remove(idr, id);
102 mutex_unlock(&cooling_cpufreq_lock);
103}
104
105/* Below code defines functions to be used for cpufreq as cooling device */
106
107/**
108 * is_cpufreq_valid - function to check if a cpu has frequency transition policy.
109 * @cpu: cpu for which check is needed.
110 */
111static int is_cpufreq_valid(int cpu)
112{
113 struct cpufreq_policy policy;
114 return !cpufreq_get_policy(&policy, cpu);
115}
116
117/**
118 * get_cpu_frequency - get the absolute value of frequency from level.
119 * @cpu: cpu for which frequency is fetched.
120 * @level: level of frequency of the CPU
121 * e.g level=1 --> 1st MAX FREQ, LEVEL=2 ---> 2nd MAX FREQ, .... etc
122 */
123static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
124{
125 int ret = 0, i = 0;
126 unsigned long level_index;
127 bool descend = false;
128 struct cpufreq_frequency_table *table =
129 cpufreq_frequency_get_table(cpu);
130 if (!table)
131 return ret;
132
133 while (table[i].frequency != CPUFREQ_TABLE_END) {
134 if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
135 continue;
136
137 /*check if table in ascending or descending order*/
138 if ((table[i + 1].frequency != CPUFREQ_TABLE_END) &&
139 (table[i + 1].frequency < table[i].frequency)
140 && !descend) {
141 descend = true;
142 }
143
144 /*return if level matched and table in descending order*/
145 if (descend && i == level)
146 return table[i].frequency;
147 i++;
148 }
149 i--;
150
151 if (level > i || descend)
152 return ret;
153 level_index = i - level;
154
155 /*Scan the table in reverse order and match the level*/
156 while (i >= 0) {
157 if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
158 continue;
159 /*return if level matched*/
160 if (i == level_index)
161 return table[i].frequency;
162 i--;
163 }
164 return ret;
165}
166
167/**
168 * cpufreq_apply_cooling - function to apply frequency clipping.
169 * @cpufreq_device: cpufreq_cooling_device pointer containing frequency
170 * clipping data.
171 * @cooling_state: value of the cooling state.
172 */
173static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
174 unsigned long cooling_state)
175{
176 unsigned int cpuid, clip_freq;
177 struct cpumask *maskPtr = &cpufreq_device->allowed_cpus;
178 unsigned int cpu = cpumask_any(maskPtr);
179
180
181 /* Check if the old cooling action is same as new cooling action */
182 if (cpufreq_device->cpufreq_state == cooling_state)
183 return 0;
184
185 clip_freq = get_cpu_frequency(cpu, cooling_state);
186 if (!clip_freq)
187 return -EINVAL;
188
189 cpufreq_device->cpufreq_state = cooling_state;
190 cpufreq_device->cpufreq_val = clip_freq;
191 notify_device = cpufreq_device;
192
193 for_each_cpu(cpuid, maskPtr) {
194 if (is_cpufreq_valid(cpuid))
195 cpufreq_update_policy(cpuid);
196 }
197
198 notify_device = NOTIFY_INVALID;
199
200 return 0;
201}
202
203/**
204 * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
205 * @nb: struct notifier_block * with callback info.
206 * @event: value showing cpufreq event for which this function invoked.
207 * @data: callback-specific data
208 */
209static int cpufreq_thermal_notifier(struct notifier_block *nb,
210 unsigned long event, void *data)
211{
212 struct cpufreq_policy *policy = data;
213 unsigned long max_freq = 0;
214
215 if (event != CPUFREQ_ADJUST || notify_device == NOTIFY_INVALID)
216 return 0;
217
218 if (cpumask_test_cpu(policy->cpu, &notify_device->allowed_cpus))
219 max_freq = notify_device->cpufreq_val;
220
221 /* Never exceed user_policy.max*/
222 if (max_freq > policy->user_policy.max)
223 max_freq = policy->user_policy.max;
224
225 if (policy->max != max_freq)
226 cpufreq_verify_within_limits(policy, 0, max_freq);
227
228 return 0;
229}
230
231/*
232 * cpufreq cooling device callback functions are defined below
233 */
234
235/**
236 * cpufreq_get_max_state - callback function to get the max cooling state.
237 * @cdev: thermal cooling device pointer.
238 * @state: fill this variable with the max cooling state.
239 */
240static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
241 unsigned long *state)
242{
243 int ret = -EINVAL, i = 0;
244 struct cpufreq_cooling_device *cpufreq_device;
245 struct cpumask *maskPtr;
246 unsigned int cpu;
247 struct cpufreq_frequency_table *table;
248
249 mutex_lock(&cooling_cpufreq_lock);
250 list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) {
251 if (cpufreq_device && cpufreq_device->cool_dev == cdev)
252 break;
253 }
254 if (cpufreq_device == NULL)
255 goto return_get_max_state;
256
257 maskPtr = &cpufreq_device->allowed_cpus;
258 cpu = cpumask_any(maskPtr);
259 table = cpufreq_frequency_get_table(cpu);
260 if (!table) {
261 *state = 0;
262 ret = 0;
263 goto return_get_max_state;
264 }
265
266 while (table[i].frequency != CPUFREQ_TABLE_END) {
267 if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
268 continue;
269 i++;
270 }
271 if (i > 0) {
272 *state = --i;
273 ret = 0;
274 }
275
276return_get_max_state:
277 mutex_unlock(&cooling_cpufreq_lock);
278 return ret;
279}
280
281/**
282 * cpufreq_get_cur_state - callback function to get the current cooling state.
283 * @cdev: thermal cooling device pointer.
284 * @state: fill this variable with the current cooling state.
285 */
286static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
287 unsigned long *state)
288{
289 int ret = -EINVAL;
290 struct cpufreq_cooling_device *cpufreq_device;
291
292 mutex_lock(&cooling_cpufreq_lock);
293 list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) {
294 if (cpufreq_device && cpufreq_device->cool_dev == cdev) {
295 *state = cpufreq_device->cpufreq_state;
296 ret = 0;
297 break;
298 }
299 }
300 mutex_unlock(&cooling_cpufreq_lock);
301
302 return ret;
303}
304
305/**
306 * cpufreq_set_cur_state - callback function to set the current cooling state.
307 * @cdev: thermal cooling device pointer.
308 * @state: set this variable to the current cooling state.
309 */
310static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
311 unsigned long state)
312{
313 int ret = -EINVAL;
314 struct cpufreq_cooling_device *cpufreq_device;
315
316 mutex_lock(&cooling_cpufreq_lock);
317 list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) {
318 if (cpufreq_device && cpufreq_device->cool_dev == cdev) {
319 ret = 0;
320 break;
321 }
322 }
323 if (!ret)
324 ret = cpufreq_apply_cooling(cpufreq_device, state);
325
326 mutex_unlock(&cooling_cpufreq_lock);
327
328 return ret;
329}
330
331/* Bind cpufreq callbacks to thermal cooling device ops */
332static struct thermal_cooling_device_ops const cpufreq_cooling_ops = {
333 .get_max_state = cpufreq_get_max_state,
334 .get_cur_state = cpufreq_get_cur_state,
335 .set_cur_state = cpufreq_set_cur_state,
336};
337
338/* Notifier for cpufreq policy change */
339static struct notifier_block thermal_cpufreq_notifier_block = {
340 .notifier_call = cpufreq_thermal_notifier,
341};
342
343/**
344 * cpufreq_cooling_register - function to create cpufreq cooling device.
345 * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
346 */
347struct thermal_cooling_device *cpufreq_cooling_register(
348 struct cpumask *clip_cpus)
349{
350 struct thermal_cooling_device *cool_dev;
351 struct cpufreq_cooling_device *cpufreq_dev = NULL;
352 unsigned int cpufreq_dev_count = 0, min = 0, max = 0;
353 char dev_name[THERMAL_NAME_LENGTH];
354 int ret = 0, i;
355 struct cpufreq_policy policy;
356
357 list_for_each_entry(cpufreq_dev, &cooling_cpufreq_list, node)
358 cpufreq_dev_count++;
359
360 /*Verify that all the clip cpus have same freq_min, freq_max limit*/
361 for_each_cpu(i, clip_cpus) {
362 /*continue if cpufreq policy not found and not return error*/
363 if (!cpufreq_get_policy(&policy, i))
364 continue;
365 if (min == 0 && max == 0) {
366 min = policy.cpuinfo.min_freq;
367 max = policy.cpuinfo.max_freq;
368 } else {
369 if (min != policy.cpuinfo.min_freq ||
370 max != policy.cpuinfo.max_freq)
371 return ERR_PTR(-EINVAL);
372}
373 }
374 cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device),
375 GFP_KERNEL);
376 if (!cpufreq_dev)
377 return ERR_PTR(-ENOMEM);
378
379 cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
380
381 if (cpufreq_dev_count == 0)
382 mutex_init(&cooling_cpufreq_lock);
383
384 ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
385 if (ret) {
386 kfree(cpufreq_dev);
387 return ERR_PTR(-EINVAL);
388 }
389
390 sprintf(dev_name, "thermal-cpufreq-%d", cpufreq_dev->id);
391
392 cool_dev = thermal_cooling_device_register(dev_name, cpufreq_dev,
393 &cpufreq_cooling_ops);
394 if (!cool_dev) {
395 release_idr(&cpufreq_idr, cpufreq_dev->id);
396 kfree(cpufreq_dev);
397 return ERR_PTR(-EINVAL);
398 }
399 cpufreq_dev->cool_dev = cool_dev;
400 cpufreq_dev->cpufreq_state = 0;
401 mutex_lock(&cooling_cpufreq_lock);
402 list_add_tail(&cpufreq_dev->node, &cooling_cpufreq_list);
403
404 /* Register the notifier for first cpufreq cooling device */
405 if (cpufreq_dev_count == 0)
406 cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
407 CPUFREQ_POLICY_NOTIFIER);
408
409 mutex_unlock(&cooling_cpufreq_lock);
410 return cool_dev;
411}
412EXPORT_SYMBOL(cpufreq_cooling_register);
413
414/**
415 * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
416 * @cdev: thermal cooling device pointer.
417 */
418void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
419{
420 struct cpufreq_cooling_device *cpufreq_dev = NULL;
421 unsigned int cpufreq_dev_count = 0;
422
423 mutex_lock(&cooling_cpufreq_lock);
424 list_for_each_entry(cpufreq_dev, &cooling_cpufreq_list, node) {
425 if (cpufreq_dev && cpufreq_dev->cool_dev == cdev)
426 break;
427 cpufreq_dev_count++;
428 }
429
430 if (!cpufreq_dev || cpufreq_dev->cool_dev != cdev) {
431 mutex_unlock(&cooling_cpufreq_lock);
432 return;
433 }
434
435 list_del(&cpufreq_dev->node);
436
437 /* Unregister the notifier for the last cpufreq cooling device */
438 if (cpufreq_dev_count == 1) {
439 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
440 CPUFREQ_POLICY_NOTIFIER);
441 }
442 mutex_unlock(&cooling_cpufreq_lock);
443 thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
444 release_idr(&cpufreq_idr, cpufreq_dev->id);
445 if (cpufreq_dev_count == 1)
446 mutex_destroy(&cooling_cpufreq_lock);
447 kfree(cpufreq_dev);
448}
449EXPORT_SYMBOL(cpufreq_cooling_unregister);
diff --git a/drivers/thermal/exynos_thermal.c b/drivers/thermal/exynos_thermal.c
new file mode 100644
index 000000000000..6dd29e4ce36b
--- /dev/null
+++ b/drivers/thermal/exynos_thermal.c
@@ -0,0 +1,997 @@
1/*
2 * exynos_thermal.c - Samsung EXYNOS TMU (Thermal Management Unit)
3 *
4 * Copyright (C) 2011 Samsung Electronics
5 * Donggeun Kim <dg77.kim@samsung.com>
6 * Amit Daniel Kachhap <amit.kachhap@linaro.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/module.h>
25#include <linux/err.h>
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/platform_device.h>
29#include <linux/interrupt.h>
30#include <linux/clk.h>
31#include <linux/workqueue.h>
32#include <linux/sysfs.h>
33#include <linux/kobject.h>
34#include <linux/io.h>
35#include <linux/mutex.h>
36#include <linux/platform_data/exynos_thermal.h>
37#include <linux/thermal.h>
38#include <linux/cpufreq.h>
39#include <linux/cpu_cooling.h>
40#include <linux/of.h>
41
42#include <plat/cpu.h>
43
44/* Exynos generic registers */
45#define EXYNOS_TMU_REG_TRIMINFO 0x0
46#define EXYNOS_TMU_REG_CONTROL 0x20
47#define EXYNOS_TMU_REG_STATUS 0x28
48#define EXYNOS_TMU_REG_CURRENT_TEMP 0x40
49#define EXYNOS_TMU_REG_INTEN 0x70
50#define EXYNOS_TMU_REG_INTSTAT 0x74
51#define EXYNOS_TMU_REG_INTCLEAR 0x78
52
53#define EXYNOS_TMU_TRIM_TEMP_MASK 0xff
54#define EXYNOS_TMU_GAIN_SHIFT 8
55#define EXYNOS_TMU_REF_VOLTAGE_SHIFT 24
56#define EXYNOS_TMU_CORE_ON 3
57#define EXYNOS_TMU_CORE_OFF 2
58#define EXYNOS_TMU_DEF_CODE_TO_TEMP_OFFSET 50
59
60/* Exynos4210 specific registers */
61#define EXYNOS4210_TMU_REG_THRESHOLD_TEMP 0x44
62#define EXYNOS4210_TMU_REG_TRIG_LEVEL0 0x50
63#define EXYNOS4210_TMU_REG_TRIG_LEVEL1 0x54
64#define EXYNOS4210_TMU_REG_TRIG_LEVEL2 0x58
65#define EXYNOS4210_TMU_REG_TRIG_LEVEL3 0x5C
66#define EXYNOS4210_TMU_REG_PAST_TEMP0 0x60
67#define EXYNOS4210_TMU_REG_PAST_TEMP1 0x64
68#define EXYNOS4210_TMU_REG_PAST_TEMP2 0x68
69#define EXYNOS4210_TMU_REG_PAST_TEMP3 0x6C
70
71#define EXYNOS4210_TMU_TRIG_LEVEL0_MASK 0x1
72#define EXYNOS4210_TMU_TRIG_LEVEL1_MASK 0x10
73#define EXYNOS4210_TMU_TRIG_LEVEL2_MASK 0x100
74#define EXYNOS4210_TMU_TRIG_LEVEL3_MASK 0x1000
75#define EXYNOS4210_TMU_INTCLEAR_VAL 0x1111
76
77/* Exynos5250 and Exynos4412 specific registers */
78#define EXYNOS_TMU_TRIMINFO_CON 0x14
79#define EXYNOS_THD_TEMP_RISE 0x50
80#define EXYNOS_THD_TEMP_FALL 0x54
81#define EXYNOS_EMUL_CON 0x80
82
83#define EXYNOS_TRIMINFO_RELOAD 0x1
84#define EXYNOS_TMU_CLEAR_RISE_INT 0x111
85#define EXYNOS_TMU_CLEAR_FALL_INT (0x111 << 16)
86#define EXYNOS_MUX_ADDR_VALUE 6
87#define EXYNOS_MUX_ADDR_SHIFT 20
88#define EXYNOS_TMU_TRIP_MODE_SHIFT 13
89
90#define EFUSE_MIN_VALUE 40
91#define EFUSE_MAX_VALUE 100
92
93/* In-kernel thermal framework related macros & definations */
94#define SENSOR_NAME_LEN 16
95#define MAX_TRIP_COUNT 8
96#define MAX_COOLING_DEVICE 4
97
98#define ACTIVE_INTERVAL 500
99#define IDLE_INTERVAL 10000
100#define MCELSIUS 1000
101
102/* CPU Zone information */
103#define PANIC_ZONE 4
104#define WARN_ZONE 3
105#define MONITOR_ZONE 2
106#define SAFE_ZONE 1
107
108#define GET_ZONE(trip) (trip + 2)
109#define GET_TRIP(zone) (zone - 2)
110
111#define EXYNOS_ZONE_COUNT 3
112
113struct exynos_tmu_data {
114 struct exynos_tmu_platform_data *pdata;
115 struct resource *mem;
116 void __iomem *base;
117 int irq;
118 enum soc_type soc;
119 struct work_struct irq_work;
120 struct mutex lock;
121 struct clk *clk;
122 u8 temp_error1, temp_error2;
123};
124
125struct thermal_trip_point_conf {
126 int trip_val[MAX_TRIP_COUNT];
127 int trip_count;
128};
129
130struct thermal_cooling_conf {
131 struct freq_clip_table freq_data[MAX_TRIP_COUNT];
132 int freq_clip_count;
133};
134
135struct thermal_sensor_conf {
136 char name[SENSOR_NAME_LEN];
137 int (*read_temperature)(void *data);
138 struct thermal_trip_point_conf trip_data;
139 struct thermal_cooling_conf cooling_data;
140 void *private_data;
141};
142
143struct exynos_thermal_zone {
144 enum thermal_device_mode mode;
145 struct thermal_zone_device *therm_dev;
146 struct thermal_cooling_device *cool_dev[MAX_COOLING_DEVICE];
147 unsigned int cool_dev_size;
148 struct platform_device *exynos4_dev;
149 struct thermal_sensor_conf *sensor_conf;
150 bool bind;
151};
152
153static struct exynos_thermal_zone *th_zone;
154static void exynos_unregister_thermal(void);
155static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf);
156
157/* Get mode callback functions for thermal zone */
158static int exynos_get_mode(struct thermal_zone_device *thermal,
159 enum thermal_device_mode *mode)
160{
161 if (th_zone)
162 *mode = th_zone->mode;
163 return 0;
164}
165
166/* Set mode callback functions for thermal zone */
167static int exynos_set_mode(struct thermal_zone_device *thermal,
168 enum thermal_device_mode mode)
169{
170 if (!th_zone->therm_dev) {
171 pr_notice("thermal zone not registered\n");
172 return 0;
173 }
174
175 mutex_lock(&th_zone->therm_dev->lock);
176
177 if (mode == THERMAL_DEVICE_ENABLED)
178 th_zone->therm_dev->polling_delay = IDLE_INTERVAL;
179 else
180 th_zone->therm_dev->polling_delay = 0;
181
182 mutex_unlock(&th_zone->therm_dev->lock);
183
184 th_zone->mode = mode;
185 thermal_zone_device_update(th_zone->therm_dev);
186 pr_info("thermal polling set for duration=%d msec\n",
187 th_zone->therm_dev->polling_delay);
188 return 0;
189}
190
191
192/* Get trip type callback functions for thermal zone */
193static int exynos_get_trip_type(struct thermal_zone_device *thermal, int trip,
194 enum thermal_trip_type *type)
195{
196 switch (GET_ZONE(trip)) {
197 case MONITOR_ZONE:
198 case WARN_ZONE:
199 *type = THERMAL_TRIP_ACTIVE;
200 break;
201 case PANIC_ZONE:
202 *type = THERMAL_TRIP_CRITICAL;
203 break;
204 default:
205 return -EINVAL;
206 }
207 return 0;
208}
209
210/* Get trip temperature callback functions for thermal zone */
211static int exynos_get_trip_temp(struct thermal_zone_device *thermal, int trip,
212 unsigned long *temp)
213{
214 if (trip < GET_TRIP(MONITOR_ZONE) || trip > GET_TRIP(PANIC_ZONE))
215 return -EINVAL;
216
217 *temp = th_zone->sensor_conf->trip_data.trip_val[trip];
218 /* convert the temperature into millicelsius */
219 *temp = *temp * MCELSIUS;
220
221 return 0;
222}
223
224/* Get critical temperature callback functions for thermal zone */
225static int exynos_get_crit_temp(struct thermal_zone_device *thermal,
226 unsigned long *temp)
227{
228 int ret;
229 /* Panic zone */
230 ret = exynos_get_trip_temp(thermal, GET_TRIP(PANIC_ZONE), temp);
231 return ret;
232}
233
234static int exynos_get_frequency_level(unsigned int cpu, unsigned int freq)
235{
236 int i = 0, ret = -EINVAL;
237 struct cpufreq_frequency_table *table = NULL;
238#ifdef CONFIG_CPU_FREQ
239 table = cpufreq_frequency_get_table(cpu);
240#endif
241 if (!table)
242 return ret;
243
244 while (table[i].frequency != CPUFREQ_TABLE_END) {
245 if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
246 continue;
247 if (table[i].frequency == freq)
248 return i;
249 i++;
250 }
251 return ret;
252}
253
254/* Bind callback functions for thermal zone */
255static int exynos_bind(struct thermal_zone_device *thermal,
256 struct thermal_cooling_device *cdev)
257{
258 int ret = 0, i, tab_size, level;
259 struct freq_clip_table *tab_ptr, *clip_data;
260 struct thermal_sensor_conf *data = th_zone->sensor_conf;
261
262 tab_ptr = (struct freq_clip_table *)data->cooling_data.freq_data;
263 tab_size = data->cooling_data.freq_clip_count;
264
265 if (tab_ptr == NULL || tab_size == 0)
266 return -EINVAL;
267
268 /* find the cooling device registered*/
269 for (i = 0; i < th_zone->cool_dev_size; i++)
270 if (cdev == th_zone->cool_dev[i])
271 break;
272
273 /* No matching cooling device */
274 if (i == th_zone->cool_dev_size)
275 return 0;
276
277 /* Bind the thermal zone to the cpufreq cooling device */
278 for (i = 0; i < tab_size; i++) {
279 clip_data = (struct freq_clip_table *)&(tab_ptr[i]);
280 level = exynos_get_frequency_level(0, clip_data->freq_clip_max);
281 if (level < 0)
282 return 0;
283 switch (GET_ZONE(i)) {
284 case MONITOR_ZONE:
285 case WARN_ZONE:
286 if (thermal_zone_bind_cooling_device(thermal, i, cdev,
287 level, level)) {
288 pr_err("error binding cdev inst %d\n", i);
289 ret = -EINVAL;
290 }
291 th_zone->bind = true;
292 break;
293 default:
294 ret = -EINVAL;
295 }
296 }
297
298 return ret;
299}
300
301/* Unbind callback functions for thermal zone */
302static int exynos_unbind(struct thermal_zone_device *thermal,
303 struct thermal_cooling_device *cdev)
304{
305 int ret = 0, i, tab_size;
306 struct thermal_sensor_conf *data = th_zone->sensor_conf;
307
308 if (th_zone->bind == false)
309 return 0;
310
311 tab_size = data->cooling_data.freq_clip_count;
312
313 if (tab_size == 0)
314 return -EINVAL;
315
316 /* find the cooling device registered*/
317 for (i = 0; i < th_zone->cool_dev_size; i++)
318 if (cdev == th_zone->cool_dev[i])
319 break;
320
321 /* No matching cooling device */
322 if (i == th_zone->cool_dev_size)
323 return 0;
324
325 /* Bind the thermal zone to the cpufreq cooling device */
326 for (i = 0; i < tab_size; i++) {
327 switch (GET_ZONE(i)) {
328 case MONITOR_ZONE:
329 case WARN_ZONE:
330 if (thermal_zone_unbind_cooling_device(thermal, i,
331 cdev)) {
332 pr_err("error unbinding cdev inst=%d\n", i);
333 ret = -EINVAL;
334 }
335 th_zone->bind = false;
336 break;
337 default:
338 ret = -EINVAL;
339 }
340 }
341 return ret;
342}
343
344/* Get temperature callback functions for thermal zone */
345static int exynos_get_temp(struct thermal_zone_device *thermal,
346 unsigned long *temp)
347{
348 void *data;
349
350 if (!th_zone->sensor_conf) {
351 pr_info("Temperature sensor not initialised\n");
352 return -EINVAL;
353 }
354 data = th_zone->sensor_conf->private_data;
355 *temp = th_zone->sensor_conf->read_temperature(data);
356 /* convert the temperature into millicelsius */
357 *temp = *temp * MCELSIUS;
358 return 0;
359}
360
361/* Get the temperature trend */
362static int exynos_get_trend(struct thermal_zone_device *thermal,
363 int trip, enum thermal_trend *trend)
364{
365 if (thermal->temperature >= trip)
366 *trend = THERMAL_TREND_RAISING;
367 else
368 *trend = THERMAL_TREND_DROPPING;
369
370 return 0;
371}
372/* Operation callback functions for thermal zone */
373static struct thermal_zone_device_ops const exynos_dev_ops = {
374 .bind = exynos_bind,
375 .unbind = exynos_unbind,
376 .get_temp = exynos_get_temp,
377 .get_trend = exynos_get_trend,
378 .get_mode = exynos_get_mode,
379 .set_mode = exynos_set_mode,
380 .get_trip_type = exynos_get_trip_type,
381 .get_trip_temp = exynos_get_trip_temp,
382 .get_crit_temp = exynos_get_crit_temp,
383};
384
/*
 * This function may be called from interrupt based temperature sensor
 * when threshold is changed.
 */
static void exynos_report_trigger(void)
{
	unsigned int i;
	char data[10];
	char *envp[] = { data, NULL };

	/* Nothing to report until the zone has been registered */
	if (!th_zone || !th_zone->therm_dev)
		return;
	/* Late binding: attach cooling devices if exynos_bind() has not run */
	if (th_zone->bind == false) {
		for (i = 0; i < th_zone->cool_dev_size; i++) {
			if (!th_zone->cool_dev[i])
				continue;
			exynos_bind(th_zone->therm_dev,
					th_zone->cool_dev[i]);
		}
	}

	thermal_zone_device_update(th_zone->therm_dev);

	mutex_lock(&th_zone->therm_dev->lock);
	/* Find the level for which trip happened */
	for (i = 0; i < th_zone->sensor_conf->trip_data.trip_count; i++) {
		if (th_zone->therm_dev->last_temperature <
			th_zone->sensor_conf->trip_data.trip_val[i] * MCELSIUS)
			break;
	}

	/* Poll faster while any trip level (i > 0) is active */
	if (th_zone->mode == THERMAL_DEVICE_ENABLED) {
		if (i > 0)
			th_zone->therm_dev->polling_delay = ACTIVE_INTERVAL;
		else
			th_zone->therm_dev->polling_delay = IDLE_INTERVAL;
	}

	/* Tell user space which trip level fired, via a KOBJ_CHANGE uevent */
	snprintf(data, sizeof(data), "%u", i);
	kobject_uevent_env(&th_zone->therm_dev->device.kobj, KOBJ_CHANGE, envp);
	mutex_unlock(&th_zone->therm_dev->lock);
}
427
/*
 * Register with the in-kernel thermal management.
 * Allocates the driver-wide th_zone singleton, registers a cpufreq
 * cooling device for CPU0 and then the thermal zone itself.
 * Returns 0 on success or a negative errno.
 */
static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
{
	int ret;
	struct cpumask mask_val;

	if (!sensor_conf || !sensor_conf->read_temperature) {
		pr_err("Temperature sensor not initialised\n");
		return -EINVAL;
	}

	/* th_zone is freed by exynos_unregister_thermal() */
	th_zone = kzalloc(sizeof(struct exynos_thermal_zone), GFP_KERNEL);
	if (!th_zone)
		return -ENOMEM;

	th_zone->sensor_conf = sensor_conf;
	/* Cooling is done by throttling CPU0's cpufreq policy */
	cpumask_set_cpu(0, &mask_val);
	th_zone->cool_dev[0] = cpufreq_cooling_register(&mask_val);
	if (IS_ERR(th_zone->cool_dev[0])) {
		pr_err("Failed to register cpufreq cooling device\n");
		ret = -EINVAL;
		goto err_unregister;
	}
	th_zone->cool_dev_size++;

	th_zone->therm_dev = thermal_zone_device_register(sensor_conf->name,
			EXYNOS_ZONE_COUNT, 0, NULL, &exynos_dev_ops, 0,
			IDLE_INTERVAL);

	if (IS_ERR(th_zone->therm_dev)) {
		pr_err("Failed to register thermal zone device\n");
		ret = -EINVAL;
		goto err_unregister;
	}
	th_zone->mode = THERMAL_DEVICE_ENABLED;

	pr_info("Exynos: Kernel Thermal management registered\n");

	return 0;

err_unregister:
	/* Releases th_zone and any cooling device registered so far */
	exynos_unregister_thermal();
	return ret;
}
472
473/* Un-Register with the in-kernel thermal management */
474static void exynos_unregister_thermal(void)
475{
476 int i;
477
478 if (!th_zone)
479 return;
480
481 if (th_zone->therm_dev)
482 thermal_zone_device_unregister(th_zone->therm_dev);
483
484 for (i = 0; i < th_zone->cool_dev_size; i++) {
485 if (th_zone->cool_dev[i])
486 cpufreq_cooling_unregister(th_zone->cool_dev[i]);
487 }
488
489 kfree(th_zone);
490 pr_info("Exynos: Kernel Thermal management unregistered\n");
491}
492
493/*
494 * TMU treats temperature as a mapped temperature code.
495 * The temperature is converted differently depending on the calibration type.
496 */
497static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
498{
499 struct exynos_tmu_platform_data *pdata = data->pdata;
500 int temp_code;
501
502 if (data->soc == SOC_ARCH_EXYNOS4210)
503 /* temp should range between 25 and 125 */
504 if (temp < 25 || temp > 125) {
505 temp_code = -EINVAL;
506 goto out;
507 }
508
509 switch (pdata->cal_type) {
510 case TYPE_TWO_POINT_TRIMMING:
511 temp_code = (temp - 25) *
512 (data->temp_error2 - data->temp_error1) /
513 (85 - 25) + data->temp_error1;
514 break;
515 case TYPE_ONE_POINT_TRIMMING:
516 temp_code = temp + data->temp_error1 - 25;
517 break;
518 default:
519 temp_code = temp + EXYNOS_TMU_DEF_CODE_TO_TEMP_OFFSET;
520 break;
521 }
522out:
523 return temp_code;
524}
525
526/*
527 * Calculate a temperature value from a temperature code.
528 * The unit of the temperature is degree Celsius.
529 */
530static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
531{
532 struct exynos_tmu_platform_data *pdata = data->pdata;
533 int temp;
534
535 if (data->soc == SOC_ARCH_EXYNOS4210)
536 /* temp_code should range between 75 and 175 */
537 if (temp_code < 75 || temp_code > 175) {
538 temp = -ENODATA;
539 goto out;
540 }
541
542 switch (pdata->cal_type) {
543 case TYPE_TWO_POINT_TRIMMING:
544 temp = (temp_code - data->temp_error1) * (85 - 25) /
545 (data->temp_error2 - data->temp_error1) + 25;
546 break;
547 case TYPE_ONE_POINT_TRIMMING:
548 temp = temp_code - data->temp_error1 + 25;
549 break;
550 default:
551 temp = temp_code - EXYNOS_TMU_DEF_CODE_TO_TEMP_OFFSET;
552 break;
553 }
554out:
555 return temp;
556}
557
/*
 * One-time TMU hardware setup: read the fused trim (calibration) values
 * and program the per-SoC trigger thresholds.
 * Returns 0 on success, -EBUSY if the TMU status register reads zero,
 * or a negative errno from temp_to_code().
 */
static int exynos_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	unsigned int status, trim_info, rising_threshold;
	int ret = 0, threshold_code;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	/* A zero status means the TMU is not ready for configuration */
	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	if (data->soc == SOC_ARCH_EXYNOS) {
		/* Reload the fused trim values before reading TRIMINFO */
		__raw_writel(EXYNOS_TRIMINFO_RELOAD,
				data->base + EXYNOS_TMU_TRIMINFO_CON);
	}
	/* Save trimming info in order to perform calibration */
	trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
	data->temp_error1 = trim_info & EXYNOS_TMU_TRIM_TEMP_MASK;
	data->temp_error2 = ((trim_info >> 8) & EXYNOS_TMU_TRIM_TEMP_MASK);

	/* Fall back to the platform-supplied efuse value if trim looks bad */
	if ((EFUSE_MIN_VALUE > data->temp_error1) ||
		(data->temp_error1 > EFUSE_MAX_VALUE) ||
		(data->temp_error2 != 0))
		data->temp_error1 = pdata->efuse_value;

	if (data->soc == SOC_ARCH_EXYNOS4210) {
		/* Write temperature code for threshold */
		threshold_code = temp_to_code(data, pdata->threshold);
		if (threshold_code < 0) {
			ret = threshold_code;
			goto out;
		}
		writeb(threshold_code,
			data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);

		/* 4210 takes four single-byte trigger levels */
		writeb(pdata->trigger_levels[0],
			data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0);
		writeb(pdata->trigger_levels[1],
			data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL1);
		writeb(pdata->trigger_levels[2],
			data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL2);
		writeb(pdata->trigger_levels[3],
			data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL3);

		writel(EXYNOS4210_TMU_INTCLEAR_VAL,
			data->base + EXYNOS_TMU_REG_INTCLEAR);
	} else if (data->soc == SOC_ARCH_EXYNOS) {
		/*
		 * Exynos5-style SoCs pack three rising-threshold codes
		 * into one register, 8 bits per level.
		 */
		threshold_code = temp_to_code(data, pdata->trigger_levels[0]);
		if (threshold_code < 0) {
			ret = threshold_code;
			goto out;
		}
		rising_threshold = threshold_code;
		threshold_code = temp_to_code(data, pdata->trigger_levels[1]);
		if (threshold_code < 0) {
			ret = threshold_code;
			goto out;
		}
		rising_threshold |= (threshold_code << 8);
		threshold_code = temp_to_code(data, pdata->trigger_levels[2]);
		if (threshold_code < 0) {
			ret = threshold_code;
			goto out;
		}
		rising_threshold |= (threshold_code << 16);

		writel(rising_threshold,
				data->base + EXYNOS_THD_TEMP_RISE);
		/* No falling thresholds are configured */
		writel(0, data->base + EXYNOS_THD_TEMP_FALL);

		writel(EXYNOS_TMU_CLEAR_RISE_INT|EXYNOS_TMU_CLEAR_FALL_INT,
			data->base + EXYNOS_TMU_REG_INTCLEAR);
	}
out:
	clk_disable(data->clk);
	mutex_unlock(&data->lock);

	return ret;
}
643
/*
 * Enable or disable the TMU core and its trigger-level interrupts.
 * @on: true to start measurement, false to stop it.
 */
static void exynos_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	unsigned int con, interrupt_en;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	/* Analog front-end settings common to all supported SoCs */
	con = pdata->reference_voltage << EXYNOS_TMU_REF_VOLTAGE_SHIFT |
		pdata->gain << EXYNOS_TMU_GAIN_SHIFT;

	if (data->soc == SOC_ARCH_EXYNOS) {
		con |= pdata->noise_cancel_mode << EXYNOS_TMU_TRIP_MODE_SHIFT;
		con |= (EXYNOS_MUX_ADDR_VALUE << EXYNOS_MUX_ADDR_SHIFT);
	}

	if (on) {
		con |= EXYNOS_TMU_CORE_ON;
		/* One enable bit per trigger level, 4 bits apart */
		interrupt_en = pdata->trigger_level3_en << 12 |
			pdata->trigger_level2_en << 8 |
			pdata->trigger_level1_en << 4 |
			pdata->trigger_level0_en;
	} else {
		con |= EXYNOS_TMU_CORE_OFF;
		interrupt_en = 0; /* Disable all interrupts */
	}
	/* Program interrupt enables before flipping the core on/off bit */
	writel(interrupt_en, data->base + EXYNOS_TMU_REG_INTEN);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
}
677
678static int exynos_tmu_read(struct exynos_tmu_data *data)
679{
680 u8 temp_code;
681 int temp;
682
683 mutex_lock(&data->lock);
684 clk_enable(data->clk);
685
686 temp_code = readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);
687 temp = code_to_temp(data, temp_code);
688
689 clk_disable(data->clk);
690 mutex_unlock(&data->lock);
691
692 return temp;
693}
694
695static void exynos_tmu_work(struct work_struct *work)
696{
697 struct exynos_tmu_data *data = container_of(work,
698 struct exynos_tmu_data, irq_work);
699
700 mutex_lock(&data->lock);
701 clk_enable(data->clk);
702
703
704 if (data->soc == SOC_ARCH_EXYNOS)
705 writel(EXYNOS_TMU_CLEAR_RISE_INT,
706 data->base + EXYNOS_TMU_REG_INTCLEAR);
707 else
708 writel(EXYNOS4210_TMU_INTCLEAR_VAL,
709 data->base + EXYNOS_TMU_REG_INTCLEAR);
710
711 clk_disable(data->clk);
712 mutex_unlock(&data->lock);
713 exynos_report_trigger();
714 enable_irq(data->irq);
715}
716
717static irqreturn_t exynos_tmu_irq(int irq, void *id)
718{
719 struct exynos_tmu_data *data = id;
720
721 disable_irq_nosync(irq);
722 schedule_work(&data->irq_work);
723
724 return IRQ_HANDLED;
725}
/* Sensor description handed to exynos_register_thermal() */
static struct thermal_sensor_conf exynos_sensor_conf = {
	.name = "exynos-therm",
	/* Cast adapts exynos_tmu_read()'s typed argument to void * */
	.read_temperature = (int (*)(void *))exynos_tmu_read,
};
730
#if defined(CONFIG_CPU_EXYNOS4210)
/*
 * Default TMU configuration for Exynos4210.  trigger_levels[] are
 * offsets added to .threshold when trip temperatures are built in
 * exynos_tmu_probe(); freq_tab entries are cpufreq clip limits.
 */
static struct exynos_tmu_platform_data const exynos4210_default_tmu_data = {
	.threshold = 80,
	.trigger_levels[0] = 5,
	.trigger_levels[1] = 20,
	.trigger_levels[2] = 30,
	.trigger_level0_en = 1,
	.trigger_level1_en = 1,
	.trigger_level2_en = 1,
	.trigger_level3_en = 0,
	.gain = 15,
	.reference_voltage = 7,
	.cal_type = TYPE_ONE_POINT_TRIMMING,
	.freq_tab[0] = {
		.freq_clip_max = 800 * 1000,
		.temp_level = 85,
	},
	.freq_tab[1] = {
		.freq_clip_max = 200 * 1000,
		.temp_level = 100,
	},
	.freq_tab_count = 2,
	.type = SOC_ARCH_EXYNOS4210,
};
#define EXYNOS4210_TMU_DRV_DATA (&exynos4210_default_tmu_data)
#else
#define EXYNOS4210_TMU_DRV_DATA (NULL)
#endif
759
#if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412)
/*
 * Default TMU configuration for Exynos5250/4412.  No .threshold is set
 * (defaults to 0), so trigger_levels[] act as absolute trip values when
 * exynos_tmu_probe() builds the trip table.
 */
static struct exynos_tmu_platform_data const exynos_default_tmu_data = {
	.trigger_levels[0] = 85,
	.trigger_levels[1] = 103,
	.trigger_levels[2] = 110,
	.trigger_level0_en = 1,
	.trigger_level1_en = 1,
	.trigger_level2_en = 1,
	.trigger_level3_en = 0,
	.gain = 8,
	.reference_voltage = 16,
	.noise_cancel_mode = 4,
	.cal_type = TYPE_ONE_POINT_TRIMMING,
	.efuse_value = 55,	/* fallback when fused trim is invalid */
	.freq_tab[0] = {
		.freq_clip_max = 800 * 1000,
		.temp_level = 85,
	},
	.freq_tab[1] = {
		.freq_clip_max = 200 * 1000,
		.temp_level = 103,
	},
	.freq_tab_count = 2,
	.type = SOC_ARCH_EXYNOS,
};
#define EXYNOS_TMU_DRV_DATA (&exynos_default_tmu_data)
#else
#define EXYNOS_TMU_DRV_DATA (NULL)
#endif
789
#ifdef CONFIG_OF
/* Device-tree match table; .data carries the per-SoC default pdata */
static const struct of_device_id exynos_tmu_match[] = {
	{
		.compatible = "samsung,exynos4210-tmu",
		.data = (void *)EXYNOS4210_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5250-tmu",
		.data = (void *)EXYNOS_TMU_DRV_DATA,
	},
	{},
};
MODULE_DEVICE_TABLE(of, exynos_tmu_match);
#else
#define exynos_tmu_match NULL
#endif
806
/* Legacy (non-DT) platform device ids; driver_data mirrors the OF table */
static struct platform_device_id exynos_tmu_driver_ids[] = {
	{
		.name = "exynos4210-tmu",
		.driver_data = (kernel_ulong_t)EXYNOS4210_TMU_DRV_DATA,
	},
	{
		.name = "exynos5250-tmu",
		.driver_data = (kernel_ulong_t)EXYNOS_TMU_DRV_DATA,
	},
	{ },
};
MODULE_DEVICE_TABLE(platform, exynos_tmu_driver_ids);
819
/*
 * Pick default platform data for this device: from the OF match table
 * when probed via device tree, otherwise from the platform id table.
 * May return NULL if no OF match is found.
 */
static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
			struct platform_device *pdev)
{
#ifdef CONFIG_OF
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(exynos_tmu_match, pdev->dev.of_node);
		if (!match)
			return NULL;
		return (struct exynos_tmu_platform_data *) match->data;
	}
#endif
	return (struct exynos_tmu_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}
/*
 * Probe: map resources, request the IRQ, initialize the TMU hardware
 * and register the sensor with the in-kernel thermal framework.
 */
static int __devinit exynos_tmu_probe(struct platform_device *pdev)
{
	struct exynos_tmu_data *data;
	struct exynos_tmu_platform_data *pdata = pdev->dev.platform_data;
	int ret, i;

	/* Board-file pdata wins; otherwise use OF / id-table defaults */
	if (!pdata)
		pdata = exynos_get_driver_data(pdev);

	if (!pdata) {
		dev_err(&pdev->dev, "No platform init data supplied.\n");
		return -ENODEV;
	}
	/* devm_* resources are released automatically on failure/remove */
	data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
					GFP_KERNEL);
	if (!data) {
		dev_err(&pdev->dev, "Failed to allocate driver structure\n");
		return -ENOMEM;
	}

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0) {
		dev_err(&pdev->dev, "Failed to get platform irq\n");
		return data->irq;
	}

	INIT_WORK(&data->irq_work, exynos_tmu_work);

	data->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!data->mem) {
		dev_err(&pdev->dev, "Failed to get platform resource\n");
		return -ENOENT;
	}

	data->base = devm_request_and_ioremap(&pdev->dev, data->mem);
	if (!data->base) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -ENODEV;
	}

	ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
		IRQF_TRIGGER_RISING, "exynos-tmu", data);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
		return ret;
	}

	/* clk_get() is not managed: later failures must clk_put() */
	data->clk = clk_get(NULL, "tmu_apbif");
	if (IS_ERR(data->clk)) {
		dev_err(&pdev->dev, "Failed to get clock\n");
		return PTR_ERR(data->clk);
	}

	if (pdata->type == SOC_ARCH_EXYNOS ||
				pdata->type == SOC_ARCH_EXYNOS4210)
		data->soc = pdata->type;
	else {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Platform not supported\n");
		goto err_clk;
	}

	data->pdata = pdata;
	platform_set_drvdata(pdev, data);
	mutex_init(&data->lock);

	ret = exynos_tmu_initialize(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize TMU\n");
		goto err_clk;
	}

	exynos_tmu_control(pdev, true);

	/* Register the sensor with thermal management interface */
	(&exynos_sensor_conf)->private_data = data;
	/* Trip count equals the number of enabled trigger levels */
	exynos_sensor_conf.trip_data.trip_count = pdata->trigger_level0_en +
			pdata->trigger_level1_en + pdata->trigger_level2_en +
			pdata->trigger_level3_en;

	/* Trip temperatures are the base threshold plus each level offset */
	for (i = 0; i < exynos_sensor_conf.trip_data.trip_count; i++)
		exynos_sensor_conf.trip_data.trip_val[i] =
			pdata->threshold + pdata->trigger_levels[i];

	/* Copy the cpufreq throttling table for the cooling device */
	exynos_sensor_conf.cooling_data.freq_clip_count =
						pdata->freq_tab_count;
	for (i = 0; i < pdata->freq_tab_count; i++) {
		exynos_sensor_conf.cooling_data.freq_data[i].freq_clip_max =
					pdata->freq_tab[i].freq_clip_max;
		exynos_sensor_conf.cooling_data.freq_data[i].temp_level =
					pdata->freq_tab[i].temp_level;
	}

	ret = exynos_register_thermal(&exynos_sensor_conf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register thermal interface\n");
		goto err_clk;
	}
	return 0;
err_clk:
	platform_set_drvdata(pdev, NULL);
	clk_put(data->clk);
	return ret;
}
939
940static int __devexit exynos_tmu_remove(struct platform_device *pdev)
941{
942 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
943
944 exynos_tmu_control(pdev, false);
945
946 exynos_unregister_thermal();
947
948 clk_put(data->clk);
949
950 platform_set_drvdata(pdev, NULL);
951
952 return 0;
953}
954
955#ifdef CONFIG_PM_SLEEP
956static int exynos_tmu_suspend(struct device *dev)
957{
958 exynos_tmu_control(to_platform_device(dev), false);
959
960 return 0;
961}
962
/* System resume: reprogram thresholds and restart the TMU core */
static int exynos_tmu_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	exynos_tmu_initialize(pdev);
	exynos_tmu_control(pdev, true);

	return 0;
}
972
973static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
974 exynos_tmu_suspend, exynos_tmu_resume);
975#define EXYNOS_TMU_PM (&exynos_tmu_pm)
976#else
977#define EXYNOS_TMU_PM NULL
978#endif
979
/* Platform driver glue; supports both DT and legacy id-table probing */
static struct platform_driver exynos_tmu_driver = {
	.driver = {
		.name   = "exynos-tmu",
		.owner  = THIS_MODULE,
		.pm     = EXYNOS_TMU_PM,	/* NULL without CONFIG_PM_SLEEP */
		.of_match_table = exynos_tmu_match,
	},
	.probe = exynos_tmu_probe,
	.remove	= __devexit_p(exynos_tmu_remove),
	.id_table = exynos_tmu_driver_ids,
};
991
992module_platform_driver(exynos_tmu_driver);
993
994MODULE_DESCRIPTION("EXYNOS TMU Driver");
995MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
996MODULE_LICENSE("GPL");
997MODULE_ALIAS("platform:exynos-tmu");
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
new file mode 100644
index 000000000000..f7a1b574a304
--- /dev/null
+++ b/drivers/thermal/rcar_thermal.c
@@ -0,0 +1,260 @@
1/*
2 * R-Car THS/TSC thermal sensor driver
3 *
4 * Copyright (C) 2012 Renesas Solutions Corp.
5 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
19 */
20#include <linux/delay.h>
21#include <linux/err.h>
22#include <linux/io.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
25#include <linux/slab.h>
26#include <linux/spinlock.h>
27#include <linux/thermal.h>
28
29#define THSCR 0x2c
30#define THSSR 0x30
31
32/* THSCR */
33#define CPTAP 0xf
34
35/* THSSR */
36#define CTEMP 0x3f
37
38
/* Per-device state for the R-Car THS/TSC sensor */
struct rcar_thermal_priv {
	void __iomem *base;	/* mapped THS/TSC register window */
	struct device *dev;	/* backing platform device (for logging) */
	spinlock_t lock;	/* serializes register access */
	u32 comp;		/* current THS comparator offset (1..12) */
};
45
46/*
47 * basic functions
48 */
49static u32 rcar_thermal_read(struct rcar_thermal_priv *priv, u32 reg)
50{
51 unsigned long flags;
52 u32 ret;
53
54 spin_lock_irqsave(&priv->lock, flags);
55
56 ret = ioread32(priv->base + reg);
57
58 spin_unlock_irqrestore(&priv->lock, flags);
59
60 return ret;
61}
62
#if 0 /* no user at this point */
/* Locked 32-bit register write; compiled out until a caller exists */
static void rcar_thermal_write(struct rcar_thermal_priv *priv,
			       u32 reg, u32 data)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	iowrite32(data, priv->base + reg);

	spin_unlock_irqrestore(&priv->lock, flags);
}
#endif
76
77static void rcar_thermal_bset(struct rcar_thermal_priv *priv, u32 reg,
78 u32 mask, u32 data)
79{
80 unsigned long flags;
81 u32 val;
82
83 spin_lock_irqsave(&priv->lock, flags);
84
85 val = ioread32(priv->base + reg);
86 val &= ~mask;
87 val |= (data & mask);
88 iowrite32(val, priv->base + reg);
89
90 spin_unlock_irqrestore(&priv->lock, flags);
91}
92
/*
 * zone device functions
 */
/*
 * Read the current temperature (degrees C).
 * The sensor reports relative to the selected comparator offset, so
 * walk priv->comp up or down until the reading falls strictly inside
 * the 15-degree window that offset selects (or repeats at a boundary).
 */
static int rcar_thermal_get_temp(struct thermal_zone_device *zone,
				 unsigned long *temp)
{
	struct rcar_thermal_priv *priv = zone->devdata;
	int val, min, max, tmp;

	tmp = -200; /* default */
	while (1) {
		/* comp must stay within the hardware's 1..12 range */
		if (priv->comp < 1 || priv->comp > 12) {
			dev_err(priv->dev,
				"THSSR invalid data (%d)\n", priv->comp);
			priv->comp = 4; /* for next thermal */
			return -EINVAL;
		}

		/*
		 * THS comparator offset and the reference temperature
		 *
		 * Comparator	| reference	| Temperature field
		 * offset	| temperature	| measurement
		 *		| (degrees C)	| (degrees C)
		 * -------------+---------------+-------------------
		 * 1		|  -45		|  -45 to  -30
		 * 2		|  -30		|  -30 to  -15
		 * 3		|  -15		|  -15 to    0
		 * 4		|    0		|    0 to  +15
		 * 5		|  +15		|  +15 to  +30
		 * 6		|  +30		|  +30 to  +45
		 * 7		|  +45		|  +45 to  +60
		 * 8		|  +60		|  +60 to  +75
		 * 9		|  +75		|  +75 to  +90
		 * 10		|  +90		|  +90 to +105
		 * 11		| +105		| +105 to +120
		 * 12		| +120		| +120 to +135
		 */

		/* calculate thermal limitation */
		min = (priv->comp * 15) - 60;
		max = min + 15;

		/*
		 * we need to wait 300us after changing comparator offset
		 * to get stable temperature.
		 * see "Usage Notes" on datasheet
		 */
		rcar_thermal_bset(priv, THSCR, CPTAP, priv->comp);
		udelay(300);

		/* calculate current temperature */
		val = rcar_thermal_read(priv, THSSR) & CTEMP;
		val = (val * 5) - 65;

		dev_dbg(priv->dev, "comp/min/max/val = %d/%d/%d/%d\n",
			priv->comp, min, max, val);

		/*
		 * If val is same as min/max, then,
		 * it should try again on next comparator.
		 * But the val might be correct temperature.
		 * Keep it on "tmp" and compare with next val.
		 */
		if (tmp == val)
			break;

		if (val <= min) {
			tmp = min;
			priv->comp--; /* try again */
		} else if (val >= max) {
			tmp = max;
			priv->comp++; /* try again */
		} else {
			tmp = val;
			break;
		}
	}

	*temp = tmp;
	return 0;
}
175
/* Only get_temp is implemented; no trip points are exposed yet */
static struct thermal_zone_device_ops rcar_thermal_zone_ops = {
	.get_temp = rcar_thermal_get_temp,
};
179
180/*
181 * platform functions
182 */
183static int rcar_thermal_probe(struct platform_device *pdev)
184{
185 struct thermal_zone_device *zone;
186 struct rcar_thermal_priv *priv;
187 struct resource *res;
188 int ret;
189
190 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
191 if (!res) {
192 dev_err(&pdev->dev, "Could not get platform resource\n");
193 return -ENODEV;
194 }
195
196 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
197 if (!priv) {
198 dev_err(&pdev->dev, "Could not allocate priv\n");
199 return -ENOMEM;
200 }
201
202 priv->comp = 4; /* basic setup */
203 priv->dev = &pdev->dev;
204 spin_lock_init(&priv->lock);
205 priv->base = devm_ioremap_nocache(&pdev->dev,
206 res->start, resource_size(res));
207 if (!priv->base) {
208 dev_err(&pdev->dev, "Unable to ioremap thermal register\n");
209 ret = -ENOMEM;
210 goto error_free_priv;
211 }
212
213 zone = thermal_zone_device_register("rcar_thermal", 0, 0, priv,
214 &rcar_thermal_zone_ops, 0, 0);
215 if (IS_ERR(zone)) {
216 dev_err(&pdev->dev, "thermal zone device is NULL\n");
217 ret = PTR_ERR(zone);
218 goto error_iounmap;
219 }
220
221 platform_set_drvdata(pdev, zone);
222
223 dev_info(&pdev->dev, "proved\n");
224
225 return 0;
226
227error_iounmap:
228 devm_iounmap(&pdev->dev, priv->base);
229error_free_priv:
230 devm_kfree(&pdev->dev, priv);
231
232 return ret;
233}
234
235static int rcar_thermal_remove(struct platform_device *pdev)
236{
237 struct thermal_zone_device *zone = platform_get_drvdata(pdev);
238 struct rcar_thermal_priv *priv = zone->devdata;
239
240 thermal_zone_device_unregister(zone);
241 platform_set_drvdata(pdev, NULL);
242
243 devm_iounmap(&pdev->dev, priv->base);
244 devm_kfree(&pdev->dev, priv);
245
246 return 0;
247}
248
/* Platform driver glue; matched by name (no DT table yet) */
static struct platform_driver rcar_thermal_driver = {
	.driver = {
		.name	= "rcar_thermal",
	},
	.probe		= rcar_thermal_probe,
	.remove		= rcar_thermal_remove,
};
module_platform_driver(rcar_thermal_driver);
257
258MODULE_LICENSE("GPL");
259MODULE_DESCRIPTION("R-Car THS/TSC thermal sensor driver");
260MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index 5f8ee39f2000..9bc969261d01 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -147,7 +147,7 @@ static int spear_thermal_probe(struct platform_device *pdev)
147 writel_relaxed(stdev->flags, stdev->thermal_base); 147 writel_relaxed(stdev->flags, stdev->thermal_base);
148 148
149 spear_thermal = thermal_zone_device_register("spear_thermal", 0, 0, 149 spear_thermal = thermal_zone_device_register("spear_thermal", 0, 0,
150 stdev, &ops, 0, 0, 0, 0); 150 stdev, &ops, 0, 0);
151 if (IS_ERR(spear_thermal)) { 151 if (IS_ERR(spear_thermal)) {
152 dev_err(&pdev->dev, "thermal zone device is NULL\n"); 152 dev_err(&pdev->dev, "thermal zone device is NULL\n");
153 ret = PTR_ERR(spear_thermal); 153 ret = PTR_ERR(spear_thermal);
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index efd81bb25e01..9ee42ca4d289 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -41,15 +41,25 @@ MODULE_AUTHOR("Zhang Rui");
41MODULE_DESCRIPTION("Generic thermal management sysfs support"); 41MODULE_DESCRIPTION("Generic thermal management sysfs support");
42MODULE_LICENSE("GPL"); 42MODULE_LICENSE("GPL");
43 43
44struct thermal_cooling_device_instance { 44#define THERMAL_NO_TARGET -1UL
45/*
46 * This structure is used to describe the behavior of
47 * a certain cooling device on a certain trip point
48 * in a certain thermal zone
49 */
50struct thermal_instance {
45 int id; 51 int id;
46 char name[THERMAL_NAME_LENGTH]; 52 char name[THERMAL_NAME_LENGTH];
47 struct thermal_zone_device *tz; 53 struct thermal_zone_device *tz;
48 struct thermal_cooling_device *cdev; 54 struct thermal_cooling_device *cdev;
49 int trip; 55 int trip;
56 unsigned long upper; /* Highest cooling state for this trip point */
57 unsigned long lower; /* Lowest cooling state for this trip point */
58 unsigned long target; /* expected cooling state */
50 char attr_name[THERMAL_NAME_LENGTH]; 59 char attr_name[THERMAL_NAME_LENGTH];
51 struct device_attribute attr; 60 struct device_attribute attr;
52 struct list_head node; 61 struct list_head tz_node; /* node in tz->thermal_instances */
62 struct list_head cdev_node; /* node in cdev->thermal_instances */
53}; 63};
54 64
55static DEFINE_IDR(thermal_tz_idr); 65static DEFINE_IDR(thermal_tz_idr);
@@ -308,8 +318,9 @@ passive_store(struct device *dev, struct device_attribute *attr,
308 if (!strncmp("Processor", cdev->type, 318 if (!strncmp("Processor", cdev->type,
309 sizeof("Processor"))) 319 sizeof("Processor")))
310 thermal_zone_bind_cooling_device(tz, 320 thermal_zone_bind_cooling_device(tz,
311 THERMAL_TRIPS_NONE, 321 THERMAL_TRIPS_NONE, cdev,
312 cdev); 322 THERMAL_NO_LIMIT,
323 THERMAL_NO_LIMIT);
313 } 324 }
314 mutex_unlock(&thermal_list_lock); 325 mutex_unlock(&thermal_list_lock);
315 if (!tz->passive_delay) 326 if (!tz->passive_delay)
@@ -327,9 +338,6 @@ passive_store(struct device *dev, struct device_attribute *attr,
327 tz->passive_delay = 0; 338 tz->passive_delay = 0;
328 } 339 }
329 340
330 tz->tc1 = 1;
331 tz->tc2 = 1;
332
333 tz->forced_passive = state; 341 tz->forced_passive = state;
334 342
335 thermal_zone_device_update(tz); 343 thermal_zone_device_update(tz);
@@ -425,10 +433,10 @@ static ssize_t
425thermal_cooling_device_trip_point_show(struct device *dev, 433thermal_cooling_device_trip_point_show(struct device *dev,
426 struct device_attribute *attr, char *buf) 434 struct device_attribute *attr, char *buf)
427{ 435{
428 struct thermal_cooling_device_instance *instance; 436 struct thermal_instance *instance;
429 437
430 instance = 438 instance =
431 container_of(attr, struct thermal_cooling_device_instance, attr); 439 container_of(attr, struct thermal_instance, attr);
432 440
433 if (instance->trip == THERMAL_TRIPS_NONE) 441 if (instance->trip == THERMAL_TRIPS_NONE)
434 return sprintf(buf, "-1\n"); 442 return sprintf(buf, "-1\n");
@@ -590,7 +598,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
590 temp->tz = tz; 598 temp->tz = tz;
591 hwmon->count++; 599 hwmon->count++;
592 600
593 snprintf(temp->temp_input.name, THERMAL_NAME_LENGTH, 601 snprintf(temp->temp_input.name, sizeof(temp->temp_input.name),
594 "temp%d_input", hwmon->count); 602 "temp%d_input", hwmon->count);
595 temp->temp_input.attr.attr.name = temp->temp_input.name; 603 temp->temp_input.attr.attr.name = temp->temp_input.name;
596 temp->temp_input.attr.attr.mode = 0444; 604 temp->temp_input.attr.attr.mode = 0444;
@@ -603,7 +611,8 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
603 if (tz->ops->get_crit_temp) { 611 if (tz->ops->get_crit_temp) {
604 unsigned long temperature; 612 unsigned long temperature;
605 if (!tz->ops->get_crit_temp(tz, &temperature)) { 613 if (!tz->ops->get_crit_temp(tz, &temperature)) {
606 snprintf(temp->temp_crit.name, THERMAL_NAME_LENGTH, 614 snprintf(temp->temp_crit.name,
615 sizeof(temp->temp_crit.name),
607 "temp%d_crit", hwmon->count); 616 "temp%d_crit", hwmon->count);
608 temp->temp_crit.attr.attr.name = temp->temp_crit.name; 617 temp->temp_crit.attr.attr.name = temp->temp_crit.name;
609 temp->temp_crit.attr.attr.mode = 0444; 618 temp->temp_crit.attr.attr.mode = 0444;
@@ -704,74 +713,6 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
704 cancel_delayed_work(&tz->poll_queue); 713 cancel_delayed_work(&tz->poll_queue);
705} 714}
706 715
707static void thermal_zone_device_passive(struct thermal_zone_device *tz,
708 int temp, int trip_temp, int trip)
709{
710 int trend = 0;
711 struct thermal_cooling_device_instance *instance;
712 struct thermal_cooling_device *cdev;
713 long state, max_state;
714
715 /*
716 * Above Trip?
717 * -----------
718 * Calculate the thermal trend (using the passive cooling equation)
719 * and modify the performance limit for all passive cooling devices
720 * accordingly. Note that we assume symmetry.
721 */
722 if (temp >= trip_temp) {
723 tz->passive = true;
724
725 trend = (tz->tc1 * (temp - tz->last_temperature)) +
726 (tz->tc2 * (temp - trip_temp));
727
728 /* Heating up? */
729 if (trend > 0) {
730 list_for_each_entry(instance, &tz->cooling_devices,
731 node) {
732 if (instance->trip != trip)
733 continue;
734 cdev = instance->cdev;
735 cdev->ops->get_cur_state(cdev, &state);
736 cdev->ops->get_max_state(cdev, &max_state);
737 if (state++ < max_state)
738 cdev->ops->set_cur_state(cdev, state);
739 }
740 } else if (trend < 0) { /* Cooling off? */
741 list_for_each_entry(instance, &tz->cooling_devices,
742 node) {
743 if (instance->trip != trip)
744 continue;
745 cdev = instance->cdev;
746 cdev->ops->get_cur_state(cdev, &state);
747 cdev->ops->get_max_state(cdev, &max_state);
748 if (state > 0)
749 cdev->ops->set_cur_state(cdev, --state);
750 }
751 }
752 return;
753 }
754
755 /*
756 * Below Trip?
757 * -----------
758 * Implement passive cooling hysteresis to slowly increase performance
759 * and avoid thrashing around the passive trip point. Note that we
760 * assume symmetry.
761 */
762 list_for_each_entry(instance, &tz->cooling_devices, node) {
763 if (instance->trip != trip)
764 continue;
765 cdev = instance->cdev;
766 cdev->ops->get_cur_state(cdev, &state);
767 cdev->ops->get_max_state(cdev, &max_state);
768 if (state > 0)
769 cdev->ops->set_cur_state(cdev, --state);
770 if (state == 0)
771 tz->passive = false;
772 }
773}
774
775static void thermal_zone_device_check(struct work_struct *work) 716static void thermal_zone_device_check(struct work_struct *work)
776{ 717{
777 struct thermal_zone_device *tz = container_of(work, struct 718 struct thermal_zone_device *tz = container_of(work, struct
@@ -791,12 +732,14 @@ static void thermal_zone_device_check(struct work_struct *work)
791 */ 732 */
792int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, 733int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
793 int trip, 734 int trip,
794 struct thermal_cooling_device *cdev) 735 struct thermal_cooling_device *cdev,
736 unsigned long upper, unsigned long lower)
795{ 737{
796 struct thermal_cooling_device_instance *dev; 738 struct thermal_instance *dev;
797 struct thermal_cooling_device_instance *pos; 739 struct thermal_instance *pos;
798 struct thermal_zone_device *pos1; 740 struct thermal_zone_device *pos1;
799 struct thermal_cooling_device *pos2; 741 struct thermal_cooling_device *pos2;
742 unsigned long max_state;
800 int result; 743 int result;
801 744
802 if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE)) 745 if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE))
@@ -814,13 +757,26 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
814 if (tz != pos1 || cdev != pos2) 757 if (tz != pos1 || cdev != pos2)
815 return -EINVAL; 758 return -EINVAL;
816 759
760 cdev->ops->get_max_state(cdev, &max_state);
761
762 /* lower default 0, upper default max_state */
763 lower = lower == THERMAL_NO_LIMIT ? 0 : lower;
764 upper = upper == THERMAL_NO_LIMIT ? max_state : upper;
765
766 if (lower > upper || upper > max_state)
767 return -EINVAL;
768
817 dev = 769 dev =
818 kzalloc(sizeof(struct thermal_cooling_device_instance), GFP_KERNEL); 770 kzalloc(sizeof(struct thermal_instance), GFP_KERNEL);
819 if (!dev) 771 if (!dev)
820 return -ENOMEM; 772 return -ENOMEM;
821 dev->tz = tz; 773 dev->tz = tz;
822 dev->cdev = cdev; 774 dev->cdev = cdev;
823 dev->trip = trip; 775 dev->trip = trip;
776 dev->upper = upper;
777 dev->lower = lower;
778 dev->target = THERMAL_NO_TARGET;
779
824 result = get_idr(&tz->idr, &tz->lock, &dev->id); 780 result = get_idr(&tz->idr, &tz->lock, &dev->id);
825 if (result) 781 if (result)
826 goto free_mem; 782 goto free_mem;
@@ -841,13 +797,17 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
841 goto remove_symbol_link; 797 goto remove_symbol_link;
842 798
843 mutex_lock(&tz->lock); 799 mutex_lock(&tz->lock);
844 list_for_each_entry(pos, &tz->cooling_devices, node) 800 mutex_lock(&cdev->lock);
801 list_for_each_entry(pos, &tz->thermal_instances, tz_node)
845 if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { 802 if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
846 result = -EEXIST; 803 result = -EEXIST;
847 break; 804 break;
848 } 805 }
849 if (!result) 806 if (!result) {
850 list_add_tail(&dev->node, &tz->cooling_devices); 807 list_add_tail(&dev->tz_node, &tz->thermal_instances);
808 list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
809 }
810 mutex_unlock(&cdev->lock);
851 mutex_unlock(&tz->lock); 811 mutex_unlock(&tz->lock);
852 812
853 if (!result) 813 if (!result)
@@ -877,16 +837,20 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
877 int trip, 837 int trip,
878 struct thermal_cooling_device *cdev) 838 struct thermal_cooling_device *cdev)
879{ 839{
880 struct thermal_cooling_device_instance *pos, *next; 840 struct thermal_instance *pos, *next;
881 841
882 mutex_lock(&tz->lock); 842 mutex_lock(&tz->lock);
883 list_for_each_entry_safe(pos, next, &tz->cooling_devices, node) { 843 mutex_lock(&cdev->lock);
844 list_for_each_entry_safe(pos, next, &tz->thermal_instances, tz_node) {
884 if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { 845 if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
885 list_del(&pos->node); 846 list_del(&pos->tz_node);
847 list_del(&pos->cdev_node);
848 mutex_unlock(&cdev->lock);
886 mutex_unlock(&tz->lock); 849 mutex_unlock(&tz->lock);
887 goto unbind; 850 goto unbind;
888 } 851 }
889 } 852 }
853 mutex_unlock(&cdev->lock);
890 mutex_unlock(&tz->lock); 854 mutex_unlock(&tz->lock);
891 855
892 return -ENODEV; 856 return -ENODEV;
@@ -934,7 +898,7 @@ thermal_cooling_device_register(char *type, void *devdata,
934 struct thermal_zone_device *pos; 898 struct thermal_zone_device *pos;
935 int result; 899 int result;
936 900
937 if (strlen(type) >= THERMAL_NAME_LENGTH) 901 if (type && strlen(type) >= THERMAL_NAME_LENGTH)
938 return ERR_PTR(-EINVAL); 902 return ERR_PTR(-EINVAL);
939 903
940 if (!ops || !ops->get_max_state || !ops->get_cur_state || 904 if (!ops || !ops->get_max_state || !ops->get_cur_state ||
@@ -951,8 +915,11 @@ thermal_cooling_device_register(char *type, void *devdata,
951 return ERR_PTR(result); 915 return ERR_PTR(result);
952 } 916 }
953 917
954 strcpy(cdev->type, type); 918 strcpy(cdev->type, type ? : "");
919 mutex_init(&cdev->lock);
920 INIT_LIST_HEAD(&cdev->thermal_instances);
955 cdev->ops = ops; 921 cdev->ops = ops;
922 cdev->updated = true;
956 cdev->device.class = &thermal_class; 923 cdev->device.class = &thermal_class;
957 cdev->devdata = devdata; 924 cdev->devdata = devdata;
958 dev_set_name(&cdev->device, "cooling_device%d", cdev->id); 925 dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
@@ -1044,6 +1011,136 @@ void thermal_cooling_device_unregister(struct
1044} 1011}
1045EXPORT_SYMBOL(thermal_cooling_device_unregister); 1012EXPORT_SYMBOL(thermal_cooling_device_unregister);
1046 1013
1014static void thermal_cdev_do_update(struct thermal_cooling_device *cdev)
1015{
1016 struct thermal_instance *instance;
1017 unsigned long target = 0;
1018
1019 /* cooling device is updated*/
1020 if (cdev->updated)
1021 return;
1022
1023 mutex_lock(&cdev->lock);
1024 /* Make sure cdev enters the deepest cooling state */
1025 list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) {
1026 if (instance->target == THERMAL_NO_TARGET)
1027 continue;
1028 if (instance->target > target)
1029 target = instance->target;
1030 }
1031 mutex_unlock(&cdev->lock);
1032 cdev->ops->set_cur_state(cdev, target);
1033 cdev->updated = true;
1034}
1035
1036static void thermal_zone_do_update(struct thermal_zone_device *tz)
1037{
1038 struct thermal_instance *instance;
1039
1040 list_for_each_entry(instance, &tz->thermal_instances, tz_node)
1041 thermal_cdev_do_update(instance->cdev);
1042}
1043
1044/*
1045 * Cooling algorithm for both active and passive cooling
1046 *
1047 * 1. if the temperature is higher than a trip point,
1048 * a. if the trend is THERMAL_TREND_RAISING, use higher cooling
1049 * state for this trip point
1050 * b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
1051 * state for this trip point
1052 *
1053 * 2. if the temperature is lower than a trip point, use lower
1054 * cooling state for this trip point
1055 *
1056 * Note that this behaves the same as the previous passive cooling
1057 * algorithm.
1058 */
1059
1060static void thermal_zone_trip_update(struct thermal_zone_device *tz,
1061 int trip, long temp)
1062{
1063 struct thermal_instance *instance;
1064 struct thermal_cooling_device *cdev = NULL;
1065 unsigned long cur_state, max_state;
1066 long trip_temp;
1067 enum thermal_trip_type trip_type;
1068 enum thermal_trend trend;
1069
1070 if (trip == THERMAL_TRIPS_NONE) {
1071 trip_temp = tz->forced_passive;
1072 trip_type = THERMAL_TRIPS_NONE;
1073 } else {
1074 tz->ops->get_trip_temp(tz, trip, &trip_temp);
1075 tz->ops->get_trip_type(tz, trip, &trip_type);
1076 }
1077
1078 if (!tz->ops->get_trend || tz->ops->get_trend(tz, trip, &trend)) {
1079 /*
1080 * compare the current temperature and previous temperature
1081 * to get the thermal trend, if no special requirement
1082 */
1083 if (tz->temperature > tz->last_temperature)
1084 trend = THERMAL_TREND_RAISING;
1085 else if (tz->temperature < tz->last_temperature)
1086 trend = THERMAL_TREND_DROPPING;
1087 else
1088 trend = THERMAL_TREND_STABLE;
1089 }
1090
1091 if (temp >= trip_temp) {
1092 list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
1093 if (instance->trip != trip)
1094 continue;
1095
1096 cdev = instance->cdev;
1097
1098 cdev->ops->get_cur_state(cdev, &cur_state);
1099 cdev->ops->get_max_state(cdev, &max_state);
1100
1101 if (trend == THERMAL_TREND_RAISING) {
1102 cur_state = cur_state < instance->upper ?
1103 (cur_state + 1) : instance->upper;
1104 } else if (trend == THERMAL_TREND_DROPPING) {
1105 cur_state = cur_state > instance->lower ?
1106 (cur_state - 1) : instance->lower;
1107 }
1108
1109 /* activate a passive thermal instance */
1110 if ((trip_type == THERMAL_TRIP_PASSIVE ||
1111 trip_type == THERMAL_TRIPS_NONE) &&
1112 instance->target == THERMAL_NO_TARGET)
1113 tz->passive++;
1114
1115 instance->target = cur_state;
1116 cdev->updated = false; /* cooling device needs update */
1117 }
1118 } else { /* below trip */
1119 list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
1120 if (instance->trip != trip)
1121 continue;
1122
1123 /* Do not use the inactive thermal instance */
1124 if (instance->target == THERMAL_NO_TARGET)
1125 continue;
1126 cdev = instance->cdev;
1127 cdev->ops->get_cur_state(cdev, &cur_state);
1128
1129 cur_state = cur_state > instance->lower ?
1130 (cur_state - 1) : THERMAL_NO_TARGET;
1131
1132 /* deactivate a passive thermal instance */
1133 if ((trip_type == THERMAL_TRIP_PASSIVE ||
1134 trip_type == THERMAL_TRIPS_NONE) &&
1135 cur_state == THERMAL_NO_TARGET)
1136 tz->passive--;
1137 instance->target = cur_state;
1138 cdev->updated = false; /* cooling device needs update */
1139 }
1140 }
1141
1142 return;
1143}
1047/** 1144/**
1048 * thermal_zone_device_update - force an update of a thermal zone's state 1145 * thermal_zone_device_update - force an update of a thermal zone's state
1049 * @ttz: the thermal zone to update 1146 * @ttz: the thermal zone to update
@@ -1054,8 +1151,6 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
1054 int count, ret = 0; 1151 int count, ret = 0;
1055 long temp, trip_temp; 1152 long temp, trip_temp;
1056 enum thermal_trip_type trip_type; 1153 enum thermal_trip_type trip_type;
1057 struct thermal_cooling_device_instance *instance;
1058 struct thermal_cooling_device *cdev;
1059 1154
1060 mutex_lock(&tz->lock); 1155 mutex_lock(&tz->lock);
1061 1156
@@ -1065,6 +1160,9 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
1065 goto leave; 1160 goto leave;
1066 } 1161 }
1067 1162
1163 tz->last_temperature = tz->temperature;
1164 tz->temperature = temp;
1165
1068 for (count = 0; count < tz->trips; count++) { 1166 for (count = 0; count < tz->trips; count++) {
1069 tz->ops->get_trip_type(tz, count, &trip_type); 1167 tz->ops->get_trip_type(tz, count, &trip_type);
1070 tz->ops->get_trip_temp(tz, count, &trip_temp); 1168 tz->ops->get_trip_temp(tz, count, &trip_temp);
@@ -1088,32 +1186,18 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
1088 tz->ops->notify(tz, count, trip_type); 1186 tz->ops->notify(tz, count, trip_type);
1089 break; 1187 break;
1090 case THERMAL_TRIP_ACTIVE: 1188 case THERMAL_TRIP_ACTIVE:
1091 list_for_each_entry(instance, &tz->cooling_devices, 1189 thermal_zone_trip_update(tz, count, temp);
1092 node) {
1093 if (instance->trip != count)
1094 continue;
1095
1096 cdev = instance->cdev;
1097
1098 if (temp >= trip_temp)
1099 cdev->ops->set_cur_state(cdev, 1);
1100 else
1101 cdev->ops->set_cur_state(cdev, 0);
1102 }
1103 break; 1190 break;
1104 case THERMAL_TRIP_PASSIVE: 1191 case THERMAL_TRIP_PASSIVE:
1105 if (temp >= trip_temp || tz->passive) 1192 if (temp >= trip_temp || tz->passive)
1106 thermal_zone_device_passive(tz, temp, 1193 thermal_zone_trip_update(tz, count, temp);
1107 trip_temp, count);
1108 break; 1194 break;
1109 } 1195 }
1110 } 1196 }
1111 1197
1112 if (tz->forced_passive) 1198 if (tz->forced_passive)
1113 thermal_zone_device_passive(tz, temp, tz->forced_passive, 1199 thermal_zone_trip_update(tz, THERMAL_TRIPS_NONE, temp);
1114 THERMAL_TRIPS_NONE); 1200 thermal_zone_do_update(tz);
1115
1116 tz->last_temperature = temp;
1117 1201
1118leave: 1202leave:
1119 if (tz->passive) 1203 if (tz->passive)
@@ -1236,8 +1320,6 @@ static void remove_trip_attrs(struct thermal_zone_device *tz)
1236 * @mask: a bit string indicating the writeablility of trip points 1320 * @mask: a bit string indicating the writeablility of trip points
1237 * @devdata: private device data 1321 * @devdata: private device data
1238 * @ops: standard thermal zone device callbacks 1322 * @ops: standard thermal zone device callbacks
1239 * @tc1: thermal coefficient 1 for passive calculations
1240 * @tc2: thermal coefficient 2 for passive calculations
1241 * @passive_delay: number of milliseconds to wait between polls when 1323 * @passive_delay: number of milliseconds to wait between polls when
1242 * performing passive cooling 1324 * performing passive cooling
1243 * @polling_delay: number of milliseconds to wait between polls when checking 1325 * @polling_delay: number of milliseconds to wait between polls when checking
@@ -1245,13 +1327,12 @@ static void remove_trip_attrs(struct thermal_zone_device *tz)
1245 * driven systems) 1327 * driven systems)
1246 * 1328 *
1247 * thermal_zone_device_unregister() must be called when the device is no 1329 * thermal_zone_device_unregister() must be called when the device is no
1248 * longer needed. The passive cooling formula uses tc1 and tc2 as described in 1330 * longer needed. The passive cooling depends on the .get_trend() return value.
1249 * section 11.1.5.1 of the ACPI specification 3.0.
1250 */ 1331 */
1251struct thermal_zone_device *thermal_zone_device_register(const char *type, 1332struct thermal_zone_device *thermal_zone_device_register(const char *type,
1252 int trips, int mask, void *devdata, 1333 int trips, int mask, void *devdata,
1253 const struct thermal_zone_device_ops *ops, 1334 const struct thermal_zone_device_ops *ops,
1254 int tc1, int tc2, int passive_delay, int polling_delay) 1335 int passive_delay, int polling_delay)
1255{ 1336{
1256 struct thermal_zone_device *tz; 1337 struct thermal_zone_device *tz;
1257 struct thermal_cooling_device *pos; 1338 struct thermal_cooling_device *pos;
@@ -1260,7 +1341,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
1260 int count; 1341 int count;
1261 int passive = 0; 1342 int passive = 0;
1262 1343
1263 if (strlen(type) >= THERMAL_NAME_LENGTH) 1344 if (type && strlen(type) >= THERMAL_NAME_LENGTH)
1264 return ERR_PTR(-EINVAL); 1345 return ERR_PTR(-EINVAL);
1265 1346
1266 if (trips > THERMAL_MAX_TRIPS || trips < 0 || mask >> trips) 1347 if (trips > THERMAL_MAX_TRIPS || trips < 0 || mask >> trips)
@@ -1273,7 +1354,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
1273 if (!tz) 1354 if (!tz)
1274 return ERR_PTR(-ENOMEM); 1355 return ERR_PTR(-ENOMEM);
1275 1356
1276 INIT_LIST_HEAD(&tz->cooling_devices); 1357 INIT_LIST_HEAD(&tz->thermal_instances);
1277 idr_init(&tz->idr); 1358 idr_init(&tz->idr);
1278 mutex_init(&tz->lock); 1359 mutex_init(&tz->lock);
1279 result = get_idr(&thermal_tz_idr, &thermal_idr_lock, &tz->id); 1360 result = get_idr(&thermal_tz_idr, &thermal_idr_lock, &tz->id);
@@ -1282,13 +1363,11 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
1282 return ERR_PTR(result); 1363 return ERR_PTR(result);
1283 } 1364 }
1284 1365
1285 strcpy(tz->type, type); 1366 strcpy(tz->type, type ? : "");
1286 tz->ops = ops; 1367 tz->ops = ops;
1287 tz->device.class = &thermal_class; 1368 tz->device.class = &thermal_class;
1288 tz->devdata = devdata; 1369 tz->devdata = devdata;
1289 tz->trips = trips; 1370 tz->trips = trips;
1290 tz->tc1 = tc1;
1291 tz->tc2 = tc2;
1292 tz->passive_delay = passive_delay; 1371 tz->passive_delay = passive_delay;
1293 tz->polling_delay = polling_delay; 1372 tz->polling_delay = polling_delay;
1294 1373
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 2944ff88fdc0..f4abfe238f98 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -478,7 +478,6 @@ static void xencons_backend_changed(struct xenbus_device *dev,
478 case XenbusStateInitialising: 478 case XenbusStateInitialising:
479 case XenbusStateInitialised: 479 case XenbusStateInitialised:
480 case XenbusStateUnknown: 480 case XenbusStateUnknown:
481 case XenbusStateClosed:
482 break; 481 break;
483 482
484 case XenbusStateInitWait: 483 case XenbusStateInitWait:
@@ -488,6 +487,10 @@ static void xencons_backend_changed(struct xenbus_device *dev,
488 xenbus_switch_state(dev, XenbusStateConnected); 487 xenbus_switch_state(dev, XenbusStateConnected);
489 break; 488 break;
490 489
490 case XenbusStateClosed:
491 if (dev->state == XenbusStateClosed)
492 break;
493 /* Missed the backend's CLOSING state -- fallthrough */
491 case XenbusStateClosing: 494 case XenbusStateClosing:
492 xenbus_frontend_closed(dev); 495 xenbus_frontend_closed(dev);
493 break; 496 break;
diff --git a/drivers/tty/serial/8250/8250_hp300.c b/drivers/tty/serial/8250/8250_hp300.c
index 8f1dd2cc00a8..f3d0edf46644 100644
--- a/drivers/tty/serial/8250/8250_hp300.c
+++ b/drivers/tty/serial/8250/8250_hp300.c
@@ -162,7 +162,7 @@ int __init hp300_setup_serial_console(void)
162static int __devinit hpdca_init_one(struct dio_dev *d, 162static int __devinit hpdca_init_one(struct dio_dev *d,
163 const struct dio_device_id *ent) 163 const struct dio_device_id *ent)
164{ 164{
165 struct uart_port port; 165 struct uart_8250_port uart;
166 int line; 166 int line;
167 167
168#ifdef CONFIG_SERIAL_8250_CONSOLE 168#ifdef CONFIG_SERIAL_8250_CONSOLE
@@ -174,19 +174,19 @@ static int __devinit hpdca_init_one(struct dio_dev *d,
174 memset(&uart, 0, sizeof(uart)); 174 memset(&uart, 0, sizeof(uart));
175 175
176 /* Memory mapped I/O */ 176 /* Memory mapped I/O */
177 port.iotype = UPIO_MEM; 177 uart.port.iotype = UPIO_MEM;
178 port.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF; 178 uart.port.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF;
179 port.irq = d->ipl; 179 uart.port.irq = d->ipl;
180 port.uartclk = HPDCA_BAUD_BASE * 16; 180 uart.port.uartclk = HPDCA_BAUD_BASE * 16;
181 port.mapbase = (d->resource.start + UART_OFFSET); 181 uart.port.mapbase = (d->resource.start + UART_OFFSET);
182 port.membase = (char *)(port.mapbase + DIO_VIRADDRBASE); 182 uart.port.membase = (char *)(uart.port.mapbase + DIO_VIRADDRBASE);
183 port.regshift = 1; 183 uart.port.regshift = 1;
184 port.dev = &d->dev; 184 uart.port.dev = &d->dev;
185 line = serial8250_register_8250_port(&uart); 185 line = serial8250_register_8250_port(&uart);
186 186
187 if (line < 0) { 187 if (line < 0) {
188 printk(KERN_NOTICE "8250_hp300: register_serial() DCA scode %d" 188 printk(KERN_NOTICE "8250_hp300: register_serial() DCA scode %d"
189 " irq %d failed\n", d->scode, port.irq); 189 " irq %d failed\n", d->scode, uart.port.irq);
190 return -ENOMEM; 190 return -ENOMEM;
191 } 191 }
192 192
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 233fbaaf2559..2a53be5f010d 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1150,7 +1150,7 @@ config SERIAL_SC26XX_CONSOLE
1150 Support for Console on SC2681/SC2692 serial ports. 1150 Support for Console on SC2681/SC2692 serial ports.
1151 1151
1152config SERIAL_SCCNXP 1152config SERIAL_SCCNXP
1153 bool "SCCNXP serial port support" 1153 tristate "SCCNXP serial port support"
1154 depends on !SERIAL_SC26XX 1154 depends on !SERIAL_SC26XX
1155 select SERIAL_CORE 1155 select SERIAL_CORE
1156 default n 1156 default n
@@ -1162,7 +1162,7 @@ config SERIAL_SCCNXP
1162 1162
1163config SERIAL_SCCNXP_CONSOLE 1163config SERIAL_SCCNXP_CONSOLE
1164 bool "Console on SCCNXP serial port" 1164 bool "Console on SCCNXP serial port"
1165 depends on SERIAL_SCCNXP 1165 depends on SERIAL_SCCNXP=y
1166 select SERIAL_CORE_CONSOLE 1166 select SERIAL_CORE_CONSOLE
1167 help 1167 help
1168 Support for console on SCCNXP serial ports. 1168 Support for console on SCCNXP serial ports.
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index c0b334327d93..10020547c60b 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -97,7 +97,8 @@ static void kgdboc_restore_input(void)
97 97
98static int kgdboc_register_kbd(char **cptr) 98static int kgdboc_register_kbd(char **cptr)
99{ 99{
100 if (strncmp(*cptr, "kbd", 3) == 0) { 100 if (strncmp(*cptr, "kbd", 3) == 0 ||
101 strncmp(*cptr, "kdb", 3) == 0) {
101 if (kdb_poll_idx < KDB_POLL_FUNC_MAX) { 102 if (kdb_poll_idx < KDB_POLL_FUNC_MAX) {
102 kdb_poll_funcs[kdb_poll_idx] = kdb_get_kbd_char; 103 kdb_poll_funcs[kdb_poll_idx] = kdb_get_kbd_char;
103 kdb_poll_idx++; 104 kdb_poll_idx++;
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 6ede6fd92b4c..6d3d26a607b9 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -671,19 +671,19 @@ serial_omap_configure_xonxoff
671 671
672 /* 672 /*
673 * IXON Flag: 673 * IXON Flag:
674 * Flow control for OMAP.TX 674 * Enable XON/XOFF flow control on output.
675 * OMAP.RX should listen for XON/XOFF 675 * Transmit XON1, XOFF1
676 */ 676 */
677 if (termios->c_iflag & IXON) 677 if (termios->c_iflag & IXON)
678 up->efr |= OMAP_UART_SW_RX; 678 up->efr |= OMAP_UART_SW_TX;
679 679
680 /* 680 /*
681 * IXOFF Flag: 681 * IXOFF Flag:
682 * Flow control for OMAP.RX 682 * Enable XON/XOFF flow control on input.
683 * OMAP.TX should send XON/XOFF 683 * Receiver compares XON1, XOFF1.
684 */ 684 */
685 if (termios->c_iflag & IXOFF) 685 if (termios->c_iflag & IXOFF)
686 up->efr |= OMAP_UART_SW_TX; 686 up->efr |= OMAP_UART_SW_RX;
687 687
688 serial_out(up, UART_EFR, up->efr | UART_EFR_ECB); 688 serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
689 serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A); 689 serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index b7086d004f5f..e821068cd95b 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -971,6 +971,7 @@ static const struct platform_device_id sccnxp_id_table[] = {
971 { "sc28202", SCCNXP_TYPE_SC28202 }, 971 { "sc28202", SCCNXP_TYPE_SC28202 },
972 { "sc68681", SCCNXP_TYPE_SC68681 }, 972 { "sc68681", SCCNXP_TYPE_SC68681 },
973 { "sc68692", SCCNXP_TYPE_SC68692 }, 973 { "sc68692", SCCNXP_TYPE_SC68692 },
974 { },
974}; 975};
975MODULE_DEVICE_TABLE(platform, sccnxp_id_table); 976MODULE_DEVICE_TABLE(platform, sccnxp_id_table);
976 977
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 9be296cf7295..6ee59001d61d 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -530,7 +530,8 @@ static inline int sci_rxd_in(struct uart_port *port)
530 if (s->cfg->port_reg <= 0) 530 if (s->cfg->port_reg <= 0)
531 return 1; 531 return 1;
532 532
533 return !!__raw_readb(s->cfg->port_reg); 533 /* Cast for ARM damage */
534 return !!__raw_readb((void __iomem *)s->cfg->port_reg);
534} 535}
535 536
536/* ********************************************************************** * 537/* ********************************************************************** *
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 05728894a88c..16ee6cee07da 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -452,6 +452,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
452 NULL, /* v */ 452 NULL, /* v */
453 &sysrq_showstate_blocked_op, /* w */ 453 &sysrq_showstate_blocked_op, /* w */
454 /* x: May be registered on ppc/powerpc for xmon */ 454 /* x: May be registered on ppc/powerpc for xmon */
455 /* x: May be registered on sparc64 for global PMU dump */
455 NULL, /* x */ 456 NULL, /* x */
456 /* y: May be registered on sparc64 for global register dump */ 457 /* y: May be registered on sparc64 for global register dump */
457 NULL, /* y */ 458 NULL, /* y */
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 999ca63afdef..f87d7e8964bf 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -3442,6 +3442,19 @@ int con_debug_enter(struct vc_data *vc)
3442 kdb_set(2, setargs); 3442 kdb_set(2, setargs);
3443 } 3443 }
3444 } 3444 }
3445 if (vc->vc_cols < 999) {
3446 int colcount;
3447 char cols[4];
3448 const char *setargs[3] = {
3449 "set",
3450 "COLUMNS",
3451 cols,
3452 };
3453 if (kdbgetintenv(setargs[0], &colcount)) {
3454 snprintf(cols, 4, "%i", vc->vc_cols);
3455 kdb_set(2, setargs);
3456 }
3457 }
3445#endif /* CONFIG_KGDB_KDB */ 3458#endif /* CONFIG_KGDB_KDB */
3446 return ret; 3459 return ret;
3447} 3460}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 36f2be4def2f..6e49ec6f3adc 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -817,10 +817,6 @@ static const __u32 acm_tty_speed[] = {
817 2500000, 3000000, 3500000, 4000000 817 2500000, 3000000, 3500000, 4000000
818}; 818};
819 819
820static const __u8 acm_tty_size[] = {
821 5, 6, 7, 8
822};
823
824static void acm_tty_set_termios(struct tty_struct *tty, 820static void acm_tty_set_termios(struct tty_struct *tty,
825 struct ktermios *termios_old) 821 struct ktermios *termios_old)
826{ 822{
@@ -834,7 +830,21 @@ static void acm_tty_set_termios(struct tty_struct *tty,
834 newline.bParityType = termios->c_cflag & PARENB ? 830 newline.bParityType = termios->c_cflag & PARENB ?
835 (termios->c_cflag & PARODD ? 1 : 2) + 831 (termios->c_cflag & PARODD ? 1 : 2) +
836 (termios->c_cflag & CMSPAR ? 2 : 0) : 0; 832 (termios->c_cflag & CMSPAR ? 2 : 0) : 0;
837 newline.bDataBits = acm_tty_size[(termios->c_cflag & CSIZE) >> 4]; 833 switch (termios->c_cflag & CSIZE) {
834 case CS5:
835 newline.bDataBits = 5;
836 break;
837 case CS6:
838 newline.bDataBits = 6;
839 break;
840 case CS7:
841 newline.bDataBits = 7;
842 break;
843 case CS8:
844 default:
845 newline.bDataBits = 8;
846 break;
847 }
838 /* FIXME: Needs to clear unsupported bits in the termios */ 848 /* FIXME: Needs to clear unsupported bits in the termios */
839 acm->clocal = ((termios->c_cflag & CLOCAL) != 0); 849 acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
840 850
@@ -1233,7 +1243,7 @@ made_compressed_probe:
1233 1243
1234 if (usb_endpoint_xfer_int(epwrite)) 1244 if (usb_endpoint_xfer_int(epwrite))
1235 usb_fill_int_urb(snd->urb, usb_dev, 1245 usb_fill_int_urb(snd->urb, usb_dev,
1236 usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress), 1246 usb_sndintpipe(usb_dev, epwrite->bEndpointAddress),
1237 NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval); 1247 NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval);
1238 else 1248 else
1239 usb_fill_bulk_urb(snd->urb, usb_dev, 1249 usb_fill_bulk_urb(snd->urb, usb_dev,
@@ -1551,6 +1561,9 @@ static const struct usb_device_id acm_ids[] = {
1551 Maybe we should define a new 1561 Maybe we should define a new
1552 quirk for this. */ 1562 quirk for this. */
1553 }, 1563 },
1564 { USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */
1565 .driver_info = NO_UNION_NORMAL,
1566 },
1554 { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */ 1567 { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
1555 .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ 1568 .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
1556 }, 1569 },
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index e0356cb859b5..b78fbe222b72 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1348,6 +1348,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1348 ret = -EFAULT; 1348 ret = -EFAULT;
1349 goto error; 1349 goto error;
1350 } 1350 }
1351 uurb->buffer += u;
1351 } 1352 }
1352 totlen -= u; 1353 totlen -= u;
1353 } 1354 }
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index ddd820d25288..6056db7af410 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -367,6 +367,10 @@ static int usb_probe_interface(struct device *dev)
367 intf->condition = USB_INTERFACE_UNBOUND; 367 intf->condition = USB_INTERFACE_UNBOUND;
368 usb_cancel_queued_reset(intf); 368 usb_cancel_queued_reset(intf);
369 369
370 /* If the LPM disable succeeded, balance the ref counts. */
371 if (!lpm_disable_error)
372 usb_unlocked_enable_lpm(udev);
373
370 /* Unbound interfaces are always runtime-PM-disabled and -suspended */ 374 /* Unbound interfaces are always runtime-PM-disabled and -suspended */
371 if (driver->supports_autosuspend) 375 if (driver->supports_autosuspend)
372 pm_runtime_disable(dev); 376 pm_runtime_disable(dev);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 673ee4696262..1af04bdeaf0c 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -739,13 +739,16 @@ static void hub_tt_work(struct work_struct *work)
739 int limit = 100; 739 int limit = 100;
740 740
741 spin_lock_irqsave (&hub->tt.lock, flags); 741 spin_lock_irqsave (&hub->tt.lock, flags);
742 while (--limit && !list_empty (&hub->tt.clear_list)) { 742 while (!list_empty(&hub->tt.clear_list)) {
743 struct list_head *next; 743 struct list_head *next;
744 struct usb_tt_clear *clear; 744 struct usb_tt_clear *clear;
745 struct usb_device *hdev = hub->hdev; 745 struct usb_device *hdev = hub->hdev;
746 const struct hc_driver *drv; 746 const struct hc_driver *drv;
747 int status; 747 int status;
748 748
749 if (!hub->quiescing && --limit < 0)
750 break;
751
749 next = hub->tt.clear_list.next; 752 next = hub->tt.clear_list.next;
750 clear = list_entry (next, struct usb_tt_clear, clear_list); 753 clear = list_entry (next, struct usb_tt_clear, clear_list);
751 list_del (&clear->clear_list); 754 list_del (&clear->clear_list);
@@ -1210,7 +1213,7 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
1210 if (hub->has_indicators) 1213 if (hub->has_indicators)
1211 cancel_delayed_work_sync(&hub->leds); 1214 cancel_delayed_work_sync(&hub->leds);
1212 if (hub->tt.hub) 1215 if (hub->tt.hub)
1213 cancel_work_sync(&hub->tt.clear_work); 1216 flush_work(&hub->tt.clear_work);
1214} 1217}
1215 1218
1216/* caller has locked the hub device */ 1219/* caller has locked the hub device */
@@ -3241,8 +3244,7 @@ static int usb_req_set_sel(struct usb_device *udev, enum usb3_link_state state)
3241 (state == USB3_LPM_U2 && 3244 (state == USB3_LPM_U2 &&
3242 (u2_sel > USB3_LPM_MAX_U2_SEL_PEL || 3245 (u2_sel > USB3_LPM_MAX_U2_SEL_PEL ||
3243 u2_pel > USB3_LPM_MAX_U2_SEL_PEL))) { 3246 u2_pel > USB3_LPM_MAX_U2_SEL_PEL))) {
3244 dev_dbg(&udev->dev, "Device-initiated %s disabled due " 3247 dev_dbg(&udev->dev, "Device-initiated %s disabled due to long SEL %llu us or PEL %llu us\n",
3245 "to long SEL %llu ms or PEL %llu ms\n",
3246 usb3_lpm_names[state], u1_sel, u1_pel); 3248 usb3_lpm_names[state], u1_sel, u1_pel);
3247 return -EINVAL; 3249 return -EINVAL;
3248 } 3250 }
@@ -3320,16 +3322,6 @@ static int usb_set_device_initiated_lpm(struct usb_device *udev,
3320 3322
3321 if (enable) { 3323 if (enable) {
3322 /* 3324 /*
3323 * First, let the device know about the exit latencies
3324 * associated with the link state we're about to enable.
3325 */
3326 ret = usb_req_set_sel(udev, state);
3327 if (ret < 0) {
3328 dev_warn(&udev->dev, "Set SEL for device-initiated "
3329 "%s failed.\n", usb3_lpm_names[state]);
3330 return -EBUSY;
3331 }
3332 /*
3333 * Now send the control transfer to enable device-initiated LPM 3325 * Now send the control transfer to enable device-initiated LPM
3334 * for either U1 or U2. 3326 * for either U1 or U2.
3335 */ 3327 */
@@ -3414,7 +3406,28 @@ static int usb_set_lpm_timeout(struct usb_device *udev,
3414static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, 3406static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
3415 enum usb3_link_state state) 3407 enum usb3_link_state state)
3416{ 3408{
3417 int timeout; 3409 int timeout, ret;
3410 __u8 u1_mel = udev->bos->ss_cap->bU1devExitLat;
3411 __le16 u2_mel = udev->bos->ss_cap->bU2DevExitLat;
3412
3413 /* If the device says it doesn't have *any* exit latency to come out of
3414 * U1 or U2, it's probably lying. Assume it doesn't implement that link
3415 * state.
3416 */
3417 if ((state == USB3_LPM_U1 && u1_mel == 0) ||
3418 (state == USB3_LPM_U2 && u2_mel == 0))
3419 return;
3420
3421 /*
3422 * First, let the device know about the exit latencies
3423 * associated with the link state we're about to enable.
3424 */
3425 ret = usb_req_set_sel(udev, state);
3426 if (ret < 0) {
3427 dev_warn(&udev->dev, "Set SEL for device-initiated %s failed.\n",
3428 usb3_lpm_names[state]);
3429 return;
3430 }
3418 3431
3419 /* We allow the host controller to set the U1/U2 timeout internally 3432 /* We allow the host controller to set the U1/U2 timeout internally
3420 * first, so that it can change its schedule to account for the 3433 * first, so that it can change its schedule to account for the
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index b415c0c859d3..c14ebc975ba4 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -409,6 +409,10 @@ static void dwc3_core_exit(struct dwc3 *dwc)
409{ 409{
410 dwc3_event_buffers_cleanup(dwc); 410 dwc3_event_buffers_cleanup(dwc);
411 dwc3_free_event_buffers(dwc); 411 dwc3_free_event_buffers(dwc);
412
413 usb_phy_shutdown(dwc->usb2_phy);
414 usb_phy_shutdown(dwc->usb3_phy);
415
412} 416}
413 417
414#define DWC3_ALIGN_MASK (16 - 1) 418#define DWC3_ALIGN_MASK (16 - 1)
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index c9e729a4bf65..7b7deddf6a52 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1904,7 +1904,7 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
1904 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params); 1904 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
1905 WARN_ON_ONCE(ret); 1905 WARN_ON_ONCE(ret);
1906 dep->resource_index = 0; 1906 dep->resource_index = 0;
1907 1907 dep->flags &= ~DWC3_EP_BUSY;
1908 udelay(100); 1908 udelay(100);
1909} 1909}
1910 1910
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index dfb51a45496c..e0ff51b89529 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -952,6 +952,7 @@ endif
952config USB_G_WEBCAM 952config USB_G_WEBCAM
953 tristate "USB Webcam Gadget" 953 tristate "USB Webcam Gadget"
954 depends on VIDEO_DEV 954 depends on VIDEO_DEV
955 select USB_LIBCOMPOSITE
955 help 956 help
956 The Webcam Gadget acts as a composite USB Audio and Video Class 957 The Webcam Gadget acts as a composite USB Audio and Video Class
957 device. It provides a userspace API to process UVC control requests 958 device. It provides a userspace API to process UVC control requests
diff --git a/drivers/usb/gadget/lpc32xx_udc.c b/drivers/usb/gadget/lpc32xx_udc.c
index f696fb9b136d..21a9861dabf0 100644
--- a/drivers/usb/gadget/lpc32xx_udc.c
+++ b/drivers/usb/gadget/lpc32xx_udc.c
@@ -2930,10 +2930,10 @@ static void vbus_work(struct work_struct *work)
2930 2930
2931 /* Get the VBUS status from the transceiver */ 2931 /* Get the VBUS status from the transceiver */
2932 value = i2c_smbus_read_byte_data(udc->isp1301_i2c_client, 2932 value = i2c_smbus_read_byte_data(udc->isp1301_i2c_client,
2933 ISP1301_I2C_OTG_CONTROL_2); 2933 ISP1301_I2C_INTERRUPT_SOURCE);
2934 2934
2935 /* VBUS on or off? */ 2935 /* VBUS on or off? */
2936 if (value & OTG_B_SESS_VLD) 2936 if (value & INT_SESS_VLD)
2937 udc->vbus = 1; 2937 udc->vbus = 1;
2938 else 2938 else
2939 udc->vbus = 0; 2939 udc->vbus = 0;
diff --git a/drivers/usb/gadget/net2272.c b/drivers/usb/gadget/net2272.c
index 43ac7482fa91..c009263a47e3 100644
--- a/drivers/usb/gadget/net2272.c
+++ b/drivers/usb/gadget/net2272.c
@@ -2069,8 +2069,10 @@ static irqreturn_t net2272_irq(int irq, void *_dev)
2069#if defined(PLX_PCI_RDK2) 2069#if defined(PLX_PCI_RDK2)
2070 /* see if PCI int for us by checking irqstat */ 2070 /* see if PCI int for us by checking irqstat */
2071 intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT); 2071 intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
2072 if (!intcsr & (1 << NET2272_PCI_IRQ)) 2072 if (!intcsr & (1 << NET2272_PCI_IRQ)) {
2073 spin_unlock(&dev->lock);
2073 return IRQ_NONE; 2074 return IRQ_NONE;
2075 }
2074 /* check dma interrupts */ 2076 /* check dma interrupts */
2075#endif 2077#endif
2076 /* Platform/devcice interrupt handler */ 2078 /* Platform/devcice interrupt handler */
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
index eaa1005377fc..97e68b38cfdf 100644
--- a/drivers/usb/gadget/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/tcm_usb_gadget.c
@@ -1472,16 +1472,6 @@ static int usbg_queue_tm_rsp(struct se_cmd *se_cmd)
1472 return 0; 1472 return 0;
1473} 1473}
1474 1474
1475static u16 usbg_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
1476{
1477 return 0;
1478}
1479
1480static u16 usbg_get_fabric_sense_len(void)
1481{
1482 return 0;
1483}
1484
1485static const char *usbg_check_wwn(const char *name) 1475static const char *usbg_check_wwn(const char *name)
1486{ 1476{
1487 const char *n; 1477 const char *n;
@@ -1822,7 +1812,7 @@ static ssize_t tcm_usbg_tpg_store_nexus(
1822 ret = tcm_usbg_drop_nexus(tpg); 1812 ret = tcm_usbg_drop_nexus(tpg);
1823 return (!ret) ? count : ret; 1813 return (!ret) ? count : ret;
1824 } 1814 }
1825 if (strlen(page) > USBG_NAMELEN) { 1815 if (strlen(page) >= USBG_NAMELEN) {
1826 pr_err("Emulated NAA Sas Address: %s, exceeds" 1816 pr_err("Emulated NAA Sas Address: %s, exceeds"
1827 " max: %d\n", page, USBG_NAMELEN); 1817 " max: %d\n", page, USBG_NAMELEN);
1828 return -EINVAL; 1818 return -EINVAL;
@@ -1907,8 +1897,6 @@ static struct target_core_fabric_ops usbg_ops = {
1907 .queue_data_in = usbg_send_read_response, 1897 .queue_data_in = usbg_send_read_response,
1908 .queue_status = usbg_send_status_response, 1898 .queue_status = usbg_send_status_response,
1909 .queue_tm_rsp = usbg_queue_tm_rsp, 1899 .queue_tm_rsp = usbg_queue_tm_rsp,
1910 .get_fabric_sense_len = usbg_get_fabric_sense_len,
1911 .set_fabric_sense_len = usbg_set_fabric_sense_len,
1912 .check_stop_free = usbg_check_stop_free, 1900 .check_stop_free = usbg_check_stop_free,
1913 1901
1914 .fabric_make_wwn = usbg_make_tport, 1902 .fabric_make_wwn = usbg_make_tport,
@@ -1968,7 +1956,6 @@ static void usbg_deregister_configfs(void)
1968static struct usb_interface_descriptor bot_intf_desc = { 1956static struct usb_interface_descriptor bot_intf_desc = {
1969 .bLength = sizeof(bot_intf_desc), 1957 .bLength = sizeof(bot_intf_desc),
1970 .bDescriptorType = USB_DT_INTERFACE, 1958 .bDescriptorType = USB_DT_INTERFACE,
1971 .bAlternateSetting = 0,
1972 .bNumEndpoints = 2, 1959 .bNumEndpoints = 2,
1973 .bAlternateSetting = USB_G_ALT_INT_BBB, 1960 .bAlternateSetting = USB_G_ALT_INT_BBB,
1974 .bInterfaceClass = USB_CLASS_MASS_STORAGE, 1961 .bInterfaceClass = USB_CLASS_MASS_STORAGE,
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 9bfde82078ec..0d2f35ca93f1 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -222,7 +222,7 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
222 222
223 if (pdata->controller_ver < 0) { 223 if (pdata->controller_ver < 0) {
224 dev_warn(hcd->self.controller, "Could not get controller version\n"); 224 dev_warn(hcd->self.controller, "Could not get controller version\n");
225 return; 225 return -ENODEV;
226 } 226 }
227 227
228 portsc = ehci_readl(ehci, &ehci->regs->port_status[port_offset]); 228 portsc = ehci_readl(ehci, &ehci->regs->port_status[port_offset]);
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 8e7eca62f169..9c2717d66730 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -160,7 +160,7 @@ static const struct hc_driver ehci_orion_hc_driver = {
160 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, 160 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
161}; 161};
162 162
163static void __init 163static void __devinit
164ehci_orion_conf_mbus_windows(struct usb_hcd *hcd, 164ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
165 const struct mbus_dram_target_info *dram) 165 const struct mbus_dram_target_info *dram)
166{ 166{
diff --git a/drivers/usb/host/ehci-vt8500.c b/drivers/usb/host/ehci-vt8500.c
index 96722bfebc84..d3c9a3e397b9 100644
--- a/drivers/usb/host/ehci-vt8500.c
+++ b/drivers/usb/host/ehci-vt8500.c
@@ -85,6 +85,8 @@ static const struct hc_driver vt8500_ehci_hc_driver = {
85 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, 85 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
86}; 86};
87 87
88static u64 vt8500_ehci_dma_mask = DMA_BIT_MASK(32);
89
88static int vt8500_ehci_drv_probe(struct platform_device *pdev) 90static int vt8500_ehci_drv_probe(struct platform_device *pdev)
89{ 91{
90 struct usb_hcd *hcd; 92 struct usb_hcd *hcd;
@@ -95,6 +97,14 @@ static int vt8500_ehci_drv_probe(struct platform_device *pdev)
95 if (usb_disabled()) 97 if (usb_disabled())
96 return -ENODEV; 98 return -ENODEV;
97 99
100 /*
101 * Right now device-tree probed devices don't get dma_mask set.
102 * Since shared usb code relies on it, set it here for now.
103 * Once we have dma capability bindings this can go away.
104 */
105 if (!pdev->dev.dma_mask)
106 pdev->dev.dma_mask = &vt8500_ehci_dma_mask;
107
98 if (pdev->resource[1].flags != IORESOURCE_IRQ) { 108 if (pdev->resource[1].flags != IORESOURCE_IRQ) {
99 pr_debug("resource[1] is not IORESOURCE_IRQ"); 109 pr_debug("resource[1] is not IORESOURCE_IRQ");
100 return -ENOMEM; 110 return -ENOMEM;
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 966d1484ee79..39f9e4a9a2d3 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -545,7 +545,14 @@ static const struct dmi_system_id __devinitconst ehci_dmi_nohandoff_table[] = {
545 /* Pegatron Lucid (Ordissimo AIRIS) */ 545 /* Pegatron Lucid (Ordissimo AIRIS) */
546 .matches = { 546 .matches = {
547 DMI_MATCH(DMI_BOARD_NAME, "M11JB"), 547 DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
548 DMI_MATCH(DMI_BIOS_VERSION, "Lucid-GE-133"), 548 DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
549 },
550 },
551 {
552 /* Pegatron Lucid (Ordissimo) */
553 .matches = {
554 DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"),
555 DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
549 }, 556 },
550 }, 557 },
551 { } 558 { }
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index e4780491df4a..68ebf20e1519 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -60,6 +60,7 @@ static const struct hc_driver uhci_platform_hc_driver = {
60 .hub_control = uhci_hub_control, 60 .hub_control = uhci_hub_control,
61}; 61};
62 62
63static u64 platform_uhci_dma_mask = DMA_BIT_MASK(32);
63 64
64static int __devinit uhci_hcd_platform_probe(struct platform_device *pdev) 65static int __devinit uhci_hcd_platform_probe(struct platform_device *pdev)
65{ 66{
@@ -71,6 +72,14 @@ static int __devinit uhci_hcd_platform_probe(struct platform_device *pdev)
71 if (usb_disabled()) 72 if (usb_disabled())
72 return -ENODEV; 73 return -ENODEV;
73 74
75 /*
76 * Right now device-tree probed devices don't get dma_mask set.
77 * Since shared usb code relies on it, set it here for now.
78 * Once we have dma capability bindings this can go away.
79 */
80 if (!pdev->dev.dma_mask)
81 pdev->dev.dma_mask = &platform_uhci_dma_mask;
82
74 hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev, 83 hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev,
75 pdev->name); 84 pdev->name);
76 if (!hcd) 85 if (!hcd)
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 4b436f5a4171..5f3a7c74aa8d 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -544,7 +544,6 @@ void xhci_dbg_ctx(struct xhci_hcd *xhci,
544 int i; 544 int i;
545 /* Fields are 32 bits wide, DMA addresses are in bytes */ 545 /* Fields are 32 bits wide, DMA addresses are in bytes */
546 int field_size = 32 / 8; 546 int field_size = 32 / 8;
547 struct xhci_slot_ctx *slot_ctx;
548 dma_addr_t dma = ctx->dma; 547 dma_addr_t dma = ctx->dma;
549 int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params); 548 int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
550 549
@@ -570,7 +569,6 @@ void xhci_dbg_ctx(struct xhci_hcd *xhci,
570 dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma); 569 dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
571 } 570 }
572 571
573 slot_ctx = xhci_get_slot_ctx(xhci, ctx);
574 xhci_dbg_slot_ctx(xhci, ctx); 572 xhci_dbg_slot_ctx(xhci, ctx);
575 xhci_dbg_ep_ctx(xhci, ctx, last_ep); 573 xhci_dbg_ep_ctx(xhci, ctx, last_ep);
576} 574}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index aa90ad4d4fd5..a686cf4905bb 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -151,9 +151,8 @@ static void xhci_usb3_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
151 if (portsc & PORT_DEV_REMOVE) 151 if (portsc & PORT_DEV_REMOVE)
152 port_removable |= 1 << (i + 1); 152 port_removable |= 1 << (i + 1);
153 } 153 }
154 memset(&desc->u.ss.DeviceRemovable, 154
155 (__force __u16) cpu_to_le16(port_removable), 155 desc->u.ss.DeviceRemovable = cpu_to_le16(port_removable);
156 sizeof(__u16));
157} 156}
158 157
159static void xhci_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci, 158static void xhci_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
@@ -809,11 +808,13 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
809 temp = xhci_readl(xhci, port_array[wIndex]); 808 temp = xhci_readl(xhci, port_array[wIndex]);
810 xhci_dbg(xhci, "set port power, actual port %d status = 0x%x\n", wIndex, temp); 809 xhci_dbg(xhci, "set port power, actual port %d status = 0x%x\n", wIndex, temp);
811 810
811 spin_unlock_irqrestore(&xhci->lock, flags);
812 temp = usb_acpi_power_manageable(hcd->self.root_hub, 812 temp = usb_acpi_power_manageable(hcd->self.root_hub,
813 wIndex); 813 wIndex);
814 if (temp) 814 if (temp)
815 usb_acpi_set_power_state(hcd->self.root_hub, 815 usb_acpi_set_power_state(hcd->self.root_hub,
816 wIndex, true); 816 wIndex, true);
817 spin_lock_irqsave(&xhci->lock, flags);
817 break; 818 break;
818 case USB_PORT_FEAT_RESET: 819 case USB_PORT_FEAT_RESET:
819 temp = (temp | PORT_RESET); 820 temp = (temp | PORT_RESET);
@@ -917,11 +918,13 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
917 xhci_writel(xhci, temp & ~PORT_POWER, 918 xhci_writel(xhci, temp & ~PORT_POWER,
918 port_array[wIndex]); 919 port_array[wIndex]);
919 920
921 spin_unlock_irqrestore(&xhci->lock, flags);
920 temp = usb_acpi_power_manageable(hcd->self.root_hub, 922 temp = usb_acpi_power_manageable(hcd->self.root_hub,
921 wIndex); 923 wIndex);
922 if (temp) 924 if (temp)
923 usb_acpi_set_power_state(hcd->self.root_hub, 925 usb_acpi_set_power_state(hcd->self.root_hub,
924 wIndex, false); 926 wIndex, false);
927 spin_lock_irqsave(&xhci->lock, flags);
925 break; 928 break;
926 default: 929 default:
927 goto error; 930 goto error;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index c6ebb176dc4f..4e1a8946b8d1 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1228,6 +1228,17 @@ static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
1228 cur_seg = find_trb_seg(xhci->cmd_ring->first_seg, 1228 cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
1229 xhci->cmd_ring->dequeue, &cycle_state); 1229 xhci->cmd_ring->dequeue, &cycle_state);
1230 1230
1231 if (!cur_seg) {
1232 xhci_warn(xhci, "Command ring mismatch, dequeue = %p %llx (dma)\n",
1233 xhci->cmd_ring->dequeue,
1234 (unsigned long long)
1235 xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1236 xhci->cmd_ring->dequeue));
1237 xhci_debug_ring(xhci, xhci->cmd_ring);
1238 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
1239 return;
1240 }
1241
1231 /* find the command trb matched by cd from command ring */ 1242 /* find the command trb matched by cd from command ring */
1232 for (cmd_trb = xhci->cmd_ring->dequeue; 1243 for (cmd_trb = xhci->cmd_ring->dequeue;
1233 cmd_trb != xhci->cmd_ring->enqueue; 1244 cmd_trb != xhci->cmd_ring->enqueue;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 8d7fcbbe6ade..c9e419f29b74 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -479,7 +479,8 @@ static bool compliance_mode_recovery_timer_quirk_check(void)
479 479
480 if (strstr(dmi_product_name, "Z420") || 480 if (strstr(dmi_product_name, "Z420") ||
481 strstr(dmi_product_name, "Z620") || 481 strstr(dmi_product_name, "Z620") ||
482 strstr(dmi_product_name, "Z820")) 482 strstr(dmi_product_name, "Z820") ||
483 strstr(dmi_product_name, "Z1"))
483 return true; 484 return true;
484 485
485 return false; 486 return false;
@@ -1626,7 +1627,6 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1626 struct xhci_hcd *xhci; 1627 struct xhci_hcd *xhci;
1627 struct xhci_container_ctx *in_ctx, *out_ctx; 1628 struct xhci_container_ctx *in_ctx, *out_ctx;
1628 unsigned int ep_index; 1629 unsigned int ep_index;
1629 struct xhci_ep_ctx *ep_ctx;
1630 struct xhci_slot_ctx *slot_ctx; 1630 struct xhci_slot_ctx *slot_ctx;
1631 struct xhci_input_control_ctx *ctrl_ctx; 1631 struct xhci_input_control_ctx *ctrl_ctx;
1632 u32 added_ctxs; 1632 u32 added_ctxs;
@@ -1662,7 +1662,6 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1662 out_ctx = virt_dev->out_ctx; 1662 out_ctx = virt_dev->out_ctx;
1663 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 1663 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1664 ep_index = xhci_get_endpoint_index(&ep->desc); 1664 ep_index = xhci_get_endpoint_index(&ep->desc);
1665 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1666 1665
1667 /* If this endpoint is already in use, and the upper layers are trying 1666 /* If this endpoint is already in use, and the upper layers are trying
1668 * to add it again without dropping it, reject the addition. 1667 * to add it again without dropping it, reject the addition.
@@ -1816,6 +1815,8 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1816 case COMP_EBADSLT: 1815 case COMP_EBADSLT:
1817 dev_warn(&udev->dev, "WARN: slot not enabled for" 1816 dev_warn(&udev->dev, "WARN: slot not enabled for"
1818 "evaluate context command.\n"); 1817 "evaluate context command.\n");
1818 ret = -EINVAL;
1819 break;
1819 case COMP_CTX_STATE: 1820 case COMP_CTX_STATE:
1820 dev_warn(&udev->dev, "WARN: invalid context state for " 1821 dev_warn(&udev->dev, "WARN: invalid context state for "
1821 "evaluate context command.\n"); 1822 "evaluate context command.\n");
@@ -4020,7 +4021,7 @@ int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4020static unsigned long long xhci_service_interval_to_ns( 4021static unsigned long long xhci_service_interval_to_ns(
4021 struct usb_endpoint_descriptor *desc) 4022 struct usb_endpoint_descriptor *desc)
4022{ 4023{
4023 return (1 << (desc->bInterval - 1)) * 125 * 1000; 4024 return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4024} 4025}
4025 4026
4026static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev, 4027static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
@@ -4141,7 +4142,7 @@ static u16 xhci_calculate_intel_u2_timeout(struct usb_device *udev,
4141 (xhci_service_interval_to_ns(desc) > timeout_ns)) 4142 (xhci_service_interval_to_ns(desc) > timeout_ns))
4142 timeout_ns = xhci_service_interval_to_ns(desc); 4143 timeout_ns = xhci_service_interval_to_ns(desc);
4143 4144
4144 u2_del_ns = udev->bos->ss_cap->bU2DevExitLat * 1000; 4145 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4145 if (u2_del_ns > timeout_ns) 4146 if (u2_del_ns > timeout_ns)
4146 timeout_ns = u2_del_ns; 4147 timeout_ns = u2_del_ns;
4147 4148
diff --git a/drivers/usb/misc/ezusb.c b/drivers/usb/misc/ezusb.c
index 4223d761223d..6589268a6515 100644
--- a/drivers/usb/misc/ezusb.c
+++ b/drivers/usb/misc/ezusb.c
@@ -158,3 +158,4 @@ int ezusb_fx2_ihex_firmware_download(struct usb_device *dev,
158} 158}
159EXPORT_SYMBOL_GPL(ezusb_fx2_ihex_firmware_download); 159EXPORT_SYMBOL_GPL(ezusb_fx2_ihex_firmware_download);
160 160
161MODULE_LICENSE("GPL");
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
index 457f25e62c51..c964d6af178b 100644
--- a/drivers/usb/musb/am35x.c
+++ b/drivers/usb/musb/am35x.c
@@ -305,6 +305,12 @@ static irqreturn_t am35x_musb_interrupt(int irq, void *hci)
305 ret = IRQ_HANDLED; 305 ret = IRQ_HANDLED;
306 } 306 }
307 307
308 /* Drop spurious RX and TX if device is disconnected */
309 if (musb->int_usb & MUSB_INTR_DISCONNECT) {
310 musb->int_tx = 0;
311 musb->int_rx = 0;
312 }
313
308 if (musb->int_tx || musb->int_rx || musb->int_usb) 314 if (musb->int_tx || musb->int_rx || musb->int_usb)
309 ret |= musb_interrupt(musb); 315 ret |= musb_interrupt(musb);
310 316
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 444346e1e10d..ff5f112053d2 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -458,11 +458,11 @@ static int __devinit dsps_create_musb_pdev(struct dsps_glue *glue, u8 id)
458 struct platform_device *musb; 458 struct platform_device *musb;
459 struct resource *res; 459 struct resource *res;
460 struct resource resources[2]; 460 struct resource resources[2];
461 char res_name[10]; 461 char res_name[11];
462 int ret, musbid; 462 int ret, musbid;
463 463
464 /* get memory resource */ 464 /* get memory resource */
465 sprintf(res_name, "musb%d", id); 465 snprintf(res_name, sizeof(res_name), "musb%d", id);
466 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name); 466 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
467 if (!res) { 467 if (!res) {
468 dev_err(dev, "%s get mem resource failed\n", res_name); 468 dev_err(dev, "%s get mem resource failed\n", res_name);
@@ -473,7 +473,7 @@ static int __devinit dsps_create_musb_pdev(struct dsps_glue *glue, u8 id)
473 resources[0] = *res; 473 resources[0] = *res;
474 474
475 /* get irq resource */ 475 /* get irq resource */
476 sprintf(res_name, "musb%d-irq", id); 476 snprintf(res_name, sizeof(res_name), "musb%d-irq", id);
477 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name); 477 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
478 if (!res) { 478 if (!res) {
479 dev_err(dev, "%s get irq resource failed\n", res_name); 479 dev_err(dev, "%s get irq resource failed\n", res_name);
@@ -530,7 +530,7 @@ static int __devinit dsps_create_musb_pdev(struct dsps_glue *glue, u8 id)
530 530
531 of_property_read_u32(np, "num-eps", (u32 *)&config->num_eps); 531 of_property_read_u32(np, "num-eps", (u32 *)&config->num_eps);
532 of_property_read_u32(np, "ram-bits", (u32 *)&config->ram_bits); 532 of_property_read_u32(np, "ram-bits", (u32 *)&config->ram_bits);
533 sprintf(res_name, "port%d-mode", id); 533 snprintf(res_name, sizeof(res_name), "port%d-mode", id);
534 of_property_read_u32(np, res_name, (u32 *)&pdata->mode); 534 of_property_read_u32(np, res_name, (u32 *)&pdata->mode);
535 of_property_read_u32(np, "power", (u32 *)&pdata->power); 535 of_property_read_u32(np, "power", (u32 *)&pdata->power);
536 config->multipoint = of_property_read_bool(np, "multipoint"); 536 config->multipoint = of_property_read_bool(np, "multipoint");
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 143c4e9e1be4..c021b202c0f3 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -795,6 +795,7 @@ static void xfer_work(struct work_struct *work)
795 dev_dbg(dev, " %s %d (%d/ %d)\n", 795 dev_dbg(dev, " %s %d (%d/ %d)\n",
796 fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero); 796 fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
797 797
798 usbhs_pipe_enable(pipe);
798 usbhsf_dma_start(pipe, fifo); 799 usbhsf_dma_start(pipe, fifo);
799 dma_async_issue_pending(chan); 800 dma_async_issue_pending(chan);
800} 801}
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index 35c5208f3249..61933a90e5bf 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -273,9 +273,9 @@ static irqreturn_t usbhs_interrupt(int irq, void *data)
273 usbhs_write(priv, INTSTS0, ~irq_state.intsts0 & INTSTS0_MAGIC); 273 usbhs_write(priv, INTSTS0, ~irq_state.intsts0 & INTSTS0_MAGIC);
274 usbhs_write(priv, INTSTS1, ~irq_state.intsts1 & INTSTS1_MAGIC); 274 usbhs_write(priv, INTSTS1, ~irq_state.intsts1 & INTSTS1_MAGIC);
275 275
276 usbhs_write(priv, BRDYSTS, 0); 276 usbhs_write(priv, BRDYSTS, ~irq_state.brdysts);
277 usbhs_write(priv, NRDYSTS, 0); 277 usbhs_write(priv, NRDYSTS, ~irq_state.nrdysts);
278 usbhs_write(priv, BEMPSTS, 0); 278 usbhs_write(priv, BEMPSTS, ~irq_state.bempsts);
279 279
280 /* 280 /*
281 * call irq callback functions 281 * call irq callback functions
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index 9b69a1323294..069cd765400c 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -334,6 +334,11 @@ static void usbhsh_pipe_detach(struct usbhsh_hpriv *hpriv,
334 struct device *dev = usbhs_priv_to_dev(priv); 334 struct device *dev = usbhs_priv_to_dev(priv);
335 unsigned long flags; 335 unsigned long flags;
336 336
337 if (unlikely(!uep)) {
338 dev_err(dev, "no uep\n");
339 return;
340 }
341
337 /******************** spin lock ********************/ 342 /******************** spin lock ********************/
338 usbhs_lock(priv, flags); 343 usbhs_lock(priv, flags);
339 344
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index 08786c06dcf1..3d80c7b1fd1b 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -54,7 +54,7 @@ struct usbhs_pipe_info {
54 * pipe list 54 * pipe list
55 */ 55 */
56#define __usbhs_for_each_pipe(start, pos, info, i) \ 56#define __usbhs_for_each_pipe(start, pos, info, i) \
57 for (i = start, pos = (info)->pipe; \ 57 for (i = start, pos = (info)->pipe + i; \
58 i < (info)->size; \ 58 i < (info)->size; \
59 i++, pos = (info)->pipe + i) 59 i++, pos = (info)->pipe + i)
60 60
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index cf2522c397d3..bd50a8a41a0f 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -125,9 +125,6 @@ static inline int calc_divisor(int bps)
125 125
126static int ark3116_attach(struct usb_serial *serial) 126static int ark3116_attach(struct usb_serial *serial)
127{ 127{
128 struct usb_serial_port *port = serial->port[0];
129 struct ark3116_private *priv;
130
131 /* make sure we have our end-points */ 128 /* make sure we have our end-points */
132 if ((serial->num_bulk_in == 0) || 129 if ((serial->num_bulk_in == 0) ||
133 (serial->num_bulk_out == 0) || 130 (serial->num_bulk_out == 0) ||
@@ -142,8 +139,15 @@ static int ark3116_attach(struct usb_serial *serial)
142 return -EINVAL; 139 return -EINVAL;
143 } 140 }
144 141
145 priv = kzalloc(sizeof(struct ark3116_private), 142 return 0;
146 GFP_KERNEL); 143}
144
145static int ark3116_port_probe(struct usb_serial_port *port)
146{
147 struct usb_serial *serial = port->serial;
148 struct ark3116_private *priv;
149
150 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
147 if (!priv) 151 if (!priv)
148 return -ENOMEM; 152 return -ENOMEM;
149 153
@@ -198,18 +202,15 @@ static int ark3116_attach(struct usb_serial *serial)
198 return 0; 202 return 0;
199} 203}
200 204
201static void ark3116_release(struct usb_serial *serial) 205static int ark3116_port_remove(struct usb_serial_port *port)
202{ 206{
203 struct usb_serial_port *port = serial->port[0];
204 struct ark3116_private *priv = usb_get_serial_port_data(port); 207 struct ark3116_private *priv = usb_get_serial_port_data(port);
205 208
206 /* device is closed, so URBs and DMA should be down */ 209 /* device is closed, so URBs and DMA should be down */
207
208 usb_set_serial_port_data(port, NULL);
209
210 mutex_destroy(&priv->hw_lock); 210 mutex_destroy(&priv->hw_lock);
211
212 kfree(priv); 211 kfree(priv);
212
213 return 0;
213} 214}
214 215
215static void ark3116_init_termios(struct tty_struct *tty) 216static void ark3116_init_termios(struct tty_struct *tty)
@@ -723,7 +724,8 @@ static struct usb_serial_driver ark3116_device = {
723 .id_table = id_table, 724 .id_table = id_table,
724 .num_ports = 1, 725 .num_ports = 1,
725 .attach = ark3116_attach, 726 .attach = ark3116_attach,
726 .release = ark3116_release, 727 .port_probe = ark3116_port_probe,
728 .port_remove = ark3116_port_remove,
727 .set_termios = ark3116_set_termios, 729 .set_termios = ark3116_set_termios,
728 .init_termios = ark3116_init_termios, 730 .init_termios = ark3116_init_termios,
729 .ioctl = ark3116_ioctl, 731 .ioctl = ark3116_ioctl,
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index 99449424193f..ea29556f0d72 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -45,8 +45,8 @@
45#define DRIVER_DESC "USB Belkin Serial converter driver" 45#define DRIVER_DESC "USB Belkin Serial converter driver"
46 46
47/* function prototypes for a Belkin USB Serial Adapter F5U103 */ 47/* function prototypes for a Belkin USB Serial Adapter F5U103 */
48static int belkin_sa_startup(struct usb_serial *serial); 48static int belkin_sa_port_probe(struct usb_serial_port *port);
49static void belkin_sa_release(struct usb_serial *serial); 49static int belkin_sa_port_remove(struct usb_serial_port *port);
50static int belkin_sa_open(struct tty_struct *tty, 50static int belkin_sa_open(struct tty_struct *tty,
51 struct usb_serial_port *port); 51 struct usb_serial_port *port);
52static void belkin_sa_close(struct usb_serial_port *port); 52static void belkin_sa_close(struct usb_serial_port *port);
@@ -88,8 +88,8 @@ static struct usb_serial_driver belkin_device = {
88 .break_ctl = belkin_sa_break_ctl, 88 .break_ctl = belkin_sa_break_ctl,
89 .tiocmget = belkin_sa_tiocmget, 89 .tiocmget = belkin_sa_tiocmget,
90 .tiocmset = belkin_sa_tiocmset, 90 .tiocmset = belkin_sa_tiocmset,
91 .attach = belkin_sa_startup, 91 .port_probe = belkin_sa_port_probe,
92 .release = belkin_sa_release, 92 .port_remove = belkin_sa_port_remove,
93}; 93};
94 94
95static struct usb_serial_driver * const serial_drivers[] = { 95static struct usb_serial_driver * const serial_drivers[] = {
@@ -118,17 +118,15 @@ struct belkin_sa_private {
118 (c), BELKIN_SA_SET_REQUEST_TYPE, \ 118 (c), BELKIN_SA_SET_REQUEST_TYPE, \
119 (v), 0, NULL, 0, WDR_TIMEOUT) 119 (v), 0, NULL, 0, WDR_TIMEOUT)
120 120
121/* do some startup allocations not currently performed by usb_serial_probe() */ 121static int belkin_sa_port_probe(struct usb_serial_port *port)
122static int belkin_sa_startup(struct usb_serial *serial)
123{ 122{
124 struct usb_device *dev = serial->dev; 123 struct usb_device *dev = port->serial->dev;
125 struct belkin_sa_private *priv; 124 struct belkin_sa_private *priv;
126 125
127 /* allocate the private data structure */
128 priv = kmalloc(sizeof(struct belkin_sa_private), GFP_KERNEL); 126 priv = kmalloc(sizeof(struct belkin_sa_private), GFP_KERNEL);
129 if (!priv) 127 if (!priv)
130 return -1; /* error */ 128 return -ENOMEM;
131 /* set initial values for control structures */ 129
132 spin_lock_init(&priv->lock); 130 spin_lock_init(&priv->lock);
133 priv->control_state = 0; 131 priv->control_state = 0;
134 priv->last_lsr = 0; 132 priv->last_lsr = 0;
@@ -140,18 +138,19 @@ static int belkin_sa_startup(struct usb_serial *serial)
140 le16_to_cpu(dev->descriptor.bcdDevice), 138 le16_to_cpu(dev->descriptor.bcdDevice),
141 priv->bad_flow_control); 139 priv->bad_flow_control);
142 140
143 init_waitqueue_head(&serial->port[0]->write_wait); 141 usb_set_serial_port_data(port, priv);
144 usb_set_serial_port_data(serial->port[0], priv);
145 142
146 return 0; 143 return 0;
147} 144}
148 145
149static void belkin_sa_release(struct usb_serial *serial) 146static int belkin_sa_port_remove(struct usb_serial_port *port)
150{ 147{
151 int i; 148 struct belkin_sa_private *priv;
152 149
153 for (i = 0; i < serial->num_ports; ++i) 150 priv = usb_get_serial_port_data(port);
154 kfree(usb_get_serial_port_data(serial->port[i])); 151 kfree(priv);
152
153 return 0;
155} 154}
156 155
157static int belkin_sa_open(struct tty_struct *tty, 156static int belkin_sa_open(struct tty_struct *tty,
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index e9c7046ae355..d255f66e708e 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -242,13 +242,11 @@ out: kfree(buffer);
242 return r; 242 return r;
243} 243}
244 244
245/* allocate private data */ 245static int ch341_port_probe(struct usb_serial_port *port)
246static int ch341_attach(struct usb_serial *serial)
247{ 246{
248 struct ch341_private *priv; 247 struct ch341_private *priv;
249 int r; 248 int r;
250 249
251 /* private data */
252 priv = kzalloc(sizeof(struct ch341_private), GFP_KERNEL); 250 priv = kzalloc(sizeof(struct ch341_private), GFP_KERNEL);
253 if (!priv) 251 if (!priv)
254 return -ENOMEM; 252 return -ENOMEM;
@@ -258,17 +256,27 @@ static int ch341_attach(struct usb_serial *serial)
258 priv->baud_rate = DEFAULT_BAUD_RATE; 256 priv->baud_rate = DEFAULT_BAUD_RATE;
259 priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR; 257 priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
260 258
261 r = ch341_configure(serial->dev, priv); 259 r = ch341_configure(port->serial->dev, priv);
262 if (r < 0) 260 if (r < 0)
263 goto error; 261 goto error;
264 262
265 usb_set_serial_port_data(serial->port[0], priv); 263 usb_set_serial_port_data(port, priv);
266 return 0; 264 return 0;
267 265
268error: kfree(priv); 266error: kfree(priv);
269 return r; 267 return r;
270} 268}
271 269
270static int ch341_port_remove(struct usb_serial_port *port)
271{
272 struct ch341_private *priv;
273
274 priv = usb_get_serial_port_data(port);
275 kfree(priv);
276
277 return 0;
278}
279
272static int ch341_carrier_raised(struct usb_serial_port *port) 280static int ch341_carrier_raised(struct usb_serial_port *port)
273{ 281{
274 struct ch341_private *priv = usb_get_serial_port_data(port); 282 struct ch341_private *priv = usb_get_serial_port_data(port);
@@ -304,7 +312,7 @@ static void ch341_close(struct usb_serial_port *port)
304static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port) 312static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
305{ 313{
306 struct usb_serial *serial = port->serial; 314 struct usb_serial *serial = port->serial;
307 struct ch341_private *priv = usb_get_serial_port_data(serial->port[0]); 315 struct ch341_private *priv = usb_get_serial_port_data(port);
308 int r; 316 int r;
309 317
310 priv->baud_rate = DEFAULT_BAUD_RATE; 318 priv->baud_rate = DEFAULT_BAUD_RATE;
@@ -608,7 +616,8 @@ static struct usb_serial_driver ch341_device = {
608 .tiocmget = ch341_tiocmget, 616 .tiocmget = ch341_tiocmget,
609 .tiocmset = ch341_tiocmset, 617 .tiocmset = ch341_tiocmset,
610 .read_int_callback = ch341_read_int_callback, 618 .read_int_callback = ch341_read_int_callback,
611 .attach = ch341_attach, 619 .port_probe = ch341_port_probe,
620 .port_remove = ch341_port_remove,
612 .reset_resume = ch341_reset_resume, 621 .reset_resume = ch341_reset_resume,
613}; 622};
614 623
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 28af5acc3360..eb033fc92a15 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -162,7 +162,7 @@ static const struct usb_device_id id_table[] = {
162 162
163MODULE_DEVICE_TABLE(usb, id_table); 163MODULE_DEVICE_TABLE(usb, id_table);
164 164
165struct cp210x_port_private { 165struct cp210x_serial_private {
166 __u8 bInterfaceNumber; 166 __u8 bInterfaceNumber;
167}; 167};
168 168
@@ -276,7 +276,7 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
276 unsigned int *data, int size) 276 unsigned int *data, int size)
277{ 277{
278 struct usb_serial *serial = port->serial; 278 struct usb_serial *serial = port->serial;
279 struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); 279 struct cp210x_serial_private *spriv = usb_get_serial_data(serial);
280 __le32 *buf; 280 __le32 *buf;
281 int result, i, length; 281 int result, i, length;
282 282
@@ -292,7 +292,7 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
292 /* Issue the request, attempting to read 'size' bytes */ 292 /* Issue the request, attempting to read 'size' bytes */
293 result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 293 result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
294 request, REQTYPE_INTERFACE_TO_HOST, 0x0000, 294 request, REQTYPE_INTERFACE_TO_HOST, 0x0000,
295 port_priv->bInterfaceNumber, buf, size, 295 spriv->bInterfaceNumber, buf, size,
296 USB_CTRL_GET_TIMEOUT); 296 USB_CTRL_GET_TIMEOUT);
297 297
298 /* Convert data into an array of integers */ 298 /* Convert data into an array of integers */
@@ -323,7 +323,7 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
323 unsigned int *data, int size) 323 unsigned int *data, int size)
324{ 324{
325 struct usb_serial *serial = port->serial; 325 struct usb_serial *serial = port->serial;
326 struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); 326 struct cp210x_serial_private *spriv = usb_get_serial_data(serial);
327 __le32 *buf; 327 __le32 *buf;
328 int result, i, length; 328 int result, i, length;
329 329
@@ -345,13 +345,13 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
345 result = usb_control_msg(serial->dev, 345 result = usb_control_msg(serial->dev,
346 usb_sndctrlpipe(serial->dev, 0), 346 usb_sndctrlpipe(serial->dev, 0),
347 request, REQTYPE_HOST_TO_INTERFACE, 0x0000, 347 request, REQTYPE_HOST_TO_INTERFACE, 0x0000,
348 port_priv->bInterfaceNumber, buf, size, 348 spriv->bInterfaceNumber, buf, size,
349 USB_CTRL_SET_TIMEOUT); 349 USB_CTRL_SET_TIMEOUT);
350 } else { 350 } else {
351 result = usb_control_msg(serial->dev, 351 result = usb_control_msg(serial->dev,
352 usb_sndctrlpipe(serial->dev, 0), 352 usb_sndctrlpipe(serial->dev, 0),
353 request, REQTYPE_HOST_TO_INTERFACE, data[0], 353 request, REQTYPE_HOST_TO_INTERFACE, data[0],
354 port_priv->bInterfaceNumber, NULL, 0, 354 spriv->bInterfaceNumber, NULL, 0,
355 USB_CTRL_SET_TIMEOUT); 355 USB_CTRL_SET_TIMEOUT);
356 } 356 }
357 357
@@ -845,36 +845,30 @@ static void cp210x_break_ctl (struct tty_struct *tty, int break_state)
845 845
846static int cp210x_startup(struct usb_serial *serial) 846static int cp210x_startup(struct usb_serial *serial)
847{ 847{
848 struct cp210x_port_private *port_priv; 848 struct usb_host_interface *cur_altsetting;
849 int i; 849 struct cp210x_serial_private *spriv;
850 850
851 /* cp210x buffers behave strangely unless device is reset */ 851 /* cp210x buffers behave strangely unless device is reset */
852 usb_reset_device(serial->dev); 852 usb_reset_device(serial->dev);
853 853
854 for (i = 0; i < serial->num_ports; i++) { 854 spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
855 port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL); 855 if (!spriv)
856 if (!port_priv) 856 return -ENOMEM;
857 return -ENOMEM;
858 857
859 port_priv->bInterfaceNumber = 858 cur_altsetting = serial->interface->cur_altsetting;
860 serial->interface->cur_altsetting->desc.bInterfaceNumber; 859 spriv->bInterfaceNumber = cur_altsetting->desc.bInterfaceNumber;
861 860
862 usb_set_serial_port_data(serial->port[i], port_priv); 861 usb_set_serial_data(serial, spriv);
863 }
864 862
865 return 0; 863 return 0;
866} 864}
867 865
868static void cp210x_release(struct usb_serial *serial) 866static void cp210x_release(struct usb_serial *serial)
869{ 867{
870 struct cp210x_port_private *port_priv; 868 struct cp210x_serial_private *spriv;
871 int i;
872 869
873 for (i = 0; i < serial->num_ports; i++) { 870 spriv = usb_get_serial_data(serial);
874 port_priv = usb_get_serial_port_data(serial->port[i]); 871 kfree(spriv);
875 kfree(port_priv);
876 usb_set_serial_port_data(serial->port[i], NULL);
877 }
878} 872}
879 873
880module_usb_serial_driver(serial_drivers, id_table); 874module_usb_serial_driver(serial_drivers, id_table);
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 2a7aecc72237..4ee77dcbe690 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -55,9 +55,9 @@
55#define CYBERJACK_PRODUCT_ID 0x0100 55#define CYBERJACK_PRODUCT_ID 0x0100
56 56
57/* Function prototypes */ 57/* Function prototypes */
58static int cyberjack_startup(struct usb_serial *serial);
59static void cyberjack_disconnect(struct usb_serial *serial); 58static void cyberjack_disconnect(struct usb_serial *serial);
60static void cyberjack_release(struct usb_serial *serial); 59static int cyberjack_port_probe(struct usb_serial_port *port);
60static int cyberjack_port_remove(struct usb_serial_port *port);
61static int cyberjack_open(struct tty_struct *tty, 61static int cyberjack_open(struct tty_struct *tty,
62 struct usb_serial_port *port); 62 struct usb_serial_port *port);
63static void cyberjack_close(struct usb_serial_port *port); 63static void cyberjack_close(struct usb_serial_port *port);
@@ -83,9 +83,9 @@ static struct usb_serial_driver cyberjack_device = {
83 .description = "Reiner SCT Cyberjack USB card reader", 83 .description = "Reiner SCT Cyberjack USB card reader",
84 .id_table = id_table, 84 .id_table = id_table,
85 .num_ports = 1, 85 .num_ports = 1,
86 .attach = cyberjack_startup,
87 .disconnect = cyberjack_disconnect, 86 .disconnect = cyberjack_disconnect,
88 .release = cyberjack_release, 87 .port_probe = cyberjack_port_probe,
88 .port_remove = cyberjack_port_remove,
89 .open = cyberjack_open, 89 .open = cyberjack_open,
90 .close = cyberjack_close, 90 .close = cyberjack_close,
91 .write = cyberjack_write, 91 .write = cyberjack_write,
@@ -107,56 +107,45 @@ struct cyberjack_private {
107 short wrsent; /* Data already sent */ 107 short wrsent; /* Data already sent */
108}; 108};
109 109
110/* do some startup allocations not currently performed by usb_serial_probe() */ 110static int cyberjack_port_probe(struct usb_serial_port *port)
111static int cyberjack_startup(struct usb_serial *serial)
112{ 111{
113 struct cyberjack_private *priv; 112 struct cyberjack_private *priv;
114 int i; 113 int result;
115 114
116 /* allocate the private data structure */
117 priv = kmalloc(sizeof(struct cyberjack_private), GFP_KERNEL); 115 priv = kmalloc(sizeof(struct cyberjack_private), GFP_KERNEL);
118 if (!priv) 116 if (!priv)
119 return -ENOMEM; 117 return -ENOMEM;
120 118
121 /* set initial values */
122 spin_lock_init(&priv->lock); 119 spin_lock_init(&priv->lock);
123 priv->rdtodo = 0; 120 priv->rdtodo = 0;
124 priv->wrfilled = 0; 121 priv->wrfilled = 0;
125 priv->wrsent = 0; 122 priv->wrsent = 0;
126 usb_set_serial_port_data(serial->port[0], priv);
127 123
128 init_waitqueue_head(&serial->port[0]->write_wait); 124 usb_set_serial_port_data(port, priv);
129 125
130 for (i = 0; i < serial->num_ports; ++i) { 126 result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
131 int result; 127 if (result)
132 result = usb_submit_urb(serial->port[i]->interrupt_in_urb, 128 dev_err(&port->dev, "usb_submit_urb(read int) failed\n");
133 GFP_KERNEL);
134 if (result)
135 dev_err(&serial->dev->dev,
136 "usb_submit_urb(read int) failed\n");
137 dev_dbg(&serial->dev->dev, "%s - usb_submit_urb(int urb)\n",
138 __func__);
139 }
140 129
141 return 0; 130 return 0;
142} 131}
143 132
144static void cyberjack_disconnect(struct usb_serial *serial) 133static int cyberjack_port_remove(struct usb_serial_port *port)
145{ 134{
146 int i; 135 struct cyberjack_private *priv;
147 136
148 for (i = 0; i < serial->num_ports; ++i) 137 priv = usb_get_serial_port_data(port);
149 usb_kill_urb(serial->port[i]->interrupt_in_urb); 138 kfree(priv);
139
140 return 0;
150} 141}
151 142
152static void cyberjack_release(struct usb_serial *serial) 143static void cyberjack_disconnect(struct usb_serial *serial)
153{ 144{
154 int i; 145 int i;
155 146
156 for (i = 0; i < serial->num_ports; ++i) { 147 for (i = 0; i < serial->num_ports; ++i)
157 /* My special items, the standard routines free my urbs */ 148 usb_kill_urb(serial->port[i]->interrupt_in_urb);
158 kfree(usb_get_serial_port_data(serial->port[i]));
159 }
160} 149}
161 150
162static int cyberjack_open(struct tty_struct *tty, 151static int cyberjack_open(struct tty_struct *tty,
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 1befce21e173..f0da1279c114 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -123,10 +123,10 @@ struct cypress_private {
123}; 123};
124 124
125/* function prototypes for the Cypress USB to serial device */ 125/* function prototypes for the Cypress USB to serial device */
126static int cypress_earthmate_startup(struct usb_serial *serial); 126static int cypress_earthmate_port_probe(struct usb_serial_port *port);
127static int cypress_hidcom_startup(struct usb_serial *serial); 127static int cypress_hidcom_port_probe(struct usb_serial_port *port);
128static int cypress_ca42v2_startup(struct usb_serial *serial); 128static int cypress_ca42v2_port_probe(struct usb_serial_port *port);
129static void cypress_release(struct usb_serial *serial); 129static int cypress_port_remove(struct usb_serial_port *port);
130static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port); 130static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port);
131static void cypress_close(struct usb_serial_port *port); 131static void cypress_close(struct usb_serial_port *port);
132static void cypress_dtr_rts(struct usb_serial_port *port, int on); 132static void cypress_dtr_rts(struct usb_serial_port *port, int on);
@@ -156,8 +156,8 @@ static struct usb_serial_driver cypress_earthmate_device = {
156 .description = "DeLorme Earthmate USB", 156 .description = "DeLorme Earthmate USB",
157 .id_table = id_table_earthmate, 157 .id_table = id_table_earthmate,
158 .num_ports = 1, 158 .num_ports = 1,
159 .attach = cypress_earthmate_startup, 159 .port_probe = cypress_earthmate_port_probe,
160 .release = cypress_release, 160 .port_remove = cypress_port_remove,
161 .open = cypress_open, 161 .open = cypress_open,
162 .close = cypress_close, 162 .close = cypress_close,
163 .dtr_rts = cypress_dtr_rts, 163 .dtr_rts = cypress_dtr_rts,
@@ -182,8 +182,8 @@ static struct usb_serial_driver cypress_hidcom_device = {
182 .description = "HID->COM RS232 Adapter", 182 .description = "HID->COM RS232 Adapter",
183 .id_table = id_table_cyphidcomrs232, 183 .id_table = id_table_cyphidcomrs232,
184 .num_ports = 1, 184 .num_ports = 1,
185 .attach = cypress_hidcom_startup, 185 .port_probe = cypress_hidcom_port_probe,
186 .release = cypress_release, 186 .port_remove = cypress_port_remove,
187 .open = cypress_open, 187 .open = cypress_open,
188 .close = cypress_close, 188 .close = cypress_close,
189 .dtr_rts = cypress_dtr_rts, 189 .dtr_rts = cypress_dtr_rts,
@@ -208,8 +208,8 @@ static struct usb_serial_driver cypress_ca42v2_device = {
208 .description = "Nokia CA-42 V2 Adapter", 208 .description = "Nokia CA-42 V2 Adapter",
209 .id_table = id_table_nokiaca42v2, 209 .id_table = id_table_nokiaca42v2,
210 .num_ports = 1, 210 .num_ports = 1,
211 .attach = cypress_ca42v2_startup, 211 .port_probe = cypress_ca42v2_port_probe,
212 .release = cypress_release, 212 .port_remove = cypress_port_remove,
213 .open = cypress_open, 213 .open = cypress_open,
214 .close = cypress_close, 214 .close = cypress_close,
215 .dtr_rts = cypress_dtr_rts, 215 .dtr_rts = cypress_dtr_rts,
@@ -438,10 +438,10 @@ static void cypress_set_dead(struct usb_serial_port *port)
438 *****************************************************************************/ 438 *****************************************************************************/
439 439
440 440
441static int generic_startup(struct usb_serial *serial) 441static int cypress_generic_port_probe(struct usb_serial_port *port)
442{ 442{
443 struct usb_serial *serial = port->serial;
443 struct cypress_private *priv; 444 struct cypress_private *priv;
444 struct usb_serial_port *port = serial->port[0];
445 445
446 priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL); 446 priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL);
447 if (!priv) 447 if (!priv)
@@ -490,14 +490,16 @@ static int generic_startup(struct usb_serial *serial)
490} 490}
491 491
492 492
493static int cypress_earthmate_startup(struct usb_serial *serial) 493static int cypress_earthmate_port_probe(struct usb_serial_port *port)
494{ 494{
495 struct usb_serial *serial = port->serial;
495 struct cypress_private *priv; 496 struct cypress_private *priv;
496 struct usb_serial_port *port = serial->port[0]; 497 int ret;
497 498
498 if (generic_startup(serial)) { 499 ret = cypress_generic_port_probe(port);
500 if (ret) {
499 dev_dbg(&port->dev, "%s - Failed setting up port\n", __func__); 501 dev_dbg(&port->dev, "%s - Failed setting up port\n", __func__);
500 return 1; 502 return ret;
501 } 503 }
502 504
503 priv = usb_get_serial_port_data(port); 505 priv = usb_get_serial_port_data(port);
@@ -518,56 +520,53 @@ static int cypress_earthmate_startup(struct usb_serial *serial)
518 } 520 }
519 521
520 return 0; 522 return 0;
521} /* cypress_earthmate_startup */ 523}
522
523 524
524static int cypress_hidcom_startup(struct usb_serial *serial) 525static int cypress_hidcom_port_probe(struct usb_serial_port *port)
525{ 526{
526 struct cypress_private *priv; 527 struct cypress_private *priv;
527 struct usb_serial_port *port = serial->port[0]; 528 int ret;
528 529
529 if (generic_startup(serial)) { 530 ret = cypress_generic_port_probe(port);
531 if (ret) {
530 dev_dbg(&port->dev, "%s - Failed setting up port\n", __func__); 532 dev_dbg(&port->dev, "%s - Failed setting up port\n", __func__);
531 return 1; 533 return ret;
532 } 534 }
533 535
534 priv = usb_get_serial_port_data(port); 536 priv = usb_get_serial_port_data(port);
535 priv->chiptype = CT_CYPHIDCOM; 537 priv->chiptype = CT_CYPHIDCOM;
536 538
537 return 0; 539 return 0;
538} /* cypress_hidcom_startup */ 540}
539
540 541
541static int cypress_ca42v2_startup(struct usb_serial *serial) 542static int cypress_ca42v2_port_probe(struct usb_serial_port *port)
542{ 543{
543 struct cypress_private *priv; 544 struct cypress_private *priv;
544 struct usb_serial_port *port = serial->port[0]; 545 int ret;
545 546
546 if (generic_startup(serial)) { 547 ret = cypress_generic_port_probe(port);
548 if (ret) {
547 dev_dbg(&port->dev, "%s - Failed setting up port\n", __func__); 549 dev_dbg(&port->dev, "%s - Failed setting up port\n", __func__);
548 return 1; 550 return ret;
549 } 551 }
550 552
551 priv = usb_get_serial_port_data(port); 553 priv = usb_get_serial_port_data(port);
552 priv->chiptype = CT_CA42V2; 554 priv->chiptype = CT_CA42V2;
553 555
554 return 0; 556 return 0;
555} /* cypress_ca42v2_startup */ 557}
556
557 558
558static void cypress_release(struct usb_serial *serial) 559static int cypress_port_remove(struct usb_serial_port *port)
559{ 560{
560 struct cypress_private *priv; 561 struct cypress_private *priv;
561 562
562 /* all open ports are closed at this point */ 563 priv = usb_get_serial_port_data(port);
563 priv = usb_get_serial_port_data(serial->port[0]);
564 564
565 if (priv) { 565 kfifo_free(&priv->write_fifo);
566 kfifo_free(&priv->write_fifo); 566 kfree(priv);
567 kfree(priv);
568 }
569}
570 567
568 return 0;
569}
571 570
572static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port) 571static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port)
573{ 572{
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index c86f68c6b078..b50fa1c6d885 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -244,6 +244,8 @@ static int digi_startup_device(struct usb_serial *serial);
244static int digi_startup(struct usb_serial *serial); 244static int digi_startup(struct usb_serial *serial);
245static void digi_disconnect(struct usb_serial *serial); 245static void digi_disconnect(struct usb_serial *serial);
246static void digi_release(struct usb_serial *serial); 246static void digi_release(struct usb_serial *serial);
247static int digi_port_probe(struct usb_serial_port *port);
248static int digi_port_remove(struct usb_serial_port *port);
247static void digi_read_bulk_callback(struct urb *urb); 249static void digi_read_bulk_callback(struct urb *urb);
248static int digi_read_inb_callback(struct urb *urb); 250static int digi_read_inb_callback(struct urb *urb);
249static int digi_read_oob_callback(struct urb *urb); 251static int digi_read_oob_callback(struct urb *urb);
@@ -294,6 +296,8 @@ static struct usb_serial_driver digi_acceleport_2_device = {
294 .attach = digi_startup, 296 .attach = digi_startup,
295 .disconnect = digi_disconnect, 297 .disconnect = digi_disconnect,
296 .release = digi_release, 298 .release = digi_release,
299 .port_probe = digi_port_probe,
300 .port_remove = digi_port_remove,
297}; 301};
298 302
299static struct usb_serial_driver digi_acceleport_4_device = { 303static struct usb_serial_driver digi_acceleport_4_device = {
@@ -320,6 +324,8 @@ static struct usb_serial_driver digi_acceleport_4_device = {
320 .attach = digi_startup, 324 .attach = digi_startup,
321 .disconnect = digi_disconnect, 325 .disconnect = digi_disconnect,
322 .release = digi_release, 326 .release = digi_release,
327 .port_probe = digi_port_probe,
328 .port_remove = digi_port_remove,
323}; 329};
324 330
325static struct usb_serial_driver * const serial_drivers[] = { 331static struct usb_serial_driver * const serial_drivers[] = {
@@ -1240,59 +1246,50 @@ static int digi_startup_device(struct usb_serial *serial)
1240 return ret; 1246 return ret;
1241} 1247}
1242 1248
1243 1249static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
1244static int digi_startup(struct usb_serial *serial)
1245{ 1250{
1246
1247 int i;
1248 struct digi_port *priv; 1251 struct digi_port *priv;
1249 struct digi_serial *serial_priv;
1250 1252
1251 /* allocate the private data structures for all ports */ 1253 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1252 /* number of regular ports + 1 for the out-of-band port */ 1254 if (!priv)
1253 for (i = 0; i < serial->type->num_ports + 1; i++) { 1255 return -ENOMEM;
1254 /* allocate port private structure */
1255 priv = kmalloc(sizeof(struct digi_port), GFP_KERNEL);
1256 if (priv == NULL) {
1257 while (--i >= 0)
1258 kfree(usb_get_serial_port_data(serial->port[i]));
1259 return 1; /* error */
1260 }
1261 1256
1262 /* initialize port private structure */ 1257 spin_lock_init(&priv->dp_port_lock);
1263 spin_lock_init(&priv->dp_port_lock); 1258 priv->dp_port_num = port_num;
1264 priv->dp_port_num = i; 1259 init_waitqueue_head(&priv->dp_modem_change_wait);
1265 priv->dp_out_buf_len = 0; 1260 init_waitqueue_head(&priv->dp_transmit_idle_wait);
1266 priv->dp_write_urb_in_use = 0; 1261 init_waitqueue_head(&priv->dp_flush_wait);
1267 priv->dp_modem_signals = 0; 1262 init_waitqueue_head(&priv->dp_close_wait);
1268 init_waitqueue_head(&priv->dp_modem_change_wait); 1263 INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
1269 priv->dp_transmit_idle = 0; 1264 priv->dp_port = port;
1270 init_waitqueue_head(&priv->dp_transmit_idle_wait);
1271 priv->dp_throttled = 0;
1272 priv->dp_throttle_restart = 0;
1273 init_waitqueue_head(&priv->dp_flush_wait);
1274 init_waitqueue_head(&priv->dp_close_wait);
1275 INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
1276 priv->dp_port = serial->port[i];
1277 /* initialize write wait queue for this port */
1278 init_waitqueue_head(&serial->port[i]->write_wait);
1279
1280 usb_set_serial_port_data(serial->port[i], priv);
1281 }
1282 1265
1283 /* allocate serial private structure */ 1266 init_waitqueue_head(&port->write_wait);
1284 serial_priv = kmalloc(sizeof(struct digi_serial), GFP_KERNEL); 1267
1285 if (serial_priv == NULL) { 1268 usb_set_serial_port_data(port, priv);
1286 for (i = 0; i < serial->type->num_ports + 1; i++) 1269
1287 kfree(usb_get_serial_port_data(serial->port[i])); 1270 return 0;
1288 return 1; /* error */ 1271}
1289 } 1272
1273static int digi_startup(struct usb_serial *serial)
1274{
1275 struct digi_serial *serial_priv;
1276 int ret;
1277
1278 serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
1279 if (!serial_priv)
1280 return -ENOMEM;
1290 1281
1291 /* initialize serial private structure */
1292 spin_lock_init(&serial_priv->ds_serial_lock); 1282 spin_lock_init(&serial_priv->ds_serial_lock);
1293 serial_priv->ds_oob_port_num = serial->type->num_ports; 1283 serial_priv->ds_oob_port_num = serial->type->num_ports;
1294 serial_priv->ds_oob_port = serial->port[serial_priv->ds_oob_port_num]; 1284 serial_priv->ds_oob_port = serial->port[serial_priv->ds_oob_port_num];
1295 serial_priv->ds_device_started = 0; 1285
1286 ret = digi_port_init(serial_priv->ds_oob_port,
1287 serial_priv->ds_oob_port_num);
1288 if (ret) {
1289 kfree(serial_priv);
1290 return ret;
1291 }
1292
1296 usb_set_serial_data(serial, serial_priv); 1293 usb_set_serial_data(serial, serial_priv);
1297 1294
1298 return 0; 1295 return 0;
@@ -1313,15 +1310,35 @@ static void digi_disconnect(struct usb_serial *serial)
1313 1310
1314static void digi_release(struct usb_serial *serial) 1311static void digi_release(struct usb_serial *serial)
1315{ 1312{
1316 int i; 1313 struct digi_serial *serial_priv;
1314 struct digi_port *priv;
1315
1316 serial_priv = usb_get_serial_data(serial);
1317
1318 priv = usb_get_serial_port_data(serial_priv->ds_oob_port);
1319 kfree(priv);
1317 1320
1318 /* free the private data structures for all ports */ 1321 kfree(serial_priv);
1319 /* number of regular ports + 1 for the out-of-band port */
1320 for (i = 0; i < serial->type->num_ports + 1; i++)
1321 kfree(usb_get_serial_port_data(serial->port[i]));
1322 kfree(usb_get_serial_data(serial));
1323} 1322}
1324 1323
1324static int digi_port_probe(struct usb_serial_port *port)
1325{
1326 unsigned port_num;
1327
1328 port_num = port->number - port->serial->minor;
1329
1330 return digi_port_init(port, port_num);
1331}
1332
1333static int digi_port_remove(struct usb_serial_port *port)
1334{
1335 struct digi_port *priv;
1336
1337 priv = usb_get_serial_port_data(port);
1338 kfree(priv);
1339
1340 return 0;
1341}
1325 1342
1326static void digi_read_bulk_callback(struct urb *urb) 1343static void digi_read_bulk_callback(struct urb *urb)
1327{ 1344{
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 244477107e2f..6e4eb57d0177 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -318,39 +318,30 @@ static int f81232_ioctl(struct tty_struct *tty,
318 return -ENOIOCTLCMD; 318 return -ENOIOCTLCMD;
319} 319}
320 320
321static int f81232_startup(struct usb_serial *serial) 321static int f81232_port_probe(struct usb_serial_port *port)
322{ 322{
323 struct f81232_private *priv; 323 struct f81232_private *priv;
324 int i;
325 324
326 for (i = 0; i < serial->num_ports; ++i) { 325 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
327 priv = kzalloc(sizeof(struct f81232_private), GFP_KERNEL); 326 if (!priv)
328 if (!priv) 327 return -ENOMEM;
329 goto cleanup;
330 spin_lock_init(&priv->lock);
331 init_waitqueue_head(&priv->delta_msr_wait);
332 usb_set_serial_port_data(serial->port[i], priv);
333 }
334 return 0;
335 328
336cleanup: 329 spin_lock_init(&priv->lock);
337 for (--i; i >= 0; --i) { 330 init_waitqueue_head(&priv->delta_msr_wait);
338 priv = usb_get_serial_port_data(serial->port[i]); 331
339 kfree(priv); 332 usb_set_serial_port_data(port, priv);
340 usb_set_serial_port_data(serial->port[i], NULL); 333
341 } 334 return 0;
342 return -ENOMEM;
343} 335}
344 336
345static void f81232_release(struct usb_serial *serial) 337static int f81232_port_remove(struct usb_serial_port *port)
346{ 338{
347 int i;
348 struct f81232_private *priv; 339 struct f81232_private *priv;
349 340
350 for (i = 0; i < serial->num_ports; ++i) { 341 priv = usb_get_serial_port_data(port);
351 priv = usb_get_serial_port_data(serial->port[i]); 342 kfree(priv);
352 kfree(priv); 343
353 } 344 return 0;
354} 345}
355 346
356static struct usb_serial_driver f81232_device = { 347static struct usb_serial_driver f81232_device = {
@@ -373,8 +364,8 @@ static struct usb_serial_driver f81232_device = {
373 .tiocmset = f81232_tiocmset, 364 .tiocmset = f81232_tiocmset,
374 .process_read_urb = f81232_process_read_urb, 365 .process_read_urb = f81232_process_read_urb,
375 .read_int_callback = f81232_read_int_callback, 366 .read_int_callback = f81232_read_int_callback,
376 .attach = f81232_startup, 367 .port_probe = f81232_port_probe,
377 .release = f81232_release, 368 .port_remove = f81232_port_remove,
378}; 369};
379 370
380static struct usb_serial_driver * const serial_drivers[] = { 371static struct usb_serial_driver * const serial_drivers[] = {
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 3ee92648c02d..203358d7e7bc 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1405,11 +1405,10 @@ static void timeout_handler(unsigned long data)
1405 1405
1406 1406
1407 1407
1408static int garmin_attach(struct usb_serial *serial) 1408static int garmin_port_probe(struct usb_serial_port *port)
1409{ 1409{
1410 int status = 0; 1410 int status;
1411 struct usb_serial_port *port = serial->port[0]; 1411 struct garmin_data *garmin_data_p;
1412 struct garmin_data *garmin_data_p = NULL;
1413 1412
1414 garmin_data_p = kzalloc(sizeof(struct garmin_data), GFP_KERNEL); 1413 garmin_data_p = kzalloc(sizeof(struct garmin_data), GFP_KERNEL);
1415 if (garmin_data_p == NULL) { 1414 if (garmin_data_p == NULL) {
@@ -1434,22 +1433,14 @@ static int garmin_attach(struct usb_serial *serial)
1434} 1433}
1435 1434
1436 1435
1437static void garmin_disconnect(struct usb_serial *serial) 1436static int garmin_port_remove(struct usb_serial_port *port)
1438{ 1437{
1439 struct usb_serial_port *port = serial->port[0];
1440 struct garmin_data *garmin_data_p = usb_get_serial_port_data(port); 1438 struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
1441 1439
1442 usb_kill_urb(port->interrupt_in_urb); 1440 usb_kill_urb(port->interrupt_in_urb);
1443 del_timer_sync(&garmin_data_p->timer); 1441 del_timer_sync(&garmin_data_p->timer);
1444}
1445
1446
1447static void garmin_release(struct usb_serial *serial)
1448{
1449 struct usb_serial_port *port = serial->port[0];
1450 struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
1451
1452 kfree(garmin_data_p); 1442 kfree(garmin_data_p);
1443 return 0;
1453} 1444}
1454 1445
1455 1446
@@ -1466,9 +1457,8 @@ static struct usb_serial_driver garmin_device = {
1466 .close = garmin_close, 1457 .close = garmin_close,
1467 .throttle = garmin_throttle, 1458 .throttle = garmin_throttle,
1468 .unthrottle = garmin_unthrottle, 1459 .unthrottle = garmin_unthrottle,
1469 .attach = garmin_attach, 1460 .port_probe = garmin_port_probe,
1470 .disconnect = garmin_disconnect, 1461 .port_remove = garmin_port_remove,
1471 .release = garmin_release,
1472 .write = garmin_write, 1462 .write = garmin_write,
1473 .write_room = garmin_write_room, 1463 .write_room = garmin_write_room,
1474 .write_bulk_callback = garmin_write_bulk_callback, 1464 .write_bulk_callback = garmin_write_bulk_callback,
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 8e6faaf3580c..5acc0d13864a 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -225,6 +225,8 @@ static int edge_get_icount(struct tty_struct *tty,
225static int edge_startup(struct usb_serial *serial); 225static int edge_startup(struct usb_serial *serial);
226static void edge_disconnect(struct usb_serial *serial); 226static void edge_disconnect(struct usb_serial *serial);
227static void edge_release(struct usb_serial *serial); 227static void edge_release(struct usb_serial *serial);
228static int edge_port_probe(struct usb_serial_port *port);
229static int edge_port_remove(struct usb_serial_port *port);
228 230
229#include "io_tables.h" /* all of the devices that this driver supports */ 231#include "io_tables.h" /* all of the devices that this driver supports */
230 232
@@ -2875,10 +2877,9 @@ static void load_application_firmware(struct edgeport_serial *edge_serial)
2875static int edge_startup(struct usb_serial *serial) 2877static int edge_startup(struct usb_serial *serial)
2876{ 2878{
2877 struct edgeport_serial *edge_serial; 2879 struct edgeport_serial *edge_serial;
2878 struct edgeport_port *edge_port;
2879 struct usb_device *dev; 2880 struct usb_device *dev;
2880 struct device *ddev = &serial->dev->dev; 2881 struct device *ddev = &serial->dev->dev;
2881 int i, j; 2882 int i;
2882 int response; 2883 int response;
2883 bool interrupt_in_found; 2884 bool interrupt_in_found;
2884 bool bulk_in_found; 2885 bool bulk_in_found;
@@ -2961,25 +2962,6 @@ static int edge_startup(struct usb_serial *serial)
2961 /* we set up the pointers to the endpoints in the edge_open function, 2962 /* we set up the pointers to the endpoints in the edge_open function,
2962 * as the structures aren't created yet. */ 2963 * as the structures aren't created yet. */
2963 2964
2964 /* set up our port private structures */
2965 for (i = 0; i < serial->num_ports; ++i) {
2966 edge_port = kzalloc(sizeof(struct edgeport_port), GFP_KERNEL);
2967 if (edge_port == NULL) {
2968 dev_err(ddev, "%s - Out of memory\n", __func__);
2969 for (j = 0; j < i; ++j) {
2970 kfree(usb_get_serial_port_data(serial->port[j]));
2971 usb_set_serial_port_data(serial->port[j],
2972 NULL);
2973 }
2974 usb_set_serial_data(serial, NULL);
2975 kfree(edge_serial);
2976 return -ENOMEM;
2977 }
2978 spin_lock_init(&edge_port->ep_lock);
2979 edge_port->port = serial->port[i];
2980 usb_set_serial_port_data(serial->port[i], edge_port);
2981 }
2982
2983 response = 0; 2965 response = 0;
2984 2966
2985 if (edge_serial->is_epic) { 2967 if (edge_serial->is_epic) {
@@ -3120,14 +3102,36 @@ static void edge_disconnect(struct usb_serial *serial)
3120static void edge_release(struct usb_serial *serial) 3102static void edge_release(struct usb_serial *serial)
3121{ 3103{
3122 struct edgeport_serial *edge_serial = usb_get_serial_data(serial); 3104 struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
3123 int i;
3124
3125 for (i = 0; i < serial->num_ports; ++i)
3126 kfree(usb_get_serial_port_data(serial->port[i]));
3127 3105
3128 kfree(edge_serial); 3106 kfree(edge_serial);
3129} 3107}
3130 3108
3109static int edge_port_probe(struct usb_serial_port *port)
3110{
3111 struct edgeport_port *edge_port;
3112
3113 edge_port = kzalloc(sizeof(*edge_port), GFP_KERNEL);
3114 if (!edge_port)
3115 return -ENOMEM;
3116
3117 spin_lock_init(&edge_port->ep_lock);
3118 edge_port->port = port;
3119
3120 usb_set_serial_port_data(port, edge_port);
3121
3122 return 0;
3123}
3124
3125static int edge_port_remove(struct usb_serial_port *port)
3126{
3127 struct edgeport_port *edge_port;
3128
3129 edge_port = usb_get_serial_port_data(port);
3130 kfree(edge_port);
3131
3132 return 0;
3133}
3134
3131module_usb_serial_driver(serial_drivers, id_table_combined); 3135module_usb_serial_driver(serial_drivers, id_table_combined);
3132 3136
3133MODULE_AUTHOR(DRIVER_AUTHOR); 3137MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/usb/serial/io_tables.h b/drivers/usb/serial/io_tables.h
index 350afddb55ba..1511dd0ad324 100644
--- a/drivers/usb/serial/io_tables.h
+++ b/drivers/usb/serial/io_tables.h
@@ -110,6 +110,8 @@ static struct usb_serial_driver edgeport_2port_device = {
110 .attach = edge_startup, 110 .attach = edge_startup,
111 .disconnect = edge_disconnect, 111 .disconnect = edge_disconnect,
112 .release = edge_release, 112 .release = edge_release,
113 .port_probe = edge_port_probe,
114 .port_remove = edge_port_remove,
113 .ioctl = edge_ioctl, 115 .ioctl = edge_ioctl,
114 .set_termios = edge_set_termios, 116 .set_termios = edge_set_termios,
115 .tiocmget = edge_tiocmget, 117 .tiocmget = edge_tiocmget,
@@ -139,6 +141,8 @@ static struct usb_serial_driver edgeport_4port_device = {
139 .attach = edge_startup, 141 .attach = edge_startup,
140 .disconnect = edge_disconnect, 142 .disconnect = edge_disconnect,
141 .release = edge_release, 143 .release = edge_release,
144 .port_probe = edge_port_probe,
145 .port_remove = edge_port_remove,
142 .ioctl = edge_ioctl, 146 .ioctl = edge_ioctl,
143 .set_termios = edge_set_termios, 147 .set_termios = edge_set_termios,
144 .tiocmget = edge_tiocmget, 148 .tiocmget = edge_tiocmget,
@@ -168,6 +172,8 @@ static struct usb_serial_driver edgeport_8port_device = {
168 .attach = edge_startup, 172 .attach = edge_startup,
169 .disconnect = edge_disconnect, 173 .disconnect = edge_disconnect,
170 .release = edge_release, 174 .release = edge_release,
175 .port_probe = edge_port_probe,
176 .port_remove = edge_port_remove,
171 .ioctl = edge_ioctl, 177 .ioctl = edge_ioctl,
172 .set_termios = edge_set_termios, 178 .set_termios = edge_set_termios,
173 .tiocmget = edge_tiocmget, 179 .tiocmget = edge_tiocmget,
@@ -197,6 +203,8 @@ static struct usb_serial_driver epic_device = {
197 .attach = edge_startup, 203 .attach = edge_startup,
198 .disconnect = edge_disconnect, 204 .disconnect = edge_disconnect,
199 .release = edge_release, 205 .release = edge_release,
206 .port_probe = edge_port_probe,
207 .port_remove = edge_port_remove,
200 .ioctl = edge_ioctl, 208 .ioctl = edge_ioctl,
201 .set_termios = edge_set_termios, 209 .set_termios = edge_set_termios,
202 .tiocmget = edge_tiocmget, 210 .tiocmget = edge_tiocmget,
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index a2209cd45093..60023c2d2a31 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2532,12 +2532,7 @@ static void edge_break(struct tty_struct *tty, int break_state)
2532static int edge_startup(struct usb_serial *serial) 2532static int edge_startup(struct usb_serial *serial)
2533{ 2533{
2534 struct edgeport_serial *edge_serial; 2534 struct edgeport_serial *edge_serial;
2535 struct edgeport_port *edge_port;
2536 struct usb_device *dev;
2537 int status; 2535 int status;
2538 int i;
2539
2540 dev = serial->dev;
2541 2536
2542 /* create our private serial structure */ 2537 /* create our private serial structure */
2543 edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL); 2538 edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL);
@@ -2555,40 +2550,7 @@ static int edge_startup(struct usb_serial *serial)
2555 return status; 2550 return status;
2556 } 2551 }
2557 2552
2558 /* set up our port private structures */
2559 for (i = 0; i < serial->num_ports; ++i) {
2560 edge_port = kzalloc(sizeof(struct edgeport_port), GFP_KERNEL);
2561 if (edge_port == NULL) {
2562 dev_err(&serial->dev->dev, "%s - Out of memory\n",
2563 __func__);
2564 goto cleanup;
2565 }
2566 spin_lock_init(&edge_port->ep_lock);
2567 if (kfifo_alloc(&edge_port->write_fifo, EDGE_OUT_BUF_SIZE,
2568 GFP_KERNEL)) {
2569 dev_err(&serial->dev->dev, "%s - Out of memory\n",
2570 __func__);
2571 kfree(edge_port);
2572 goto cleanup;
2573 }
2574 edge_port->port = serial->port[i];
2575 edge_port->edge_serial = edge_serial;
2576 usb_set_serial_port_data(serial->port[i], edge_port);
2577 edge_port->bUartMode = default_uart_mode;
2578 }
2579
2580 return 0; 2553 return 0;
2581
2582cleanup:
2583 for (--i; i >= 0; --i) {
2584 edge_port = usb_get_serial_port_data(serial->port[i]);
2585 kfifo_free(&edge_port->write_fifo);
2586 kfree(edge_port);
2587 usb_set_serial_port_data(serial->port[i], NULL);
2588 }
2589 kfree(edge_serial);
2590 usb_set_serial_data(serial, NULL);
2591 return -ENOMEM;
2592} 2554}
2593 2555
2594static void edge_disconnect(struct usb_serial *serial) 2556static void edge_disconnect(struct usb_serial *serial)
@@ -2597,17 +2559,54 @@ static void edge_disconnect(struct usb_serial *serial)
2597 2559
2598static void edge_release(struct usb_serial *serial) 2560static void edge_release(struct usb_serial *serial)
2599{ 2561{
2600 int i; 2562 kfree(usb_get_serial_data(serial));
2563}
2564
2565static int edge_port_probe(struct usb_serial_port *port)
2566{
2601 struct edgeport_port *edge_port; 2567 struct edgeport_port *edge_port;
2568 int ret;
2602 2569
2603 for (i = 0; i < serial->num_ports; ++i) { 2570 edge_port = kzalloc(sizeof(*edge_port), GFP_KERNEL);
2604 edge_port = usb_get_serial_port_data(serial->port[i]); 2571 if (!edge_port)
2572 return -ENOMEM;
2573
2574 ret = kfifo_alloc(&edge_port->write_fifo, EDGE_OUT_BUF_SIZE,
2575 GFP_KERNEL);
2576 if (ret) {
2577 kfree(edge_port);
2578 return -ENOMEM;
2579 }
2580
2581 spin_lock_init(&edge_port->ep_lock);
2582 edge_port->port = port;
2583 edge_port->edge_serial = usb_get_serial_data(port->serial);
2584 edge_port->bUartMode = default_uart_mode;
2585
2586 usb_set_serial_port_data(port, edge_port);
2587
2588 ret = edge_create_sysfs_attrs(port);
2589 if (ret) {
2605 kfifo_free(&edge_port->write_fifo); 2590 kfifo_free(&edge_port->write_fifo);
2606 kfree(edge_port); 2591 kfree(edge_port);
2592 return ret;
2607 } 2593 }
2608 kfree(usb_get_serial_data(serial)); 2594
2595 return 0;
2609} 2596}
2610 2597
2598static int edge_port_remove(struct usb_serial_port *port)
2599{
2600 struct edgeport_port *edge_port;
2601
2602 edge_port = usb_get_serial_port_data(port);
2603
2604 edge_remove_sysfs_attrs(port);
2605 kfifo_free(&edge_port->write_fifo);
2606 kfree(edge_port);
2607
2608 return 0;
2609}
2611 2610
2612/* Sysfs Attributes */ 2611/* Sysfs Attributes */
2613 2612
@@ -2667,8 +2666,8 @@ static struct usb_serial_driver edgeport_1port_device = {
2667 .attach = edge_startup, 2666 .attach = edge_startup,
2668 .disconnect = edge_disconnect, 2667 .disconnect = edge_disconnect,
2669 .release = edge_release, 2668 .release = edge_release,
2670 .port_probe = edge_create_sysfs_attrs, 2669 .port_probe = edge_port_probe,
2671 .port_remove = edge_remove_sysfs_attrs, 2670 .port_remove = edge_port_remove,
2672 .ioctl = edge_ioctl, 2671 .ioctl = edge_ioctl,
2673 .set_termios = edge_set_termios, 2672 .set_termios = edge_set_termios,
2674 .tiocmget = edge_tiocmget, 2673 .tiocmget = edge_tiocmget,
@@ -2698,8 +2697,8 @@ static struct usb_serial_driver edgeport_2port_device = {
2698 .attach = edge_startup, 2697 .attach = edge_startup,
2699 .disconnect = edge_disconnect, 2698 .disconnect = edge_disconnect,
2700 .release = edge_release, 2699 .release = edge_release,
2701 .port_probe = edge_create_sysfs_attrs, 2700 .port_probe = edge_port_probe,
2702 .port_remove = edge_remove_sysfs_attrs, 2701 .port_remove = edge_port_remove,
2703 .ioctl = edge_ioctl, 2702 .ioctl = edge_ioctl,
2704 .set_termios = edge_set_termios, 2703 .set_termios = edge_set_termios,
2705 .tiocmget = edge_tiocmget, 2704 .tiocmget = edge_tiocmget,
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index 20a132ec39e2..4264821a3b34 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -203,8 +203,7 @@ static int ipw_open(struct tty_struct *tty, struct usb_serial_port *port)
203 return 0; 203 return 0;
204} 204}
205 205
206/* fake probe - only to allocate data structures */ 206static int ipw_attach(struct usb_serial *serial)
207static int ipw_probe(struct usb_serial *serial, const struct usb_device_id *id)
208{ 207{
209 struct usb_wwan_intf_private *data; 208 struct usb_wwan_intf_private *data;
210 209
@@ -303,9 +302,9 @@ static struct usb_serial_driver ipw_device = {
303 .num_ports = 1, 302 .num_ports = 1,
304 .open = ipw_open, 303 .open = ipw_open,
305 .close = ipw_close, 304 .close = ipw_close,
306 .probe = ipw_probe, 305 .attach = ipw_attach,
307 .attach = usb_wwan_startup,
308 .release = ipw_release, 306 .release = ipw_release,
307 .port_probe = usb_wwan_port_probe,
309 .port_remove = usb_wwan_port_remove, 308 .port_remove = usb_wwan_port_remove,
310 .dtr_rts = ipw_dtr_rts, 309 .dtr_rts = ipw_dtr_rts,
311 .write = usb_wwan_write, 310 .write = usb_wwan_write,
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 01da3ea36e89..cd5533e81de7 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -53,6 +53,8 @@ static int iuu_cardout;
53static bool xmas; 53static bool xmas;
54static int vcc_default = 5; 54static int vcc_default = 5;
55 55
56static int iuu_create_sysfs_attrs(struct usb_serial_port *port);
57static int iuu_remove_sysfs_attrs(struct usb_serial_port *port);
56static void read_rxcmd_callback(struct urb *urb); 58static void read_rxcmd_callback(struct urb *urb);
57 59
58struct iuu_private { 60struct iuu_private {
@@ -72,63 +74,55 @@ struct iuu_private {
72 u32 clk; 74 u32 clk;
73}; 75};
74 76
75 77static int iuu_port_probe(struct usb_serial_port *port)
76static void iuu_free_buf(struct iuu_private *priv)
77{
78 kfree(priv->buf);
79 kfree(priv->writebuf);
80}
81
82static int iuu_alloc_buf(struct usb_serial *serial, struct iuu_private *priv)
83{
84 priv->buf = kzalloc(256, GFP_KERNEL);
85 priv->writebuf = kzalloc(256, GFP_KERNEL);
86 if (!priv->buf || !priv->writebuf) {
87 iuu_free_buf(priv);
88 dev_dbg(&serial->dev->dev, "%s problem allocation buffer\n", __func__);
89 return -ENOMEM;
90 }
91 dev_dbg(&serial->dev->dev, "%s - Privates buffers allocation success\n", __func__);
92 return 0;
93}
94
95static int iuu_startup(struct usb_serial *serial)
96{ 78{
97 struct iuu_private *priv; 79 struct iuu_private *priv;
80 int ret;
98 81
99 priv = kzalloc(sizeof(struct iuu_private), GFP_KERNEL); 82 priv = kzalloc(sizeof(struct iuu_private), GFP_KERNEL);
100 dev_dbg(&serial->dev->dev, "%s- priv allocation success\n", __func__);
101 if (!priv) 83 if (!priv)
102 return -ENOMEM; 84 return -ENOMEM;
103 if (iuu_alloc_buf(serial, priv)) { 85
86 priv->buf = kzalloc(256, GFP_KERNEL);
87 if (!priv->buf) {
88 kfree(priv);
89 return -ENOMEM;
90 }
91
92 priv->writebuf = kzalloc(256, GFP_KERNEL);
93 if (!priv->writebuf) {
94 kfree(priv->buf);
104 kfree(priv); 95 kfree(priv);
105 return -ENOMEM; 96 return -ENOMEM;
106 } 97 }
98
107 priv->vcc = vcc_default; 99 priv->vcc = vcc_default;
108 spin_lock_init(&priv->lock); 100 spin_lock_init(&priv->lock);
109 init_waitqueue_head(&priv->delta_msr_wait); 101 init_waitqueue_head(&priv->delta_msr_wait);
110 usb_set_serial_port_data(serial->port[0], priv); 102
103 usb_set_serial_port_data(port, priv);
104
105 ret = iuu_create_sysfs_attrs(port);
106 if (ret) {
107 kfree(priv->writebuf);
108 kfree(priv->buf);
109 kfree(priv);
110 return ret;
111 }
112
111 return 0; 113 return 0;
112} 114}
113 115
114/* Release function */ 116static int iuu_port_remove(struct usb_serial_port *port)
115static void iuu_release(struct usb_serial *serial)
116{ 117{
117 struct usb_serial_port *port = serial->port[0];
118 struct iuu_private *priv = usb_get_serial_port_data(port); 118 struct iuu_private *priv = usb_get_serial_port_data(port);
119 if (!port)
120 return;
121 119
122 if (priv) { 120 iuu_remove_sysfs_attrs(port);
123 iuu_free_buf(priv); 121 kfree(priv->writebuf);
124 dev_dbg(&port->dev, "%s - I will free all\n", __func__); 122 kfree(priv->buf);
125 usb_set_serial_port_data(port, NULL); 123 kfree(priv);
126
127 dev_dbg(&port->dev, "%s - priv is not anymore in port structure\n", __func__);
128 kfree(priv);
129 124
130 dev_dbg(&port->dev, "%s priv is now kfree\n", __func__); 125 return 0;
131 }
132} 126}
133 127
134static int iuu_tiocmset(struct tty_struct *tty, 128static int iuu_tiocmset(struct tty_struct *tty,
@@ -1215,8 +1209,6 @@ static struct usb_serial_driver iuu_device = {
1215 .num_ports = 1, 1209 .num_ports = 1,
1216 .bulk_in_size = 512, 1210 .bulk_in_size = 512,
1217 .bulk_out_size = 512, 1211 .bulk_out_size = 512,
1218 .port_probe = iuu_create_sysfs_attrs,
1219 .port_remove = iuu_remove_sysfs_attrs,
1220 .open = iuu_open, 1212 .open = iuu_open,
1221 .close = iuu_close, 1213 .close = iuu_close,
1222 .write = iuu_uart_write, 1214 .write = iuu_uart_write,
@@ -1225,8 +1217,8 @@ static struct usb_serial_driver iuu_device = {
1225 .tiocmset = iuu_tiocmset, 1217 .tiocmset = iuu_tiocmset,
1226 .set_termios = iuu_set_termios, 1218 .set_termios = iuu_set_termios,
1227 .init_termios = iuu_init_termios, 1219 .init_termios = iuu_init_termios,
1228 .attach = iuu_startup, 1220 .port_probe = iuu_port_probe,
1229 .release = iuu_release, 1221 .port_remove = iuu_port_remove,
1230}; 1222};
1231 1223
1232static struct usb_serial_driver * const serial_drivers[] = { 1224static struct usb_serial_driver * const serial_drivers[] = {
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 29c943d737d0..7179b0c5f814 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -1374,13 +1374,9 @@ static struct callbacks {
1374 data in device_details */ 1374 data in device_details */
1375static void keyspan_setup_urbs(struct usb_serial *serial) 1375static void keyspan_setup_urbs(struct usb_serial *serial)
1376{ 1376{
1377 int i, j;
1378 struct keyspan_serial_private *s_priv; 1377 struct keyspan_serial_private *s_priv;
1379 const struct keyspan_device_details *d_details; 1378 const struct keyspan_device_details *d_details;
1380 struct usb_serial_port *port;
1381 struct keyspan_port_private *p_priv;
1382 struct callbacks *cback; 1379 struct callbacks *cback;
1383 int endp;
1384 1380
1385 s_priv = usb_get_serial_data(serial); 1381 s_priv = usb_get_serial_data(serial);
1386 d_details = s_priv->device_details; 1382 d_details = s_priv->device_details;
@@ -1404,45 +1400,6 @@ static void keyspan_setup_urbs(struct usb_serial *serial)
1404 (serial, d_details->glocont_endpoint, USB_DIR_OUT, 1400 (serial, d_details->glocont_endpoint, USB_DIR_OUT,
1405 serial, s_priv->glocont_buf, GLOCONT_BUFLEN, 1401 serial, s_priv->glocont_buf, GLOCONT_BUFLEN,
1406 cback->glocont_callback); 1402 cback->glocont_callback);
1407
1408 /* Setup endpoints for each port specific thing */
1409 for (i = 0; i < d_details->num_ports; i++) {
1410 port = serial->port[i];
1411 p_priv = usb_get_serial_port_data(port);
1412
1413 /* Do indat endpoints first, once for each flip */
1414 endp = d_details->indat_endpoints[i];
1415 for (j = 0; j <= d_details->indat_endp_flip; ++j, ++endp) {
1416 p_priv->in_urbs[j] = keyspan_setup_urb
1417 (serial, endp, USB_DIR_IN, port,
1418 p_priv->in_buffer[j], 64,
1419 cback->indat_callback);
1420 }
1421 for (; j < 2; ++j)
1422 p_priv->in_urbs[j] = NULL;
1423
1424 /* outdat endpoints also have flip */
1425 endp = d_details->outdat_endpoints[i];
1426 for (j = 0; j <= d_details->outdat_endp_flip; ++j, ++endp) {
1427 p_priv->out_urbs[j] = keyspan_setup_urb
1428 (serial, endp, USB_DIR_OUT, port,
1429 p_priv->out_buffer[j], 64,
1430 cback->outdat_callback);
1431 }
1432 for (; j < 2; ++j)
1433 p_priv->out_urbs[j] = NULL;
1434
1435 /* inack endpoint */
1436 p_priv->inack_urb = keyspan_setup_urb
1437 (serial, d_details->inack_endpoints[i], USB_DIR_IN,
1438 port, p_priv->inack_buffer, 1, cback->inack_callback);
1439
1440 /* outcont endpoint */
1441 p_priv->outcont_urb = keyspan_setup_urb
1442 (serial, d_details->outcont_endpoints[i], USB_DIR_OUT,
1443 port, p_priv->outcont_buffer, 64,
1444 cback->outcont_callback);
1445 }
1446} 1403}
1447 1404
1448/* usa19 function doesn't require prescaler */ 1405/* usa19 function doesn't require prescaler */
@@ -2407,9 +2364,7 @@ static void keyspan_send_setup(struct usb_serial_port *port, int reset_port)
2407static int keyspan_startup(struct usb_serial *serial) 2364static int keyspan_startup(struct usb_serial *serial)
2408{ 2365{
2409 int i, err; 2366 int i, err;
2410 struct usb_serial_port *port;
2411 struct keyspan_serial_private *s_priv; 2367 struct keyspan_serial_private *s_priv;
2412 struct keyspan_port_private *p_priv;
2413 const struct keyspan_device_details *d_details; 2368 const struct keyspan_device_details *d_details;
2414 2369
2415 for (i = 0; (d_details = keyspan_devices[i]) != NULL; ++i) 2370 for (i = 0; (d_details = keyspan_devices[i]) != NULL; ++i)
@@ -2432,19 +2387,6 @@ static int keyspan_startup(struct usb_serial *serial)
2432 s_priv->device_details = d_details; 2387 s_priv->device_details = d_details;
2433 usb_set_serial_data(serial, s_priv); 2388 usb_set_serial_data(serial, s_priv);
2434 2389
2435 /* Now setup per port private data */
2436 for (i = 0; i < serial->num_ports; i++) {
2437 port = serial->port[i];
2438 p_priv = kzalloc(sizeof(struct keyspan_port_private),
2439 GFP_KERNEL);
2440 if (!p_priv) {
2441 dev_dbg(&port->dev, "%s - kmalloc for keyspan_port_private (%d) failed!.\n", __func__, i);
2442 return 1;
2443 }
2444 p_priv->device_details = d_details;
2445 usb_set_serial_port_data(port, p_priv);
2446 }
2447
2448 keyspan_setup_urbs(serial); 2390 keyspan_setup_urbs(serial);
2449 2391
2450 if (s_priv->instat_urb != NULL) { 2392 if (s_priv->instat_urb != NULL) {
@@ -2463,59 +2405,112 @@ static int keyspan_startup(struct usb_serial *serial)
2463 2405
2464static void keyspan_disconnect(struct usb_serial *serial) 2406static void keyspan_disconnect(struct usb_serial *serial)
2465{ 2407{
2466 int i, j; 2408 struct keyspan_serial_private *s_priv;
2467 struct usb_serial_port *port;
2468 struct keyspan_serial_private *s_priv;
2469 struct keyspan_port_private *p_priv;
2470 2409
2471 s_priv = usb_get_serial_data(serial); 2410 s_priv = usb_get_serial_data(serial);
2472 2411
2473 /* Stop reading/writing urbs */
2474 stop_urb(s_priv->instat_urb); 2412 stop_urb(s_priv->instat_urb);
2475 stop_urb(s_priv->glocont_urb); 2413 stop_urb(s_priv->glocont_urb);
2476 stop_urb(s_priv->indat_urb); 2414 stop_urb(s_priv->indat_urb);
2477 for (i = 0; i < serial->num_ports; ++i) { 2415}
2478 port = serial->port[i]; 2416
2479 p_priv = usb_get_serial_port_data(port); 2417static void keyspan_release(struct usb_serial *serial)
2480 stop_urb(p_priv->inack_urb); 2418{
2481 stop_urb(p_priv->outcont_urb); 2419 struct keyspan_serial_private *s_priv;
2482 for (j = 0; j < 2; j++) { 2420
2483 stop_urb(p_priv->in_urbs[j]); 2421 s_priv = usb_get_serial_data(serial);
2484 stop_urb(p_priv->out_urbs[j]);
2485 }
2486 }
2487 2422
2488 /* Now free them */
2489 usb_free_urb(s_priv->instat_urb); 2423 usb_free_urb(s_priv->instat_urb);
2490 usb_free_urb(s_priv->indat_urb); 2424 usb_free_urb(s_priv->indat_urb);
2491 usb_free_urb(s_priv->glocont_urb); 2425 usb_free_urb(s_priv->glocont_urb);
2492 for (i = 0; i < serial->num_ports; ++i) { 2426
2493 port = serial->port[i]; 2427 kfree(s_priv);
2494 p_priv = usb_get_serial_port_data(port);
2495 usb_free_urb(p_priv->inack_urb);
2496 usb_free_urb(p_priv->outcont_urb);
2497 for (j = 0; j < 2; j++) {
2498 usb_free_urb(p_priv->in_urbs[j]);
2499 usb_free_urb(p_priv->out_urbs[j]);
2500 }
2501 }
2502} 2428}
2503 2429
2504static void keyspan_release(struct usb_serial *serial) 2430static int keyspan_port_probe(struct usb_serial_port *port)
2505{ 2431{
2506 int i; 2432 struct usb_serial *serial = port->serial;
2507 struct usb_serial_port *port; 2433 struct keyspan_port_private *s_priv;
2508 struct keyspan_serial_private *s_priv; 2434 struct keyspan_port_private *p_priv;
2435 const struct keyspan_device_details *d_details;
2436 struct callbacks *cback;
2437 int endp;
2438 int port_num;
2439 int i;
2509 2440
2510 s_priv = usb_get_serial_data(serial); 2441 s_priv = usb_get_serial_data(serial);
2442 d_details = s_priv->device_details;
2511 2443
2512 kfree(s_priv); 2444 p_priv = kzalloc(sizeof(*p_priv), GFP_KERNEL);
2445 if (!p_priv)
2446 return -ENOMEM;
2513 2447
2514 /* Now free per port private data */ 2448 s_priv = usb_get_serial_data(port->serial);
2515 for (i = 0; i < serial->num_ports; i++) { 2449 p_priv->device_details = d_details;
2516 port = serial->port[i]; 2450
2517 kfree(usb_get_serial_port_data(port)); 2451 /* Setup values for the various callback routines */
2452 cback = &keyspan_callbacks[d_details->msg_format];
2453
2454 port_num = port->number - port->serial->minor;
2455
2456 /* Do indat endpoints first, once for each flip */
2457 endp = d_details->indat_endpoints[port_num];
2458 for (i = 0; i <= d_details->indat_endp_flip; ++i, ++endp) {
2459 p_priv->in_urbs[i] = keyspan_setup_urb(serial, endp,
2460 USB_DIR_IN, port,
2461 p_priv->in_buffer[i], 64,
2462 cback->indat_callback);
2463 }
2464 /* outdat endpoints also have flip */
2465 endp = d_details->outdat_endpoints[port_num];
2466 for (i = 0; i <= d_details->outdat_endp_flip; ++i, ++endp) {
2467 p_priv->out_urbs[i] = keyspan_setup_urb(serial, endp,
2468 USB_DIR_OUT, port,
2469 p_priv->out_buffer[i], 64,
2470 cback->outdat_callback);
2471 }
2472 /* inack endpoint */
2473 p_priv->inack_urb = keyspan_setup_urb(serial,
2474 d_details->inack_endpoints[port_num],
2475 USB_DIR_IN, port,
2476 p_priv->inack_buffer, 1,
2477 cback->inack_callback);
2478 /* outcont endpoint */
2479 p_priv->outcont_urb = keyspan_setup_urb(serial,
2480 d_details->outcont_endpoints[port_num],
2481 USB_DIR_OUT, port,
2482 p_priv->outcont_buffer, 64,
2483 cback->outcont_callback);
2484
2485 usb_set_serial_port_data(port, p_priv);
2486
2487 return 0;
2488}
2489
2490static int keyspan_port_remove(struct usb_serial_port *port)
2491{
2492 struct keyspan_port_private *p_priv;
2493 int i;
2494
2495 p_priv = usb_get_serial_port_data(port);
2496
2497 stop_urb(p_priv->inack_urb);
2498 stop_urb(p_priv->outcont_urb);
2499 for (i = 0; i < 2; i++) {
2500 stop_urb(p_priv->in_urbs[i]);
2501 stop_urb(p_priv->out_urbs[i]);
2502 }
2503
2504 usb_free_urb(p_priv->inack_urb);
2505 usb_free_urb(p_priv->outcont_urb);
2506 for (i = 0; i < 2; i++) {
2507 usb_free_urb(p_priv->in_urbs[i]);
2508 usb_free_urb(p_priv->out_urbs[i]);
2518 } 2509 }
2510
2511 kfree(p_priv);
2512
2513 return 0;
2519} 2514}
2520 2515
2521MODULE_AUTHOR(DRIVER_AUTHOR); 2516MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
index 0a8a40b5711e..0273dda303a4 100644
--- a/drivers/usb/serial/keyspan.h
+++ b/drivers/usb/serial/keyspan.h
@@ -42,6 +42,8 @@ static void keyspan_dtr_rts (struct usb_serial_port *port, int on);
42static int keyspan_startup (struct usb_serial *serial); 42static int keyspan_startup (struct usb_serial *serial);
43static void keyspan_disconnect (struct usb_serial *serial); 43static void keyspan_disconnect (struct usb_serial *serial);
44static void keyspan_release (struct usb_serial *serial); 44static void keyspan_release (struct usb_serial *serial);
45static int keyspan_port_probe(struct usb_serial_port *port);
46static int keyspan_port_remove(struct usb_serial_port *port);
45static int keyspan_write_room (struct tty_struct *tty); 47static int keyspan_write_room (struct tty_struct *tty);
46 48
47static int keyspan_write (struct tty_struct *tty, 49static int keyspan_write (struct tty_struct *tty,
@@ -567,6 +569,8 @@ static struct usb_serial_driver keyspan_1port_device = {
567 .attach = keyspan_startup, 569 .attach = keyspan_startup,
568 .disconnect = keyspan_disconnect, 570 .disconnect = keyspan_disconnect,
569 .release = keyspan_release, 571 .release = keyspan_release,
572 .port_probe = keyspan_port_probe,
573 .port_remove = keyspan_port_remove,
570}; 574};
571 575
572static struct usb_serial_driver keyspan_2port_device = { 576static struct usb_serial_driver keyspan_2port_device = {
@@ -589,6 +593,8 @@ static struct usb_serial_driver keyspan_2port_device = {
589 .attach = keyspan_startup, 593 .attach = keyspan_startup,
590 .disconnect = keyspan_disconnect, 594 .disconnect = keyspan_disconnect,
591 .release = keyspan_release, 595 .release = keyspan_release,
596 .port_probe = keyspan_port_probe,
597 .port_remove = keyspan_port_remove,
592}; 598};
593 599
594static struct usb_serial_driver keyspan_4port_device = { 600static struct usb_serial_driver keyspan_4port_device = {
@@ -611,6 +617,8 @@ static struct usb_serial_driver keyspan_4port_device = {
611 .attach = keyspan_startup, 617 .attach = keyspan_startup,
612 .disconnect = keyspan_disconnect, 618 .disconnect = keyspan_disconnect,
613 .release = keyspan_release, 619 .release = keyspan_release,
620 .port_probe = keyspan_port_probe,
621 .port_remove = keyspan_port_remove,
614}; 622};
615 623
616static struct usb_serial_driver * const serial_drivers[] = { 624static struct usb_serial_driver * const serial_drivers[] = {
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index ca43ecb4a2bd..bb87e29c4ac2 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -713,29 +713,33 @@ MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw");
713MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw"); 713MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw");
714#endif 714#endif
715 715
716static int keyspan_pda_startup(struct usb_serial *serial) 716static int keyspan_pda_port_probe(struct usb_serial_port *port)
717{ 717{
718 718
719 struct keyspan_pda_private *priv; 719 struct keyspan_pda_private *priv;
720 720
721 /* allocate the private data structures for all ports. Well, for all
722 one ports. */
723
724 priv = kmalloc(sizeof(struct keyspan_pda_private), GFP_KERNEL); 721 priv = kmalloc(sizeof(struct keyspan_pda_private), GFP_KERNEL);
725 if (!priv) 722 if (!priv)
726 return 1; /* error */ 723 return -ENOMEM;
727 usb_set_serial_port_data(serial->port[0], priv); 724
728 init_waitqueue_head(&serial->port[0]->write_wait);
729 INIT_WORK(&priv->wakeup_work, keyspan_pda_wakeup_write); 725 INIT_WORK(&priv->wakeup_work, keyspan_pda_wakeup_write);
730 INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle); 726 INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle);
731 priv->serial = serial; 727 priv->serial = port->serial;
732 priv->port = serial->port[0]; 728 priv->port = port;
729
730 usb_set_serial_port_data(port, priv);
731
733 return 0; 732 return 0;
734} 733}
735 734
736static void keyspan_pda_release(struct usb_serial *serial) 735static int keyspan_pda_port_remove(struct usb_serial_port *port)
737{ 736{
738 kfree(usb_get_serial_port_data(serial->port[0])); 737 struct keyspan_pda_private *priv;
738
739 priv = usb_get_serial_port_data(port);
740 kfree(priv);
741
742 return 0;
739} 743}
740 744
741#ifdef KEYSPAN 745#ifdef KEYSPAN
@@ -786,8 +790,8 @@ static struct usb_serial_driver keyspan_pda_device = {
786 .break_ctl = keyspan_pda_break_ctl, 790 .break_ctl = keyspan_pda_break_ctl,
787 .tiocmget = keyspan_pda_tiocmget, 791 .tiocmget = keyspan_pda_tiocmget,
788 .tiocmset = keyspan_pda_tiocmset, 792 .tiocmset = keyspan_pda_tiocmset,
789 .attach = keyspan_pda_startup, 793 .port_probe = keyspan_pda_port_probe,
790 .release = keyspan_pda_release, 794 .port_remove = keyspan_pda_port_remove,
791}; 795};
792 796
793static struct usb_serial_driver * const serial_drivers[] = { 797static struct usb_serial_driver * const serial_drivers[] = {
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 3f6d7376c02d..1f4517864cd2 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -60,8 +60,8 @@
60/* 60/*
61 * Function prototypes 61 * Function prototypes
62 */ 62 */
63static int klsi_105_startup(struct usb_serial *serial); 63static int klsi_105_port_probe(struct usb_serial_port *port);
64static void klsi_105_release(struct usb_serial *serial); 64static int klsi_105_port_remove(struct usb_serial_port *port);
65static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port); 65static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port);
66static void klsi_105_close(struct usb_serial_port *port); 66static void klsi_105_close(struct usb_serial_port *port);
67static void klsi_105_set_termios(struct tty_struct *tty, 67static void klsi_105_set_termios(struct tty_struct *tty,
@@ -99,8 +99,8 @@ static struct usb_serial_driver kl5kusb105d_device = {
99 /*.break_ctl = klsi_105_break_ctl,*/ 99 /*.break_ctl = klsi_105_break_ctl,*/
100 .tiocmget = klsi_105_tiocmget, 100 .tiocmget = klsi_105_tiocmget,
101 .tiocmset = klsi_105_tiocmset, 101 .tiocmset = klsi_105_tiocmset,
102 .attach = klsi_105_startup, 102 .port_probe = klsi_105_port_probe,
103 .release = klsi_105_release, 103 .port_remove = klsi_105_port_remove,
104 .throttle = usb_serial_generic_throttle, 104 .throttle = usb_serial_generic_throttle,
105 .unthrottle = usb_serial_generic_unthrottle, 105 .unthrottle = usb_serial_generic_unthrottle,
106 .process_read_urb = klsi_105_process_read_urb, 106 .process_read_urb = klsi_105_process_read_urb,
@@ -223,60 +223,40 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
223 * Driver's tty interface functions 223 * Driver's tty interface functions
224 */ 224 */
225 225
226static int klsi_105_startup(struct usb_serial *serial) 226static int klsi_105_port_probe(struct usb_serial_port *port)
227{ 227{
228 struct klsi_105_private *priv; 228 struct klsi_105_private *priv;
229 int i;
230 229
231 /* check if we support the product id (see keyspan.c) 230 priv = kmalloc(sizeof(*priv), GFP_KERNEL);
232 * FIXME 231 if (!priv)
233 */ 232 return -ENOMEM;
234 233
235 /* allocate the private data structure */ 234 /* set initial values for control structures */
236 for (i = 0; i < serial->num_ports; i++) { 235 priv->cfg.pktlen = 5;
237 priv = kmalloc(sizeof(struct klsi_105_private), 236 priv->cfg.baudrate = kl5kusb105a_sio_b9600;
238 GFP_KERNEL); 237 priv->cfg.databits = kl5kusb105a_dtb_8;
239 if (!priv) { 238 priv->cfg.unknown1 = 0;
240 dev_dbg(&serial->interface->dev, 239 priv->cfg.unknown2 = 1;
241 "%s - kmalloc for klsi_105_private failed.\n",
242 __func__);
243 i--;
244 goto err_cleanup;
245 }
246 /* set initial values for control structures */
247 priv->cfg.pktlen = 5;
248 priv->cfg.baudrate = kl5kusb105a_sio_b9600;
249 priv->cfg.databits = kl5kusb105a_dtb_8;
250 priv->cfg.unknown1 = 0;
251 priv->cfg.unknown2 = 1;
252 240
253 priv->line_state = 0; 241 priv->line_state = 0;
254 242
255 usb_set_serial_port_data(serial->port[i], priv); 243 spin_lock_init(&priv->lock);
256 244
257 spin_lock_init(&priv->lock); 245 /* priv->termios is left uninitialized until port opening */
258 246
259 /* priv->termios is left uninitialized until port opening */ 247 usb_set_serial_port_data(port, priv);
260 init_waitqueue_head(&serial->port[i]->write_wait);
261 }
262 248
263 return 0; 249 return 0;
264
265err_cleanup:
266 for (; i >= 0; i--) {
267 priv = usb_get_serial_port_data(serial->port[i]);
268 kfree(priv);
269 usb_set_serial_port_data(serial->port[i], NULL);
270 }
271 return -ENOMEM;
272} 250}
273 251
274static void klsi_105_release(struct usb_serial *serial) 252static int klsi_105_port_remove(struct usb_serial_port *port)
275{ 253{
276 int i; 254 struct klsi_105_private *priv;
255
256 priv = usb_get_serial_port_data(port);
257 kfree(priv);
277 258
278 for (i = 0; i < serial->num_ports; ++i) 259 return 0;
279 kfree(usb_get_serial_port_data(serial->port[i]));
280} 260}
281 261
282static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) 262static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 5c4d2fbd4e11..c9ca7a5b12e0 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -54,8 +54,8 @@
54 54
55 55
56/* Function prototypes */ 56/* Function prototypes */
57static int kobil_startup(struct usb_serial *serial); 57static int kobil_port_probe(struct usb_serial_port *probe);
58static void kobil_release(struct usb_serial *serial); 58static int kobil_port_remove(struct usb_serial_port *probe);
59static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port); 59static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port);
60static void kobil_close(struct usb_serial_port *port); 60static void kobil_close(struct usb_serial_port *port);
61static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port, 61static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
@@ -89,8 +89,8 @@ static struct usb_serial_driver kobil_device = {
89 .description = "KOBIL USB smart card terminal", 89 .description = "KOBIL USB smart card terminal",
90 .id_table = id_table, 90 .id_table = id_table,
91 .num_ports = 1, 91 .num_ports = 1,
92 .attach = kobil_startup, 92 .port_probe = kobil_port_probe,
93 .release = kobil_release, 93 .port_remove = kobil_port_remove,
94 .ioctl = kobil_ioctl, 94 .ioctl = kobil_ioctl,
95 .set_termios = kobil_set_termios, 95 .set_termios = kobil_set_termios,
96 .init_termios = kobil_init_termios, 96 .init_termios = kobil_init_termios,
@@ -117,9 +117,10 @@ struct kobil_private {
117}; 117};
118 118
119 119
120static int kobil_startup(struct usb_serial *serial) 120static int kobil_port_probe(struct usb_serial_port *port)
121{ 121{
122 int i; 122 int i;
123 struct usb_serial *serial = port->serial;
123 struct kobil_private *priv; 124 struct kobil_private *priv;
124 struct usb_device *pdev; 125 struct usb_device *pdev;
125 struct usb_host_config *actconfig; 126 struct usb_host_config *actconfig;
@@ -149,7 +150,7 @@ static int kobil_startup(struct usb_serial *serial)
149 dev_dbg(&serial->dev->dev, "KOBIL KAAN SIM detected\n"); 150 dev_dbg(&serial->dev->dev, "KOBIL KAAN SIM detected\n");
150 break; 151 break;
151 } 152 }
152 usb_set_serial_port_data(serial->port[0], priv); 153 usb_set_serial_port_data(port, priv);
153 154
154 /* search for the necessary endpoints */ 155 /* search for the necessary endpoints */
155 pdev = serial->dev; 156 pdev = serial->dev;
@@ -179,12 +180,14 @@ static int kobil_startup(struct usb_serial *serial)
179} 180}
180 181
181 182
182static void kobil_release(struct usb_serial *serial) 183static int kobil_port_remove(struct usb_serial_port *port)
183{ 184{
184 int i; 185 struct kobil_private *priv;
185 186
186 for (i = 0; i < serial->num_ports; ++i) 187 priv = usb_get_serial_port_data(port);
187 kfree(usb_get_serial_port_data(serial->port[i])); 188 kfree(priv);
189
190 return 0;
188} 191}
189 192
190static void kobil_init_termios(struct tty_struct *tty) 193static void kobil_init_termios(struct tty_struct *tty)
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index f3947712e137..8a2081004107 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -49,7 +49,8 @@
49 * Function prototypes 49 * Function prototypes
50 */ 50 */
51static int mct_u232_startup(struct usb_serial *serial); 51static int mct_u232_startup(struct usb_serial *serial);
52static void mct_u232_release(struct usb_serial *serial); 52static int mct_u232_port_probe(struct usb_serial_port *port);
53static int mct_u232_port_remove(struct usb_serial_port *remove);
53static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port); 54static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port);
54static void mct_u232_close(struct usb_serial_port *port); 55static void mct_u232_close(struct usb_serial_port *port);
55static void mct_u232_dtr_rts(struct usb_serial_port *port, int on); 56static void mct_u232_dtr_rts(struct usb_serial_port *port, int on);
@@ -99,7 +100,8 @@ static struct usb_serial_driver mct_u232_device = {
99 .tiocmget = mct_u232_tiocmget, 100 .tiocmget = mct_u232_tiocmget,
100 .tiocmset = mct_u232_tiocmset, 101 .tiocmset = mct_u232_tiocmset,
101 .attach = mct_u232_startup, 102 .attach = mct_u232_startup,
102 .release = mct_u232_release, 103 .port_probe = mct_u232_port_probe,
104 .port_remove = mct_u232_port_remove,
103 .ioctl = mct_u232_ioctl, 105 .ioctl = mct_u232_ioctl,
104 .get_icount = mct_u232_get_icount, 106 .get_icount = mct_u232_get_icount,
105}; 107};
@@ -388,18 +390,8 @@ static void mct_u232_msr_to_state(struct usb_serial_port *port,
388 390
389static int mct_u232_startup(struct usb_serial *serial) 391static int mct_u232_startup(struct usb_serial *serial)
390{ 392{
391 struct mct_u232_private *priv;
392 struct usb_serial_port *port, *rport; 393 struct usb_serial_port *port, *rport;
393 394
394 priv = kzalloc(sizeof(struct mct_u232_private), GFP_KERNEL);
395 if (!priv)
396 return -ENOMEM;
397 spin_lock_init(&priv->lock);
398 init_waitqueue_head(&priv->msr_wait);
399 usb_set_serial_port_data(serial->port[0], priv);
400
401 init_waitqueue_head(&serial->port[0]->write_wait);
402
403 /* Puh, that's dirty */ 395 /* Puh, that's dirty */
404 port = serial->port[0]; 396 port = serial->port[0];
405 rport = serial->port[1]; 397 rport = serial->port[1];
@@ -412,18 +404,31 @@ static int mct_u232_startup(struct usb_serial *serial)
412 return 0; 404 return 0;
413} /* mct_u232_startup */ 405} /* mct_u232_startup */
414 406
407static int mct_u232_port_probe(struct usb_serial_port *port)
408{
409 struct mct_u232_private *priv;
410
411 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
412 if (!priv)
413 return -ENOMEM;
414
415 spin_lock_init(&priv->lock);
416 init_waitqueue_head(&priv->msr_wait);
417
418 usb_set_serial_port_data(port, priv);
415 419
416static void mct_u232_release(struct usb_serial *serial) 420 return 0;
421}
422
423static int mct_u232_port_remove(struct usb_serial_port *port)
417{ 424{
418 struct mct_u232_private *priv; 425 struct mct_u232_private *priv;
419 int i;
420 426
421 for (i = 0; i < serial->num_ports; ++i) { 427 priv = usb_get_serial_port_data(port);
422 /* My special items, the standard routines free my urbs */ 428 kfree(priv);
423 priv = usb_get_serial_port_data(serial->port[i]); 429
424 kfree(priv); 430 return 0;
425 } 431}
426} /* mct_u232_release */
427 432
428static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port) 433static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port)
429{ 434{
@@ -515,12 +520,14 @@ static void mct_u232_dtr_rts(struct usb_serial_port *port, int on)
515 520
516static void mct_u232_close(struct usb_serial_port *port) 521static void mct_u232_close(struct usb_serial_port *port)
517{ 522{
518 if (port->serial->dev) { 523 /*
519 /* shutdown our urbs */ 524 * Must kill the read urb as it is actually an interrupt urb, which
520 usb_kill_urb(port->write_urb); 525 * generic close thus fails to kill.
521 usb_kill_urb(port->read_urb); 526 */
522 usb_kill_urb(port->interrupt_in_urb); 527 usb_kill_urb(port->read_urb);
523 } 528 usb_kill_urb(port->interrupt_in_urb);
529
530 usb_serial_generic_close(port);
524} /* mct_u232_close */ 531} /* mct_u232_close */
525 532
526 533
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index 0b257ddffbdb..6f29c74eb769 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -179,16 +179,13 @@ static void metrousb_cleanup(struct usb_serial_port *port)
179{ 179{
180 dev_dbg(&port->dev, "%s\n", __func__); 180 dev_dbg(&port->dev, "%s\n", __func__);
181 181
182 if (port->serial->dev) { 182 usb_unlink_urb(port->interrupt_in_urb);
183 /* Shutdown any interrupt in urbs. */ 183 usb_kill_urb(port->interrupt_in_urb);
184 if (port->interrupt_in_urb) { 184
185 usb_unlink_urb(port->interrupt_in_urb); 185 mutex_lock(&port->serial->disc_mutex);
186 usb_kill_urb(port->interrupt_in_urb); 186 if (!port->serial->disconnected)
187 }
188
189 /* Send deactivate cmd to device */
190 metrousb_send_unidirectional_cmd(UNI_CMD_CLOSE, port); 187 metrousb_send_unidirectional_cmd(UNI_CMD_CLOSE, port);
191 } 188 mutex_unlock(&port->serial->disc_mutex);
192} 189}
193 190
194static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port) 191static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
@@ -271,51 +268,27 @@ static int metrousb_set_modem_ctrl(struct usb_serial *serial, unsigned int contr
271 return retval; 268 return retval;
272} 269}
273 270
274static void metrousb_shutdown(struct usb_serial *serial) 271static int metrousb_port_probe(struct usb_serial_port *port)
275{ 272{
276 int i = 0; 273 struct metrousb_private *metro_priv;
277 274
278 dev_dbg(&serial->dev->dev, "%s\n", __func__); 275 metro_priv = kzalloc(sizeof(*metro_priv), GFP_KERNEL);
276 if (!metro_priv)
277 return -ENOMEM;
279 278
280 /* Stop reading and writing on all ports. */ 279 spin_lock_init(&metro_priv->lock);
281 for (i = 0; i < serial->num_ports; ++i) {
282 /* Close any open urbs. */
283 metrousb_cleanup(serial->port[i]);
284 280
285 /* Free memory. */ 281 usb_set_serial_port_data(port, metro_priv);
286 kfree(usb_get_serial_port_data(serial->port[i]));
287 usb_set_serial_port_data(serial->port[i], NULL);
288 282
289 dev_dbg(&serial->dev->dev, "%s - freed port number=%d\n", 283 return 0;
290 __func__, serial->port[i]->number);
291 }
292} 284}
293 285
294static int metrousb_startup(struct usb_serial *serial) 286static int metrousb_port_remove(struct usb_serial_port *port)
295{ 287{
296 struct metrousb_private *metro_priv; 288 struct metrousb_private *metro_priv;
297 struct usb_serial_port *port;
298 int i = 0;
299 289
300 dev_dbg(&serial->dev->dev, "%s\n", __func__); 290 metro_priv = usb_get_serial_port_data(port);
301 291 kfree(metro_priv);
302 /* Loop through the serial ports setting up the private structures.
303 * Currently we only use one port. */
304 for (i = 0; i < serial->num_ports; ++i) {
305 port = serial->port[i];
306
307 /* Declare memory. */
308 metro_priv = kzalloc(sizeof(struct metrousb_private), GFP_KERNEL);
309 if (!metro_priv)
310 return -ENOMEM;
311
312 /* Initialize memory. */
313 spin_lock_init(&metro_priv->lock);
314 usb_set_serial_port_data(port, metro_priv);
315
316 dev_dbg(&serial->dev->dev, "%s - port number=%d\n ",
317 __func__, port->number);
318 }
319 292
320 return 0; 293 return 0;
321} 294}
@@ -414,8 +387,8 @@ static struct usb_serial_driver metrousb_device = {
414 .close = metrousb_cleanup, 387 .close = metrousb_cleanup,
415 .read_int_callback = metrousb_read_int_callback, 388 .read_int_callback = metrousb_read_int_callback,
416 .write_int_callback = metrousb_write_int_callback, 389 .write_int_callback = metrousb_write_int_callback,
417 .attach = metrousb_startup, 390 .port_probe = metrousb_port_probe,
418 .release = metrousb_shutdown, 391 .port_remove = metrousb_port_remove,
419 .throttle = metrousb_throttle, 392 .throttle = metrousb_throttle,
420 .unthrottle = metrousb_unthrottle, 393 .unthrottle = metrousb_unthrottle,
421 .tiocmget = metrousb_tiocmget, 394 .tiocmget = metrousb_tiocmget,
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 1bf1ad066666..75267421aad8 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1966,9 +1966,7 @@ static int mos7720_ioctl(struct tty_struct *tty,
1966 1966
1967static int mos7720_startup(struct usb_serial *serial) 1967static int mos7720_startup(struct usb_serial *serial)
1968{ 1968{
1969 struct moschip_port *mos7720_port;
1970 struct usb_device *dev; 1969 struct usb_device *dev;
1971 int i;
1972 char data; 1970 char data;
1973 u16 product; 1971 u16 product;
1974 int ret_val; 1972 int ret_val;
@@ -1999,29 +1997,6 @@ static int mos7720_startup(struct usb_serial *serial)
1999 serial->port[1]->interrupt_in_buffer = NULL; 1997 serial->port[1]->interrupt_in_buffer = NULL;
2000 } 1998 }
2001 1999
2002
2003 /* set up serial port private structures */
2004 for (i = 0; i < serial->num_ports; ++i) {
2005 mos7720_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL);
2006 if (mos7720_port == NULL) {
2007 dev_err(&dev->dev, "%s - Out of memory\n", __func__);
2008 return -ENOMEM;
2009 }
2010
2011 /* Initialize all port interrupt end point to port 0 int
2012 * endpoint. Our device has only one interrupt endpoint
2013 * common to all ports */
2014 serial->port[i]->interrupt_in_endpointAddress =
2015 serial->port[0]->interrupt_in_endpointAddress;
2016
2017 mos7720_port->port = serial->port[i];
2018 usb_set_serial_port_data(serial->port[i], mos7720_port);
2019
2020 dev_dbg(&dev->dev, "port number is %d\n", serial->port[i]->number);
2021 dev_dbg(&dev->dev, "serial number is %d\n", serial->minor);
2022 }
2023
2024
2025 /* setting configuration feature to one */ 2000 /* setting configuration feature to one */
2026 usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 2001 usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
2027 (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5*HZ); 2002 (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5*HZ);
@@ -2049,8 +2024,6 @@ static int mos7720_startup(struct usb_serial *serial)
2049 2024
2050static void mos7720_release(struct usb_serial *serial) 2025static void mos7720_release(struct usb_serial *serial)
2051{ 2026{
2052 int i;
2053
2054#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT 2027#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
2055 /* close the parallel port */ 2028 /* close the parallel port */
2056 2029
@@ -2089,9 +2062,36 @@ static void mos7720_release(struct usb_serial *serial)
2089 kref_put(&mos_parport->ref_count, destroy_mos_parport); 2062 kref_put(&mos_parport->ref_count, destroy_mos_parport);
2090 } 2063 }
2091#endif 2064#endif
2092 /* free private structure allocated for serial port */ 2065}
2093 for (i = 0; i < serial->num_ports; ++i) 2066
2094 kfree(usb_get_serial_port_data(serial->port[i])); 2067static int mos7720_port_probe(struct usb_serial_port *port)
2068{
2069 struct moschip_port *mos7720_port;
2070
2071 mos7720_port = kzalloc(sizeof(*mos7720_port), GFP_KERNEL);
2072 if (!mos7720_port)
2073 return -ENOMEM;
2074
2075 /* Initialize all port interrupt end point to port 0 int endpoint.
2076 * Our device has only one interrupt endpoint common to all ports.
2077 */
2078 port->interrupt_in_endpointAddress =
2079 port->serial->port[0]->interrupt_in_endpointAddress;
2080 mos7720_port->port = port;
2081
2082 usb_set_serial_port_data(port, mos7720_port);
2083
2084 return 0;
2085}
2086
2087static int mos7720_port_remove(struct usb_serial_port *port)
2088{
2089 struct moschip_port *mos7720_port;
2090
2091 mos7720_port = usb_get_serial_port_data(port);
2092 kfree(mos7720_port);
2093
2094 return 0;
2095} 2095}
2096 2096
2097static struct usb_serial_driver moschip7720_2port_driver = { 2097static struct usb_serial_driver moschip7720_2port_driver = {
@@ -2109,6 +2109,8 @@ static struct usb_serial_driver moschip7720_2port_driver = {
2109 .probe = mos77xx_probe, 2109 .probe = mos77xx_probe,
2110 .attach = mos7720_startup, 2110 .attach = mos7720_startup,
2111 .release = mos7720_release, 2111 .release = mos7720_release,
2112 .port_probe = mos7720_port_probe,
2113 .port_remove = mos7720_port_remove,
2112 .ioctl = mos7720_ioctl, 2114 .ioctl = mos7720_ioctl,
2113 .tiocmget = mos7720_tiocmget, 2115 .tiocmget = mos7720_tiocmget,
2114 .tiocmset = mos7720_tiocmset, 2116 .tiocmset = mos7720_tiocmset,
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index d6d4eeca8c68..1cf3375ec1af 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -218,12 +218,10 @@ struct moschip_port {
218 int port_num; /*Actual port number in the device(1,2,etc) */ 218 int port_num; /*Actual port number in the device(1,2,etc) */
219 struct urb *write_urb; /* write URB for this port */ 219 struct urb *write_urb; /* write URB for this port */
220 struct urb *read_urb; /* read URB for this port */ 220 struct urb *read_urb; /* read URB for this port */
221 struct urb *int_urb;
222 __u8 shadowLCR; /* last LCR value received */ 221 __u8 shadowLCR; /* last LCR value received */
223 __u8 shadowMCR; /* last MCR value received */ 222 __u8 shadowMCR; /* last MCR value received */
224 char open; 223 char open;
225 char open_ports; 224 char open_ports;
226 char zombie;
227 wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ 225 wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */
228 wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */ 226 wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */
229 int delta_msr_cond; 227 int delta_msr_cond;
@@ -478,7 +476,6 @@ static void mos7840_control_callback(struct urb *urb)
478 struct moschip_port *mos7840_port; 476 struct moschip_port *mos7840_port;
479 struct device *dev = &urb->dev->dev; 477 struct device *dev = &urb->dev->dev;
480 __u8 regval = 0x0; 478 __u8 regval = 0x0;
481 int result = 0;
482 int status = urb->status; 479 int status = urb->status;
483 480
484 mos7840_port = urb->context; 481 mos7840_port = urb->context;
@@ -495,7 +492,7 @@ static void mos7840_control_callback(struct urb *urb)
495 return; 492 return;
496 default: 493 default:
497 dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, status); 494 dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, status);
498 goto exit; 495 return;
499 } 496 }
500 497
501 dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length); 498 dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
@@ -508,16 +505,6 @@ static void mos7840_control_callback(struct urb *urb)
508 mos7840_handle_new_msr(mos7840_port, regval); 505 mos7840_handle_new_msr(mos7840_port, regval);
509 else if (mos7840_port->MsrLsr == 1) 506 else if (mos7840_port->MsrLsr == 1)
510 mos7840_handle_new_lsr(mos7840_port, regval); 507 mos7840_handle_new_lsr(mos7840_port, regval);
511
512exit:
513 spin_lock(&mos7840_port->pool_lock);
514 if (!mos7840_port->zombie)
515 result = usb_submit_urb(mos7840_port->int_urb, GFP_ATOMIC);
516 spin_unlock(&mos7840_port->pool_lock);
517 if (result) {
518 dev_err(dev, "%s - Error %d submitting interrupt urb\n",
519 __func__, result);
520 }
521} 508}
522 509
523static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg, 510static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
@@ -686,14 +673,7 @@ static void mos7840_interrupt_callback(struct urb *urb)
686 wreg = MODEM_STATUS_REGISTER; 673 wreg = MODEM_STATUS_REGISTER;
687 break; 674 break;
688 } 675 }
689 spin_lock(&mos7840_port->pool_lock); 676 rv = mos7840_get_reg(mos7840_port, wval, wreg, &Data);
690 if (!mos7840_port->zombie) {
691 rv = mos7840_get_reg(mos7840_port, wval, wreg, &Data);
692 } else {
693 spin_unlock(&mos7840_port->pool_lock);
694 return;
695 }
696 spin_unlock(&mos7840_port->pool_lock);
697 } 677 }
698 } 678 }
699 } 679 }
@@ -2347,309 +2327,249 @@ static int mos7840_calc_num_ports(struct usb_serial *serial)
2347 return mos7840_num_ports; 2327 return mos7840_num_ports;
2348} 2328}
2349 2329
2350/**************************************************************************** 2330static int mos7840_port_probe(struct usb_serial_port *port)
2351 * mos7840_startup
2352 ****************************************************************************/
2353
2354static int mos7840_startup(struct usb_serial *serial)
2355{ 2331{
2332 struct usb_serial *serial = port->serial;
2356 struct moschip_port *mos7840_port; 2333 struct moschip_port *mos7840_port;
2357 struct usb_device *dev; 2334 int status;
2358 int i, status; 2335 int pnum;
2359 __u16 Data; 2336 __u16 Data;
2360 2337
2361 dev = serial->dev;
2362
2363 /* we set up the pointers to the endpoints in the mos7840_open * 2338 /* we set up the pointers to the endpoints in the mos7840_open *
2364 * function, as the structures aren't created yet. */ 2339 * function, as the structures aren't created yet. */
2365 2340
2366 /* set up port private structures */ 2341 pnum = port->number - serial->minor;
2367 for (i = 0; i < serial->num_ports; ++i) {
2368 dev_dbg(&dev->dev, "mos7840_startup: configuring port %d............\n", i);
2369 mos7840_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL);
2370 if (mos7840_port == NULL) {
2371 dev_err(&dev->dev, "%s - Out of memory\n", __func__);
2372 status = -ENOMEM;
2373 i--; /* don't follow NULL pointer cleaning up */
2374 goto error;
2375 }
2376
2377 /* Initialize all port interrupt end point to port 0 int
2378 * endpoint. Our device has only one interrupt end point
2379 * common to all port */
2380
2381 mos7840_port->port = serial->port[i];
2382 mos7840_set_port_private(serial->port[i], mos7840_port);
2383 spin_lock_init(&mos7840_port->pool_lock);
2384
2385 /* minor is not initialised until later by
2386 * usb-serial.c:get_free_serial() and cannot therefore be used
2387 * to index device instances */
2388 mos7840_port->port_num = i + 1;
2389 dev_dbg(&dev->dev, "serial->port[i]->number = %d\n", serial->port[i]->number);
2390 dev_dbg(&dev->dev, "serial->port[i]->serial->minor = %d\n", serial->port[i]->serial->minor);
2391 dev_dbg(&dev->dev, "mos7840_port->port_num = %d\n", mos7840_port->port_num);
2392 dev_dbg(&dev->dev, "serial->minor = %d\n", serial->minor);
2393
2394 if (mos7840_port->port_num == 1) {
2395 mos7840_port->SpRegOffset = 0x0;
2396 mos7840_port->ControlRegOffset = 0x1;
2397 mos7840_port->DcrRegOffset = 0x4;
2398 } else if ((mos7840_port->port_num == 2)
2399 && (serial->num_ports == 4)) {
2400 mos7840_port->SpRegOffset = 0x8;
2401 mos7840_port->ControlRegOffset = 0x9;
2402 mos7840_port->DcrRegOffset = 0x16;
2403 } else if ((mos7840_port->port_num == 2)
2404 && (serial->num_ports == 2)) {
2405 mos7840_port->SpRegOffset = 0xa;
2406 mos7840_port->ControlRegOffset = 0xb;
2407 mos7840_port->DcrRegOffset = 0x19;
2408 } else if ((mos7840_port->port_num == 3)
2409 && (serial->num_ports == 4)) {
2410 mos7840_port->SpRegOffset = 0xa;
2411 mos7840_port->ControlRegOffset = 0xb;
2412 mos7840_port->DcrRegOffset = 0x19;
2413 } else if ((mos7840_port->port_num == 4)
2414 && (serial->num_ports == 4)) {
2415 mos7840_port->SpRegOffset = 0xc;
2416 mos7840_port->ControlRegOffset = 0xd;
2417 mos7840_port->DcrRegOffset = 0x1c;
2418 }
2419 mos7840_dump_serial_port(serial->port[i], mos7840_port);
2420 mos7840_set_port_private(serial->port[i], mos7840_port);
2421 2342
2422 /* enable rx_disable bit in control register */ 2343 dev_dbg(&port->dev, "mos7840_startup: configuring port %d\n", pnum);
2423 status = mos7840_get_reg_sync(serial->port[i], 2344 mos7840_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL);
2424 mos7840_port->ControlRegOffset, &Data); 2345 if (mos7840_port == NULL) {
2425 if (status < 0) { 2346 dev_err(&port->dev, "%s - Out of memory\n", __func__);
2426 dev_dbg(&dev->dev, "Reading ControlReg failed status-0x%x\n", status); 2347 return -ENOMEM;
2427 break; 2348 }
2428 } else
2429 dev_dbg(&dev->dev, "ControlReg Reading success val is %x, status%d\n", Data, status);
2430 Data |= 0x08; /* setting driver done bit */
2431 Data |= 0x04; /* sp1_bit to have cts change reflect in
2432 modem status reg */
2433
2434 /* Data |= 0x20; //rx_disable bit */
2435 status = mos7840_set_reg_sync(serial->port[i],
2436 mos7840_port->ControlRegOffset, Data);
2437 if (status < 0) {
2438 dev_dbg(&dev->dev, "Writing ControlReg failed(rx_disable) status-0x%x\n", status);
2439 break;
2440 } else
2441 dev_dbg(&dev->dev, "ControlReg Writing success(rx_disable) status%d\n", status);
2442 2349
2443 /* Write default values in DCR (i.e 0x01 in DCR0, 0x05 in DCR2 2350 /* Initialize all port interrupt end point to port 0 int
2444 and 0x24 in DCR3 */ 2351 * endpoint. Our device has only one interrupt end point
2445 Data = 0x01; 2352 * common to all port */
2446 status = mos7840_set_reg_sync(serial->port[i], 2353
2447 (__u16) (mos7840_port->DcrRegOffset + 0), Data); 2354 mos7840_port->port = port;
2448 if (status < 0) { 2355 mos7840_set_port_private(port, mos7840_port);
2449 dev_dbg(&dev->dev, "Writing DCR0 failed status-0x%x\n", status); 2356 spin_lock_init(&mos7840_port->pool_lock);
2450 break; 2357
2451 } else 2358 /* minor is not initialised until later by
2452 dev_dbg(&dev->dev, "DCR0 Writing success status%d\n", status); 2359 * usb-serial.c:get_free_serial() and cannot therefore be used
2360 * to index device instances */
2361 mos7840_port->port_num = pnum + 1;
2362 dev_dbg(&port->dev, "port->number = %d\n", port->number);
2363 dev_dbg(&port->dev, "port->serial->minor = %d\n", port->serial->minor);
2364 dev_dbg(&port->dev, "mos7840_port->port_num = %d\n", mos7840_port->port_num);
2365 dev_dbg(&port->dev, "serial->minor = %d\n", serial->minor);
2366
2367 if (mos7840_port->port_num == 1) {
2368 mos7840_port->SpRegOffset = 0x0;
2369 mos7840_port->ControlRegOffset = 0x1;
2370 mos7840_port->DcrRegOffset = 0x4;
2371 } else if ((mos7840_port->port_num == 2) && (serial->num_ports == 4)) {
2372 mos7840_port->SpRegOffset = 0x8;
2373 mos7840_port->ControlRegOffset = 0x9;
2374 mos7840_port->DcrRegOffset = 0x16;
2375 } else if ((mos7840_port->port_num == 2) && (serial->num_ports == 2)) {
2376 mos7840_port->SpRegOffset = 0xa;
2377 mos7840_port->ControlRegOffset = 0xb;
2378 mos7840_port->DcrRegOffset = 0x19;
2379 } else if ((mos7840_port->port_num == 3) && (serial->num_ports == 4)) {
2380 mos7840_port->SpRegOffset = 0xa;
2381 mos7840_port->ControlRegOffset = 0xb;
2382 mos7840_port->DcrRegOffset = 0x19;
2383 } else if ((mos7840_port->port_num == 4) && (serial->num_ports == 4)) {
2384 mos7840_port->SpRegOffset = 0xc;
2385 mos7840_port->ControlRegOffset = 0xd;
2386 mos7840_port->DcrRegOffset = 0x1c;
2387 }
2388 mos7840_dump_serial_port(port, mos7840_port);
2389 mos7840_set_port_private(port, mos7840_port);
2390
2391 /* enable rx_disable bit in control register */
2392 status = mos7840_get_reg_sync(port,
2393 mos7840_port->ControlRegOffset, &Data);
2394 if (status < 0) {
2395 dev_dbg(&port->dev, "Reading ControlReg failed status-0x%x\n", status);
2396 goto out;
2397 } else
2398 dev_dbg(&port->dev, "ControlReg Reading success val is %x, status%d\n", Data, status);
2399 Data |= 0x08; /* setting driver done bit */
2400 Data |= 0x04; /* sp1_bit to have cts change reflect in
2401 modem status reg */
2453 2402
2454 Data = 0x05; 2403 /* Data |= 0x20; //rx_disable bit */
2455 status = mos7840_set_reg_sync(serial->port[i], 2404 status = mos7840_set_reg_sync(port,
2456 (__u16) (mos7840_port->DcrRegOffset + 1), Data); 2405 mos7840_port->ControlRegOffset, Data);
2457 if (status < 0) { 2406 if (status < 0) {
2458 dev_dbg(&dev->dev, "Writing DCR1 failed status-0x%x\n", status); 2407 dev_dbg(&port->dev, "Writing ControlReg failed(rx_disable) status-0x%x\n", status);
2459 break; 2408 goto out;
2460 } else 2409 } else
2461 dev_dbg(&dev->dev, "DCR1 Writing success status%d\n", status); 2410 dev_dbg(&port->dev, "ControlReg Writing success(rx_disable) status%d\n", status);
2462 2411
2463 Data = 0x24; 2412 /* Write default values in DCR (i.e 0x01 in DCR0, 0x05 in DCR2
2464 status = mos7840_set_reg_sync(serial->port[i], 2413 and 0x24 in DCR3 */
2465 (__u16) (mos7840_port->DcrRegOffset + 2), Data); 2414 Data = 0x01;
2466 if (status < 0) { 2415 status = mos7840_set_reg_sync(port,
2467 dev_dbg(&dev->dev, "Writing DCR2 failed status-0x%x\n", status); 2416 (__u16) (mos7840_port->DcrRegOffset + 0), Data);
2468 break; 2417 if (status < 0) {
2469 } else 2418 dev_dbg(&port->dev, "Writing DCR0 failed status-0x%x\n", status);
2470 dev_dbg(&dev->dev, "DCR2 Writing success status%d\n", status); 2419 goto out;
2420 } else
2421 dev_dbg(&port->dev, "DCR0 Writing success status%d\n", status);
2471 2422
2472 /* write values in clkstart0x0 and clkmulti 0x20 */ 2423 Data = 0x05;
2473 Data = 0x0; 2424 status = mos7840_set_reg_sync(port,
2474 status = mos7840_set_reg_sync(serial->port[i], 2425 (__u16) (mos7840_port->DcrRegOffset + 1), Data);
2475 CLK_START_VALUE_REGISTER, Data); 2426 if (status < 0) {
2476 if (status < 0) { 2427 dev_dbg(&port->dev, "Writing DCR1 failed status-0x%x\n", status);
2477 dev_dbg(&dev->dev, "Writing CLK_START_VALUE_REGISTER failed status-0x%x\n", status); 2428 goto out;
2478 break; 2429 } else
2479 } else 2430 dev_dbg(&port->dev, "DCR1 Writing success status%d\n", status);
2480 dev_dbg(&dev->dev, "CLK_START_VALUE_REGISTER Writing success status%d\n", status);
2481 2431
2482 Data = 0x20; 2432 Data = 0x24;
2483 status = mos7840_set_reg_sync(serial->port[i], 2433 status = mos7840_set_reg_sync(port,
2484 CLK_MULTI_REGISTER, Data); 2434 (__u16) (mos7840_port->DcrRegOffset + 2), Data);
2485 if (status < 0) { 2435 if (status < 0) {
2486 dev_dbg(&dev->dev, "Writing CLK_MULTI_REGISTER failed status-0x%x\n", status); 2436 dev_dbg(&port->dev, "Writing DCR2 failed status-0x%x\n", status);
2487 goto error; 2437 goto out;
2488 } else 2438 } else
2489 dev_dbg(&dev->dev, "CLK_MULTI_REGISTER Writing success status%d\n", status); 2439 dev_dbg(&port->dev, "DCR2 Writing success status%d\n", status);
2490 2440
2491 /* write value 0x0 to scratchpad register */ 2441 /* write values in clkstart0x0 and clkmulti 0x20 */
2492 Data = 0x00; 2442 Data = 0x0;
2493 status = mos7840_set_uart_reg(serial->port[i], 2443 status = mos7840_set_reg_sync(port, CLK_START_VALUE_REGISTER, Data);
2494 SCRATCH_PAD_REGISTER, Data); 2444 if (status < 0) {
2495 if (status < 0) { 2445 dev_dbg(&port->dev, "Writing CLK_START_VALUE_REGISTER failed status-0x%x\n", status);
2496 dev_dbg(&dev->dev, "Writing SCRATCH_PAD_REGISTER failed status-0x%x\n", status); 2446 goto out;
2497 break; 2447 } else
2498 } else 2448 dev_dbg(&port->dev, "CLK_START_VALUE_REGISTER Writing success status%d\n", status);
2499 dev_dbg(&dev->dev, "SCRATCH_PAD_REGISTER Writing success status%d\n", status);
2500 2449
2501 /* Zero Length flag register */ 2450 Data = 0x20;
2502 if ((mos7840_port->port_num != 1) 2451 status = mos7840_set_reg_sync(port, CLK_MULTI_REGISTER, Data);
2503 && (serial->num_ports == 2)) { 2452 if (status < 0) {
2453 dev_dbg(&port->dev, "Writing CLK_MULTI_REGISTER failed status-0x%x\n", status);
2454 goto error;
2455 } else
2456 dev_dbg(&port->dev, "CLK_MULTI_REGISTER Writing success status%d\n", status);
2504 2457
2505 Data = 0xff; 2458 /* write value 0x0 to scratchpad register */
2506 status = mos7840_set_reg_sync(serial->port[i], 2459 Data = 0x00;
2507 (__u16) (ZLP_REG1 + 2460 status = mos7840_set_uart_reg(port, SCRATCH_PAD_REGISTER, Data);
2508 ((__u16)mos7840_port->port_num)), Data); 2461 if (status < 0) {
2509 dev_dbg(&dev->dev, "ZLIP offset %x\n", 2462 dev_dbg(&port->dev, "Writing SCRATCH_PAD_REGISTER failed status-0x%x\n", status);
2463 goto out;
2464 } else
2465 dev_dbg(&port->dev, "SCRATCH_PAD_REGISTER Writing success status%d\n", status);
2466
2467 /* Zero Length flag register */
2468 if ((mos7840_port->port_num != 1) && (serial->num_ports == 2)) {
2469 Data = 0xff;
2470 status = mos7840_set_reg_sync(port,
2471 (__u16) (ZLP_REG1 +
2472 ((__u16)mos7840_port->port_num)), Data);
2473 dev_dbg(&port->dev, "ZLIP offset %x\n",
2510 (__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num))); 2474 (__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num)));
2511 if (status < 0) { 2475 if (status < 0) {
2512 dev_dbg(&dev->dev, "Writing ZLP_REG%d failed status-0x%x\n", i + 2, status); 2476 dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 2, status);
2513 break; 2477 goto out;
2514 } else 2478 } else
2515 dev_dbg(&dev->dev, "ZLP_REG%d Writing success status%d\n", i + 2, status); 2479 dev_dbg(&port->dev, "ZLP_REG%d Writing success status%d\n", pnum + 2, status);
2516 } else { 2480 } else {
2517 Data = 0xff; 2481 Data = 0xff;
2518 status = mos7840_set_reg_sync(serial->port[i], 2482 status = mos7840_set_reg_sync(port,
2519 (__u16) (ZLP_REG1 + 2483 (__u16) (ZLP_REG1 +
2520 ((__u16)mos7840_port->port_num) - 0x1), Data); 2484 ((__u16)mos7840_port->port_num) - 0x1), Data);
2521 dev_dbg(&dev->dev, "ZLIP offset %x\n", 2485 dev_dbg(&port->dev, "ZLIP offset %x\n",
2522 (__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num) - 0x1)); 2486 (__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num) - 0x1));
2523 if (status < 0) { 2487 if (status < 0) {
2524 dev_dbg(&dev->dev, "Writing ZLP_REG%d failed status-0x%x\n", i + 1, status); 2488 dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 1, status);
2525 break; 2489 goto out;
2526 } else 2490 } else
2527 dev_dbg(&dev->dev, "ZLP_REG%d Writing success status%d\n", i + 1, status); 2491 dev_dbg(&port->dev, "ZLP_REG%d Writing success status%d\n", pnum + 1, status);
2528 2492
2529 } 2493 }
2530 mos7840_port->control_urb = usb_alloc_urb(0, GFP_KERNEL); 2494 mos7840_port->control_urb = usb_alloc_urb(0, GFP_KERNEL);
2531 mos7840_port->ctrl_buf = kmalloc(16, GFP_KERNEL); 2495 mos7840_port->ctrl_buf = kmalloc(16, GFP_KERNEL);
2532 mos7840_port->dr = kmalloc(sizeof(struct usb_ctrlrequest), 2496 mos7840_port->dr = kmalloc(sizeof(struct usb_ctrlrequest),
2533 GFP_KERNEL); 2497 GFP_KERNEL);
2534 if (!mos7840_port->control_urb || !mos7840_port->ctrl_buf || 2498 if (!mos7840_port->control_urb || !mos7840_port->ctrl_buf ||
2535 !mos7840_port->dr) { 2499 !mos7840_port->dr) {
2536 status = -ENOMEM; 2500 status = -ENOMEM;
2537 goto error; 2501 goto error;
2538 } 2502 }
2539 2503
2540 mos7840_port->has_led = false; 2504 mos7840_port->has_led = false;
2541 2505
2542 /* Initialize LED timers */ 2506 /* Initialize LED timers */
2543 if (device_type == MOSCHIP_DEVICE_ID_7810) { 2507 if (device_type == MOSCHIP_DEVICE_ID_7810) {
2544 mos7840_port->has_led = true; 2508 mos7840_port->has_led = true;
2545 2509
2546 init_timer(&mos7840_port->led_timer1); 2510 init_timer(&mos7840_port->led_timer1);
2547 mos7840_port->led_timer1.function = mos7840_led_off; 2511 mos7840_port->led_timer1.function = mos7840_led_off;
2548 mos7840_port->led_timer1.expires = 2512 mos7840_port->led_timer1.expires =
2549 jiffies + msecs_to_jiffies(LED_ON_MS); 2513 jiffies + msecs_to_jiffies(LED_ON_MS);
2550 mos7840_port->led_timer1.data = 2514 mos7840_port->led_timer1.data = (unsigned long)mos7840_port;
2551 (unsigned long)mos7840_port;
2552 2515
2553 init_timer(&mos7840_port->led_timer2); 2516 init_timer(&mos7840_port->led_timer2);
2554 mos7840_port->led_timer2.function = 2517 mos7840_port->led_timer2.function = mos7840_led_flag_off;
2555 mos7840_led_flag_off; 2518 mos7840_port->led_timer2.expires =
2556 mos7840_port->led_timer2.expires = 2519 jiffies + msecs_to_jiffies(LED_OFF_MS);
2557 jiffies + msecs_to_jiffies(LED_OFF_MS); 2520 mos7840_port->led_timer2.data = (unsigned long)mos7840_port;
2558 mos7840_port->led_timer2.data =
2559 (unsigned long)mos7840_port;
2560 2521
2561 mos7840_port->led_flag = false; 2522 mos7840_port->led_flag = false;
2562 2523
2563 /* Turn off LED */ 2524 /* Turn off LED */
2564 mos7840_set_led_sync(serial->port[i], 2525 mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300);
2565 MODEM_CONTROL_REGISTER, 0x0300);
2566 }
2567 } 2526 }
2527out:
2528 if (pnum == serial->num_ports - 1) {
2529 /* Zero Length flag enable */
2530 Data = 0x0f;
2531 status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, Data);
2532 if (status < 0) {
2533 dev_dbg(&port->dev, "Writing ZLP_REG5 failed status-0x%x\n", status);
2534 goto error;
2535 } else
2536 dev_dbg(&port->dev, "ZLP_REG5 Writing success status%d\n", status);
2568 2537
2569 /* Zero Length flag enable */ 2538 /* setting configuration feature to one */
2570 Data = 0x0f; 2539 usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
2571 status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, Data); 2540 0x03, 0x00, 0x01, 0x00, NULL, 0x00,
2572 if (status < 0) { 2541 MOS_WDR_TIMEOUT);
2573 dev_dbg(&dev->dev, "Writing ZLP_REG5 failed status-0x%x\n", status); 2542 }
2574 goto error;
2575 } else
2576 dev_dbg(&dev->dev, "ZLP_REG5 Writing success status%d\n", status);
2577
2578 /* setting configuration feature to one */
2579 usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
2580 (__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, MOS_WDR_TIMEOUT);
2581 return 0; 2543 return 0;
2582error: 2544error:
2583 for (/* nothing */; i >= 0; i--) { 2545 kfree(mos7840_port->dr);
2584 mos7840_port = mos7840_get_port_private(serial->port[i]); 2546 kfree(mos7840_port->ctrl_buf);
2547 usb_free_urb(mos7840_port->control_urb);
2548 kfree(mos7840_port);
2585 2549
2586 kfree(mos7840_port->dr);
2587 kfree(mos7840_port->ctrl_buf);
2588 usb_free_urb(mos7840_port->control_urb);
2589 kfree(mos7840_port);
2590 serial->port[i] = NULL;
2591 }
2592 return status; 2550 return status;
2593} 2551}
2594 2552
2595/**************************************************************************** 2553static int mos7840_port_remove(struct usb_serial_port *port)
2596 * mos7840_disconnect
2597 * This function is called whenever the device is removed from the usb bus.
2598 ****************************************************************************/
2599
2600static void mos7840_disconnect(struct usb_serial *serial)
2601{ 2554{
2602 int i;
2603 unsigned long flags;
2604 struct moschip_port *mos7840_port; 2555 struct moschip_port *mos7840_port;
2605 2556
2606 /* check for the ports to be closed,close the ports and disconnect */ 2557 mos7840_port = mos7840_get_port_private(port);
2607 2558
2608 /* free private structure allocated for serial port * 2559 if (mos7840_port->has_led) {
2609 * stop reads and writes on all ports */ 2560 /* Turn off LED */
2561 mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300);
2610 2562
2611 for (i = 0; i < serial->num_ports; ++i) { 2563 del_timer_sync(&mos7840_port->led_timer1);
2612 mos7840_port = mos7840_get_port_private(serial->port[i]); 2564 del_timer_sync(&mos7840_port->led_timer2);
2613 if (mos7840_port) {
2614 spin_lock_irqsave(&mos7840_port->pool_lock, flags);
2615 mos7840_port->zombie = 1;
2616 spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
2617 usb_kill_urb(mos7840_port->control_urb);
2618 }
2619 } 2565 }
2620} 2566 usb_kill_urb(mos7840_port->control_urb);
2621 2567 usb_free_urb(mos7840_port->control_urb);
2622/**************************************************************************** 2568 kfree(mos7840_port->ctrl_buf);
2623 * mos7840_release 2569 kfree(mos7840_port->dr);
2624 * This function is called when the usb_serial structure is freed. 2570 kfree(mos7840_port);
2625 ****************************************************************************/
2626
2627static void mos7840_release(struct usb_serial *serial)
2628{
2629 int i;
2630 struct moschip_port *mos7840_port;
2631
2632 /* check for the ports to be closed,close the ports and disconnect */
2633 2571
2634 /* free private structure allocated for serial port * 2572 return 0;
2635 * stop reads and writes on all ports */
2636
2637 for (i = 0; i < serial->num_ports; ++i) {
2638 mos7840_port = mos7840_get_port_private(serial->port[i]);
2639 if (mos7840_port) {
2640 if (mos7840_port->has_led) {
2641 /* Turn off LED */
2642 mos7840_set_led_sync(mos7840_port->port,
2643 MODEM_CONTROL_REGISTER, 0x0300);
2644
2645 del_timer_sync(&mos7840_port->led_timer1);
2646 del_timer_sync(&mos7840_port->led_timer2);
2647 }
2648 kfree(mos7840_port->ctrl_buf);
2649 kfree(mos7840_port->dr);
2650 kfree(mos7840_port);
2651 }
2652 }
2653} 2573}
2654 2574
2655static struct usb_serial_driver moschip7840_4port_device = { 2575static struct usb_serial_driver moschip7840_4port_device = {
@@ -2677,9 +2597,8 @@ static struct usb_serial_driver moschip7840_4port_device = {
2677 .tiocmget = mos7840_tiocmget, 2597 .tiocmget = mos7840_tiocmget,
2678 .tiocmset = mos7840_tiocmset, 2598 .tiocmset = mos7840_tiocmset,
2679 .get_icount = mos7840_get_icount, 2599 .get_icount = mos7840_get_icount,
2680 .attach = mos7840_startup, 2600 .port_probe = mos7840_port_probe,
2681 .disconnect = mos7840_disconnect, 2601 .port_remove = mos7840_port_remove,
2682 .release = mos7840_release,
2683 .read_bulk_callback = mos7840_bulk_in_callback, 2602 .read_bulk_callback = mos7840_bulk_in_callback,
2684 .read_int_callback = mos7840_interrupt_callback, 2603 .read_int_callback = mos7840_interrupt_callback,
2685}; 2604};
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 6def58b79382..9ab73d295774 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -44,8 +44,8 @@ static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
44 const unsigned char *buf, int count); 44 const unsigned char *buf, int count);
45static int omninet_write_room(struct tty_struct *tty); 45static int omninet_write_room(struct tty_struct *tty);
46static void omninet_disconnect(struct usb_serial *serial); 46static void omninet_disconnect(struct usb_serial *serial);
47static void omninet_release(struct usb_serial *serial); 47static int omninet_port_probe(struct usb_serial_port *port);
48static int omninet_attach(struct usb_serial *serial); 48static int omninet_port_remove(struct usb_serial_port *port);
49 49
50static const struct usb_device_id id_table[] = { 50static const struct usb_device_id id_table[] = {
51 { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) }, 51 { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) },
@@ -62,7 +62,8 @@ static struct usb_serial_driver zyxel_omninet_device = {
62 .description = "ZyXEL - omni.net lcd plus usb", 62 .description = "ZyXEL - omni.net lcd plus usb",
63 .id_table = id_table, 63 .id_table = id_table,
64 .num_ports = 1, 64 .num_ports = 1,
65 .attach = omninet_attach, 65 .port_probe = omninet_port_probe,
66 .port_remove = omninet_port_remove,
66 .open = omninet_open, 67 .open = omninet_open,
67 .close = omninet_close, 68 .close = omninet_close,
68 .write = omninet_write, 69 .write = omninet_write,
@@ -70,7 +71,6 @@ static struct usb_serial_driver zyxel_omninet_device = {
70 .read_bulk_callback = omninet_read_bulk_callback, 71 .read_bulk_callback = omninet_read_bulk_callback,
71 .write_bulk_callback = omninet_write_bulk_callback, 72 .write_bulk_callback = omninet_write_bulk_callback,
72 .disconnect = omninet_disconnect, 73 .disconnect = omninet_disconnect,
73 .release = omninet_release,
74}; 74};
75 75
76static struct usb_serial_driver * const serial_drivers[] = { 76static struct usb_serial_driver * const serial_drivers[] = {
@@ -112,18 +112,26 @@ struct omninet_data {
112 __u8 od_outseq; /* Sequence number for bulk_out URBs */ 112 __u8 od_outseq; /* Sequence number for bulk_out URBs */
113}; 113};
114 114
115static int omninet_attach(struct usb_serial *serial) 115static int omninet_port_probe(struct usb_serial_port *port)
116{ 116{
117 struct omninet_data *od; 117 struct omninet_data *od;
118 struct usb_serial_port *port = serial->port[0];
119 118
120 od = kmalloc(sizeof(struct omninet_data), GFP_KERNEL); 119 od = kmalloc(sizeof(struct omninet_data), GFP_KERNEL);
121 if (!od) { 120 if (!od)
122 dev_err(&port->dev, "%s- kmalloc(%Zd) failed.\n",
123 __func__, sizeof(struct omninet_data));
124 return -ENOMEM; 121 return -ENOMEM;
125 } 122
126 usb_set_serial_port_data(port, od); 123 usb_set_serial_port_data(port, od);
124
125 return 0;
126}
127
128static int omninet_port_remove(struct usb_serial_port *port)
129{
130 struct omninet_data *od;
131
132 od = usb_get_serial_port_data(port);
133 kfree(od);
134
127 return 0; 135 return 0;
128} 136}
129 137
@@ -279,14 +287,6 @@ static void omninet_disconnect(struct usb_serial *serial)
279 usb_kill_urb(wport->write_urb); 287 usb_kill_urb(wport->write_urb);
280} 288}
281 289
282
283static void omninet_release(struct usb_serial *serial)
284{
285 struct usb_serial_port *port = serial->port[0];
286
287 kfree(usb_get_serial_port_data(port));
288}
289
290module_usb_serial_driver(serial_drivers, id_table); 290module_usb_serial_driver(serial_drivers, id_table);
291 291
292MODULE_AUTHOR(DRIVER_AUTHOR); 292MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index 41b1647306eb..6aba731d4864 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -155,7 +155,11 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype,
155{ 155{
156 struct usb_serial *serial = port->serial; 156 struct usb_serial *serial = port->serial;
157 int retval; 157 int retval;
158 u8 buffer[2]; 158 u8 *buffer;
159
160 buffer = kzalloc(1, GFP_KERNEL);
161 if (!buffer)
162 return -ENOMEM;
159 163
160 buffer[0] = val; 164 buffer[0] = val;
161 /* Send the message to the vendor control endpoint 165 /* Send the message to the vendor control endpoint
@@ -164,6 +168,7 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype,
164 requesttype, 168 requesttype,
165 USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 169 USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
166 0, 0, buffer, 1, 0); 170 0, 0, buffer, 1, 0);
171 kfree(buffer);
167 172
168 return retval; 173 return retval;
169} 174}
@@ -281,7 +286,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
281 if (!dr) { 286 if (!dr) {
282 dev_err(&port->dev, "out of memory\n"); 287 dev_err(&port->dev, "out of memory\n");
283 count = -ENOMEM; 288 count = -ENOMEM;
284 goto error; 289 goto error_no_dr;
285 } 290 }
286 291
287 dr->bRequestType = USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT; 292 dr->bRequestType = USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT;
@@ -311,6 +316,8 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
311 316
312 return count; 317 return count;
313error: 318error:
319 kfree(dr);
320error_no_dr:
314 usb_free_urb(urb); 321 usb_free_urb(urb);
315error_no_urb: 322error_no_urb:
316 kfree(buffer); 323 kfree(buffer);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 30cff03e9f01..5dee7d61241e 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -47,6 +47,7 @@
47/* Function prototypes */ 47/* Function prototypes */
48static int option_probe(struct usb_serial *serial, 48static int option_probe(struct usb_serial *serial,
49 const struct usb_device_id *id); 49 const struct usb_device_id *id);
50static int option_attach(struct usb_serial *serial);
50static void option_release(struct usb_serial *serial); 51static void option_release(struct usb_serial *serial);
51static int option_send_setup(struct usb_serial_port *port); 52static int option_send_setup(struct usb_serial_port *port);
52static void option_instat_callback(struct urb *urb); 53static void option_instat_callback(struct urb *urb);
@@ -503,11 +504,19 @@ static const struct option_blacklist_info net_intf5_blacklist = {
503 .reserved = BIT(5), 504 .reserved = BIT(5),
504}; 505};
505 506
507static const struct option_blacklist_info net_intf6_blacklist = {
508 .reserved = BIT(6),
509};
510
506static const struct option_blacklist_info zte_mf626_blacklist = { 511static const struct option_blacklist_info zte_mf626_blacklist = {
507 .sendsetup = BIT(0) | BIT(1), 512 .sendsetup = BIT(0) | BIT(1),
508 .reserved = BIT(4), 513 .reserved = BIT(4),
509}; 514};
510 515
516static const struct option_blacklist_info zte_1255_blacklist = {
517 .reserved = BIT(3) | BIT(4),
518};
519
511static const struct usb_device_id option_ids[] = { 520static const struct usb_device_id option_ids[] = {
512 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, 521 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
513 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, 522 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -853,13 +862,19 @@ static const struct usb_device_id option_ids[] = {
853 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff), 862 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff),
854 .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, 863 .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
855 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) }, 864 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
856 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) }, 865 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff),
857 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) }, 866 .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
867 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff),
868 .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
858 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) }, 869 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
859 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff) }, 870 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff),
860 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff) }, 871 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
861 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff) }, 872 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff),
862 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff) }, 873 .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
874 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff),
875 .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
876 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff),
877 .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
863 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) }, 878 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
864 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) }, 879 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) },
865 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) }, 880 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
@@ -872,7 +887,8 @@ static const struct usb_device_id option_ids[] = {
872 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) }, 887 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
873 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff), 888 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff),
874 .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, 889 .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
875 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) }, 890 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff),
891 .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
876 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) }, 892 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
877 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) }, 893 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
878 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) }, 894 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
@@ -880,13 +896,22 @@ static const struct usb_device_id option_ids[] = {
880 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) }, 896 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
881 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff), 897 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
882 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 898 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
899 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
900 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
901 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */
902 .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
903 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
904 .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
883 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff), 905 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
884 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 906 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
885 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff), 907 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
886 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 908 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
887 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff), 909 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
888 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 910 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
889 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) }, 911 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff),
912 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
913 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff),
914 .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
890 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) }, 915 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
891 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) }, 916 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
892 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) }, 917 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
@@ -1002,18 +1027,24 @@ static const struct usb_device_id option_ids[] = {
1002 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) }, 1027 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) },
1003 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) }, 1028 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
1004 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) }, 1029 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
1005 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) }, 1030 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff),
1031 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1006 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) }, 1032 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
1007 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) }, 1033 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff),
1034 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1008 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) }, 1035 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
1009 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) }, 1036 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
1010 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) }, 1037 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
1011 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) }, 1038 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
1012 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) }, 1039 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff),
1040 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1013 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) }, 1041 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
1014 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) }, 1042 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff),
1015 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) }, 1043 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1016 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) }, 1044 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff),
1045 .driver_info = (kernel_ulong_t)&zte_1255_blacklist },
1046 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff),
1047 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1017 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) }, 1048 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
1018 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) }, 1049 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
1019 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) }, 1050 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
@@ -1058,8 +1089,16 @@ static const struct usb_device_id option_ids[] = {
1058 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) }, 1089 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
1059 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) }, 1090 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
1060 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) }, 1091 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
1092 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff),
1093 .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
1061 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff), 1094 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
1062 .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, 1095 .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
1096 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff),
1097 .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
1098 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff),
1099 .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
1100 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
1101 .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
1063 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 1102 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
1064 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist }, 1103 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
1065 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, 1104 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
@@ -1071,15 +1110,21 @@ static const struct usb_device_id option_ids[] = {
1071 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, 1110 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
1072 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, 1111 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
1073 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) }, 1112 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
1074 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) }, 1113 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
1075 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff) }, 1114 .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
1076 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) }, 1115 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
1116 .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
1117 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff),
1118 .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
1077 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) }, 1119 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
1078 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) }, 1120 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
1079 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) }, 1121 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff),
1122 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1080 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) }, 1123 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
1081 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) }, 1124 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff),
1082 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) }, 1125 .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
1126 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
1127 .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
1083 1128
1084 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, 1129 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
1085 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, 1130 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
@@ -1244,8 +1289,9 @@ static struct usb_serial_driver option_1port_device = {
1244 .tiocmget = usb_wwan_tiocmget, 1289 .tiocmget = usb_wwan_tiocmget,
1245 .tiocmset = usb_wwan_tiocmset, 1290 .tiocmset = usb_wwan_tiocmset,
1246 .ioctl = usb_wwan_ioctl, 1291 .ioctl = usb_wwan_ioctl,
1247 .attach = usb_wwan_startup, 1292 .attach = option_attach,
1248 .release = option_release, 1293 .release = option_release,
1294 .port_probe = usb_wwan_port_probe,
1249 .port_remove = usb_wwan_port_remove, 1295 .port_remove = usb_wwan_port_remove,
1250 .read_int_callback = option_instat_callback, 1296 .read_int_callback = option_instat_callback,
1251#ifdef CONFIG_PM 1297#ifdef CONFIG_PM
@@ -1291,8 +1337,6 @@ static bool is_blacklisted(const u8 ifnum, enum option_blacklist_reason reason,
1291static int option_probe(struct usb_serial *serial, 1337static int option_probe(struct usb_serial *serial,
1292 const struct usb_device_id *id) 1338 const struct usb_device_id *id)
1293{ 1339{
1294 struct usb_wwan_intf_private *data;
1295 struct option_private *priv;
1296 struct usb_interface_descriptor *iface_desc = 1340 struct usb_interface_descriptor *iface_desc =
1297 &serial->interface->cur_altsetting->desc; 1341 &serial->interface->cur_altsetting->desc;
1298 struct usb_device_descriptor *dev_desc = &serial->dev->descriptor; 1342 struct usb_device_descriptor *dev_desc = &serial->dev->descriptor;
@@ -1330,6 +1374,19 @@ static int option_probe(struct usb_serial *serial,
1330 iface_desc->bInterfaceClass != USB_CLASS_CDC_DATA) 1374 iface_desc->bInterfaceClass != USB_CLASS_CDC_DATA)
1331 return -ENODEV; 1375 return -ENODEV;
1332 1376
1377 /* Store device id so we can use it during attach. */
1378 usb_set_serial_data(serial, (void *)id);
1379
1380 return 0;
1381}
1382
1383static int option_attach(struct usb_serial *serial)
1384{
1385 struct usb_interface_descriptor *iface_desc;
1386 const struct usb_device_id *id;
1387 struct usb_wwan_intf_private *data;
1388 struct option_private *priv;
1389
1333 data = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL); 1390 data = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
1334 if (!data) 1391 if (!data)
1335 return -ENOMEM; 1392 return -ENOMEM;
@@ -1340,6 +1397,10 @@ static int option_probe(struct usb_serial *serial,
1340 return -ENOMEM; 1397 return -ENOMEM;
1341 } 1398 }
1342 1399
1400 /* Retrieve device id stored at probe. */
1401 id = usb_get_serial_data(serial);
1402 iface_desc = &serial->interface->cur_altsetting->desc;
1403
1343 priv->bInterfaceNumber = iface_desc->bInterfaceNumber; 1404 priv->bInterfaceNumber = iface_desc->bInterfaceNumber;
1344 data->private = priv; 1405 data->private = priv;
1345 1406
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index 933241f03fd8..cee9a52ca891 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -137,8 +137,8 @@ static int oti6858_chars_in_buffer(struct tty_struct *tty);
137static int oti6858_tiocmget(struct tty_struct *tty); 137static int oti6858_tiocmget(struct tty_struct *tty);
138static int oti6858_tiocmset(struct tty_struct *tty, 138static int oti6858_tiocmset(struct tty_struct *tty,
139 unsigned int set, unsigned int clear); 139 unsigned int set, unsigned int clear);
140static int oti6858_startup(struct usb_serial *serial); 140static int oti6858_port_probe(struct usb_serial_port *port);
141static void oti6858_release(struct usb_serial *serial); 141static int oti6858_port_remove(struct usb_serial_port *port);
142 142
143/* device info */ 143/* device info */
144static struct usb_serial_driver oti6858_device = { 144static struct usb_serial_driver oti6858_device = {
@@ -161,8 +161,8 @@ static struct usb_serial_driver oti6858_device = {
161 .write_bulk_callback = oti6858_write_bulk_callback, 161 .write_bulk_callback = oti6858_write_bulk_callback,
162 .write_room = oti6858_write_room, 162 .write_room = oti6858_write_room,
163 .chars_in_buffer = oti6858_chars_in_buffer, 163 .chars_in_buffer = oti6858_chars_in_buffer,
164 .attach = oti6858_startup, 164 .port_probe = oti6858_port_probe,
165 .release = oti6858_release, 165 .port_remove = oti6858_port_remove,
166}; 166};
167 167
168static struct usb_serial_driver * const serial_drivers[] = { 168static struct usb_serial_driver * const serial_drivers[] = {
@@ -331,36 +331,33 @@ static void send_data(struct work_struct *work)
331 usb_serial_port_softint(port); 331 usb_serial_port_softint(port);
332} 332}
333 333
334static int oti6858_startup(struct usb_serial *serial) 334static int oti6858_port_probe(struct usb_serial_port *port)
335{ 335{
336 struct usb_serial_port *port = serial->port[0];
337 struct oti6858_private *priv; 336 struct oti6858_private *priv;
338 int i;
339
340 for (i = 0; i < serial->num_ports; ++i) {
341 priv = kzalloc(sizeof(struct oti6858_private), GFP_KERNEL);
342 if (!priv)
343 break;
344
345 spin_lock_init(&priv->lock);
346 init_waitqueue_head(&priv->intr_wait);
347/* INIT_WORK(&priv->setup_work, setup_line, serial->port[i]); */
348/* INIT_WORK(&priv->write_work, send_data, serial->port[i]); */
349 priv->port = port;
350 INIT_DELAYED_WORK(&priv->delayed_setup_work, setup_line);
351 INIT_DELAYED_WORK(&priv->delayed_write_work, send_data);
352
353 usb_set_serial_port_data(serial->port[i], priv);
354 }
355 if (i == serial->num_ports)
356 return 0;
357 337
358 for (--i; i >= 0; --i) { 338 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
359 priv = usb_get_serial_port_data(serial->port[i]); 339 if (!priv)
360 kfree(priv); 340 return -ENOMEM;
361 usb_set_serial_port_data(serial->port[i], NULL); 341
362 } 342 spin_lock_init(&priv->lock);
363 return -ENOMEM; 343 init_waitqueue_head(&priv->intr_wait);
344 priv->port = port;
345 INIT_DELAYED_WORK(&priv->delayed_setup_work, setup_line);
346 INIT_DELAYED_WORK(&priv->delayed_write_work, send_data);
347
348 usb_set_serial_port_data(port, priv);
349
350 return 0;
351}
352
353static int oti6858_port_remove(struct usb_serial_port *port)
354{
355 struct oti6858_private *priv;
356
357 priv = usb_get_serial_port_data(port);
358 kfree(priv);
359
360 return 0;
364} 361}
365 362
366static int oti6858_write(struct tty_struct *tty, struct usb_serial_port *port, 363static int oti6858_write(struct tty_struct *tty, struct usb_serial_port *port,
@@ -709,15 +706,6 @@ static int oti6858_ioctl(struct tty_struct *tty,
709 return -ENOIOCTLCMD; 706 return -ENOIOCTLCMD;
710} 707}
711 708
712
713static void oti6858_release(struct usb_serial *serial)
714{
715 int i;
716
717 for (i = 0; i < serial->num_ports; ++i)
718 kfree(usb_get_serial_port_data(serial->port[i]));
719}
720
721static void oti6858_read_int_callback(struct urb *urb) 709static void oti6858_read_int_callback(struct urb *urb)
722{ 710{
723 struct usb_serial_port *port = urb->context; 711 struct usb_serial_port *port = urb->context;
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 892ebdc7a364..600241901361 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -133,12 +133,15 @@ enum pl2303_type {
133 HX, /* HX version of the pl2303 chip */ 133 HX, /* HX version of the pl2303 chip */
134}; 134};
135 135
136struct pl2303_serial_private {
137 enum pl2303_type type;
138};
139
136struct pl2303_private { 140struct pl2303_private {
137 spinlock_t lock; 141 spinlock_t lock;
138 wait_queue_head_t delta_msr_wait; 142 wait_queue_head_t delta_msr_wait;
139 u8 line_control; 143 u8 line_control;
140 u8 line_status; 144 u8 line_status;
141 enum pl2303_type type;
142}; 145};
143 146
144static int pl2303_vendor_read(__u16 value, __u16 index, 147static int pl2303_vendor_read(__u16 value, __u16 index,
@@ -167,14 +170,19 @@ static int pl2303_vendor_write(__u16 value, __u16 index,
167 170
168static int pl2303_startup(struct usb_serial *serial) 171static int pl2303_startup(struct usb_serial *serial)
169{ 172{
170 struct pl2303_private *priv; 173 struct pl2303_serial_private *spriv;
171 enum pl2303_type type = type_0; 174 enum pl2303_type type = type_0;
172 unsigned char *buf; 175 unsigned char *buf;
173 int i; 176
177 spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
178 if (!spriv)
179 return -ENOMEM;
174 180
175 buf = kmalloc(10, GFP_KERNEL); 181 buf = kmalloc(10, GFP_KERNEL);
176 if (buf == NULL) 182 if (!buf) {
183 kfree(spriv);
177 return -ENOMEM; 184 return -ENOMEM;
185 }
178 186
179 if (serial->dev->descriptor.bDeviceClass == 0x02) 187 if (serial->dev->descriptor.bDeviceClass == 0x02)
180 type = type_0; 188 type = type_0;
@@ -186,15 +194,8 @@ static int pl2303_startup(struct usb_serial *serial)
186 type = type_1; 194 type = type_1;
187 dev_dbg(&serial->interface->dev, "device type: %d\n", type); 195 dev_dbg(&serial->interface->dev, "device type: %d\n", type);
188 196
189 for (i = 0; i < serial->num_ports; ++i) { 197 spriv->type = type;
190 priv = kzalloc(sizeof(struct pl2303_private), GFP_KERNEL); 198 usb_set_serial_data(serial, spriv);
191 if (!priv)
192 goto cleanup;
193 spin_lock_init(&priv->lock);
194 init_waitqueue_head(&priv->delta_msr_wait);
195 priv->type = type;
196 usb_set_serial_port_data(serial->port[i], priv);
197 }
198 199
199 pl2303_vendor_read(0x8484, 0, serial, buf); 200 pl2303_vendor_read(0x8484, 0, serial, buf);
200 pl2303_vendor_write(0x0404, 0, serial); 201 pl2303_vendor_write(0x0404, 0, serial);
@@ -213,15 +214,40 @@ static int pl2303_startup(struct usb_serial *serial)
213 214
214 kfree(buf); 215 kfree(buf);
215 return 0; 216 return 0;
217}
216 218
217cleanup: 219static void pl2303_release(struct usb_serial *serial)
218 kfree(buf); 220{
219 for (--i; i >= 0; --i) { 221 struct pl2303_serial_private *spriv;
220 priv = usb_get_serial_port_data(serial->port[i]); 222
221 kfree(priv); 223 spriv = usb_get_serial_data(serial);
222 usb_set_serial_port_data(serial->port[i], NULL); 224 kfree(spriv);
223 } 225}
224 return -ENOMEM; 226
227static int pl2303_port_probe(struct usb_serial_port *port)
228{
229 struct pl2303_private *priv;
230
231 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
232 if (!priv)
233 return -ENOMEM;
234
235 spin_lock_init(&priv->lock);
236 init_waitqueue_head(&priv->delta_msr_wait);
237
238 usb_set_serial_port_data(port, priv);
239
240 return 0;
241}
242
243static int pl2303_port_remove(struct usb_serial_port *port)
244{
245 struct pl2303_private *priv;
246
247 priv = usb_get_serial_port_data(port);
248 kfree(priv);
249
250 return 0;
225} 251}
226 252
227static int set_control_lines(struct usb_device *dev, u8 value) 253static int set_control_lines(struct usb_device *dev, u8 value)
@@ -240,6 +266,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
240 struct usb_serial_port *port, struct ktermios *old_termios) 266 struct usb_serial_port *port, struct ktermios *old_termios)
241{ 267{
242 struct usb_serial *serial = port->serial; 268 struct usb_serial *serial = port->serial;
269 struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
243 struct pl2303_private *priv = usb_get_serial_port_data(port); 270 struct pl2303_private *priv = usb_get_serial_port_data(port);
244 unsigned long flags; 271 unsigned long flags;
245 unsigned int cflag; 272 unsigned int cflag;
@@ -323,7 +350,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
323 } 350 }
324 if (baud > 1228800) { 351 if (baud > 1228800) {
325 /* type_0, type_1 only support up to 1228800 baud */ 352 /* type_0, type_1 only support up to 1228800 baud */
326 if (priv->type != HX) 353 if (spriv->type != HX)
327 baud = 1228800; 354 baud = 1228800;
328 else if (baud > 6000000) 355 else if (baud > 6000000)
329 baud = 6000000; 356 baud = 6000000;
@@ -426,7 +453,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
426 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]); 453 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
427 454
428 if (cflag & CRTSCTS) { 455 if (cflag & CRTSCTS) {
429 if (priv->type == HX) 456 if (spriv->type == HX)
430 pl2303_vendor_write(0x0, 0x61, serial); 457 pl2303_vendor_write(0x0, 0x61, serial);
431 else 458 else
432 pl2303_vendor_write(0x0, 0x41, serial); 459 pl2303_vendor_write(0x0, 0x41, serial);
@@ -468,10 +495,10 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
468{ 495{
469 struct ktermios tmp_termios; 496 struct ktermios tmp_termios;
470 struct usb_serial *serial = port->serial; 497 struct usb_serial *serial = port->serial;
471 struct pl2303_private *priv = usb_get_serial_port_data(port); 498 struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
472 int result; 499 int result;
473 500
474 if (priv->type != HX) { 501 if (spriv->type != HX) {
475 usb_clear_halt(serial->dev, port->write_urb->pipe); 502 usb_clear_halt(serial->dev, port->write_urb->pipe);
476 usb_clear_halt(serial->dev, port->read_urb->pipe); 503 usb_clear_halt(serial->dev, port->read_urb->pipe);
477 } else { 504 } else {
@@ -655,17 +682,6 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
655 dev_err(&port->dev, "error sending break = %d\n", result); 682 dev_err(&port->dev, "error sending break = %d\n", result);
656} 683}
657 684
658static void pl2303_release(struct usb_serial *serial)
659{
660 int i;
661 struct pl2303_private *priv;
662
663 for (i = 0; i < serial->num_ports; ++i) {
664 priv = usb_get_serial_port_data(serial->port[i]);
665 kfree(priv);
666 }
667}
668
669static void pl2303_update_line_status(struct usb_serial_port *port, 685static void pl2303_update_line_status(struct usb_serial_port *port,
670 unsigned char *data, 686 unsigned char *data,
671 unsigned int actual_length) 687 unsigned int actual_length)
@@ -827,6 +843,8 @@ static struct usb_serial_driver pl2303_device = {
827 .read_int_callback = pl2303_read_int_callback, 843 .read_int_callback = pl2303_read_int_callback,
828 .attach = pl2303_startup, 844 .attach = pl2303_startup,
829 .release = pl2303_release, 845 .release = pl2303_release,
846 .port_probe = pl2303_port_probe,
847 .port_remove = pl2303_port_remove,
830}; 848};
831 849
832static struct usb_serial_driver * const serial_drivers[] = { 850static struct usb_serial_driver * const serial_drivers[] = {
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index c3ddb65c05f2..aa148c21ea40 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -138,7 +138,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
138 138
139static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) 139static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
140{ 140{
141 struct usb_wwan_intf_private *data;
142 struct usb_host_interface *intf = serial->interface->cur_altsetting; 141 struct usb_host_interface *intf = serial->interface->cur_altsetting;
143 struct device *dev = &serial->dev->dev; 142 struct device *dev = &serial->dev->dev;
144 int retval = -ENODEV; 143 int retval = -ENODEV;
@@ -154,13 +153,6 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
154 ifnum = intf->desc.bInterfaceNumber; 153 ifnum = intf->desc.bInterfaceNumber;
155 dev_dbg(dev, "This Interface = %d\n", ifnum); 154 dev_dbg(dev, "This Interface = %d\n", ifnum);
156 155
157 data = kzalloc(sizeof(struct usb_wwan_intf_private),
158 GFP_KERNEL);
159 if (!data)
160 return -ENOMEM;
161
162 spin_lock_init(&data->susp_lock);
163
164 if (nintf == 1) { 156 if (nintf == 1) {
165 /* QDL mode */ 157 /* QDL mode */
166 /* Gobi 2000 has a single altsetting, older ones have two */ 158 /* Gobi 2000 has a single altsetting, older ones have two */
@@ -253,20 +245,28 @@ done:
253 } 245 }
254 } 246 }
255 247
256 /* Set serial->private if not returning error */
257 if (retval == 0)
258 usb_set_serial_data(serial, data);
259 else
260 kfree(data);
261
262 return retval; 248 return retval;
263} 249}
264 250
251static int qc_attach(struct usb_serial *serial)
252{
253 struct usb_wwan_intf_private *data;
254
255 data = kzalloc(sizeof(*data), GFP_KERNEL);
256 if (!data)
257 return -ENOMEM;
258
259 spin_lock_init(&data->susp_lock);
260
261 usb_set_serial_data(serial, data);
262
263 return 0;
264}
265
265static void qc_release(struct usb_serial *serial) 266static void qc_release(struct usb_serial *serial)
266{ 267{
267 struct usb_wwan_intf_private *priv = usb_get_serial_data(serial); 268 struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
268 269
269 /* Free the private data allocated in qcprobe */
270 usb_set_serial_data(serial, NULL); 270 usb_set_serial_data(serial, NULL);
271 kfree(priv); 271 kfree(priv);
272} 272}
@@ -285,8 +285,9 @@ static struct usb_serial_driver qcdevice = {
285 .write = usb_wwan_write, 285 .write = usb_wwan_write,
286 .write_room = usb_wwan_write_room, 286 .write_room = usb_wwan_write_room,
287 .chars_in_buffer = usb_wwan_chars_in_buffer, 287 .chars_in_buffer = usb_wwan_chars_in_buffer,
288 .attach = usb_wwan_startup, 288 .attach = qc_attach,
289 .release = qc_release, 289 .release = qc_release,
290 .port_probe = usb_wwan_port_probe,
290 .port_remove = usb_wwan_port_remove, 291 .port_remove = usb_wwan_port_remove,
291#ifdef CONFIG_PM 292#ifdef CONFIG_PM
292 .suspend = usb_wwan_suspend, 293 .suspend = usb_wwan_suspend,
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 2cdfdcc90b37..ffcfc962ab10 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -143,12 +143,12 @@ static void qt2_read_bulk_callback(struct urb *urb);
143 143
144static void qt2_release(struct usb_serial *serial) 144static void qt2_release(struct usb_serial *serial)
145{ 145{
146 int i; 146 struct qt2_serial_private *serial_priv;
147 147
148 kfree(usb_get_serial_data(serial)); 148 serial_priv = usb_get_serial_data(serial);
149 149
150 for (i = 0; i < serial->num_ports; i++) 150 usb_free_urb(serial_priv->read_urb);
151 kfree(usb_get_serial_port_data(serial->port[i])); 151 kfree(serial_priv);
152} 152}
153 153
154static inline int calc_baud_divisor(int baudrate) 154static inline int calc_baud_divisor(int baudrate)
@@ -423,11 +423,16 @@ static void qt2_close(struct usb_serial_port *port)
423 port_priv->is_open = false; 423 port_priv->is_open = false;
424 424
425 spin_lock_irqsave(&port_priv->urb_lock, flags); 425 spin_lock_irqsave(&port_priv->urb_lock, flags);
426 if (port_priv->write_urb->status == -EINPROGRESS) 426 usb_kill_urb(port_priv->write_urb);
427 usb_kill_urb(port_priv->write_urb);
428 port_priv->urb_in_use = false; 427 port_priv->urb_in_use = false;
429 spin_unlock_irqrestore(&port_priv->urb_lock, flags); 428 spin_unlock_irqrestore(&port_priv->urb_lock, flags);
430 429
430 mutex_lock(&port->serial->disc_mutex);
431 if (port->serial->disconnected) {
432 mutex_unlock(&port->serial->disc_mutex);
433 return;
434 }
435
431 /* flush the port transmit buffer */ 436 /* flush the port transmit buffer */
432 i = usb_control_msg(serial->dev, 437 i = usb_control_msg(serial->dev,
433 usb_rcvctrlpipe(serial->dev, 0), 438 usb_rcvctrlpipe(serial->dev, 0),
@@ -459,26 +464,14 @@ static void qt2_close(struct usb_serial_port *port)
459 dev_err(&port->dev, "%s - close port failed %i\n", 464 dev_err(&port->dev, "%s - close port failed %i\n",
460 __func__, i); 465 __func__, i);
461 466
467 mutex_unlock(&port->serial->disc_mutex);
462} 468}
463 469
464static void qt2_disconnect(struct usb_serial *serial) 470static void qt2_disconnect(struct usb_serial *serial)
465{ 471{
466 struct qt2_serial_private *serial_priv = usb_get_serial_data(serial); 472 struct qt2_serial_private *serial_priv = usb_get_serial_data(serial);
467 struct qt2_port_private *port_priv;
468 int i;
469
470 if (serial_priv->read_urb->status == -EINPROGRESS)
471 usb_kill_urb(serial_priv->read_urb);
472
473 usb_free_urb(serial_priv->read_urb);
474 473
475 for (i = 0; i < serial->num_ports; i++) { 474 usb_kill_urb(serial_priv->read_urb);
476 port_priv = usb_get_serial_port_data(serial->port[i]);
477
478 if (port_priv->write_urb->status == -EINPROGRESS)
479 usb_kill_urb(port_priv->write_urb);
480 usb_free_urb(port_priv->write_urb);
481 }
482} 475}
483 476
484static int get_serial_info(struct usb_serial_port *port, 477static int get_serial_info(struct usb_serial_port *port,
@@ -773,11 +766,9 @@ static void qt2_read_bulk_callback(struct urb *urb)
773 766
774static int qt2_setup_urbs(struct usb_serial *serial) 767static int qt2_setup_urbs(struct usb_serial *serial)
775{ 768{
776 struct usb_serial_port *port;
777 struct usb_serial_port *port0; 769 struct usb_serial_port *port0;
778 struct qt2_serial_private *serial_priv; 770 struct qt2_serial_private *serial_priv;
779 struct qt2_port_private *port_priv; 771 int status;
780 int pcount, status;
781 772
782 port0 = serial->port[0]; 773 port0 = serial->port[0];
783 774
@@ -795,46 +786,21 @@ static int qt2_setup_urbs(struct usb_serial *serial)
795 sizeof(serial_priv->read_buffer), 786 sizeof(serial_priv->read_buffer),
796 qt2_read_bulk_callback, serial); 787 qt2_read_bulk_callback, serial);
797 788
798 /* setup write_urb for each port */
799 for (pcount = 0; pcount < serial->num_ports; pcount++) {
800
801 port = serial->port[pcount];
802 port_priv = usb_get_serial_port_data(port);
803
804 port_priv->write_urb = usb_alloc_urb(0, GFP_KERNEL);
805 if (!port_priv->write_urb) {
806 dev_err(&serial->dev->dev,
807 "failed to alloc write_urb for port %i\n",
808 pcount);
809 return -ENOMEM;
810 }
811
812 usb_fill_bulk_urb(port_priv->write_urb,
813 serial->dev,
814 usb_sndbulkpipe(serial->dev,
815 port0->
816 bulk_out_endpointAddress),
817 port_priv->write_buffer,
818 sizeof(port_priv->write_buffer),
819 qt2_write_bulk_callback, port);
820 }
821
822 status = usb_submit_urb(serial_priv->read_urb, GFP_KERNEL); 789 status = usb_submit_urb(serial_priv->read_urb, GFP_KERNEL);
823 if (status != 0) { 790 if (status != 0) {
824 dev_err(&serial->dev->dev, 791 dev_err(&serial->dev->dev,
825 "%s - submit read urb failed %i\n", __func__, status); 792 "%s - submit read urb failed %i\n", __func__, status);
793 usb_free_urb(serial_priv->read_urb);
826 return status; 794 return status;
827 } 795 }
828 796
829 return 0; 797 return 0;
830
831} 798}
832 799
833static int qt2_attach(struct usb_serial *serial) 800static int qt2_attach(struct usb_serial *serial)
834{ 801{
835 struct qt2_serial_private *serial_priv; 802 struct qt2_serial_private *serial_priv;
836 struct qt2_port_private *port_priv; 803 int status;
837 int status, pcount;
838 804
839 /* power on unit */ 805 /* power on unit */
840 status = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 806 status = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
@@ -854,26 +820,6 @@ static int qt2_attach(struct usb_serial *serial)
854 820
855 usb_set_serial_data(serial, serial_priv); 821 usb_set_serial_data(serial, serial_priv);
856 822
857 for (pcount = 0; pcount < serial->num_ports; pcount++) {
858 port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL);
859 if (!port_priv) {
860 dev_err(&serial->dev->dev,
861 "%s- kmalloc(%Zd) failed.\n", __func__,
862 sizeof(*port_priv));
863 pcount--;
864 status = -ENOMEM;
865 goto attach_failed;
866 }
867
868 spin_lock_init(&port_priv->lock);
869 spin_lock_init(&port_priv->urb_lock);
870 init_waitqueue_head(&port_priv->delta_msr_wait);
871
872 port_priv->port = serial->port[pcount];
873
874 usb_set_serial_port_data(serial->port[pcount], port_priv);
875 }
876
877 status = qt2_setup_urbs(serial); 823 status = qt2_setup_urbs(serial);
878 if (status != 0) 824 if (status != 0)
879 goto attach_failed; 825 goto attach_failed;
@@ -881,14 +827,53 @@ static int qt2_attach(struct usb_serial *serial)
881 return 0; 827 return 0;
882 828
883attach_failed: 829attach_failed:
884 for (/* empty */; pcount >= 0; pcount--) {
885 port_priv = usb_get_serial_port_data(serial->port[pcount]);
886 kfree(port_priv);
887 }
888 kfree(serial_priv); 830 kfree(serial_priv);
889 return status; 831 return status;
890} 832}
891 833
834static int qt2_port_probe(struct usb_serial_port *port)
835{
836 struct usb_serial *serial = port->serial;
837 struct qt2_port_private *port_priv;
838 u8 bEndpointAddress;
839
840 port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL);
841 if (!port_priv)
842 return -ENOMEM;
843
844 spin_lock_init(&port_priv->lock);
845 spin_lock_init(&port_priv->urb_lock);
846 init_waitqueue_head(&port_priv->delta_msr_wait);
847 port_priv->port = port;
848
849 port_priv->write_urb = usb_alloc_urb(0, GFP_KERNEL);
850 if (!port_priv->write_urb) {
851 kfree(port_priv);
852 return -ENOMEM;
853 }
854 bEndpointAddress = serial->port[0]->bulk_out_endpointAddress;
855 usb_fill_bulk_urb(port_priv->write_urb, serial->dev,
856 usb_sndbulkpipe(serial->dev, bEndpointAddress),
857 port_priv->write_buffer,
858 sizeof(port_priv->write_buffer),
859 qt2_write_bulk_callback, port);
860
861 usb_set_serial_port_data(port, port_priv);
862
863 return 0;
864}
865
866static int qt2_port_remove(struct usb_serial_port *port)
867{
868 struct qt2_port_private *port_priv;
869
870 port_priv = usb_get_serial_port_data(port);
871 usb_free_urb(port_priv->write_urb);
872 kfree(port_priv);
873
874 return 0;
875}
876
892static int qt2_tiocmget(struct tty_struct *tty) 877static int qt2_tiocmget(struct tty_struct *tty)
893{ 878{
894 struct usb_serial_port *port = tty->driver_data; 879 struct usb_serial_port *port = tty->driver_data;
@@ -1127,6 +1112,8 @@ static struct usb_serial_driver qt2_device = {
1127 .attach = qt2_attach, 1112 .attach = qt2_attach,
1128 .release = qt2_release, 1113 .release = qt2_release,
1129 .disconnect = qt2_disconnect, 1114 .disconnect = qt2_disconnect,
1115 .port_probe = qt2_port_probe,
1116 .port_remove = qt2_port_remove,
1130 .dtr_rts = qt2_dtr_rts, 1117 .dtr_rts = qt2_dtr_rts,
1131 .break_ctl = qt2_break_ctl, 1118 .break_ctl = qt2_break_ctl,
1132 .tiocmget = qt2_tiocmget, 1119 .tiocmget = qt2_tiocmget,
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 01d882cf3775..270860f6bb2a 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -161,7 +161,6 @@ static int sierra_probe(struct usb_serial *serial,
161{ 161{
162 int result = 0; 162 int result = 0;
163 struct usb_device *udev; 163 struct usb_device *udev;
164 struct sierra_intf_private *data;
165 u8 ifnum; 164 u8 ifnum;
166 165
167 udev = serial->dev; 166 udev = serial->dev;
@@ -188,11 +187,6 @@ static int sierra_probe(struct usb_serial *serial,
188 return -ENODEV; 187 return -ENODEV;
189 } 188 }
190 189
191 data = serial->private = kzalloc(sizeof(struct sierra_intf_private), GFP_KERNEL);
192 if (!data)
193 return -ENOMEM;
194 spin_lock_init(&data->susp_lock);
195
196 return result; 190 return result;
197} 191}
198 192
@@ -884,11 +878,15 @@ static void sierra_dtr_rts(struct usb_serial_port *port, int on)
884 878
885static int sierra_startup(struct usb_serial *serial) 879static int sierra_startup(struct usb_serial *serial)
886{ 880{
887 struct usb_serial_port *port; 881 struct sierra_intf_private *intfdata;
888 struct sierra_port_private *portdata; 882
889 struct sierra_iface_info *himemoryp = NULL; 883 intfdata = kzalloc(sizeof(*intfdata), GFP_KERNEL);
890 int i; 884 if (!intfdata)
891 u8 ifnum; 885 return -ENOMEM;
886
887 spin_lock_init(&intfdata->susp_lock);
888
889 usb_set_serial_data(serial, intfdata);
892 890
893 /* Set Device mode to D0 */ 891 /* Set Device mode to D0 */
894 sierra_set_power_state(serial->dev, 0x0000); 892 sierra_set_power_state(serial->dev, 0x0000);
@@ -897,68 +895,71 @@ static int sierra_startup(struct usb_serial *serial)
897 if (nmea) 895 if (nmea)
898 sierra_vsc_set_nmea(serial->dev, 1); 896 sierra_vsc_set_nmea(serial->dev, 1);
899 897
900 /* Now setup per port private data */
901 for (i = 0; i < serial->num_ports; i++) {
902 port = serial->port[i];
903 portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
904 if (!portdata) {
905 dev_dbg(&port->dev, "%s: kmalloc for "
906 "sierra_port_private (%d) failed!\n",
907 __func__, i);
908 return -ENOMEM;
909 }
910 spin_lock_init(&portdata->lock);
911 init_usb_anchor(&portdata->active);
912 init_usb_anchor(&portdata->delayed);
913 ifnum = i;
914 /* Assume low memory requirements */
915 portdata->num_out_urbs = N_OUT_URB;
916 portdata->num_in_urbs = N_IN_URB;
917
918 /* Determine actual memory requirements */
919 if (serial->num_ports == 1) {
920 /* Get interface number for composite device */
921 ifnum = sierra_calc_interface(serial);
922 himemoryp =
923 (struct sierra_iface_info *)&typeB_interface_list;
924 if (is_himemory(ifnum, himemoryp)) {
925 portdata->num_out_urbs = N_OUT_URB_HM;
926 portdata->num_in_urbs = N_IN_URB_HM;
927 }
928 }
929 else {
930 himemoryp =
931 (struct sierra_iface_info *)&typeA_interface_list;
932 if (is_himemory(i, himemoryp)) {
933 portdata->num_out_urbs = N_OUT_URB_HM;
934 portdata->num_in_urbs = N_IN_URB_HM;
935 }
936 }
937 dev_dbg(&serial->dev->dev,
938 "Memory usage (urbs) interface #%d, in=%d, out=%d\n",
939 ifnum,portdata->num_in_urbs, portdata->num_out_urbs );
940 /* Set the port private data pointer */
941 usb_set_serial_port_data(port, portdata);
942 }
943
944 return 0; 898 return 0;
945} 899}
946 900
947static void sierra_release(struct usb_serial *serial) 901static void sierra_release(struct usb_serial *serial)
948{ 902{
949 int i; 903 struct sierra_intf_private *intfdata;
950 struct usb_serial_port *port; 904
905 intfdata = usb_get_serial_data(serial);
906 kfree(intfdata);
907}
908
909static int sierra_port_probe(struct usb_serial_port *port)
910{
911 struct usb_serial *serial = port->serial;
951 struct sierra_port_private *portdata; 912 struct sierra_port_private *portdata;
913 const struct sierra_iface_info *himemoryp;
914 u8 ifnum;
952 915
953 for (i = 0; i < serial->num_ports; ++i) { 916 portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
954 port = serial->port[i]; 917 if (!portdata)
955 if (!port) 918 return -ENOMEM;
956 continue; 919
957 portdata = usb_get_serial_port_data(port); 920 spin_lock_init(&portdata->lock);
958 if (!portdata) 921 init_usb_anchor(&portdata->active);
959 continue; 922 init_usb_anchor(&portdata->delayed);
960 kfree(portdata); 923
924 /* Assume low memory requirements */
925 portdata->num_out_urbs = N_OUT_URB;
926 portdata->num_in_urbs = N_IN_URB;
927
928 /* Determine actual memory requirements */
929 if (serial->num_ports == 1) {
930 /* Get interface number for composite device */
931 ifnum = sierra_calc_interface(serial);
932 himemoryp = &typeB_interface_list;
933 } else {
934 /* This is really the usb-serial port number of the interface
935 * rather than the interface number.
936 */
937 ifnum = port->number - serial->minor;
938 himemoryp = &typeA_interface_list;
961 } 939 }
940
941 if (is_himemory(ifnum, himemoryp)) {
942 portdata->num_out_urbs = N_OUT_URB_HM;
943 portdata->num_in_urbs = N_IN_URB_HM;
944 }
945
946 dev_dbg(&port->dev,
947 "Memory usage (urbs) interface #%d, in=%d, out=%d\n",
948 ifnum, portdata->num_in_urbs, portdata->num_out_urbs);
949
950 usb_set_serial_port_data(port, portdata);
951
952 return 0;
953}
954
955static int sierra_port_remove(struct usb_serial_port *port)
956{
957 struct sierra_port_private *portdata;
958
959 portdata = usb_get_serial_port_data(port);
960 kfree(portdata);
961
962 return 0;
962} 963}
963 964
964#ifdef CONFIG_PM 965#ifdef CONFIG_PM
@@ -1062,6 +1063,8 @@ static struct usb_serial_driver sierra_device = {
1062 .tiocmset = sierra_tiocmset, 1063 .tiocmset = sierra_tiocmset,
1063 .attach = sierra_startup, 1064 .attach = sierra_startup,
1064 .release = sierra_release, 1065 .release = sierra_release,
1066 .port_probe = sierra_port_probe,
1067 .port_remove = sierra_port_remove,
1065 .suspend = sierra_suspend, 1068 .suspend = sierra_suspend,
1066 .resume = sierra_resume, 1069 .resume = sierra_resume,
1067 .read_int_callback = sierra_instat_callback, 1070 .read_int_callback = sierra_instat_callback,
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 9716efe92955..769c137f8975 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -157,13 +157,10 @@ struct spcp8x5_private {
157 u8 line_status; 157 u8 line_status;
158}; 158};
159 159
160/* desc : when device plug in,this function would be called. 160static int spcp8x5_port_probe(struct usb_serial_port *port)
161 * thanks to usb_serial subsystem,then do almost every things for us. And what
162 * we should do just alloc the buffer */
163static int spcp8x5_startup(struct usb_serial *serial)
164{ 161{
162 struct usb_serial *serial = port->serial;
165 struct spcp8x5_private *priv; 163 struct spcp8x5_private *priv;
166 int i;
167 enum spcp8x5_type type = SPCP825_007_TYPE; 164 enum spcp8x5_type type = SPCP825_007_TYPE;
168 u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); 165 u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
169 166
@@ -180,34 +177,27 @@ static int spcp8x5_startup(struct usb_serial *serial)
180 type = SPCP825_PHILIP_TYPE; 177 type = SPCP825_PHILIP_TYPE;
181 dev_dbg(&serial->dev->dev, "device type = %d\n", (int)type); 178 dev_dbg(&serial->dev->dev, "device type = %d\n", (int)type);
182 179
183 for (i = 0; i < serial->num_ports; ++i) { 180 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
184 priv = kzalloc(sizeof(struct spcp8x5_private), GFP_KERNEL); 181 if (!priv)
185 if (!priv) 182 return -ENOMEM;
186 goto cleanup;
187 183
188 spin_lock_init(&priv->lock); 184 spin_lock_init(&priv->lock);
189 init_waitqueue_head(&priv->delta_msr_wait); 185 init_waitqueue_head(&priv->delta_msr_wait);
190 priv->type = type; 186 priv->type = type;
191 usb_set_serial_port_data(serial->port[i] , priv); 187
192 } 188 usb_set_serial_port_data(port , priv);
193 189
194 return 0; 190 return 0;
195cleanup:
196 for (--i; i >= 0; --i) {
197 priv = usb_get_serial_port_data(serial->port[i]);
198 kfree(priv);
199 usb_set_serial_port_data(serial->port[i] , NULL);
200 }
201 return -ENOMEM;
202} 191}
203 192
204/* call when the device plug out. free all the memory alloced by probe */ 193static int spcp8x5_port_remove(struct usb_serial_port *port)
205static void spcp8x5_release(struct usb_serial *serial)
206{ 194{
207 int i; 195 struct spcp8x5_private *priv;
208 196
209 for (i = 0; i < serial->num_ports; i++) 197 priv = usb_get_serial_port_data(port);
210 kfree(usb_get_serial_port_data(serial->port[i])); 198 kfree(priv);
199
200 return 0;
211} 201}
212 202
213/* set the modem control line of the device. 203/* set the modem control line of the device.
@@ -649,8 +639,8 @@ static struct usb_serial_driver spcp8x5_device = {
649 .ioctl = spcp8x5_ioctl, 639 .ioctl = spcp8x5_ioctl,
650 .tiocmget = spcp8x5_tiocmget, 640 .tiocmget = spcp8x5_tiocmget,
651 .tiocmset = spcp8x5_tiocmset, 641 .tiocmset = spcp8x5_tiocmset,
652 .attach = spcp8x5_startup, 642 .port_probe = spcp8x5_port_probe,
653 .release = spcp8x5_release, 643 .port_remove = spcp8x5_port_remove,
654 .process_read_urb = spcp8x5_process_read_urb, 644 .process_read_urb = spcp8x5_process_read_urb,
655}; 645};
656 646
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 015810b3785b..868d1e6852e2 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -67,13 +67,6 @@ struct ssu100_port_private {
67 struct async_icount icount; 67 struct async_icount icount;
68}; 68};
69 69
70static void ssu100_release(struct usb_serial *serial)
71{
72 struct ssu100_port_private *priv = usb_get_serial_port_data(*serial->port);
73
74 kfree(priv);
75}
76
77static inline int ssu100_control_msg(struct usb_device *dev, 70static inline int ssu100_control_msg(struct usb_device *dev,
78 u8 request, u16 data, u16 index) 71 u8 request, u16 data, u16 index)
79{ 72{
@@ -442,21 +435,33 @@ static int ssu100_ioctl(struct tty_struct *tty,
442 435
443static int ssu100_attach(struct usb_serial *serial) 436static int ssu100_attach(struct usb_serial *serial)
444{ 437{
438 return ssu100_initdevice(serial->dev);
439}
440
441static int ssu100_port_probe(struct usb_serial_port *port)
442{
445 struct ssu100_port_private *priv; 443 struct ssu100_port_private *priv;
446 struct usb_serial_port *port = *serial->port;
447 444
448 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 445 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
449 if (!priv) { 446 if (!priv)
450 dev_err(&port->dev, "%s- kmalloc(%Zd) failed.\n", __func__,
451 sizeof(*priv));
452 return -ENOMEM; 447 return -ENOMEM;
453 }
454 448
455 spin_lock_init(&priv->status_lock); 449 spin_lock_init(&priv->status_lock);
456 init_waitqueue_head(&priv->delta_msr_wait); 450 init_waitqueue_head(&priv->delta_msr_wait);
451
457 usb_set_serial_port_data(port, priv); 452 usb_set_serial_port_data(port, priv);
458 453
459 return ssu100_initdevice(serial->dev); 454 return 0;
455}
456
457static int ssu100_port_remove(struct usb_serial_port *port)
458{
459 struct ssu100_port_private *priv;
460
461 priv = usb_get_serial_port_data(port);
462 kfree(priv);
463
464 return 0;
460} 465}
461 466
462static int ssu100_tiocmget(struct tty_struct *tty) 467static int ssu100_tiocmget(struct tty_struct *tty)
@@ -647,7 +652,8 @@ static struct usb_serial_driver ssu100_device = {
647 .open = ssu100_open, 652 .open = ssu100_open,
648 .close = ssu100_close, 653 .close = ssu100_close,
649 .attach = ssu100_attach, 654 .attach = ssu100_attach,
650 .release = ssu100_release, 655 .port_probe = ssu100_port_probe,
656 .port_remove = ssu100_port_remove,
651 .dtr_rts = ssu100_dtr_rts, 657 .dtr_rts = ssu100_dtr_rts,
652 .process_read_urb = ssu100_process_read_urb, 658 .process_read_urb = ssu100_process_read_urb,
653 .tiocmget = ssu100_tiocmget, 659 .tiocmget = ssu100_tiocmget,
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 6f49392cda5b..f2530d2ef3c4 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -97,6 +97,8 @@ struct ti_device {
97 97
98static int ti_startup(struct usb_serial *serial); 98static int ti_startup(struct usb_serial *serial);
99static void ti_release(struct usb_serial *serial); 99static void ti_release(struct usb_serial *serial);
100static int ti_port_probe(struct usb_serial_port *port);
101static int ti_port_remove(struct usb_serial_port *port);
100static int ti_open(struct tty_struct *tty, struct usb_serial_port *port); 102static int ti_open(struct tty_struct *tty, struct usb_serial_port *port);
101static void ti_close(struct usb_serial_port *port); 103static void ti_close(struct usb_serial_port *port);
102static int ti_write(struct tty_struct *tty, struct usb_serial_port *port, 104static int ti_write(struct tty_struct *tty, struct usb_serial_port *port,
@@ -221,6 +223,8 @@ static struct usb_serial_driver ti_1port_device = {
221 .num_ports = 1, 223 .num_ports = 1,
222 .attach = ti_startup, 224 .attach = ti_startup,
223 .release = ti_release, 225 .release = ti_release,
226 .port_probe = ti_port_probe,
227 .port_remove = ti_port_remove,
224 .open = ti_open, 228 .open = ti_open,
225 .close = ti_close, 229 .close = ti_close,
226 .write = ti_write, 230 .write = ti_write,
@@ -249,6 +253,8 @@ static struct usb_serial_driver ti_2port_device = {
249 .num_ports = 2, 253 .num_ports = 2,
250 .attach = ti_startup, 254 .attach = ti_startup,
251 .release = ti_release, 255 .release = ti_release,
256 .port_probe = ti_port_probe,
257 .port_remove = ti_port_remove,
252 .open = ti_open, 258 .open = ti_open,
253 .close = ti_close, 259 .close = ti_close,
254 .write = ti_write, 260 .write = ti_write,
@@ -347,11 +353,8 @@ module_exit(ti_exit);
347static int ti_startup(struct usb_serial *serial) 353static int ti_startup(struct usb_serial *serial)
348{ 354{
349 struct ti_device *tdev; 355 struct ti_device *tdev;
350 struct ti_port *tport;
351 struct usb_device *dev = serial->dev; 356 struct usb_device *dev = serial->dev;
352 int status; 357 int status;
353 int i;
354
355 358
356 dev_dbg(&dev->dev, 359 dev_dbg(&dev->dev,
357 "%s - product 0x%4X, num configurations %d, configuration value %d", 360 "%s - product 0x%4X, num configurations %d, configuration value %d",
@@ -399,42 +402,8 @@ static int ti_startup(struct usb_serial *serial)
399 goto free_tdev; 402 goto free_tdev;
400 } 403 }
401 404
402 /* set up port structures */
403 for (i = 0; i < serial->num_ports; ++i) {
404 tport = kzalloc(sizeof(struct ti_port), GFP_KERNEL);
405 if (tport == NULL) {
406 dev_err(&dev->dev, "%s - out of memory\n", __func__);
407 status = -ENOMEM;
408 goto free_tports;
409 }
410 spin_lock_init(&tport->tp_lock);
411 tport->tp_uart_base_addr = (i == 0 ?
412 TI_UART1_BASE_ADDR : TI_UART2_BASE_ADDR);
413 tport->tp_closing_wait = closing_wait;
414 init_waitqueue_head(&tport->tp_msr_wait);
415 init_waitqueue_head(&tport->tp_write_wait);
416 if (kfifo_alloc(&tport->write_fifo, TI_WRITE_BUF_SIZE,
417 GFP_KERNEL)) {
418 dev_err(&dev->dev, "%s - out of memory\n", __func__);
419 kfree(tport);
420 status = -ENOMEM;
421 goto free_tports;
422 }
423 tport->tp_port = serial->port[i];
424 tport->tp_tdev = tdev;
425 usb_set_serial_port_data(serial->port[i], tport);
426 tport->tp_uart_mode = 0; /* default is RS232 */
427 }
428
429 return 0; 405 return 0;
430 406
431free_tports:
432 for (--i; i >= 0; --i) {
433 tport = usb_get_serial_port_data(serial->port[i]);
434 kfifo_free(&tport->write_fifo);
435 kfree(tport);
436 usb_set_serial_port_data(serial->port[i], NULL);
437 }
438free_tdev: 407free_tdev:
439 kfree(tdev); 408 kfree(tdev);
440 usb_set_serial_data(serial, NULL); 409 usb_set_serial_data(serial, NULL);
@@ -444,21 +413,50 @@ free_tdev:
444 413
445static void ti_release(struct usb_serial *serial) 414static void ti_release(struct usb_serial *serial)
446{ 415{
447 int i;
448 struct ti_device *tdev = usb_get_serial_data(serial); 416 struct ti_device *tdev = usb_get_serial_data(serial);
417
418 kfree(tdev);
419}
420
421static int ti_port_probe(struct usb_serial_port *port)
422{
449 struct ti_port *tport; 423 struct ti_port *tport;
450 424
451 for (i = 0; i < serial->num_ports; ++i) { 425 tport = kzalloc(sizeof(*tport), GFP_KERNEL);
452 tport = usb_get_serial_port_data(serial->port[i]); 426 if (!tport)
453 if (tport) { 427 return -ENOMEM;
454 kfifo_free(&tport->write_fifo); 428
455 kfree(tport); 429 spin_lock_init(&tport->tp_lock);
456 } 430 if (port == port->serial->port[0])
431 tport->tp_uart_base_addr = TI_UART1_BASE_ADDR;
432 else
433 tport->tp_uart_base_addr = TI_UART2_BASE_ADDR;
434 tport->tp_closing_wait = closing_wait;
435 init_waitqueue_head(&tport->tp_msr_wait);
436 init_waitqueue_head(&tport->tp_write_wait);
437 if (kfifo_alloc(&tport->write_fifo, TI_WRITE_BUF_SIZE, GFP_KERNEL)) {
438 kfree(tport);
439 return -ENOMEM;
457 } 440 }
441 tport->tp_port = port;
442 tport->tp_tdev = usb_get_serial_data(port->serial);
443 tport->tp_uart_mode = 0; /* default is RS232 */
458 444
459 kfree(tdev); 445 usb_set_serial_port_data(port, tport);
446
447 return 0;
460} 448}
461 449
450static int ti_port_remove(struct usb_serial_port *port)
451{
452 struct ti_port *tport;
453
454 tport = usb_get_serial_port_data(port);
455 kfifo_free(&tport->write_fifo);
456 kfree(tport);
457
458 return 0;
459}
462 460
463static int ti_open(struct tty_struct *tty, struct usb_serial_port *port) 461static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
464{ 462{
diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h
index 1f034d2397c6..684739b8efd0 100644
--- a/drivers/usb/serial/usb-wwan.h
+++ b/drivers/usb/serial/usb-wwan.h
@@ -8,7 +8,7 @@
8extern void usb_wwan_dtr_rts(struct usb_serial_port *port, int on); 8extern void usb_wwan_dtr_rts(struct usb_serial_port *port, int on);
9extern int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port); 9extern int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port);
10extern void usb_wwan_close(struct usb_serial_port *port); 10extern void usb_wwan_close(struct usb_serial_port *port);
11extern int usb_wwan_startup(struct usb_serial *serial); 11extern int usb_wwan_port_probe(struct usb_serial_port *port);
12extern int usb_wwan_port_remove(struct usb_serial_port *port); 12extern int usb_wwan_port_remove(struct usb_serial_port *port);
13extern int usb_wwan_write_room(struct tty_struct *tty); 13extern int usb_wwan_write_room(struct tty_struct *tty);
14extern void usb_wwan_set_termios(struct tty_struct *tty, 14extern void usb_wwan_set_termios(struct tty_struct *tty,
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index e42aa398ed37..61a73ad1a187 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -447,10 +447,12 @@ void usb_wwan_close(struct usb_serial_port *port)
447EXPORT_SYMBOL(usb_wwan_close); 447EXPORT_SYMBOL(usb_wwan_close);
448 448
449/* Helper functions used by usb_wwan_setup_urbs */ 449/* Helper functions used by usb_wwan_setup_urbs */
450static struct urb *usb_wwan_setup_urb(struct usb_serial *serial, int endpoint, 450static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port,
451 int endpoint,
451 int dir, void *ctx, char *buf, int len, 452 int dir, void *ctx, char *buf, int len,
452 void (*callback) (struct urb *)) 453 void (*callback) (struct urb *))
453{ 454{
455 struct usb_serial *serial = port->serial;
454 struct urb *urb; 456 struct urb *urb;
455 457
456 if (endpoint == -1) 458 if (endpoint == -1)
@@ -472,101 +474,75 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial *serial, int endpoint,
472 return urb; 474 return urb;
473} 475}
474 476
475/* Setup urbs */ 477int usb_wwan_port_probe(struct usb_serial_port *port)
476static void usb_wwan_setup_urbs(struct usb_serial *serial)
477{ 478{
478 int i, j;
479 struct usb_serial_port *port;
480 struct usb_wwan_port_private *portdata; 479 struct usb_wwan_port_private *portdata;
480 struct urb *urb;
481 u8 *buffer;
482 int err;
483 int i;
481 484
482 for (i = 0; i < serial->num_ports; i++) { 485 portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
483 port = serial->port[i]; 486 if (!portdata)
484 portdata = usb_get_serial_port_data(port); 487 return -ENOMEM;
485 488
486 /* Do indat endpoints first */ 489 init_usb_anchor(&portdata->delayed);
487 for (j = 0; j < N_IN_URB; ++j) {
488 portdata->in_urbs[j] = usb_wwan_setup_urb(serial,
489 port->
490 bulk_in_endpointAddress,
491 USB_DIR_IN,
492 port,
493 portdata->
494 in_buffer[j],
495 IN_BUFLEN,
496 usb_wwan_indat_callback);
497 }
498 490
499 /* outdat endpoints */ 491 for (i = 0; i < N_IN_URB; i++) {
500 for (j = 0; j < N_OUT_URB; ++j) { 492 buffer = (u8 *)__get_free_page(GFP_KERNEL);
501 portdata->out_urbs[j] = usb_wwan_setup_urb(serial, 493 if (!buffer)
502 port-> 494 goto bail_out_error;
503 bulk_out_endpointAddress, 495 portdata->in_buffer[i] = buffer;
504 USB_DIR_OUT, 496
505 port, 497 urb = usb_wwan_setup_urb(port, port->bulk_in_endpointAddress,
506 portdata-> 498 USB_DIR_IN, port,
507 out_buffer 499 buffer, IN_BUFLEN,
508 [j], 500 usb_wwan_indat_callback);
509 OUT_BUFLEN, 501 portdata->in_urbs[i] = urb;
510 usb_wwan_outdat_callback);
511 }
512 } 502 }
513}
514
515int usb_wwan_startup(struct usb_serial *serial)
516{
517 int i, j, err;
518 struct usb_serial_port *port;
519 struct usb_wwan_port_private *portdata;
520 u8 *buffer;
521 503
522 /* Now setup per port private data */ 504 for (i = 0; i < N_OUT_URB; i++) {
523 for (i = 0; i < serial->num_ports; i++) { 505 if (port->bulk_out_endpointAddress == -1)
524 port = serial->port[i]; 506 continue;
525 portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
526 if (!portdata) {
527 dev_dbg(&port->dev, "%s: kmalloc for usb_wwan_port_private (%d) failed!.\n",
528 __func__, i);
529 return 1;
530 }
531 init_usb_anchor(&portdata->delayed);
532 507
533 for (j = 0; j < N_IN_URB; j++) { 508 buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL);
534 buffer = (u8 *) __get_free_page(GFP_KERNEL); 509 if (!buffer)
535 if (!buffer) 510 goto bail_out_error2;
536 goto bail_out_error; 511 portdata->out_buffer[i] = buffer;
537 portdata->in_buffer[j] = buffer;
538 }
539 512
540 for (j = 0; j < N_OUT_URB; j++) { 513 urb = usb_wwan_setup_urb(port, port->bulk_out_endpointAddress,
541 buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL); 514 USB_DIR_OUT, port,
542 if (!buffer) 515 buffer, OUT_BUFLEN,
543 goto bail_out_error2; 516 usb_wwan_outdat_callback);
544 portdata->out_buffer[j] = buffer; 517 portdata->out_urbs[i] = urb;
545 } 518 }
546 519
547 usb_set_serial_port_data(port, portdata); 520 usb_set_serial_port_data(port, portdata);
548 521
549 if (!port->interrupt_in_urb) 522 if (port->interrupt_in_urb) {
550 continue;
551 err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); 523 err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
552 if (err) 524 if (err)
553 dev_dbg(&port->dev, "%s: submit irq_in urb failed %d\n", 525 dev_dbg(&port->dev, "%s: submit irq_in urb failed %d\n",
554 __func__, err); 526 __func__, err);
555 } 527 }
556 usb_wwan_setup_urbs(serial); 528
557 return 0; 529 return 0;
558 530
559bail_out_error2: 531bail_out_error2:
560 for (j = 0; j < N_OUT_URB; j++) 532 for (i = 0; i < N_OUT_URB; i++) {
561 kfree(portdata->out_buffer[j]); 533 usb_free_urb(portdata->out_urbs[i]);
534 kfree(portdata->out_buffer[i]);
535 }
562bail_out_error: 536bail_out_error:
563 for (j = 0; j < N_IN_URB; j++) 537 for (i = 0; i < N_IN_URB; i++) {
564 if (portdata->in_buffer[j]) 538 usb_free_urb(portdata->in_urbs[i]);
565 free_page((unsigned long)portdata->in_buffer[j]); 539 free_page((unsigned long)portdata->in_buffer[i]);
540 }
566 kfree(portdata); 541 kfree(portdata);
567 return 1; 542
543 return -ENOMEM;
568} 544}
569EXPORT_SYMBOL(usb_wwan_startup); 545EXPORT_SYMBOL_GPL(usb_wwan_port_probe);
570 546
571int usb_wwan_port_remove(struct usb_serial_port *port) 547int usb_wwan_port_remove(struct usb_serial_port *port)
572{ 548{
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 346c7efc20b0..b9fca3586d74 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -83,6 +83,8 @@ static int whiteheat_firmware_attach(struct usb_serial *serial);
83/* function prototypes for the Connect Tech WhiteHEAT serial converter */ 83/* function prototypes for the Connect Tech WhiteHEAT serial converter */
84static int whiteheat_attach(struct usb_serial *serial); 84static int whiteheat_attach(struct usb_serial *serial);
85static void whiteheat_release(struct usb_serial *serial); 85static void whiteheat_release(struct usb_serial *serial);
86static int whiteheat_port_probe(struct usb_serial_port *port);
87static int whiteheat_port_remove(struct usb_serial_port *port);
86static int whiteheat_open(struct tty_struct *tty, 88static int whiteheat_open(struct tty_struct *tty,
87 struct usb_serial_port *port); 89 struct usb_serial_port *port);
88static void whiteheat_close(struct usb_serial_port *port); 90static void whiteheat_close(struct usb_serial_port *port);
@@ -117,6 +119,8 @@ static struct usb_serial_driver whiteheat_device = {
117 .num_ports = 4, 119 .num_ports = 4,
118 .attach = whiteheat_attach, 120 .attach = whiteheat_attach,
119 .release = whiteheat_release, 121 .release = whiteheat_release,
122 .port_probe = whiteheat_port_probe,
123 .port_remove = whiteheat_port_remove,
120 .open = whiteheat_open, 124 .open = whiteheat_open,
121 .close = whiteheat_close, 125 .close = whiteheat_close,
122 .ioctl = whiteheat_ioctl, 126 .ioctl = whiteheat_ioctl,
@@ -218,15 +222,12 @@ static int whiteheat_attach(struct usb_serial *serial)
218{ 222{
219 struct usb_serial_port *command_port; 223 struct usb_serial_port *command_port;
220 struct whiteheat_command_private *command_info; 224 struct whiteheat_command_private *command_info;
221 struct usb_serial_port *port;
222 struct whiteheat_private *info;
223 struct whiteheat_hw_info *hw_info; 225 struct whiteheat_hw_info *hw_info;
224 int pipe; 226 int pipe;
225 int ret; 227 int ret;
226 int alen; 228 int alen;
227 __u8 *command; 229 __u8 *command;
228 __u8 *result; 230 __u8 *result;
229 int i;
230 231
231 command_port = serial->port[COMMAND_PORT]; 232 command_port = serial->port[COMMAND_PORT];
232 233
@@ -285,22 +286,6 @@ static int whiteheat_attach(struct usb_serial *serial)
285 serial->type->description, 286 serial->type->description,
286 hw_info->sw_major_rev, hw_info->sw_minor_rev); 287 hw_info->sw_major_rev, hw_info->sw_minor_rev);
287 288
288 for (i = 0; i < serial->num_ports; i++) {
289 port = serial->port[i];
290
291 info = kmalloc(sizeof(struct whiteheat_private), GFP_KERNEL);
292 if (info == NULL) {
293 dev_err(&port->dev,
294 "%s: Out of memory for port structures\n",
295 serial->type->description);
296 goto no_private;
297 }
298
299 info->mcr = 0;
300
301 usb_set_serial_port_data(port, info);
302 }
303
304 command_info = kmalloc(sizeof(struct whiteheat_command_private), 289 command_info = kmalloc(sizeof(struct whiteheat_command_private),
305 GFP_KERNEL); 290 GFP_KERNEL);
306 if (command_info == NULL) { 291 if (command_info == NULL) {
@@ -333,16 +318,10 @@ no_firmware:
333 "%s: please contact support@connecttech.com\n", 318 "%s: please contact support@connecttech.com\n",
334 serial->type->description); 319 serial->type->description);
335 kfree(result); 320 kfree(result);
321 kfree(command);
336 return -ENODEV; 322 return -ENODEV;
337 323
338no_command_private: 324no_command_private:
339 for (i = serial->num_ports - 1; i >= 0; i--) {
340 port = serial->port[i];
341 info = usb_get_serial_port_data(port);
342 kfree(info);
343no_private:
344 ;
345 }
346 kfree(result); 325 kfree(result);
347no_result_buffer: 326no_result_buffer:
348 kfree(command); 327 kfree(command);
@@ -350,21 +329,36 @@ no_command_buffer:
350 return -ENOMEM; 329 return -ENOMEM;
351} 330}
352 331
353
354static void whiteheat_release(struct usb_serial *serial) 332static void whiteheat_release(struct usb_serial *serial)
355{ 333{
356 struct usb_serial_port *command_port; 334 struct usb_serial_port *command_port;
357 struct whiteheat_private *info;
358 int i;
359 335
360 /* free up our private data for our command port */ 336 /* free up our private data for our command port */
361 command_port = serial->port[COMMAND_PORT]; 337 command_port = serial->port[COMMAND_PORT];
362 kfree(usb_get_serial_port_data(command_port)); 338 kfree(usb_get_serial_port_data(command_port));
339}
363 340
364 for (i = 0; i < serial->num_ports; i++) { 341static int whiteheat_port_probe(struct usb_serial_port *port)
365 info = usb_get_serial_port_data(serial->port[i]); 342{
366 kfree(info); 343 struct whiteheat_private *info;
367 } 344
345 info = kzalloc(sizeof(*info), GFP_KERNEL);
346 if (!info)
347 return -ENOMEM;
348
349 usb_set_serial_port_data(port, info);
350
351 return 0;
352}
353
354static int whiteheat_port_remove(struct usb_serial_port *port)
355{
356 struct whiteheat_private *info;
357
358 info = usb_get_serial_port_data(port);
359 kfree(info);
360
361 return 0;
368} 362}
369 363
370static int whiteheat_open(struct tty_struct *tty, struct usb_serial_port *port) 364static int whiteheat_open(struct tty_struct *tty, struct usb_serial_port *port)
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 779cd954abcb..d305a5aa3a5d 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1004,6 +1004,12 @@ UNUSUAL_DEV( 0x07cf, 0x1001, 0x1000, 0x9999,
1004 USB_SC_8070, USB_PR_CB, NULL, 1004 USB_SC_8070, USB_PR_CB, NULL,
1005 US_FL_NEED_OVERRIDE | US_FL_FIX_INQUIRY ), 1005 US_FL_NEED_OVERRIDE | US_FL_FIX_INQUIRY ),
1006 1006
1007/* Submitted by Oleksandr Chumachenko <ledest@gmail.com> */
1008UNUSUAL_DEV( 0x07cf, 0x1167, 0x0100, 0x0100,
1009 "Casio",
1010 "EX-N1 DigitalCamera",
1011 USB_SC_8070, USB_PR_DEVICE, NULL, 0),
1012
1007/* Submitted by Hartmut Wahl <hwahl@hwahl.de>*/ 1013/* Submitted by Hartmut Wahl <hwahl@hwahl.de>*/
1008UNUSUAL_DEV( 0x0839, 0x000a, 0x0001, 0x0001, 1014UNUSUAL_DEV( 0x0839, 0x000a, 0x0001, 0x0001,
1009 "Samsung", 1015 "Samsung",
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 6d369fe9d30b..6c119944bbb6 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -408,7 +408,7 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
408 struct vfio_pci_device *vdev = device_data; 408 struct vfio_pci_device *vdev = device_data;
409 struct pci_dev *pdev = vdev->pdev; 409 struct pci_dev *pdev = vdev->pdev;
410 unsigned int index; 410 unsigned int index;
411 u64 phys_len, req_len, pgoff, req_start, phys; 411 u64 phys_len, req_len, pgoff, req_start;
412 int ret; 412 int ret;
413 413
414 index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT); 414 index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
@@ -463,10 +463,9 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
463 vma->vm_private_data = vdev; 463 vma->vm_private_data = vdev;
464 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; 464 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
465 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 465 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
466 vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
466 467
467 phys = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff; 468 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
468
469 return remap_pfn_range(vma, vma->vm_start, phys,
470 req_len, vma->vm_page_prot); 469 req_len, vma->vm_page_prot);
471} 470}
472 471
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index d8dedc7d3910..3639371fa697 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -366,6 +366,17 @@ static int vfio_intx_enable(struct vfio_pci_device *vdev)
366 return -ENOMEM; 366 return -ENOMEM;
367 367
368 vdev->num_ctx = 1; 368 vdev->num_ctx = 1;
369
370 /*
371 * If the virtual interrupt is masked, restore it. Devices
372 * supporting DisINTx can be masked at the hardware level
373 * here, non-PCI-2.3 devices will have to wait until the
374 * interrupt is enabled.
375 */
376 vdev->ctx[0].masked = vdev->virq_disabled;
377 if (vdev->pci_2_3)
378 pci_intx(vdev->pdev, !vdev->ctx[0].masked);
379
369 vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX; 380 vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;
370 381
371 return 0; 382 return 0;
@@ -400,25 +411,26 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
400 return PTR_ERR(trigger); 411 return PTR_ERR(trigger);
401 } 412 }
402 413
414 vdev->ctx[0].trigger = trigger;
415
403 if (!vdev->pci_2_3) 416 if (!vdev->pci_2_3)
404 irqflags = 0; 417 irqflags = 0;
405 418
406 ret = request_irq(pdev->irq, vfio_intx_handler, 419 ret = request_irq(pdev->irq, vfio_intx_handler,
407 irqflags, vdev->ctx[0].name, vdev); 420 irqflags, vdev->ctx[0].name, vdev);
408 if (ret) { 421 if (ret) {
422 vdev->ctx[0].trigger = NULL;
409 kfree(vdev->ctx[0].name); 423 kfree(vdev->ctx[0].name);
410 eventfd_ctx_put(trigger); 424 eventfd_ctx_put(trigger);
411 return ret; 425 return ret;
412 } 426 }
413 427
414 vdev->ctx[0].trigger = trigger;
415
416 /* 428 /*
417 * INTx disable will stick across the new irq setup, 429 * INTx disable will stick across the new irq setup,
418 * disable_irq won't. 430 * disable_irq won't.
419 */ 431 */
420 spin_lock_irqsave(&vdev->irqlock, flags); 432 spin_lock_irqsave(&vdev->irqlock, flags);
421 if (!vdev->pci_2_3 && (vdev->ctx[0].masked || vdev->virq_disabled)) 433 if (!vdev->pci_2_3 && vdev->ctx[0].masked)
422 disable_irq_nosync(pdev->irq); 434 disable_irq_nosync(pdev->irq);
423 spin_unlock_irqrestore(&vdev->irqlock, flags); 435 spin_unlock_irqrestore(&vdev->irqlock, flags);
424 436
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 072cbbadbc36..7f93f34b7f91 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -379,7 +379,8 @@ static void handle_rx(struct vhost_net *net)
379 .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE 379 .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
380 }; 380 };
381 size_t total_len = 0; 381 size_t total_len = 0;
382 int err, headcount, mergeable; 382 int err, mergeable;
383 s16 headcount;
383 size_t vhost_hlen, sock_hlen; 384 size_t vhost_hlen, sock_hlen;
384 size_t vhost_len, sock_len; 385 size_t vhost_len, sock_len;
385 /* TODO: check that we are running from vhost_worker? */ 386 /* TODO: check that we are running from vhost_worker? */
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index ed8e2e6c8df2..aa31692064dd 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -330,17 +330,6 @@ static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
330 return 0; 330 return 0;
331} 331}
332 332
333static u16 tcm_vhost_set_fabric_sense_len(struct se_cmd *se_cmd,
334 u32 sense_length)
335{
336 return 0;
337}
338
339static u16 tcm_vhost_get_fabric_sense_len(void)
340{
341 return 0;
342}
343
344static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd) 333static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
345{ 334{
346 struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; 335 struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
@@ -426,10 +415,7 @@ static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
426{ 415{
427 struct tcm_vhost_cmd *tv_cmd; 416 struct tcm_vhost_cmd *tv_cmd;
428 struct tcm_vhost_nexus *tv_nexus; 417 struct tcm_vhost_nexus *tv_nexus;
429 struct se_portal_group *se_tpg = &tv_tpg->se_tpg;
430 struct se_session *se_sess; 418 struct se_session *se_sess;
431 struct se_cmd *se_cmd;
432 int sam_task_attr;
433 419
434 tv_nexus = tv_tpg->tpg_nexus; 420 tv_nexus = tv_tpg->tpg_nexus;
435 if (!tv_nexus) { 421 if (!tv_nexus) {
@@ -445,23 +431,11 @@ static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
445 } 431 }
446 INIT_LIST_HEAD(&tv_cmd->tvc_completion_list); 432 INIT_LIST_HEAD(&tv_cmd->tvc_completion_list);
447 tv_cmd->tvc_tag = v_req->tag; 433 tv_cmd->tvc_tag = v_req->tag;
434 tv_cmd->tvc_task_attr = v_req->task_attr;
435 tv_cmd->tvc_exp_data_len = exp_data_len;
436 tv_cmd->tvc_data_direction = data_direction;
437 tv_cmd->tvc_nexus = tv_nexus;
448 438
449 se_cmd = &tv_cmd->tvc_se_cmd;
450 /*
451 * Locate the SAM Task Attr from virtio_scsi_cmd_req
452 */
453 sam_task_attr = v_req->task_attr;
454 /*
455 * Initialize struct se_cmd descriptor from TCM infrastructure
456 */
457 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, exp_data_len,
458 data_direction, sam_task_attr,
459 &tv_cmd->tvc_sense_buf[0]);
460
461#if 0 /* FIXME: vhost_scsi_allocate_cmd() BIDI operation */
462 if (bidi)
463 se_cmd->se_cmd_flags |= SCF_BIDI;
464#endif
465 return tv_cmd; 439 return tv_cmd;
466} 440}
467 441
@@ -560,37 +534,10 @@ static void tcm_vhost_submission_work(struct work_struct *work)
560{ 534{
561 struct tcm_vhost_cmd *tv_cmd = 535 struct tcm_vhost_cmd *tv_cmd =
562 container_of(work, struct tcm_vhost_cmd, work); 536 container_of(work, struct tcm_vhost_cmd, work);
537 struct tcm_vhost_nexus *tv_nexus;
563 struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; 538 struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
564 struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL; 539 struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
565 int rc, sg_no_bidi = 0; 540 int rc, sg_no_bidi = 0;
566 /*
567 * Locate the struct se_lun pointer based on v_req->lun, and
568 * attach it to struct se_cmd
569 */
570 rc = transport_lookup_cmd_lun(&tv_cmd->tvc_se_cmd, tv_cmd->tvc_lun);
571 if (rc < 0) {
572 pr_err("Failed to look up lun: %d\n", tv_cmd->tvc_lun);
573 transport_send_check_condition_and_sense(&tv_cmd->tvc_se_cmd,
574 tv_cmd->tvc_se_cmd.scsi_sense_reason, 0);
575 transport_generic_free_cmd(se_cmd, 0);
576 return;
577 }
578
579 rc = target_setup_cmd_from_cdb(se_cmd, tv_cmd->tvc_cdb);
580 if (rc == -ENOMEM) {
581 transport_send_check_condition_and_sense(se_cmd,
582 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
583 transport_generic_free_cmd(se_cmd, 0);
584 return;
585 } else if (rc < 0) {
586 if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
587 tcm_vhost_queue_status(se_cmd);
588 else
589 transport_send_check_condition_and_sense(se_cmd,
590 se_cmd->scsi_sense_reason, 0);
591 transport_generic_free_cmd(se_cmd, 0);
592 return;
593 }
594 541
595 if (tv_cmd->tvc_sgl_count) { 542 if (tv_cmd->tvc_sgl_count) {
596 sg_ptr = tv_cmd->tvc_sgl; 543 sg_ptr = tv_cmd->tvc_sgl;
@@ -608,17 +555,19 @@ static void tcm_vhost_submission_work(struct work_struct *work)
608 } else { 555 } else {
609 sg_ptr = NULL; 556 sg_ptr = NULL;
610 } 557 }
611 558 tv_nexus = tv_cmd->tvc_nexus;
612 rc = transport_generic_map_mem_to_cmd(se_cmd, sg_ptr, 559
613 tv_cmd->tvc_sgl_count, sg_bidi_ptr, 560 rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
614 sg_no_bidi); 561 tv_cmd->tvc_cdb, &tv_cmd->tvc_sense_buf[0],
562 tv_cmd->tvc_lun, tv_cmd->tvc_exp_data_len,
563 tv_cmd->tvc_task_attr, tv_cmd->tvc_data_direction,
564 0, sg_ptr, tv_cmd->tvc_sgl_count,
565 sg_bidi_ptr, sg_no_bidi);
615 if (rc < 0) { 566 if (rc < 0) {
616 transport_send_check_condition_and_sense(se_cmd, 567 transport_send_check_condition_and_sense(se_cmd,
617 se_cmd->scsi_sense_reason, 0); 568 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
618 transport_generic_free_cmd(se_cmd, 0); 569 transport_generic_free_cmd(se_cmd, 0);
619 return;
620 } 570 }
621 transport_handle_cdb_direct(se_cmd);
622} 571}
623 572
624static void vhost_scsi_handle_vq(struct vhost_scsi *vs) 573static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
@@ -1531,8 +1480,6 @@ static struct target_core_fabric_ops tcm_vhost_ops = {
1531 .queue_data_in = tcm_vhost_queue_data_in, 1480 .queue_data_in = tcm_vhost_queue_data_in,
1532 .queue_status = tcm_vhost_queue_status, 1481 .queue_status = tcm_vhost_queue_status,
1533 .queue_tm_rsp = tcm_vhost_queue_tm_rsp, 1482 .queue_tm_rsp = tcm_vhost_queue_tm_rsp,
1534 .get_fabric_sense_len = tcm_vhost_get_fabric_sense_len,
1535 .set_fabric_sense_len = tcm_vhost_set_fabric_sense_len,
1536 /* 1483 /*
1537 * Setup callers for generic logic in target_core_fabric_configfs.c 1484 * Setup callers for generic logic in target_core_fabric_configfs.c
1538 */ 1485 */
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
index d9e93557d669..7e87c63ecbcd 100644
--- a/drivers/vhost/tcm_vhost.h
+++ b/drivers/vhost/tcm_vhost.h
@@ -5,6 +5,12 @@
5struct tcm_vhost_cmd { 5struct tcm_vhost_cmd {
6 /* Descriptor from vhost_get_vq_desc() for virt_queue segment */ 6 /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
7 int tvc_vq_desc; 7 int tvc_vq_desc;
8 /* virtio-scsi initiator task attribute */
9 int tvc_task_attr;
10 /* virtio-scsi initiator data direction */
11 enum dma_data_direction tvc_data_direction;
12 /* Expected data transfer length from virtio-scsi header */
13 u32 tvc_exp_data_len;
8 /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */ 14 /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
9 u64 tvc_tag; 15 u64 tvc_tag;
10 /* The number of scatterlists associated with this cmd */ 16 /* The number of scatterlists associated with this cmd */
@@ -17,6 +23,8 @@ struct tcm_vhost_cmd {
17 struct virtio_scsi_cmd_resp __user *tvc_resp; 23 struct virtio_scsi_cmd_resp __user *tvc_resp;
18 /* Pointer to vhost_scsi for our device */ 24 /* Pointer to vhost_scsi for our device */
19 struct vhost_scsi *tvc_vhost; 25 struct vhost_scsi *tvc_vhost;
26 /* Pointer to vhost nexus memory */
27 struct tcm_vhost_nexus *tvc_nexus;
20 /* The TCM I/O descriptor that is accessed via container_of() */ 28 /* The TCM I/O descriptor that is accessed via container_of() */
21 struct se_cmd tvc_se_cmd; 29 struct se_cmd tvc_se_cmd;
22 /* work item used for cmwq dispatch to tcm_vhost_submission_work() */ 30 /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 20c33c42600a..d08d7998a4aa 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2139,21 +2139,6 @@ config FB_UDL
2139 mplayer -vo fbdev. Supports all USB 2.0 era DisplayLink devices. 2139 mplayer -vo fbdev. Supports all USB 2.0 era DisplayLink devices.
2140 To compile as a module, choose M here: the module name is udlfb. 2140 To compile as a module, choose M here: the module name is udlfb.
2141 2141
2142config FB_PNX4008_DUM
2143 tristate "Display Update Module support on Philips PNX4008 board"
2144 depends on FB && ARCH_PNX4008
2145 ---help---
2146 Say Y here to enable support for PNX4008 Display Update Module (DUM)
2147
2148config FB_PNX4008_DUM_RGB
2149 tristate "RGB Framebuffer support on Philips PNX4008 board"
2150 depends on FB_PNX4008_DUM
2151 select FB_CFB_FILLRECT
2152 select FB_CFB_COPYAREA
2153 select FB_CFB_IMAGEBLIT
2154 ---help---
2155 Say Y here to enable support for PNX4008 RGB Framebuffer
2156
2157config FB_IBM_GXT4500 2142config FB_IBM_GXT4500
2158 tristate "Framebuffer support for IBM GXT4500P adaptor" 2143 tristate "Framebuffer support for IBM GXT4500P adaptor"
2159 depends on FB && PPC 2144 depends on FB && PPC
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 194035986af2..23e948ebfab8 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -127,8 +127,6 @@ obj-$(CONFIG_FB_S3C) += s3c-fb.o
127obj-$(CONFIG_FB_S3C2410) += s3c2410fb.o 127obj-$(CONFIG_FB_S3C2410) += s3c2410fb.o
128obj-$(CONFIG_FB_FSL_DIU) += fsl-diu-fb.o 128obj-$(CONFIG_FB_FSL_DIU) += fsl-diu-fb.o
129obj-$(CONFIG_FB_COBALT) += cobalt_lcdfb.o 129obj-$(CONFIG_FB_COBALT) += cobalt_lcdfb.o
130obj-$(CONFIG_FB_PNX4008_DUM) += pnx4008/
131obj-$(CONFIG_FB_PNX4008_DUM_RGB) += pnx4008/
132obj-$(CONFIG_FB_IBM_GXT4500) += gxt4500.o 130obj-$(CONFIG_FB_IBM_GXT4500) += gxt4500.o
133obj-$(CONFIG_FB_PS3) += ps3fb.o 131obj-$(CONFIG_FB_PS3) += ps3fb.o
134obj-$(CONFIG_FB_SM501) += sm501fb.o 132obj-$(CONFIG_FB_SM501) += sm501fb.o
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index 887df9d81422..7fa1bf823729 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -949,7 +949,6 @@ static int round_down_bpp = 1; /* for mode probing */
949 949
950 950
951static int amifb_ilbm = 0; /* interleaved or normal bitplanes */ 951static int amifb_ilbm = 0; /* interleaved or normal bitplanes */
952static int amifb_inverse = 0;
953 952
954static u32 amifb_hfmin __initdata; /* monitor hfreq lower limit (Hz) */ 953static u32 amifb_hfmin __initdata; /* monitor hfreq lower limit (Hz) */
955static u32 amifb_hfmax __initdata; /* monitor hfreq upper limit (Hz) */ 954static u32 amifb_hfmax __initdata; /* monitor hfreq upper limit (Hz) */
@@ -2355,7 +2354,6 @@ static int __init amifb_setup(char *options)
2355 if (!*this_opt) 2354 if (!*this_opt)
2356 continue; 2355 continue;
2357 if (!strcmp(this_opt, "inverse")) { 2356 if (!strcmp(this_opt, "inverse")) {
2358 amifb_inverse = 1;
2359 fb_invert_cmaps(); 2357 fb_invert_cmaps();
2360 } else if (!strcmp(this_opt, "ilbm")) 2358 } else if (!strcmp(this_opt, "ilbm"))
2361 amifb_ilbm = 1; 2359 amifb_ilbm = 1;
diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
index a1d58e9d3073..4659d5da6ff8 100644
--- a/drivers/video/arcfb.c
+++ b/drivers/video/arcfb.c
@@ -552,6 +552,7 @@ static int __devinit arcfb_probe(struct platform_device *dev)
552 "arcfb", info)) { 552 "arcfb", info)) {
553 printk(KERN_INFO 553 printk(KERN_INFO
554 "arcfb: Failed req IRQ %d\n", par->irq); 554 "arcfb: Failed req IRQ %d\n", par->irq);
555 retval = -EBUSY;
555 goto err1; 556 goto err1;
556 } 557 }
557 } 558 }
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 15055395cd95..94cac9f9919f 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -931,8 +931,10 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
931 } 931 }
932 932
933 info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len); 933 info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len);
934 if (!info->screen_base) 934 if (!info->screen_base) {
935 ret = -ENOMEM;
935 goto release_intmem; 936 goto release_intmem;
937 }
936 938
937 /* 939 /*
938 * Don't clear the framebuffer -- someone may have set 940 * Don't clear the framebuffer -- someone may have set
@@ -960,6 +962,7 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
960 sinfo->mmio = ioremap(info->fix.mmio_start, info->fix.mmio_len); 962 sinfo->mmio = ioremap(info->fix.mmio_start, info->fix.mmio_len);
961 if (!sinfo->mmio) { 963 if (!sinfo->mmio) {
962 dev_err(dev, "cannot map LCDC registers\n"); 964 dev_err(dev, "cannot map LCDC registers\n");
965 ret = -ENOMEM;
963 goto release_mem; 966 goto release_mem;
964 } 967 }
965 968
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index c101697a4ba7..765a945f8ea1 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -60,7 +60,8 @@ config LCD_LTV350QV
60 The LTV350QV panel is present on all ATSTK1000 boards. 60 The LTV350QV panel is present on all ATSTK1000 boards.
61 61
62config LCD_ILI9320 62config LCD_ILI9320
63 tristate 63 tristate "ILI Technology ILI9320 controller support"
64 depends on SPI
64 help 65 help
65 If you have a panel based on the ILI9320 controller chip 66 If you have a panel based on the ILI9320 controller chip
66 then say y to include a power driver for it. 67 then say y to include a power driver for it.
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
index c6915c6c3cd1..585949b57055 100644
--- a/drivers/video/backlight/lm3639_bl.c
+++ b/drivers/video/backlight/lm3639_bl.c
@@ -206,11 +206,11 @@ static ssize_t lm3639_bled_mode_store(struct device *dev,
206 206
207out: 207out:
208 dev_err(pchip->dev, "%s:i2c access fail to register\n", __func__); 208 dev_err(pchip->dev, "%s:i2c access fail to register\n", __func__);
209 return size; 209 return ret;
210 210
211out_input: 211out_input:
212 dev_err(pchip->dev, "%s:input conversion fail\n", __func__); 212 dev_err(pchip->dev, "%s:input conversion fail\n", __func__);
213 return size; 213 return ret;
214 214
215} 215}
216 216
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 995f0164c9b0..069983ca49ff 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -213,7 +213,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
213 pb->exit = data->exit; 213 pb->exit = data->exit;
214 pb->dev = &pdev->dev; 214 pb->dev = &pdev->dev;
215 215
216 pb->pwm = pwm_get(&pdev->dev, NULL); 216 pb->pwm = devm_pwm_get(&pdev->dev, NULL);
217 if (IS_ERR(pb->pwm)) { 217 if (IS_ERR(pb->pwm)) {
218 dev_err(&pdev->dev, "unable to request PWM, trying legacy API\n"); 218 dev_err(&pdev->dev, "unable to request PWM, trying legacy API\n");
219 219
@@ -246,7 +246,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
246 if (IS_ERR(bl)) { 246 if (IS_ERR(bl)) {
247 dev_err(&pdev->dev, "failed to register backlight\n"); 247 dev_err(&pdev->dev, "failed to register backlight\n");
248 ret = PTR_ERR(bl); 248 ret = PTR_ERR(bl);
249 goto err_bl; 249 goto err_alloc;
250 } 250 }
251 251
252 bl->props.brightness = data->dft_brightness; 252 bl->props.brightness = data->dft_brightness;
@@ -255,8 +255,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
255 platform_set_drvdata(pdev, bl); 255 platform_set_drvdata(pdev, bl);
256 return 0; 256 return 0;
257 257
258err_bl:
259 pwm_put(pb->pwm);
260err_alloc: 258err_alloc:
261 if (data->exit) 259 if (data->exit)
262 data->exit(&pdev->dev); 260 data->exit(&pdev->dev);
@@ -271,7 +269,6 @@ static int pwm_backlight_remove(struct platform_device *pdev)
271 backlight_device_unregister(bl); 269 backlight_device_unregister(bl);
272 pwm_config(pb->pwm, 0, pb->period); 270 pwm_config(pb->pwm, 0, pb->period);
273 pwm_disable(pb->pwm); 271 pwm_disable(pb->pwm);
274 pwm_put(pb->pwm);
275 if (pb->exit) 272 if (pb->exit)
276 pb->exit(&pdev->dev); 273 pb->exit(&pdev->dev);
277 return 0; 274 return 0;
diff --git a/drivers/video/bf537-lq035.c b/drivers/video/bf537-lq035.c
index befbc80d11fc..7347aa1e5e4a 100644
--- a/drivers/video/bf537-lq035.c
+++ b/drivers/video/bf537-lq035.c
@@ -760,18 +760,20 @@ static int __devinit bfin_lq035_probe(struct platform_device *pdev)
760 bfin_lq035_fb.flags = FBINFO_DEFAULT; 760 bfin_lq035_fb.flags = FBINFO_DEFAULT;
761 761
762 762
763 bfin_lq035_fb.pseudo_palette = kzalloc(sizeof(u32) * 16, GFP_KERNEL); 763 bfin_lq035_fb.pseudo_palette = devm_kzalloc(&pdev->dev,
764 sizeof(u32) * 16,
765 GFP_KERNEL);
764 if (bfin_lq035_fb.pseudo_palette == NULL) { 766 if (bfin_lq035_fb.pseudo_palette == NULL) {
765 pr_err("failed to allocate pseudo_palette\n"); 767 pr_err("failed to allocate pseudo_palette\n");
766 ret = -ENOMEM; 768 ret = -ENOMEM;
767 goto out_palette; 769 goto out_table;
768 } 770 }
769 771
770 if (fb_alloc_cmap(&bfin_lq035_fb.cmap, NBR_PALETTE, 0) < 0) { 772 if (fb_alloc_cmap(&bfin_lq035_fb.cmap, NBR_PALETTE, 0) < 0) {
771 pr_err("failed to allocate colormap (%d entries)\n", 773 pr_err("failed to allocate colormap (%d entries)\n",
772 NBR_PALETTE); 774 NBR_PALETTE);
773 ret = -EFAULT; 775 ret = -EFAULT;
774 goto out_cmap; 776 goto out_table;
775 } 777 }
776 778
777 if (register_framebuffer(&bfin_lq035_fb) < 0) { 779 if (register_framebuffer(&bfin_lq035_fb) < 0) {
@@ -804,9 +806,6 @@ out_lcd:
804 unregister_framebuffer(&bfin_lq035_fb); 806 unregister_framebuffer(&bfin_lq035_fb);
805out_reg: 807out_reg:
806 fb_dealloc_cmap(&bfin_lq035_fb.cmap); 808 fb_dealloc_cmap(&bfin_lq035_fb.cmap);
807out_cmap:
808 kfree(bfin_lq035_fb.pseudo_palette);
809out_palette:
810out_table: 809out_table:
811 dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0); 810 dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
812 fb_buffer = NULL; 811 fb_buffer = NULL;
@@ -834,7 +833,6 @@ static int __devexit bfin_lq035_remove(struct platform_device *pdev)
834 free_dma(CH_PPI); 833 free_dma(CH_PPI);
835 834
836 835
837 kfree(bfin_lq035_fb.pseudo_palette);
838 fb_dealloc_cmap(&bfin_lq035_fb.cmap); 836 fb_dealloc_cmap(&bfin_lq035_fb.cmap);
839 837
840 838
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index dc2f0047769b..ff5663f5c64f 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -525,6 +525,7 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
525 info = fbinfo->par; 525 info = fbinfo->par;
526 info->fb = fbinfo; 526 info->fb = fbinfo;
527 info->dev = &pdev->dev; 527 info->dev = &pdev->dev;
528 spin_lock_init(&info->lock);
528 529
529 platform_set_drvdata(pdev, fbinfo); 530 platform_set_drvdata(pdev, fbinfo);
530 531
@@ -601,7 +602,8 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
601 602
602 fbinfo->fbops = &bfin_bf54x_fb_ops; 603 fbinfo->fbops = &bfin_bf54x_fb_ops;
603 604
604 fbinfo->pseudo_palette = kzalloc(sizeof(u32) * 16, GFP_KERNEL); 605 fbinfo->pseudo_palette = devm_kzalloc(&pdev->dev, sizeof(u32) * 16,
606 GFP_KERNEL);
605 if (!fbinfo->pseudo_palette) { 607 if (!fbinfo->pseudo_palette) {
606 printk(KERN_ERR DRIVER_NAME 608 printk(KERN_ERR DRIVER_NAME
607 "Fail to allocate pseudo_palette\n"); 609 "Fail to allocate pseudo_palette\n");
@@ -616,7 +618,7 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
616 "Fail to allocate colormap (%d entries)\n", 618 "Fail to allocate colormap (%d entries)\n",
617 BFIN_LCD_NBR_PALETTE_ENTRIES); 619 BFIN_LCD_NBR_PALETTE_ENTRIES);
618 ret = -EFAULT; 620 ret = -EFAULT;
619 goto out5; 621 goto out4;
620 } 622 }
621 623
622 if (request_ports(info)) { 624 if (request_ports(info)) {
@@ -671,8 +673,6 @@ out7:
671 free_ports(info); 673 free_ports(info);
672out6: 674out6:
673 fb_dealloc_cmap(&fbinfo->cmap); 675 fb_dealloc_cmap(&fbinfo->cmap);
674out5:
675 kfree(fbinfo->pseudo_palette);
676out4: 676out4:
677 dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer, 677 dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
678 info->dma_handle); 678 info->dma_handle);
@@ -699,7 +699,6 @@ static int __devexit bfin_bf54x_remove(struct platform_device *pdev)
699 dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer, 699 dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
700 info->dma_handle); 700 info->dma_handle);
701 701
702 kfree(fbinfo->pseudo_palette);
703 fb_dealloc_cmap(&fbinfo->cmap); 702 fb_dealloc_cmap(&fbinfo->cmap);
704 703
705#ifndef NO_BL_SUPPORT 704#ifndef NO_BL_SUPPORT
diff --git a/drivers/video/bfin-lq035q1-fb.c b/drivers/video/bfin-lq035q1-fb.c
index 353c02fe8a95..6fbc75c2f0a1 100644
--- a/drivers/video/bfin-lq035q1-fb.c
+++ b/drivers/video/bfin-lq035q1-fb.c
@@ -577,6 +577,7 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
577 info = fbinfo->par; 577 info = fbinfo->par;
578 info->fb = fbinfo; 578 info->fb = fbinfo;
579 info->dev = &pdev->dev; 579 info->dev = &pdev->dev;
580 spin_lock_init(&info->lock);
580 581
581 info->disp_info = pdev->dev.platform_data; 582 info->disp_info = pdev->dev.platform_data;
582 583
@@ -853,17 +854,7 @@ static struct platform_driver bfin_lq035q1_driver = {
853 }, 854 },
854}; 855};
855 856
856static int __init bfin_lq035q1_driver_init(void) 857module_platform_driver(bfin_lq035q1_driver);
857{
858 return platform_driver_register(&bfin_lq035q1_driver);
859}
860module_init(bfin_lq035q1_driver_init);
861
862static void __exit bfin_lq035q1_driver_cleanup(void)
863{
864 platform_driver_unregister(&bfin_lq035q1_driver);
865}
866module_exit(bfin_lq035q1_driver_cleanup);
867 858
868MODULE_DESCRIPTION("Blackfin TFT LCD Driver"); 859MODULE_DESCRIPTION("Blackfin TFT LCD Driver");
869MODULE_LICENSE("GPL"); 860MODULE_LICENSE("GPL");
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index 7a0c05f3537e..ae0fb24b8b43 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -447,6 +447,7 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
447 info = fbinfo->par; 447 info = fbinfo->par;
448 info->fb = fbinfo; 448 info->fb = fbinfo;
449 info->dev = &pdev->dev; 449 info->dev = &pdev->dev;
450 spin_lock_init(&info->lock);
450 451
451 platform_set_drvdata(pdev, fbinfo); 452 platform_set_drvdata(pdev, fbinfo);
452 453
diff --git a/drivers/video/bw2.c b/drivers/video/bw2.c
index 7ba74cd4be61..6bea9a936798 100644
--- a/drivers/video/bw2.c
+++ b/drivers/video/bw2.c
@@ -319,8 +319,10 @@ static int __devinit bw2_probe(struct platform_device *op)
319 319
320 info->screen_base = of_ioremap(&op->resource[0], 0, 320 info->screen_base = of_ioremap(&op->resource[0], 0,
321 info->fix.smem_len, "bw2 ram"); 321 info->fix.smem_len, "bw2 ram");
322 if (!info->screen_base) 322 if (!info->screen_base) {
323 err = -ENOMEM;
323 goto out_unmap_regs; 324 goto out_unmap_regs;
325 }
324 326
325 bw2_blank(FB_BLANK_UNBLANK, info); 327 bw2_blank(FB_BLANK_UNBLANK, info);
326 328
diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c
index f927a7b1a8d4..c5e7612ff876 100644
--- a/drivers/video/cg3.c
+++ b/drivers/video/cg3.c
@@ -398,7 +398,8 @@ static int __devinit cg3_probe(struct platform_device *op)
398 goto out_unmap_screen; 398 goto out_unmap_screen;
399 } 399 }
400 400
401 if (fb_alloc_cmap(&info->cmap, 256, 0)) 401 err = fb_alloc_cmap(&info->cmap, 256, 0);
402 if (err)
402 goto out_unmap_screen; 403 goto out_unmap_screen;
403 404
404 fb_set_cmap(&info->cmap, info); 405 fb_set_cmap(&info->cmap, info);
diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/cobalt_lcdfb.c
index eae46f6457e2..01a4ee7cc6b1 100644
--- a/drivers/video/cobalt_lcdfb.c
+++ b/drivers/video/cobalt_lcdfb.c
@@ -348,7 +348,8 @@ static int __devinit cobalt_lcdfb_probe(struct platform_device *dev)
348 } 348 }
349 349
350 info->screen_size = resource_size(res); 350 info->screen_size = resource_size(res);
351 info->screen_base = ioremap(res->start, info->screen_size); 351 info->screen_base = devm_ioremap(&dev->dev, res->start,
352 info->screen_size);
352 info->fbops = &cobalt_lcd_fbops; 353 info->fbops = &cobalt_lcd_fbops;
353 info->fix = cobalt_lcdfb_fix; 354 info->fix = cobalt_lcdfb_fix;
354 info->fix.smem_start = res->start; 355 info->fix.smem_start = res->start;
@@ -359,7 +360,6 @@ static int __devinit cobalt_lcdfb_probe(struct platform_device *dev)
359 360
360 retval = register_framebuffer(info); 361 retval = register_framebuffer(info);
361 if (retval < 0) { 362 if (retval < 0) {
362 iounmap(info->screen_base);
363 framebuffer_release(info); 363 framebuffer_release(info);
364 return retval; 364 return retval;
365 } 365 }
@@ -380,7 +380,6 @@ static int __devexit cobalt_lcdfb_remove(struct platform_device *dev)
380 380
381 info = platform_get_drvdata(dev); 381 info = platform_get_drvdata(dev);
382 if (info) { 382 if (info) {
383 iounmap(info->screen_base);
384 unregister_framebuffer(info); 383 unregister_framebuffer(info);
385 framebuffer_release(info); 384 framebuffer_release(info);
386 } 385 }
diff --git a/drivers/video/console/font_mini_4x6.c b/drivers/video/console/font_mini_4x6.c
index fa6e698e63c4..838caa1cfef7 100644
--- a/drivers/video/console/font_mini_4x6.c
+++ b/drivers/video/console/font_mini_4x6.c
@@ -1092,7 +1092,7 @@ static const unsigned char fontdata_mini_4x6[FONTDATAMAX] = {
1092 /*{*/ /* Char 124: '|' */ 1092 /*{*/ /* Char 124: '|' */
1093 0x44, /*= [ * ] */ 1093 0x44, /*= [ * ] */
1094 0x44, /*= [ * ] */ 1094 0x44, /*= [ * ] */
1095 0x00, /*= [ ] */ 1095 0x44, /*= [ * ] */
1096 0x44, /*= [ * ] */ 1096 0x44, /*= [ * ] */
1097 0x44, /*= [ * ] */ 1097 0x44, /*= [ * ] */
1098 0x00, /*= [ ] */ 1098 0x00, /*= [ ] */
diff --git a/drivers/video/console/font_sun8x16.c b/drivers/video/console/font_sun8x16.c
index 5abf290c6eb7..268151325b83 100644
--- a/drivers/video/console/font_sun8x16.c
+++ b/drivers/video/console/font_sun8x16.c
@@ -127,7 +127,7 @@ static const unsigned char fontdata_sun8x16[FONTDATAMAX] = {
127/*y*/ 0x00,0x00,0x00,0x00,0x00,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7e,0x06,0x0c,0xf8,0x00, 127/*y*/ 0x00,0x00,0x00,0x00,0x00,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7e,0x06,0x0c,0xf8,0x00,
128/*z*/ 0x00,0x00,0x00,0x00,0x00,0xfe,0xcc,0x18,0x30,0x60,0xc6,0xfe,0x00,0x00,0x00,0x00, 128/*z*/ 0x00,0x00,0x00,0x00,0x00,0xfe,0xcc,0x18,0x30,0x60,0xc6,0xfe,0x00,0x00,0x00,0x00,
129/*{*/ 0x00,0x00,0x0e,0x18,0x18,0x18,0x70,0x18,0x18,0x18,0x18,0x0e,0x00,0x00,0x00,0x00, 129/*{*/ 0x00,0x00,0x0e,0x18,0x18,0x18,0x70,0x18,0x18,0x18,0x18,0x0e,0x00,0x00,0x00,0x00,
130/*|*/ 0x00,0x00,0x18,0x18,0x18,0x18,0x00,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, 130/*|*/ 0x00,0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00,
131/*}*/ 0x00,0x00,0x70,0x18,0x18,0x18,0x0e,0x18,0x18,0x18,0x18,0x70,0x00,0x00,0x00,0x00, 131/*}*/ 0x00,0x00,0x70,0x18,0x18,0x18,0x0e,0x18,0x18,0x18,0x18,0x70,0x00,0x00,0x00,0x00,
132/*~*/ 0x00,0x00,0x76,0xdc,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 132/*~*/ 0x00,0x00,0x76,0xdc,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
133/* */ 0x00,0x00,0x00,0x00,0x10,0x38,0x6c,0xc6,0xc6,0xc6,0xfe,0x00,0x00,0x00,0x00,0x00, 133/* */ 0x00,0x00,0x00,0x00,0x10,0x38,0x6c,0xc6,0xc6,0xc6,0xfe,0x00,0x00,0x00,0x00,0x00,
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c
index c1527f5b47ee..e40125cb313e 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/cyber2000fb.c
@@ -1804,8 +1804,10 @@ cyberpro_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1804 1804
1805 cfb->irq = dev->irq; 1805 cfb->irq = dev->irq;
1806 cfb->region = pci_ioremap_bar(dev, 0); 1806 cfb->region = pci_ioremap_bar(dev, 0);
1807 if (!cfb->region) 1807 if (!cfb->region) {
1808 err = -ENOMEM;
1808 goto failed_ioremap; 1809 goto failed_ioremap;
1810 }
1809 1811
1810 cfb->regs = cfb->region + MMIO_OFFSET; 1812 cfb->regs = cfb->region + MMIO_OFFSET;
1811 cfb->fb.device = &dev->dev; 1813 cfb->fb.device = &dev->dev;
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 113d43a16f54..80665f66ac1a 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -26,7 +26,9 @@
26#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/uaccess.h> 28#include <linux/uaccess.h>
29#include <linux/pm_runtime.h>
29#include <linux/interrupt.h> 30#include <linux/interrupt.h>
31#include <linux/wait.h>
30#include <linux/clk.h> 32#include <linux/clk.h>
31#include <linux/cpufreq.h> 33#include <linux/cpufreq.h>
32#include <linux/console.h> 34#include <linux/console.h>
@@ -48,6 +50,7 @@
48#define LCD_PL_LOAD_DONE BIT(6) 50#define LCD_PL_LOAD_DONE BIT(6)
49#define LCD_FIFO_UNDERFLOW BIT(5) 51#define LCD_FIFO_UNDERFLOW BIT(5)
50#define LCD_SYNC_LOST BIT(2) 52#define LCD_SYNC_LOST BIT(2)
53#define LCD_FRAME_DONE BIT(0)
51 54
52/* LCD DMA Control Register */ 55/* LCD DMA Control Register */
53#define LCD_DMA_BURST_SIZE(x) ((x) << 4) 56#define LCD_DMA_BURST_SIZE(x) ((x) << 4)
@@ -86,6 +89,8 @@
86#define LCD_V2_LIDD_CLK_EN BIT(1) 89#define LCD_V2_LIDD_CLK_EN BIT(1)
87#define LCD_V2_CORE_CLK_EN BIT(0) 90#define LCD_V2_CORE_CLK_EN BIT(0)
88#define LCD_V2_LPP_B10 26 91#define LCD_V2_LPP_B10 26
92#define LCD_V2_TFT_24BPP_MODE BIT(25)
93#define LCD_V2_TFT_24BPP_UNPACK BIT(26)
89 94
90/* LCD Raster Timing 2 Register */ 95/* LCD Raster Timing 2 Register */
91#define LCD_AC_BIAS_TRANSITIONS_PER_INT(x) ((x) << 16) 96#define LCD_AC_BIAS_TRANSITIONS_PER_INT(x) ((x) << 16)
@@ -135,6 +140,8 @@ static void __iomem *da8xx_fb_reg_base;
135static struct resource *lcdc_regs; 140static struct resource *lcdc_regs;
136static unsigned int lcd_revision; 141static unsigned int lcd_revision;
137static irq_handler_t lcdc_irq_handler; 142static irq_handler_t lcdc_irq_handler;
143static wait_queue_head_t frame_done_wq;
144static int frame_done_flag;
138 145
139static inline unsigned int lcdc_read(unsigned int addr) 146static inline unsigned int lcdc_read(unsigned int addr)
140{ 147{
@@ -156,7 +163,6 @@ struct da8xx_fb_par {
156 unsigned int dma_end; 163 unsigned int dma_end;
157 struct clk *lcdc_clk; 164 struct clk *lcdc_clk;
158 int irq; 165 int irq;
159 unsigned short pseudo_palette[16];
160 unsigned int palette_sz; 166 unsigned int palette_sz;
161 unsigned int pxl_clk; 167 unsigned int pxl_clk;
162 int blank; 168 int blank;
@@ -175,6 +181,7 @@ struct da8xx_fb_par {
175 unsigned int lcd_fck_rate; 181 unsigned int lcd_fck_rate;
176#endif 182#endif
177 void (*panel_power_ctrl)(int); 183 void (*panel_power_ctrl)(int);
184 u32 pseudo_palette[16];
178}; 185};
179 186
180/* Variable Screen Information */ 187/* Variable Screen Information */
@@ -288,13 +295,26 @@ static inline void lcd_enable_raster(void)
288} 295}
289 296
290/* Disable the Raster Engine of the LCD Controller */ 297/* Disable the Raster Engine of the LCD Controller */
291static inline void lcd_disable_raster(void) 298static inline void lcd_disable_raster(bool wait_for_frame_done)
292{ 299{
293 u32 reg; 300 u32 reg;
301 int ret;
294 302
295 reg = lcdc_read(LCD_RASTER_CTRL_REG); 303 reg = lcdc_read(LCD_RASTER_CTRL_REG);
296 if (reg & LCD_RASTER_ENABLE) 304 if (reg & LCD_RASTER_ENABLE)
297 lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); 305 lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
306 else
307 /* return if already disabled */
308 return;
309
310 if ((wait_for_frame_done == true) && (lcd_revision == LCD_VERSION_2)) {
311 frame_done_flag = 0;
312 ret = wait_event_interruptible_timeout(frame_done_wq,
313 frame_done_flag != 0,
314 msecs_to_jiffies(50));
315 if (ret == 0)
316 pr_err("LCD Controller timed out\n");
317 }
298} 318}
299 319
300static void lcd_blit(int load_mode, struct da8xx_fb_par *par) 320static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
@@ -321,7 +341,8 @@ static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
321 } else { 341 } else {
322 reg_int = lcdc_read(LCD_INT_ENABLE_SET_REG) | 342 reg_int = lcdc_read(LCD_INT_ENABLE_SET_REG) |
323 LCD_V2_END_OF_FRAME0_INT_ENA | 343 LCD_V2_END_OF_FRAME0_INT_ENA |
324 LCD_V2_END_OF_FRAME1_INT_ENA; 344 LCD_V2_END_OF_FRAME1_INT_ENA |
345 LCD_FRAME_DONE;
325 lcdc_write(reg_int, LCD_INT_ENABLE_SET_REG); 346 lcdc_write(reg_int, LCD_INT_ENABLE_SET_REG);
326 } 347 }
327 reg_dma |= LCD_DUAL_FRAME_BUFFER_ENABLE; 348 reg_dma |= LCD_DUAL_FRAME_BUFFER_ENABLE;
@@ -499,6 +520,9 @@ static int lcd_cfg_frame_buffer(struct da8xx_fb_par *par, u32 width, u32 height,
499{ 520{
500 u32 reg; 521 u32 reg;
501 522
523 if (bpp > 16 && lcd_revision == LCD_VERSION_1)
524 return -EINVAL;
525
502 /* Set the Panel Width */ 526 /* Set the Panel Width */
503 /* Pixels per line = (PPL + 1)*16 */ 527 /* Pixels per line = (PPL + 1)*16 */
504 if (lcd_revision == LCD_VERSION_1) { 528 if (lcd_revision == LCD_VERSION_1) {
@@ -542,14 +566,19 @@ static int lcd_cfg_frame_buffer(struct da8xx_fb_par *par, u32 width, u32 height,
542 reg = lcdc_read(LCD_RASTER_CTRL_REG) & ~(1 << 8); 566 reg = lcdc_read(LCD_RASTER_CTRL_REG) & ~(1 << 8);
543 if (raster_order) 567 if (raster_order)
544 reg |= LCD_RASTER_ORDER; 568 reg |= LCD_RASTER_ORDER;
545 lcdc_write(reg, LCD_RASTER_CTRL_REG); 569
570 par->palette_sz = 16 * 2;
546 571
547 switch (bpp) { 572 switch (bpp) {
548 case 1: 573 case 1:
549 case 2: 574 case 2:
550 case 4: 575 case 4:
551 case 16: 576 case 16:
552 par->palette_sz = 16 * 2; 577 break;
578 case 24:
579 reg |= LCD_V2_TFT_24BPP_MODE;
580 case 32:
581 reg |= LCD_V2_TFT_24BPP_UNPACK;
553 break; 582 break;
554 583
555 case 8: 584 case 8:
@@ -560,9 +589,12 @@ static int lcd_cfg_frame_buffer(struct da8xx_fb_par *par, u32 width, u32 height,
560 return -EINVAL; 589 return -EINVAL;
561 } 590 }
562 591
592 lcdc_write(reg, LCD_RASTER_CTRL_REG);
593
563 return 0; 594 return 0;
564} 595}
565 596
597#define CNVT_TOHW(val, width) ((((val) << (width)) + 0x7FFF - (val)) >> 16)
566static int fb_setcolreg(unsigned regno, unsigned red, unsigned green, 598static int fb_setcolreg(unsigned regno, unsigned red, unsigned green,
567 unsigned blue, unsigned transp, 599 unsigned blue, unsigned transp,
568 struct fb_info *info) 600 struct fb_info *info)
@@ -578,13 +610,38 @@ static int fb_setcolreg(unsigned regno, unsigned red, unsigned green,
578 if (info->fix.visual == FB_VISUAL_DIRECTCOLOR) 610 if (info->fix.visual == FB_VISUAL_DIRECTCOLOR)
579 return 1; 611 return 1;
580 612
581 if (info->var.bits_per_pixel == 4) { 613 if (info->var.bits_per_pixel > 16 && lcd_revision == LCD_VERSION_1)
582 if (regno > 15) 614 return -EINVAL;
583 return 1;
584 615
585 if (info->var.grayscale) { 616 switch (info->fix.visual) {
586 pal = regno; 617 case FB_VISUAL_TRUECOLOR:
587 } else { 618 red = CNVT_TOHW(red, info->var.red.length);
619 green = CNVT_TOHW(green, info->var.green.length);
620 blue = CNVT_TOHW(blue, info->var.blue.length);
621 break;
622 case FB_VISUAL_PSEUDOCOLOR:
623 switch (info->var.bits_per_pixel) {
624 case 4:
625 if (regno > 15)
626 return -EINVAL;
627
628 if (info->var.grayscale) {
629 pal = regno;
630 } else {
631 red >>= 4;
632 green >>= 8;
633 blue >>= 12;
634
635 pal = red & 0x0f00;
636 pal |= green & 0x00f0;
637 pal |= blue & 0x000f;
638 }
639 if (regno == 0)
640 pal |= 0x2000;
641 palette[regno] = pal;
642 break;
643
644 case 8:
588 red >>= 4; 645 red >>= 4;
589 green >>= 8; 646 green >>= 8;
590 blue >>= 12; 647 blue >>= 12;
@@ -592,36 +649,36 @@ static int fb_setcolreg(unsigned regno, unsigned red, unsigned green,
592 pal = (red & 0x0f00); 649 pal = (red & 0x0f00);
593 pal |= (green & 0x00f0); 650 pal |= (green & 0x00f0);
594 pal |= (blue & 0x000f); 651 pal |= (blue & 0x000f);
595 }
596 if (regno == 0)
597 pal |= 0x2000;
598 palette[regno] = pal;
599
600 } else if (info->var.bits_per_pixel == 8) {
601 red >>= 4;
602 green >>= 8;
603 blue >>= 12;
604
605 pal = (red & 0x0f00);
606 pal |= (green & 0x00f0);
607 pal |= (blue & 0x000f);
608 652
609 if (palette[regno] != pal) { 653 if (palette[regno] != pal) {
610 update_hw = 1; 654 update_hw = 1;
611 palette[regno] = pal; 655 palette[regno] = pal;
656 }
657 break;
612 } 658 }
613 } else if ((info->var.bits_per_pixel == 16) && regno < 16) { 659 break;
614 red >>= (16 - info->var.red.length); 660 }
615 red <<= info->var.red.offset;
616 661
617 green >>= (16 - info->var.green.length); 662 /* Truecolor has hardware independent palette */
618 green <<= info->var.green.offset; 663 if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
664 u32 v;
619 665
620 blue >>= (16 - info->var.blue.length); 666 if (regno > 15)
621 blue <<= info->var.blue.offset; 667 return -EINVAL;
622 668
623 par->pseudo_palette[regno] = red | green | blue; 669 v = (red << info->var.red.offset) |
670 (green << info->var.green.offset) |
671 (blue << info->var.blue.offset);
624 672
673 switch (info->var.bits_per_pixel) {
674 case 16:
675 ((u16 *) (info->pseudo_palette))[regno] = v;
676 break;
677 case 24:
678 case 32:
679 ((u32 *) (info->pseudo_palette))[regno] = v;
680 break;
681 }
625 if (palette[0] != 0x4000) { 682 if (palette[0] != 0x4000) {
626 update_hw = 1; 683 update_hw = 1;
627 palette[0] = 0x4000; 684 palette[0] = 0x4000;
@@ -634,11 +691,12 @@ static int fb_setcolreg(unsigned regno, unsigned red, unsigned green,
634 691
635 return 0; 692 return 0;
636} 693}
694#undef CNVT_TOHW
637 695
638static void lcd_reset(struct da8xx_fb_par *par) 696static void lcd_reset(struct da8xx_fb_par *par)
639{ 697{
640 /* Disable the Raster if previously Enabled */ 698 /* Disable the Raster if previously Enabled */
641 lcd_disable_raster(); 699 lcd_disable_raster(false);
642 700
643 /* DMA has to be disabled */ 701 /* DMA has to be disabled */
644 lcdc_write(0, LCD_DMA_CTRL_REG); 702 lcdc_write(0, LCD_DMA_CTRL_REG);
@@ -734,7 +792,7 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg)
734 u32 stat = lcdc_read(LCD_MASKED_STAT_REG); 792 u32 stat = lcdc_read(LCD_MASKED_STAT_REG);
735 793
736 if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) { 794 if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) {
737 lcd_disable_raster(); 795 lcd_disable_raster(false);
738 lcdc_write(stat, LCD_MASKED_STAT_REG); 796 lcdc_write(stat, LCD_MASKED_STAT_REG);
739 lcd_enable_raster(); 797 lcd_enable_raster();
740 } else if (stat & LCD_PL_LOAD_DONE) { 798 } else if (stat & LCD_PL_LOAD_DONE) {
@@ -744,7 +802,7 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg)
744 * interrupt via the following write to the status register. If 802 * interrupt via the following write to the status register. If
745 * this is done after then one gets multiple PL done interrupts. 803 * this is done after then one gets multiple PL done interrupts.
746 */ 804 */
747 lcd_disable_raster(); 805 lcd_disable_raster(false);
748 806
749 lcdc_write(stat, LCD_MASKED_STAT_REG); 807 lcdc_write(stat, LCD_MASKED_STAT_REG);
750 808
@@ -775,6 +833,14 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg)
775 par->vsync_flag = 1; 833 par->vsync_flag = 1;
776 wake_up_interruptible(&par->vsync_wait); 834 wake_up_interruptible(&par->vsync_wait);
777 } 835 }
836
837 /* Set only when controller is disabled and at the end of
838 * active frame
839 */
840 if (stat & BIT(0)) {
841 frame_done_flag = 1;
842 wake_up_interruptible(&frame_done_wq);
843 }
778 } 844 }
779 845
780 lcdc_write(0, LCD_END_OF_INT_IND_REG); 846 lcdc_write(0, LCD_END_OF_INT_IND_REG);
@@ -789,7 +855,7 @@ static irqreturn_t lcdc_irq_handler_rev01(int irq, void *arg)
789 u32 reg_ras; 855 u32 reg_ras;
790 856
791 if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) { 857 if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) {
792 lcd_disable_raster(); 858 lcd_disable_raster(false);
793 lcdc_write(stat, LCD_STAT_REG); 859 lcdc_write(stat, LCD_STAT_REG);
794 lcd_enable_raster(); 860 lcd_enable_raster();
795 } else if (stat & LCD_PL_LOAD_DONE) { 861 } else if (stat & LCD_PL_LOAD_DONE) {
@@ -799,7 +865,7 @@ static irqreturn_t lcdc_irq_handler_rev01(int irq, void *arg)
799 * interrupt via the following write to the status register. If 865 * interrupt via the following write to the status register. If
800 * this is done after then one gets multiple PL done interrupts. 866 * this is done after then one gets multiple PL done interrupts.
801 */ 867 */
802 lcd_disable_raster(); 868 lcd_disable_raster(false);
803 869
804 lcdc_write(stat, LCD_STAT_REG); 870 lcdc_write(stat, LCD_STAT_REG);
805 871
@@ -842,6 +908,9 @@ static int fb_check_var(struct fb_var_screeninfo *var,
842{ 908{
843 int err = 0; 909 int err = 0;
844 910
911 if (var->bits_per_pixel > 16 && lcd_revision == LCD_VERSION_1)
912 return -EINVAL;
913
845 switch (var->bits_per_pixel) { 914 switch (var->bits_per_pixel) {
846 case 1: 915 case 1:
847 case 8: 916 case 8:
@@ -877,6 +946,26 @@ static int fb_check_var(struct fb_var_screeninfo *var,
877 var->transp.length = 0; 946 var->transp.length = 0;
878 var->nonstd = 0; 947 var->nonstd = 0;
879 break; 948 break;
949 case 24:
950 var->red.offset = 16;
951 var->red.length = 8;
952 var->green.offset = 8;
953 var->green.length = 8;
954 var->blue.offset = 0;
955 var->blue.length = 8;
956 var->nonstd = 0;
957 break;
958 case 32:
959 var->transp.offset = 24;
960 var->transp.length = 8;
961 var->red.offset = 16;
962 var->red.length = 8;
963 var->green.offset = 8;
964 var->green.length = 8;
965 var->blue.offset = 0;
966 var->blue.length = 8;
967 var->nonstd = 0;
968 break;
880 default: 969 default:
881 err = -EINVAL; 970 err = -EINVAL;
882 } 971 }
@@ -898,9 +987,10 @@ static int lcd_da8xx_cpufreq_transition(struct notifier_block *nb,
898 if (val == CPUFREQ_POSTCHANGE) { 987 if (val == CPUFREQ_POSTCHANGE) {
899 if (par->lcd_fck_rate != clk_get_rate(par->lcdc_clk)) { 988 if (par->lcd_fck_rate != clk_get_rate(par->lcdc_clk)) {
900 par->lcd_fck_rate = clk_get_rate(par->lcdc_clk); 989 par->lcd_fck_rate = clk_get_rate(par->lcdc_clk);
901 lcd_disable_raster(); 990 lcd_disable_raster(true);
902 lcd_calc_clk_divider(par); 991 lcd_calc_clk_divider(par);
903 lcd_enable_raster(); 992 if (par->blank == FB_BLANK_UNBLANK)
993 lcd_enable_raster();
904 } 994 }
905 } 995 }
906 996
@@ -935,7 +1025,7 @@ static int __devexit fb_remove(struct platform_device *dev)
935 if (par->panel_power_ctrl) 1025 if (par->panel_power_ctrl)
936 par->panel_power_ctrl(0); 1026 par->panel_power_ctrl(0);
937 1027
938 lcd_disable_raster(); 1028 lcd_disable_raster(true);
939 lcdc_write(0, LCD_RASTER_CTRL_REG); 1029 lcdc_write(0, LCD_RASTER_CTRL_REG);
940 1030
941 /* disable DMA */ 1031 /* disable DMA */
@@ -948,8 +1038,8 @@ static int __devexit fb_remove(struct platform_device *dev)
948 dma_free_coherent(NULL, par->vram_size, par->vram_virt, 1038 dma_free_coherent(NULL, par->vram_size, par->vram_virt,
949 par->vram_phys); 1039 par->vram_phys);
950 free_irq(par->irq, par); 1040 free_irq(par->irq, par);
951 clk_disable(par->lcdc_clk); 1041 pm_runtime_put_sync(&dev->dev);
952 clk_put(par->lcdc_clk); 1042 pm_runtime_disable(&dev->dev);
953 framebuffer_release(info); 1043 framebuffer_release(info);
954 iounmap(da8xx_fb_reg_base); 1044 iounmap(da8xx_fb_reg_base);
955 release_mem_region(lcdc_regs->start, resource_size(lcdc_regs)); 1045 release_mem_region(lcdc_regs->start, resource_size(lcdc_regs));
@@ -1051,7 +1141,7 @@ static int cfb_blank(int blank, struct fb_info *info)
1051 if (par->panel_power_ctrl) 1141 if (par->panel_power_ctrl)
1052 par->panel_power_ctrl(0); 1142 par->panel_power_ctrl(0);
1053 1143
1054 lcd_disable_raster(); 1144 lcd_disable_raster(true);
1055 break; 1145 break;
1056 default: 1146 default:
1057 ret = -EINVAL; 1147 ret = -EINVAL;
@@ -1183,9 +1273,9 @@ static int __devinit fb_probe(struct platform_device *device)
1183 ret = -ENODEV; 1273 ret = -ENODEV;
1184 goto err_ioremap; 1274 goto err_ioremap;
1185 } 1275 }
1186 ret = clk_enable(fb_clk); 1276
1187 if (ret) 1277 pm_runtime_enable(&device->dev);
1188 goto err_clk_put; 1278 pm_runtime_get_sync(&device->dev);
1189 1279
1190 /* Determine LCD IP Version */ 1280 /* Determine LCD IP Version */
1191 switch (lcdc_read(LCD_PID_REG)) { 1281 switch (lcdc_read(LCD_PID_REG)) {
@@ -1213,7 +1303,7 @@ static int __devinit fb_probe(struct platform_device *device)
1213 if (i == ARRAY_SIZE(known_lcd_panels)) { 1303 if (i == ARRAY_SIZE(known_lcd_panels)) {
1214 dev_err(&device->dev, "GLCD: No valid panel found\n"); 1304 dev_err(&device->dev, "GLCD: No valid panel found\n");
1215 ret = -ENODEV; 1305 ret = -ENODEV;
1216 goto err_clk_disable; 1306 goto err_pm_runtime_disable;
1217 } else 1307 } else
1218 dev_info(&device->dev, "GLCD: Found %s panel\n", 1308 dev_info(&device->dev, "GLCD: Found %s panel\n",
1219 fb_pdata->type); 1309 fb_pdata->type);
@@ -1225,7 +1315,7 @@ static int __devinit fb_probe(struct platform_device *device)
1225 if (!da8xx_fb_info) { 1315 if (!da8xx_fb_info) {
1226 dev_dbg(&device->dev, "Memory allocation failed for fb_info\n"); 1316 dev_dbg(&device->dev, "Memory allocation failed for fb_info\n");
1227 ret = -ENOMEM; 1317 ret = -ENOMEM;
1228 goto err_clk_disable; 1318 goto err_pm_runtime_disable;
1229 } 1319 }
1230 1320
1231 par = da8xx_fb_info->par; 1321 par = da8xx_fb_info->par;
@@ -1356,8 +1446,10 @@ static int __devinit fb_probe(struct platform_device *device)
1356 1446
1357 if (lcd_revision == LCD_VERSION_1) 1447 if (lcd_revision == LCD_VERSION_1)
1358 lcdc_irq_handler = lcdc_irq_handler_rev01; 1448 lcdc_irq_handler = lcdc_irq_handler_rev01;
1359 else 1449 else {
1450 init_waitqueue_head(&frame_done_wq);
1360 lcdc_irq_handler = lcdc_irq_handler_rev02; 1451 lcdc_irq_handler = lcdc_irq_handler_rev02;
1452 }
1361 1453
1362 ret = request_irq(par->irq, lcdc_irq_handler, 0, 1454 ret = request_irq(par->irq, lcdc_irq_handler, 0,
1363 DRIVER_NAME, par); 1455 DRIVER_NAME, par);
@@ -1385,11 +1477,9 @@ err_release_fb_mem:
1385err_release_fb: 1477err_release_fb:
1386 framebuffer_release(da8xx_fb_info); 1478 framebuffer_release(da8xx_fb_info);
1387 1479
1388err_clk_disable: 1480err_pm_runtime_disable:
1389 clk_disable(fb_clk); 1481 pm_runtime_put_sync(&device->dev);
1390 1482 pm_runtime_disable(&device->dev);
1391err_clk_put:
1392 clk_put(fb_clk);
1393 1483
1394err_ioremap: 1484err_ioremap:
1395 iounmap(da8xx_fb_reg_base); 1485 iounmap(da8xx_fb_reg_base);
@@ -1401,6 +1491,69 @@ err_request_mem:
1401} 1491}
1402 1492
1403#ifdef CONFIG_PM 1493#ifdef CONFIG_PM
1494struct lcdc_context {
1495 u32 clk_enable;
1496 u32 ctrl;
1497 u32 dma_ctrl;
1498 u32 raster_timing_0;
1499 u32 raster_timing_1;
1500 u32 raster_timing_2;
1501 u32 int_enable_set;
1502 u32 dma_frm_buf_base_addr_0;
1503 u32 dma_frm_buf_ceiling_addr_0;
1504 u32 dma_frm_buf_base_addr_1;
1505 u32 dma_frm_buf_ceiling_addr_1;
1506 u32 raster_ctrl;
1507} reg_context;
1508
1509static void lcd_context_save(void)
1510{
1511 if (lcd_revision == LCD_VERSION_2) {
1512 reg_context.clk_enable = lcdc_read(LCD_CLK_ENABLE_REG);
1513 reg_context.int_enable_set = lcdc_read(LCD_INT_ENABLE_SET_REG);
1514 }
1515
1516 reg_context.ctrl = lcdc_read(LCD_CTRL_REG);
1517 reg_context.dma_ctrl = lcdc_read(LCD_DMA_CTRL_REG);
1518 reg_context.raster_timing_0 = lcdc_read(LCD_RASTER_TIMING_0_REG);
1519 reg_context.raster_timing_1 = lcdc_read(LCD_RASTER_TIMING_1_REG);
1520 reg_context.raster_timing_2 = lcdc_read(LCD_RASTER_TIMING_2_REG);
1521 reg_context.dma_frm_buf_base_addr_0 =
1522 lcdc_read(LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
1523 reg_context.dma_frm_buf_ceiling_addr_0 =
1524 lcdc_read(LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
1525 reg_context.dma_frm_buf_base_addr_1 =
1526 lcdc_read(LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
1527 reg_context.dma_frm_buf_ceiling_addr_1 =
1528 lcdc_read(LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
1529 reg_context.raster_ctrl = lcdc_read(LCD_RASTER_CTRL_REG);
1530 return;
1531}
1532
1533static void lcd_context_restore(void)
1534{
1535 if (lcd_revision == LCD_VERSION_2) {
1536 lcdc_write(reg_context.clk_enable, LCD_CLK_ENABLE_REG);
1537 lcdc_write(reg_context.int_enable_set, LCD_INT_ENABLE_SET_REG);
1538 }
1539
1540 lcdc_write(reg_context.ctrl, LCD_CTRL_REG);
1541 lcdc_write(reg_context.dma_ctrl, LCD_DMA_CTRL_REG);
1542 lcdc_write(reg_context.raster_timing_0, LCD_RASTER_TIMING_0_REG);
1543 lcdc_write(reg_context.raster_timing_1, LCD_RASTER_TIMING_1_REG);
1544 lcdc_write(reg_context.raster_timing_2, LCD_RASTER_TIMING_2_REG);
1545 lcdc_write(reg_context.dma_frm_buf_base_addr_0,
1546 LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
1547 lcdc_write(reg_context.dma_frm_buf_ceiling_addr_0,
1548 LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
1549 lcdc_write(reg_context.dma_frm_buf_base_addr_1,
1550 LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
1551 lcdc_write(reg_context.dma_frm_buf_ceiling_addr_1,
1552 LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
1553 lcdc_write(reg_context.raster_ctrl, LCD_RASTER_CTRL_REG);
1554 return;
1555}
1556
1404static int fb_suspend(struct platform_device *dev, pm_message_t state) 1557static int fb_suspend(struct platform_device *dev, pm_message_t state)
1405{ 1558{
1406 struct fb_info *info = platform_get_drvdata(dev); 1559 struct fb_info *info = platform_get_drvdata(dev);
@@ -1411,8 +1564,9 @@ static int fb_suspend(struct platform_device *dev, pm_message_t state)
1411 par->panel_power_ctrl(0); 1564 par->panel_power_ctrl(0);
1412 1565
1413 fb_set_suspend(info, 1); 1566 fb_set_suspend(info, 1);
1414 lcd_disable_raster(); 1567 lcd_disable_raster(true);
1415 clk_disable(par->lcdc_clk); 1568 lcd_context_save();
1569 pm_runtime_put_sync(&dev->dev);
1416 console_unlock(); 1570 console_unlock();
1417 1571
1418 return 0; 1572 return 0;
@@ -1423,11 +1577,14 @@ static int fb_resume(struct platform_device *dev)
1423 struct da8xx_fb_par *par = info->par; 1577 struct da8xx_fb_par *par = info->par;
1424 1578
1425 console_lock(); 1579 console_lock();
1426 clk_enable(par->lcdc_clk); 1580 pm_runtime_get_sync(&dev->dev);
1427 lcd_enable_raster(); 1581 lcd_context_restore();
1582 if (par->blank == FB_BLANK_UNBLANK) {
1583 lcd_enable_raster();
1428 1584
1429 if (par->panel_power_ctrl) 1585 if (par->panel_power_ctrl)
1430 par->panel_power_ctrl(1); 1586 par->panel_power_ctrl(1);
1587 }
1431 1588
1432 fb_set_suspend(info, 0); 1589 fb_set_suspend(info, 0);
1433 console_unlock(); 1590 console_unlock();
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index f2c092da84b0..755ef3e65caf 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -529,7 +529,8 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
529 * any of the framebuffer registers. 529 * any of the framebuffer registers.
530 */ 530 */
531 fbi->res = res; 531 fbi->res = res;
532 fbi->mmio_base = ioremap(res->start, resource_size(res)); 532 fbi->mmio_base = devm_ioremap(&pdev->dev, res->start,
533 resource_size(res));
533 if (!fbi->mmio_base) { 534 if (!fbi->mmio_base) {
534 err = -ENXIO; 535 err = -ENXIO;
535 goto failed_resource; 536 goto failed_resource;
@@ -553,20 +554,20 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
553 if (err == 0) { 554 if (err == 0) {
554 dev_err(info->dev, "No suitable video mode found\n"); 555 dev_err(info->dev, "No suitable video mode found\n");
555 err = -EINVAL; 556 err = -EINVAL;
556 goto failed_mode; 557 goto failed_resource;
557 } 558 }
558 559
559 if (mach_info->setup) { 560 if (mach_info->setup) {
560 err = mach_info->setup(pdev); 561 err = mach_info->setup(pdev);
561 if (err) 562 if (err)
562 goto failed_mode; 563 goto failed_resource;
563 } 564 }
564 565
565 err = ep93xxfb_check_var(&info->var, info); 566 err = ep93xxfb_check_var(&info->var, info);
566 if (err) 567 if (err)
567 goto failed_check; 568 goto failed_check;
568 569
569 fbi->clk = clk_get(info->dev, NULL); 570 fbi->clk = devm_clk_get(&pdev->dev, NULL);
570 if (IS_ERR(fbi->clk)) { 571 if (IS_ERR(fbi->clk)) {
571 err = PTR_ERR(fbi->clk); 572 err = PTR_ERR(fbi->clk);
572 fbi->clk = NULL; 573 fbi->clk = NULL;
@@ -578,19 +579,15 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
578 579
579 err = register_framebuffer(info); 580 err = register_framebuffer(info);
580 if (err) 581 if (err)
581 goto failed; 582 goto failed_check;
582 583
583 dev_info(info->dev, "registered. Mode = %dx%d-%d\n", 584 dev_info(info->dev, "registered. Mode = %dx%d-%d\n",
584 info->var.xres, info->var.yres, info->var.bits_per_pixel); 585 info->var.xres, info->var.yres, info->var.bits_per_pixel);
585 return 0; 586 return 0;
586 587
587failed:
588 clk_put(fbi->clk);
589failed_check: 588failed_check:
590 if (fbi->mach_info->teardown) 589 if (fbi->mach_info->teardown)
591 fbi->mach_info->teardown(pdev); 590 fbi->mach_info->teardown(pdev);
592failed_mode:
593 iounmap(fbi->mmio_base);
594failed_resource: 591failed_resource:
595 ep93xxfb_dealloc_videomem(info); 592 ep93xxfb_dealloc_videomem(info);
596failed_videomem: 593failed_videomem:
@@ -609,8 +606,6 @@ static int __devexit ep93xxfb_remove(struct platform_device *pdev)
609 606
610 unregister_framebuffer(info); 607 unregister_framebuffer(info);
611 clk_disable(fbi->clk); 608 clk_disable(fbi->clk);
612 clk_put(fbi->clk);
613 iounmap(fbi->mmio_base);
614 ep93xxfb_dealloc_videomem(info); 609 ep93xxfb_dealloc_videomem(info);
615 fb_dealloc_cmap(&info->cmap); 610 fb_dealloc_cmap(&info->cmap);
616 611
diff --git a/drivers/video/exynos/exynos_dp_core.c b/drivers/video/exynos/exynos_dp_core.c
index c6c016a506ce..d55470e75412 100644
--- a/drivers/video/exynos/exynos_dp_core.c
+++ b/drivers/video/exynos/exynos_dp_core.c
@@ -29,6 +29,9 @@ static int exynos_dp_init_dp(struct exynos_dp_device *dp)
29 29
30 exynos_dp_swreset(dp); 30 exynos_dp_swreset(dp);
31 31
32 exynos_dp_init_analog_param(dp);
33 exynos_dp_init_interrupt(dp);
34
32 /* SW defined function Normal operation */ 35 /* SW defined function Normal operation */
33 exynos_dp_enable_sw_function(dp); 36 exynos_dp_enable_sw_function(dp);
34 37
@@ -260,7 +263,7 @@ static void exynos_dp_set_lane_lane_pre_emphasis(struct exynos_dp_device *dp,
260 263
261static void exynos_dp_link_start(struct exynos_dp_device *dp) 264static void exynos_dp_link_start(struct exynos_dp_device *dp)
262{ 265{
263 u8 buf[5]; 266 u8 buf[4];
264 int lane; 267 int lane;
265 int lane_count; 268 int lane_count;
266 269
@@ -295,10 +298,10 @@ static void exynos_dp_link_start(struct exynos_dp_device *dp)
295 exynos_dp_set_training_pattern(dp, TRAINING_PTN1); 298 exynos_dp_set_training_pattern(dp, TRAINING_PTN1);
296 299
297 /* Set RX training pattern */ 300 /* Set RX training pattern */
298 buf[0] = DPCD_SCRAMBLING_DISABLED |
299 DPCD_TRAINING_PATTERN_1;
300 exynos_dp_write_byte_to_dpcd(dp, 301 exynos_dp_write_byte_to_dpcd(dp,
301 DPCD_ADDR_TRAINING_PATTERN_SET, buf[0]); 302 DPCD_ADDR_TRAINING_PATTERN_SET,
303 DPCD_SCRAMBLING_DISABLED |
304 DPCD_TRAINING_PATTERN_1);
302 305
303 for (lane = 0; lane < lane_count; lane++) 306 for (lane = 0; lane < lane_count; lane++)
304 buf[lane] = DPCD_PRE_EMPHASIS_PATTERN2_LEVEL0 | 307 buf[lane] = DPCD_PRE_EMPHASIS_PATTERN2_LEVEL0 |
@@ -308,7 +311,7 @@ static void exynos_dp_link_start(struct exynos_dp_device *dp)
308 lane_count, buf); 311 lane_count, buf);
309} 312}
310 313
311static unsigned char exynos_dp_get_lane_status(u8 link_status[6], int lane) 314static unsigned char exynos_dp_get_lane_status(u8 link_status[2], int lane)
312{ 315{
313 int shift = (lane & 1) * 4; 316 int shift = (lane & 1) * 4;
314 u8 link_value = link_status[lane>>1]; 317 u8 link_value = link_status[lane>>1];
@@ -316,7 +319,7 @@ static unsigned char exynos_dp_get_lane_status(u8 link_status[6], int lane)
316 return (link_value >> shift) & 0xf; 319 return (link_value >> shift) & 0xf;
317} 320}
318 321
319static int exynos_dp_clock_recovery_ok(u8 link_status[6], int lane_count) 322static int exynos_dp_clock_recovery_ok(u8 link_status[2], int lane_count)
320{ 323{
321 int lane; 324 int lane;
322 u8 lane_status; 325 u8 lane_status;
@@ -329,22 +332,23 @@ static int exynos_dp_clock_recovery_ok(u8 link_status[6], int lane_count)
329 return 0; 332 return 0;
330} 333}
331 334
332static int exynos_dp_channel_eq_ok(u8 link_status[6], int lane_count) 335static int exynos_dp_channel_eq_ok(u8 link_align[3], int lane_count)
333{ 336{
334 int lane; 337 int lane;
335 u8 lane_align; 338 u8 lane_align;
336 u8 lane_status; 339 u8 lane_status;
337 340
338 lane_align = link_status[2]; 341 lane_align = link_align[2];
339 if ((lane_align & DPCD_INTERLANE_ALIGN_DONE) == 0) 342 if ((lane_align & DPCD_INTERLANE_ALIGN_DONE) == 0)
340 return -EINVAL; 343 return -EINVAL;
341 344
342 for (lane = 0; lane < lane_count; lane++) { 345 for (lane = 0; lane < lane_count; lane++) {
343 lane_status = exynos_dp_get_lane_status(link_status, lane); 346 lane_status = exynos_dp_get_lane_status(link_align, lane);
344 lane_status &= DPCD_CHANNEL_EQ_BITS; 347 lane_status &= DPCD_CHANNEL_EQ_BITS;
345 if (lane_status != DPCD_CHANNEL_EQ_BITS) 348 if (lane_status != DPCD_CHANNEL_EQ_BITS)
346 return -EINVAL; 349 return -EINVAL;
347 } 350 }
351
348 return 0; 352 return 0;
349} 353}
350 354
@@ -417,69 +421,17 @@ static unsigned int exynos_dp_get_lane_link_training(
417 421
418static void exynos_dp_reduce_link_rate(struct exynos_dp_device *dp) 422static void exynos_dp_reduce_link_rate(struct exynos_dp_device *dp)
419{ 423{
420 if (dp->link_train.link_rate == LINK_RATE_2_70GBPS) { 424 exynos_dp_training_pattern_dis(dp);
421 /* set to reduced bit rate */ 425 exynos_dp_set_enhanced_mode(dp);
422 dp->link_train.link_rate = LINK_RATE_1_62GBPS;
423 dev_err(dp->dev, "set to bandwidth %.2x\n",
424 dp->link_train.link_rate);
425 dp->link_train.lt_state = START;
426 } else {
427 exynos_dp_training_pattern_dis(dp);
428 /* set enhanced mode if available */
429 exynos_dp_set_enhanced_mode(dp);
430 dp->link_train.lt_state = FAILED;
431 }
432}
433 426
434static void exynos_dp_get_adjust_train(struct exynos_dp_device *dp, 427 dp->link_train.lt_state = FAILED;
435 u8 adjust_request[2])
436{
437 int lane;
438 int lane_count;
439 u8 voltage_swing;
440 u8 pre_emphasis;
441 u8 training_lane;
442
443 lane_count = dp->link_train.lane_count;
444 for (lane = 0; lane < lane_count; lane++) {
445 voltage_swing = exynos_dp_get_adjust_request_voltage(
446 adjust_request, lane);
447 pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis(
448 adjust_request, lane);
449 training_lane = DPCD_VOLTAGE_SWING_SET(voltage_swing) |
450 DPCD_PRE_EMPHASIS_SET(pre_emphasis);
451
452 if (voltage_swing == VOLTAGE_LEVEL_3 ||
453 pre_emphasis == PRE_EMPHASIS_LEVEL_3) {
454 training_lane |= DPCD_MAX_SWING_REACHED;
455 training_lane |= DPCD_MAX_PRE_EMPHASIS_REACHED;
456 }
457 dp->link_train.training_lane[lane] = training_lane;
458 }
459}
460
461static int exynos_dp_check_max_cr_loop(struct exynos_dp_device *dp,
462 u8 voltage_swing)
463{
464 int lane;
465 int lane_count;
466
467 lane_count = dp->link_train.lane_count;
468 for (lane = 0; lane < lane_count; lane++) {
469 if (voltage_swing == VOLTAGE_LEVEL_3 ||
470 dp->link_train.cr_loop[lane] == MAX_CR_LOOP)
471 return -EINVAL;
472 }
473 return 0;
474} 428}
475 429
476static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp) 430static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
477{ 431{
478 u8 data; 432 u8 link_status[2];
479 u8 link_status[6];
480 int lane; 433 int lane;
481 int lane_count; 434 int lane_count;
482 u8 buf[5];
483 435
484 u8 adjust_request[2]; 436 u8 adjust_request[2];
485 u8 voltage_swing; 437 u8 voltage_swing;
@@ -488,100 +440,154 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
488 440
489 usleep_range(100, 101); 441 usleep_range(100, 101);
490 442
491 exynos_dp_read_bytes_from_dpcd(dp, DPCD_ADDR_LANE0_1_STATUS,
492 6, link_status);
493 lane_count = dp->link_train.lane_count; 443 lane_count = dp->link_train.lane_count;
494 444
445 exynos_dp_read_bytes_from_dpcd(dp, DPCD_ADDR_LANE0_1_STATUS,
446 2, link_status);
447
495 if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) { 448 if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) {
496 /* set training pattern 2 for EQ */ 449 /* set training pattern 2 for EQ */
497 exynos_dp_set_training_pattern(dp, TRAINING_PTN2); 450 exynos_dp_set_training_pattern(dp, TRAINING_PTN2);
498 451
499 adjust_request[0] = link_status[4]; 452 for (lane = 0; lane < lane_count; lane++) {
500 adjust_request[1] = link_status[5]; 453 exynos_dp_read_bytes_from_dpcd(dp,
454 DPCD_ADDR_ADJUST_REQUEST_LANE0_1,
455 2, adjust_request);
456 voltage_swing = exynos_dp_get_adjust_request_voltage(
457 adjust_request, lane);
458 pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis(
459 adjust_request, lane);
460 training_lane = DPCD_VOLTAGE_SWING_SET(voltage_swing) |
461 DPCD_PRE_EMPHASIS_SET(pre_emphasis);
501 462
502 exynos_dp_get_adjust_train(dp, adjust_request); 463 if (voltage_swing == VOLTAGE_LEVEL_3)
464 training_lane |= DPCD_MAX_SWING_REACHED;
465 if (pre_emphasis == PRE_EMPHASIS_LEVEL_3)
466 training_lane |= DPCD_MAX_PRE_EMPHASIS_REACHED;
503 467
504 buf[0] = DPCD_SCRAMBLING_DISABLED | 468 dp->link_train.training_lane[lane] = training_lane;
505 DPCD_TRAINING_PATTERN_2;
506 exynos_dp_write_byte_to_dpcd(dp,
507 DPCD_ADDR_TRAINING_PATTERN_SET,
508 buf[0]);
509 469
510 for (lane = 0; lane < lane_count; lane++) {
511 exynos_dp_set_lane_link_training(dp, 470 exynos_dp_set_lane_link_training(dp,
512 dp->link_train.training_lane[lane], 471 dp->link_train.training_lane[lane],
513 lane); 472 lane);
514 buf[lane] = dp->link_train.training_lane[lane];
515 exynos_dp_write_byte_to_dpcd(dp,
516 DPCD_ADDR_TRAINING_LANE0_SET + lane,
517 buf[lane]);
518 } 473 }
519 dp->link_train.lt_state = EQUALIZER_TRAINING;
520 } else {
521 exynos_dp_read_byte_from_dpcd(dp,
522 DPCD_ADDR_ADJUST_REQUEST_LANE0_1,
523 &data);
524 adjust_request[0] = data;
525 474
526 exynos_dp_read_byte_from_dpcd(dp, 475 exynos_dp_write_byte_to_dpcd(dp,
527 DPCD_ADDR_ADJUST_REQUEST_LANE2_3, 476 DPCD_ADDR_TRAINING_PATTERN_SET,
528 &data); 477 DPCD_SCRAMBLING_DISABLED |
529 adjust_request[1] = data; 478 DPCD_TRAINING_PATTERN_2);
479
480 exynos_dp_write_bytes_to_dpcd(dp,
481 DPCD_ADDR_TRAINING_LANE0_SET,
482 lane_count,
483 dp->link_train.training_lane);
530 484
485 dev_info(dp->dev, "Link Training Clock Recovery success\n");
486 dp->link_train.lt_state = EQUALIZER_TRAINING;
487 } else {
531 for (lane = 0; lane < lane_count; lane++) { 488 for (lane = 0; lane < lane_count; lane++) {
532 training_lane = exynos_dp_get_lane_link_training( 489 training_lane = exynos_dp_get_lane_link_training(
533 dp, lane); 490 dp, lane);
491 exynos_dp_read_bytes_from_dpcd(dp,
492 DPCD_ADDR_ADJUST_REQUEST_LANE0_1,
493 2, adjust_request);
534 voltage_swing = exynos_dp_get_adjust_request_voltage( 494 voltage_swing = exynos_dp_get_adjust_request_voltage(
535 adjust_request, lane); 495 adjust_request, lane);
536 pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis( 496 pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis(
537 adjust_request, lane); 497 adjust_request, lane);
538 if ((DPCD_VOLTAGE_SWING_GET(training_lane) == voltage_swing) &&
539 (DPCD_PRE_EMPHASIS_GET(training_lane) == pre_emphasis))
540 dp->link_train.cr_loop[lane]++;
541 dp->link_train.training_lane[lane] = training_lane;
542 }
543 498
544 if (exynos_dp_check_max_cr_loop(dp, voltage_swing) != 0) { 499 if (voltage_swing == VOLTAGE_LEVEL_3 ||
545 exynos_dp_reduce_link_rate(dp); 500 pre_emphasis == PRE_EMPHASIS_LEVEL_3) {
546 } else { 501 dev_err(dp->dev, "voltage or pre emphasis reached max level\n");
547 exynos_dp_get_adjust_train(dp, adjust_request); 502 goto reduce_link_rate;
503 }
548 504
549 for (lane = 0; lane < lane_count; lane++) { 505 if ((DPCD_VOLTAGE_SWING_GET(training_lane) ==
550 exynos_dp_set_lane_link_training(dp, 506 voltage_swing) &&
551 dp->link_train.training_lane[lane], 507 (DPCD_PRE_EMPHASIS_GET(training_lane) ==
552 lane); 508 pre_emphasis)) {
553 buf[lane] = dp->link_train.training_lane[lane]; 509 dp->link_train.cr_loop[lane]++;
554 exynos_dp_write_byte_to_dpcd(dp, 510 if (dp->link_train.cr_loop[lane] == MAX_CR_LOOP) {
555 DPCD_ADDR_TRAINING_LANE0_SET + lane, 511 dev_err(dp->dev, "CR Max loop\n");
556 buf[lane]); 512 goto reduce_link_rate;
513 }
557 } 514 }
515
516 training_lane = DPCD_VOLTAGE_SWING_SET(voltage_swing) |
517 DPCD_PRE_EMPHASIS_SET(pre_emphasis);
518
519 if (voltage_swing == VOLTAGE_LEVEL_3)
520 training_lane |= DPCD_MAX_SWING_REACHED;
521 if (pre_emphasis == PRE_EMPHASIS_LEVEL_3)
522 training_lane |= DPCD_MAX_PRE_EMPHASIS_REACHED;
523
524 dp->link_train.training_lane[lane] = training_lane;
525
526 exynos_dp_set_lane_link_training(dp,
527 dp->link_train.training_lane[lane], lane);
558 } 528 }
529
530 exynos_dp_write_bytes_to_dpcd(dp,
531 DPCD_ADDR_TRAINING_LANE0_SET,
532 lane_count,
533 dp->link_train.training_lane);
559 } 534 }
560 535
561 return 0; 536 return 0;
537
538reduce_link_rate:
539 exynos_dp_reduce_link_rate(dp);
540 return -EIO;
562} 541}
563 542
564static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp) 543static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
565{ 544{
566 u8 link_status[6]; 545 u8 link_status[2];
546 u8 link_align[3];
567 int lane; 547 int lane;
568 int lane_count; 548 int lane_count;
569 u8 buf[5];
570 u32 reg; 549 u32 reg;
571 550
572 u8 adjust_request[2]; 551 u8 adjust_request[2];
552 u8 voltage_swing;
553 u8 pre_emphasis;
554 u8 training_lane;
573 555
574 usleep_range(400, 401); 556 usleep_range(400, 401);
575 557
576 exynos_dp_read_bytes_from_dpcd(dp, DPCD_ADDR_LANE0_1_STATUS,
577 6, link_status);
578 lane_count = dp->link_train.lane_count; 558 lane_count = dp->link_train.lane_count;
579 559
560 exynos_dp_read_bytes_from_dpcd(dp, DPCD_ADDR_LANE0_1_STATUS,
561 2, link_status);
562
580 if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) { 563 if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) {
581 adjust_request[0] = link_status[4]; 564 link_align[0] = link_status[0];
582 adjust_request[1] = link_status[5]; 565 link_align[1] = link_status[1];
583 566
584 if (exynos_dp_channel_eq_ok(link_status, lane_count) == 0) { 567 exynos_dp_read_byte_from_dpcd(dp,
568 DPCD_ADDR_LANE_ALIGN_STATUS_UPDATED,
569 &link_align[2]);
570
571 for (lane = 0; lane < lane_count; lane++) {
572 exynos_dp_read_bytes_from_dpcd(dp,
573 DPCD_ADDR_ADJUST_REQUEST_LANE0_1,
574 2, adjust_request);
575 voltage_swing = exynos_dp_get_adjust_request_voltage(
576 adjust_request, lane);
577 pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis(
578 adjust_request, lane);
579 training_lane = DPCD_VOLTAGE_SWING_SET(voltage_swing) |
580 DPCD_PRE_EMPHASIS_SET(pre_emphasis);
581
582 if (voltage_swing == VOLTAGE_LEVEL_3)
583 training_lane |= DPCD_MAX_SWING_REACHED;
584 if (pre_emphasis == PRE_EMPHASIS_LEVEL_3)
585 training_lane |= DPCD_MAX_PRE_EMPHASIS_REACHED;
586
587 dp->link_train.training_lane[lane] = training_lane;
588 }
589
590 if (exynos_dp_channel_eq_ok(link_align, lane_count) == 0) {
585 /* traing pattern Set to Normal */ 591 /* traing pattern Set to Normal */
586 exynos_dp_training_pattern_dis(dp); 592 exynos_dp_training_pattern_dis(dp);
587 593
@@ -596,39 +602,42 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
596 dp->link_train.lane_count = reg; 602 dp->link_train.lane_count = reg;
597 dev_dbg(dp->dev, "final lane count = %.2x\n", 603 dev_dbg(dp->dev, "final lane count = %.2x\n",
598 dp->link_train.lane_count); 604 dp->link_train.lane_count);
605
599 /* set enhanced mode if available */ 606 /* set enhanced mode if available */
600 exynos_dp_set_enhanced_mode(dp); 607 exynos_dp_set_enhanced_mode(dp);
601
602 dp->link_train.lt_state = FINISHED; 608 dp->link_train.lt_state = FINISHED;
603 } else { 609 } else {
604 /* not all locked */ 610 /* not all locked */
605 dp->link_train.eq_loop++; 611 dp->link_train.eq_loop++;
606 612
607 if (dp->link_train.eq_loop > MAX_EQ_LOOP) { 613 if (dp->link_train.eq_loop > MAX_EQ_LOOP) {
608 exynos_dp_reduce_link_rate(dp); 614 dev_err(dp->dev, "EQ Max loop\n");
609 } else { 615 goto reduce_link_rate;
610 exynos_dp_get_adjust_train(dp, adjust_request);
611
612 for (lane = 0; lane < lane_count; lane++) {
613 exynos_dp_set_lane_link_training(dp,
614 dp->link_train.training_lane[lane],
615 lane);
616 buf[lane] = dp->link_train.training_lane[lane];
617 exynos_dp_write_byte_to_dpcd(dp,
618 DPCD_ADDR_TRAINING_LANE0_SET + lane,
619 buf[lane]);
620 }
621 } 616 }
617
618 for (lane = 0; lane < lane_count; lane++)
619 exynos_dp_set_lane_link_training(dp,
620 dp->link_train.training_lane[lane],
621 lane);
622
623 exynos_dp_write_bytes_to_dpcd(dp,
624 DPCD_ADDR_TRAINING_LANE0_SET,
625 lane_count,
626 dp->link_train.training_lane);
622 } 627 }
623 } else { 628 } else {
624 exynos_dp_reduce_link_rate(dp); 629 goto reduce_link_rate;
625 } 630 }
626 631
627 return 0; 632 return 0;
633
634reduce_link_rate:
635 exynos_dp_reduce_link_rate(dp);
636 return -EIO;
628} 637}
629 638
630static void exynos_dp_get_max_rx_bandwidth(struct exynos_dp_device *dp, 639static void exynos_dp_get_max_rx_bandwidth(struct exynos_dp_device *dp,
631 u8 *bandwidth) 640 u8 *bandwidth)
632{ 641{
633 u8 data; 642 u8 data;
634 643
@@ -641,7 +650,7 @@ static void exynos_dp_get_max_rx_bandwidth(struct exynos_dp_device *dp,
641} 650}
642 651
643static void exynos_dp_get_max_rx_lane_count(struct exynos_dp_device *dp, 652static void exynos_dp_get_max_rx_lane_count(struct exynos_dp_device *dp,
644 u8 *lane_count) 653 u8 *lane_count)
645{ 654{
646 u8 data; 655 u8 data;
647 656
@@ -693,13 +702,7 @@ static void exynos_dp_init_training(struct exynos_dp_device *dp,
693static int exynos_dp_sw_link_training(struct exynos_dp_device *dp) 702static int exynos_dp_sw_link_training(struct exynos_dp_device *dp)
694{ 703{
695 int retval = 0; 704 int retval = 0;
696 int training_finished; 705 int training_finished = 0;
697
698 /* Turn off unnecessary lane */
699 if (dp->link_train.lane_count == 1)
700 exynos_dp_set_analog_power_down(dp, CH1_BLOCK, 1);
701
702 training_finished = 0;
703 706
704 dp->link_train.lt_state = START; 707 dp->link_train.lt_state = START;
705 708
@@ -710,10 +713,14 @@ static int exynos_dp_sw_link_training(struct exynos_dp_device *dp)
710 exynos_dp_link_start(dp); 713 exynos_dp_link_start(dp);
711 break; 714 break;
712 case CLOCK_RECOVERY: 715 case CLOCK_RECOVERY:
713 exynos_dp_process_clock_recovery(dp); 716 retval = exynos_dp_process_clock_recovery(dp);
717 if (retval)
718 dev_err(dp->dev, "LT CR failed!\n");
714 break; 719 break;
715 case EQUALIZER_TRAINING: 720 case EQUALIZER_TRAINING:
716 exynos_dp_process_equalizer_training(dp); 721 retval = exynos_dp_process_equalizer_training(dp);
722 if (retval)
723 dev_err(dp->dev, "LT EQ failed!\n");
717 break; 724 break;
718 case FINISHED: 725 case FINISHED:
719 training_finished = 1; 726 training_finished = 1;
@@ -872,40 +879,33 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
872 879
873 dp->dev = &pdev->dev; 880 dp->dev = &pdev->dev;
874 881
875 dp->clock = clk_get(&pdev->dev, "dp"); 882 dp->clock = devm_clk_get(&pdev->dev, "dp");
876 if (IS_ERR(dp->clock)) { 883 if (IS_ERR(dp->clock)) {
877 dev_err(&pdev->dev, "failed to get clock\n"); 884 dev_err(&pdev->dev, "failed to get clock\n");
878 return PTR_ERR(dp->clock); 885 return PTR_ERR(dp->clock);
879 } 886 }
880 887
881 clk_enable(dp->clock); 888 clk_prepare_enable(dp->clock);
882 889
883 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 890 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
884 if (!res) {
885 dev_err(&pdev->dev, "failed to get registers\n");
886 ret = -EINVAL;
887 goto err_clock;
888 }
889 891
890 dp->reg_base = devm_request_and_ioremap(&pdev->dev, res); 892 dp->reg_base = devm_request_and_ioremap(&pdev->dev, res);
891 if (!dp->reg_base) { 893 if (!dp->reg_base) {
892 dev_err(&pdev->dev, "failed to ioremap\n"); 894 dev_err(&pdev->dev, "failed to ioremap\n");
893 ret = -ENOMEM; 895 return -ENOMEM;
894 goto err_clock;
895 } 896 }
896 897
897 dp->irq = platform_get_irq(pdev, 0); 898 dp->irq = platform_get_irq(pdev, 0);
898 if (!dp->irq) { 899 if (!dp->irq) {
899 dev_err(&pdev->dev, "failed to get irq\n"); 900 dev_err(&pdev->dev, "failed to get irq\n");
900 ret = -ENODEV; 901 return -ENODEV;
901 goto err_clock;
902 } 902 }
903 903
904 ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, 0, 904 ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, 0,
905 "exynos-dp", dp); 905 "exynos-dp", dp);
906 if (ret) { 906 if (ret) {
907 dev_err(&pdev->dev, "failed to request irq\n"); 907 dev_err(&pdev->dev, "failed to request irq\n");
908 goto err_clock; 908 return ret;
909 } 909 }
910 910
911 dp->video_info = pdata->video_info; 911 dp->video_info = pdata->video_info;
@@ -917,7 +917,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
917 ret = exynos_dp_detect_hpd(dp); 917 ret = exynos_dp_detect_hpd(dp);
918 if (ret) { 918 if (ret) {
919 dev_err(&pdev->dev, "unable to detect hpd\n"); 919 dev_err(&pdev->dev, "unable to detect hpd\n");
920 goto err_clock; 920 return ret;
921 } 921 }
922 922
923 exynos_dp_handle_edid(dp); 923 exynos_dp_handle_edid(dp);
@@ -926,7 +926,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
926 dp->video_info->link_rate); 926 dp->video_info->link_rate);
927 if (ret) { 927 if (ret) {
928 dev_err(&pdev->dev, "unable to do link train\n"); 928 dev_err(&pdev->dev, "unable to do link train\n");
929 goto err_clock; 929 return ret;
930 } 930 }
931 931
932 exynos_dp_enable_scramble(dp, 1); 932 exynos_dp_enable_scramble(dp, 1);
@@ -940,17 +940,12 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
940 ret = exynos_dp_config_video(dp, dp->video_info); 940 ret = exynos_dp_config_video(dp, dp->video_info);
941 if (ret) { 941 if (ret) {
942 dev_err(&pdev->dev, "unable to config video\n"); 942 dev_err(&pdev->dev, "unable to config video\n");
943 goto err_clock; 943 return ret;
944 } 944 }
945 945
946 platform_set_drvdata(pdev, dp); 946 platform_set_drvdata(pdev, dp);
947 947
948 return 0; 948 return 0;
949
950err_clock:
951 clk_put(dp->clock);
952
953 return ret;
954} 949}
955 950
956static int __devexit exynos_dp_remove(struct platform_device *pdev) 951static int __devexit exynos_dp_remove(struct platform_device *pdev)
@@ -961,8 +956,7 @@ static int __devexit exynos_dp_remove(struct platform_device *pdev)
961 if (pdata && pdata->phy_exit) 956 if (pdata && pdata->phy_exit)
962 pdata->phy_exit(); 957 pdata->phy_exit();
963 958
964 clk_disable(dp->clock); 959 clk_disable_unprepare(dp->clock);
965 clk_put(dp->clock);
966 960
967 return 0; 961 return 0;
968} 962}
@@ -977,7 +971,7 @@ static int exynos_dp_suspend(struct device *dev)
977 if (pdata && pdata->phy_exit) 971 if (pdata && pdata->phy_exit)
978 pdata->phy_exit(); 972 pdata->phy_exit();
979 973
980 clk_disable(dp->clock); 974 clk_disable_unprepare(dp->clock);
981 975
982 return 0; 976 return 0;
983} 977}
@@ -991,7 +985,7 @@ static int exynos_dp_resume(struct device *dev)
991 if (pdata && pdata->phy_init) 985 if (pdata && pdata->phy_init)
992 pdata->phy_init(); 986 pdata->phy_init();
993 987
994 clk_enable(dp->clock); 988 clk_prepare_enable(dp->clock);
995 989
996 exynos_dp_init_dp(dp); 990 exynos_dp_init_dp(dp);
997 991
diff --git a/drivers/video/exynos/exynos_dp_core.h b/drivers/video/exynos/exynos_dp_core.h
index 8526e548c385..57b8a6531c0e 100644
--- a/drivers/video/exynos/exynos_dp_core.h
+++ b/drivers/video/exynos/exynos_dp_core.h
@@ -43,7 +43,7 @@ void exynos_dp_init_interrupt(struct exynos_dp_device *dp);
43void exynos_dp_reset(struct exynos_dp_device *dp); 43void exynos_dp_reset(struct exynos_dp_device *dp);
44void exynos_dp_swreset(struct exynos_dp_device *dp); 44void exynos_dp_swreset(struct exynos_dp_device *dp);
45void exynos_dp_config_interrupt(struct exynos_dp_device *dp); 45void exynos_dp_config_interrupt(struct exynos_dp_device *dp);
46u32 exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp); 46enum pll_status exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp);
47void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable); 47void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable);
48void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp, 48void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp,
49 enum analog_power_block block, 49 enum analog_power_block block,
@@ -105,7 +105,7 @@ u32 exynos_dp_get_lane1_link_training(struct exynos_dp_device *dp);
105u32 exynos_dp_get_lane2_link_training(struct exynos_dp_device *dp); 105u32 exynos_dp_get_lane2_link_training(struct exynos_dp_device *dp);
106u32 exynos_dp_get_lane3_link_training(struct exynos_dp_device *dp); 106u32 exynos_dp_get_lane3_link_training(struct exynos_dp_device *dp);
107void exynos_dp_reset_macro(struct exynos_dp_device *dp); 107void exynos_dp_reset_macro(struct exynos_dp_device *dp);
108int exynos_dp_init_video(struct exynos_dp_device *dp); 108void exynos_dp_init_video(struct exynos_dp_device *dp);
109 109
110void exynos_dp_set_video_color_format(struct exynos_dp_device *dp, 110void exynos_dp_set_video_color_format(struct exynos_dp_device *dp,
111 u32 color_depth, 111 u32 color_depth,
@@ -144,7 +144,7 @@ void exynos_dp_disable_scrambling(struct exynos_dp_device *dp);
144#define DPCD_ADDR_TRAINING_PATTERN_SET 0x0102 144#define DPCD_ADDR_TRAINING_PATTERN_SET 0x0102
145#define DPCD_ADDR_TRAINING_LANE0_SET 0x0103 145#define DPCD_ADDR_TRAINING_LANE0_SET 0x0103
146#define DPCD_ADDR_LANE0_1_STATUS 0x0202 146#define DPCD_ADDR_LANE0_1_STATUS 0x0202
147#define DPCD_ADDR_LANE_ALIGN__STATUS_UPDATED 0x0204 147#define DPCD_ADDR_LANE_ALIGN_STATUS_UPDATED 0x0204
148#define DPCD_ADDR_ADJUST_REQUEST_LANE0_1 0x0206 148#define DPCD_ADDR_ADJUST_REQUEST_LANE0_1 0x0206
149#define DPCD_ADDR_ADJUST_REQUEST_LANE2_3 0x0207 149#define DPCD_ADDR_ADJUST_REQUEST_LANE2_3 0x0207
150#define DPCD_ADDR_TEST_REQUEST 0x0218 150#define DPCD_ADDR_TEST_REQUEST 0x0218
diff --git a/drivers/video/exynos/exynos_dp_reg.c b/drivers/video/exynos/exynos_dp_reg.c
index 2db5b9aa250a..3f5ca8a0d5ea 100644
--- a/drivers/video/exynos/exynos_dp_reg.c
+++ b/drivers/video/exynos/exynos_dp_reg.c
@@ -77,7 +77,7 @@ void exynos_dp_init_analog_param(struct exynos_dp_device *dp)
77 writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_3); 77 writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_3);
78 78
79 reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM | 79 reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM |
80 TX_CUR1_2X | TX_CUR_8_MA; 80 TX_CUR1_2X | TX_CUR_16_MA;
81 writel(reg, dp->reg_base + EXYNOS_DP_PLL_FILTER_CTL_1); 81 writel(reg, dp->reg_base + EXYNOS_DP_PLL_FILTER_CTL_1);
82 82
83 reg = CH3_AMP_400_MV | CH2_AMP_400_MV | 83 reg = CH3_AMP_400_MV | CH2_AMP_400_MV |
@@ -148,9 +148,6 @@ void exynos_dp_reset(struct exynos_dp_device *dp)
148 writel(0x2, dp->reg_base + EXYNOS_DP_M_AUD_GEN_FILTER_TH); 148 writel(0x2, dp->reg_base + EXYNOS_DP_M_AUD_GEN_FILTER_TH);
149 149
150 writel(0x00000101, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); 150 writel(0x00000101, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
151
152 exynos_dp_init_analog_param(dp);
153 exynos_dp_init_interrupt(dp);
154} 151}
155 152
156void exynos_dp_swreset(struct exynos_dp_device *dp) 153void exynos_dp_swreset(struct exynos_dp_device *dp)
@@ -179,7 +176,7 @@ void exynos_dp_config_interrupt(struct exynos_dp_device *dp)
179 writel(reg, dp->reg_base + EXYNOS_DP_INT_STA_MASK); 176 writel(reg, dp->reg_base + EXYNOS_DP_INT_STA_MASK);
180} 177}
181 178
182u32 exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp) 179enum pll_status exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp)
183{ 180{
184 u32 reg; 181 u32 reg;
185 182
@@ -401,6 +398,7 @@ int exynos_dp_start_aux_transaction(struct exynos_dp_device *dp)
401{ 398{
402 int reg; 399 int reg;
403 int retval = 0; 400 int retval = 0;
401 int timeout_loop = 0;
404 402
405 /* Enable AUX CH operation */ 403 /* Enable AUX CH operation */
406 reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2); 404 reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2);
@@ -409,8 +407,15 @@ int exynos_dp_start_aux_transaction(struct exynos_dp_device *dp)
409 407
410 /* Is AUX CH command reply received? */ 408 /* Is AUX CH command reply received? */
411 reg = readl(dp->reg_base + EXYNOS_DP_INT_STA); 409 reg = readl(dp->reg_base + EXYNOS_DP_INT_STA);
412 while (!(reg & RPLY_RECEIV)) 410 while (!(reg & RPLY_RECEIV)) {
411 timeout_loop++;
412 if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
413 dev_err(dp->dev, "AUX CH command reply failed!\n");
414 return -ETIMEDOUT;
415 }
413 reg = readl(dp->reg_base + EXYNOS_DP_INT_STA); 416 reg = readl(dp->reg_base + EXYNOS_DP_INT_STA);
417 usleep_range(10, 11);
418 }
414 419
415 /* Clear interrupt source for AUX CH command reply */ 420 /* Clear interrupt source for AUX CH command reply */
416 writel(RPLY_RECEIV, dp->reg_base + EXYNOS_DP_INT_STA); 421 writel(RPLY_RECEIV, dp->reg_base + EXYNOS_DP_INT_STA);
@@ -471,7 +476,8 @@ int exynos_dp_write_byte_to_dpcd(struct exynos_dp_device *dp,
471 if (retval == 0) 476 if (retval == 0)
472 break; 477 break;
473 else 478 else
474 dev_err(dp->dev, "Aux Transaction fail!\n"); 479 dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
480 __func__);
475 } 481 }
476 482
477 return retval; 483 return retval;
@@ -511,7 +517,8 @@ int exynos_dp_read_byte_from_dpcd(struct exynos_dp_device *dp,
511 if (retval == 0) 517 if (retval == 0)
512 break; 518 break;
513 else 519 else
514 dev_err(dp->dev, "Aux Transaction fail!\n"); 520 dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
521 __func__);
515 } 522 }
516 523
517 /* Read data buffer */ 524 /* Read data buffer */
@@ -575,7 +582,8 @@ int exynos_dp_write_bytes_to_dpcd(struct exynos_dp_device *dp,
575 if (retval == 0) 582 if (retval == 0)
576 break; 583 break;
577 else 584 else
578 dev_err(dp->dev, "Aux Transaction fail!\n"); 585 dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
586 __func__);
579 } 587 }
580 588
581 start_offset += cur_data_count; 589 start_offset += cur_data_count;
@@ -632,7 +640,8 @@ int exynos_dp_read_bytes_from_dpcd(struct exynos_dp_device *dp,
632 if (retval == 0) 640 if (retval == 0)
633 break; 641 break;
634 else 642 else
635 dev_err(dp->dev, "Aux Transaction fail!\n"); 643 dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
644 __func__);
636 } 645 }
637 646
638 for (cur_data_idx = 0; cur_data_idx < cur_data_count; 647 for (cur_data_idx = 0; cur_data_idx < cur_data_count;
@@ -677,7 +686,7 @@ int exynos_dp_select_i2c_device(struct exynos_dp_device *dp,
677 /* Start AUX transaction */ 686 /* Start AUX transaction */
678 retval = exynos_dp_start_aux_transaction(dp); 687 retval = exynos_dp_start_aux_transaction(dp);
679 if (retval != 0) 688 if (retval != 0)
680 dev_err(dp->dev, "Aux Transaction fail!\n"); 689 dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
681 690
682 return retval; 691 return retval;
683} 692}
@@ -717,7 +726,8 @@ int exynos_dp_read_byte_from_i2c(struct exynos_dp_device *dp,
717 if (retval == 0) 726 if (retval == 0)
718 break; 727 break;
719 else 728 else
720 dev_err(dp->dev, "Aux Transaction fail!\n"); 729 dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
730 __func__);
721 } 731 }
722 732
723 /* Read data */ 733 /* Read data */
@@ -777,7 +787,9 @@ int exynos_dp_read_bytes_from_i2c(struct exynos_dp_device *dp,
777 if (retval == 0) 787 if (retval == 0)
778 break; 788 break;
779 else 789 else
780 dev_err(dp->dev, "Aux Transaction fail!\n"); 790 dev_dbg(dp->dev,
791 "%s: Aux Transaction fail!\n",
792 __func__);
781 } 793 }
782 /* Check if Rx sends defer */ 794 /* Check if Rx sends defer */
783 reg = readl(dp->reg_base + EXYNOS_DP_AUX_RX_COMM); 795 reg = readl(dp->reg_base + EXYNOS_DP_AUX_RX_COMM);
@@ -883,7 +895,9 @@ void exynos_dp_set_lane0_pre_emphasis(struct exynos_dp_device *dp, u32 level)
883{ 895{
884 u32 reg; 896 u32 reg;
885 897
886 reg = level << PRE_EMPHASIS_SET_SHIFT; 898 reg = readl(dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL);
899 reg &= ~PRE_EMPHASIS_SET_MASK;
900 reg |= level << PRE_EMPHASIS_SET_SHIFT;
887 writel(reg, dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL); 901 writel(reg, dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL);
888} 902}
889 903
@@ -891,7 +905,9 @@ void exynos_dp_set_lane1_pre_emphasis(struct exynos_dp_device *dp, u32 level)
891{ 905{
892 u32 reg; 906 u32 reg;
893 907
894 reg = level << PRE_EMPHASIS_SET_SHIFT; 908 reg = readl(dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL);
909 reg &= ~PRE_EMPHASIS_SET_MASK;
910 reg |= level << PRE_EMPHASIS_SET_SHIFT;
895 writel(reg, dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL); 911 writel(reg, dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL);
896} 912}
897 913
@@ -899,7 +915,9 @@ void exynos_dp_set_lane2_pre_emphasis(struct exynos_dp_device *dp, u32 level)
899{ 915{
900 u32 reg; 916 u32 reg;
901 917
902 reg = level << PRE_EMPHASIS_SET_SHIFT; 918 reg = readl(dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL);
919 reg &= ~PRE_EMPHASIS_SET_MASK;
920 reg |= level << PRE_EMPHASIS_SET_SHIFT;
903 writel(reg, dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL); 921 writel(reg, dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL);
904} 922}
905 923
@@ -907,7 +925,9 @@ void exynos_dp_set_lane3_pre_emphasis(struct exynos_dp_device *dp, u32 level)
907{ 925{
908 u32 reg; 926 u32 reg;
909 927
910 reg = level << PRE_EMPHASIS_SET_SHIFT; 928 reg = readl(dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL);
929 reg &= ~PRE_EMPHASIS_SET_MASK;
930 reg |= level << PRE_EMPHASIS_SET_SHIFT;
911 writel(reg, dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL); 931 writel(reg, dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL);
912} 932}
913 933
@@ -994,7 +1014,7 @@ void exynos_dp_reset_macro(struct exynos_dp_device *dp)
994 writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST); 1014 writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST);
995} 1015}
996 1016
997int exynos_dp_init_video(struct exynos_dp_device *dp) 1017void exynos_dp_init_video(struct exynos_dp_device *dp)
998{ 1018{
999 u32 reg; 1019 u32 reg;
1000 1020
@@ -1012,8 +1032,6 @@ int exynos_dp_init_video(struct exynos_dp_device *dp)
1012 1032
1013 reg = VID_HRES_TH(2) | VID_VRES_TH(0); 1033 reg = VID_HRES_TH(2) | VID_VRES_TH(0);
1014 writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_8); 1034 writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_8);
1015
1016 return 0;
1017} 1035}
1018 1036
1019void exynos_dp_set_video_color_format(struct exynos_dp_device *dp, 1037void exynos_dp_set_video_color_format(struct exynos_dp_device *dp,
diff --git a/drivers/video/exynos/exynos_dp_reg.h b/drivers/video/exynos/exynos_dp_reg.h
index 125b27cd57ae..1f2f014cfe88 100644
--- a/drivers/video/exynos/exynos_dp_reg.h
+++ b/drivers/video/exynos/exynos_dp_reg.h
@@ -187,7 +187,7 @@
187#define PD_RING_OSC (0x1 << 6) 187#define PD_RING_OSC (0x1 << 6)
188#define AUX_TERMINAL_CTRL_50_OHM (0x2 << 4) 188#define AUX_TERMINAL_CTRL_50_OHM (0x2 << 4)
189#define TX_CUR1_2X (0x1 << 2) 189#define TX_CUR1_2X (0x1 << 2)
190#define TX_CUR_8_MA (0x2 << 0) 190#define TX_CUR_16_MA (0x3 << 0)
191 191
192/* EXYNOS_DP_TX_AMP_TUNING_CTL */ 192/* EXYNOS_DP_TX_AMP_TUNING_CTL */
193#define CH3_AMP_400_MV (0x0 << 24) 193#define CH3_AMP_400_MV (0x0 << 24)
@@ -285,6 +285,7 @@
285#define SW_TRAINING_PATTERN_SET_NORMAL (0x0 << 0) 285#define SW_TRAINING_PATTERN_SET_NORMAL (0x0 << 0)
286 286
287/* EXYNOS_DP_LN0_LINK_TRAINING_CTL */ 287/* EXYNOS_DP_LN0_LINK_TRAINING_CTL */
288#define PRE_EMPHASIS_SET_MASK (0x3 << 3)
288#define PRE_EMPHASIS_SET_SHIFT (3) 289#define PRE_EMPHASIS_SET_SHIFT (3)
289 290
290/* EXYNOS_DP_DEBUG_CTL */ 291/* EXYNOS_DP_DEBUG_CTL */
diff --git a/drivers/video/exynos/exynos_mipi_dsi.c b/drivers/video/exynos/exynos_mipi_dsi.c
index 663c308d0e73..07d70a3a628b 100644
--- a/drivers/video/exynos/exynos_mipi_dsi.c
+++ b/drivers/video/exynos/exynos_mipi_dsi.c
@@ -205,7 +205,8 @@ int exynos_mipi_dsi_register_lcd_device(struct mipi_dsim_lcd_device *lcd_dev)
205 return 0; 205 return 0;
206} 206}
207 207
208struct mipi_dsim_ddi *exynos_mipi_dsi_find_lcd_device(struct mipi_dsim_lcd_driver *lcd_drv) 208static struct mipi_dsim_ddi *exynos_mipi_dsi_find_lcd_device(
209 struct mipi_dsim_lcd_driver *lcd_drv)
209{ 210{
210 struct mipi_dsim_ddi *dsim_ddi, *next; 211 struct mipi_dsim_ddi *dsim_ddi, *next;
211 struct mipi_dsim_lcd_device *lcd_dev; 212 struct mipi_dsim_lcd_device *lcd_dev;
@@ -265,7 +266,8 @@ int exynos_mipi_dsi_register_lcd_driver(struct mipi_dsim_lcd_driver *lcd_drv)
265 266
266} 267}
267 268
268struct mipi_dsim_ddi *exynos_mipi_dsi_bind_lcd_ddi(struct mipi_dsim_device *dsim, 269static struct mipi_dsim_ddi *exynos_mipi_dsi_bind_lcd_ddi(
270 struct mipi_dsim_device *dsim,
269 const char *name) 271 const char *name)
270{ 272{
271 struct mipi_dsim_ddi *dsim_ddi, *next; 273 struct mipi_dsim_ddi *dsim_ddi, *next;
@@ -373,6 +375,7 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
373 dsim->clock = clk_get(&pdev->dev, "dsim0"); 375 dsim->clock = clk_get(&pdev->dev, "dsim0");
374 if (IS_ERR(dsim->clock)) { 376 if (IS_ERR(dsim->clock)) {
375 dev_err(&pdev->dev, "failed to get dsim clock source\n"); 377 dev_err(&pdev->dev, "failed to get dsim clock source\n");
378 ret = -ENODEV;
376 goto err_clock_get; 379 goto err_clock_get;
377 } 380 }
378 381
@@ -381,6 +384,7 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
381 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 384 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
382 if (!res) { 385 if (!res) {
383 dev_err(&pdev->dev, "failed to get io memory region\n"); 386 dev_err(&pdev->dev, "failed to get io memory region\n");
387 ret = -ENODEV;
384 goto err_platform_get; 388 goto err_platform_get;
385 } 389 }
386 390
@@ -405,6 +409,7 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
405 dsim_ddi = exynos_mipi_dsi_bind_lcd_ddi(dsim, dsim_pd->lcd_panel_name); 409 dsim_ddi = exynos_mipi_dsi_bind_lcd_ddi(dsim, dsim_pd->lcd_panel_name);
406 if (!dsim_ddi) { 410 if (!dsim_ddi) {
407 dev_err(&pdev->dev, "mipi_dsim_ddi object not found.\n"); 411 dev_err(&pdev->dev, "mipi_dsim_ddi object not found.\n");
412 ret = -EINVAL;
408 goto err_bind; 413 goto err_bind;
409 } 414 }
410 415
diff --git a/drivers/video/exynos/exynos_mipi_dsi_common.c b/drivers/video/exynos/exynos_mipi_dsi_common.c
index 47b533a183be..3cd29a4fc10a 100644
--- a/drivers/video/exynos/exynos_mipi_dsi_common.c
+++ b/drivers/video/exynos/exynos_mipi_dsi_common.c
@@ -79,11 +79,6 @@ irqreturn_t exynos_mipi_dsi_interrupt_handler(int irq, void *dev_id)
79 struct mipi_dsim_device *dsim = dev_id; 79 struct mipi_dsim_device *dsim = dev_id;
80 unsigned int intsrc, intmsk; 80 unsigned int intsrc, intmsk;
81 81
82 if (dsim == NULL) {
83 dev_err(dsim->dev, "%s: wrong parameter\n", __func__);
84 return IRQ_NONE;
85 }
86
87 intsrc = exynos_mipi_dsi_read_interrupt(dsim); 82 intsrc = exynos_mipi_dsi_read_interrupt(dsim);
88 intmsk = exynos_mipi_dsi_read_interrupt_mask(dsim); 83 intmsk = exynos_mipi_dsi_read_interrupt_mask(dsim);
89 intmsk = ~intmsk & intsrc; 84 intmsk = ~intmsk & intsrc;
@@ -288,9 +283,6 @@ int exynos_mipi_dsi_wr_data(struct mipi_dsim_device *dsim, unsigned int data_id,
288 mutex_unlock(&dsim->lock); 283 mutex_unlock(&dsim->lock);
289 return -EINVAL; 284 return -EINVAL;
290 } 285 }
291
292 mutex_unlock(&dsim->lock);
293 return 0;
294} 286}
295 287
296static unsigned int exynos_mipi_dsi_long_data_rd(struct mipi_dsim_device *dsim, 288static unsigned int exynos_mipi_dsi_long_data_rd(struct mipi_dsim_device *dsim,
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index 458c00664ade..ede9e55413f8 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -1501,8 +1501,8 @@ static int __devinit fsl_diu_probe(struct platform_device *pdev)
1501 unsigned int i; 1501 unsigned int i;
1502 int ret; 1502 int ret;
1503 1503
1504 data = dma_alloc_coherent(&pdev->dev, sizeof(struct fsl_diu_data), 1504 data = dmam_alloc_coherent(&pdev->dev, sizeof(struct fsl_diu_data),
1505 &dma_addr, GFP_DMA | __GFP_ZERO); 1505 &dma_addr, GFP_DMA | __GFP_ZERO);
1506 if (!data) 1506 if (!data)
1507 return -ENOMEM; 1507 return -ENOMEM;
1508 data->dma_addr = dma_addr; 1508 data->dma_addr = dma_addr;
@@ -1628,9 +1628,6 @@ error:
1628 1628
1629 iounmap(data->diu_reg); 1629 iounmap(data->diu_reg);
1630 1630
1631 dma_free_coherent(&pdev->dev, sizeof(struct fsl_diu_data), data,
1632 data->dma_addr);
1633
1634 return ret; 1631 return ret;
1635} 1632}
1636 1633
@@ -1648,9 +1645,6 @@ static int fsl_diu_remove(struct platform_device *pdev)
1648 1645
1649 iounmap(data->diu_reg); 1646 iounmap(data->diu_reg);
1650 1647
1651 dma_free_coherent(&pdev->dev, sizeof(struct fsl_diu_data), data,
1652 data->dma_addr);
1653
1654 return 0; 1648 return 0;
1655} 1649}
1656 1650
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index 05e2a8a99d8f..3dad31975db8 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -20,6 +20,7 @@
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/io.h>
23 24
24#ifdef CONFIG_X86 25#ifdef CONFIG_X86
25#include <asm/mtrr.h> 26#include <asm/mtrr.h>
@@ -28,7 +29,6 @@
28#include <asm/addrspace.h> 29#include <asm/addrspace.h>
29#endif 30#endif
30#include <asm/byteorder.h> 31#include <asm/byteorder.h>
31#include <asm/io.h>
32#include <asm/tlbflush.h> 32#include <asm/tlbflush.h>
33 33
34#include <video/gbe.h> 34#include <video/gbe.h>
@@ -1156,7 +1156,8 @@ static int __devinit gbefb_probe(struct platform_device *p_dev)
1156 goto out_release_framebuffer; 1156 goto out_release_framebuffer;
1157 } 1157 }
1158 1158
1159 gbe = (struct sgi_gbe *) ioremap(GBE_BASE, sizeof(struct sgi_gbe)); 1159 gbe = (struct sgi_gbe *) devm_ioremap(&p_dev->dev, GBE_BASE,
1160 sizeof(struct sgi_gbe));
1160 if (!gbe) { 1161 if (!gbe) {
1161 printk(KERN_ERR "gbefb: couldn't map mmio region\n"); 1162 printk(KERN_ERR "gbefb: couldn't map mmio region\n");
1162 ret = -ENXIO; 1163 ret = -ENXIO;
@@ -1170,12 +1171,13 @@ static int __devinit gbefb_probe(struct platform_device *p_dev)
1170 if (!gbe_tiles.cpu) { 1171 if (!gbe_tiles.cpu) {
1171 printk(KERN_ERR "gbefb: couldn't allocate tiles table\n"); 1172 printk(KERN_ERR "gbefb: couldn't allocate tiles table\n");
1172 ret = -ENOMEM; 1173 ret = -ENOMEM;
1173 goto out_unmap; 1174 goto out_release_mem_region;
1174 } 1175 }
1175 1176
1176 if (gbe_mem_phys) { 1177 if (gbe_mem_phys) {
1177 /* memory was allocated at boot time */ 1178 /* memory was allocated at boot time */
1178 gbe_mem = ioremap_nocache(gbe_mem_phys, gbe_mem_size); 1179 gbe_mem = devm_ioremap_nocache(&p_dev->dev, gbe_mem_phys,
1180 gbe_mem_size);
1179 if (!gbe_mem) { 1181 if (!gbe_mem) {
1180 printk(KERN_ERR "gbefb: couldn't map framebuffer\n"); 1182 printk(KERN_ERR "gbefb: couldn't map framebuffer\n");
1181 ret = -ENOMEM; 1183 ret = -ENOMEM;
@@ -1241,13 +1243,9 @@ static int __devinit gbefb_probe(struct platform_device *p_dev)
1241out_gbe_unmap: 1243out_gbe_unmap:
1242 if (gbe_dma_addr) 1244 if (gbe_dma_addr)
1243 dma_free_coherent(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys); 1245 dma_free_coherent(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys);
1244 else
1245 iounmap(gbe_mem);
1246out_tiles_free: 1246out_tiles_free:
1247 dma_free_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t), 1247 dma_free_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t),
1248 (void *)gbe_tiles.cpu, gbe_tiles.dma); 1248 (void *)gbe_tiles.cpu, gbe_tiles.dma);
1249out_unmap:
1250 iounmap(gbe);
1251out_release_mem_region: 1249out_release_mem_region:
1252 release_mem_region(GBE_BASE, sizeof(struct sgi_gbe)); 1250 release_mem_region(GBE_BASE, sizeof(struct sgi_gbe));
1253out_release_framebuffer: 1251out_release_framebuffer:
@@ -1264,12 +1262,9 @@ static int __devexit gbefb_remove(struct platform_device* p_dev)
1264 gbe_turn_off(); 1262 gbe_turn_off();
1265 if (gbe_dma_addr) 1263 if (gbe_dma_addr)
1266 dma_free_coherent(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys); 1264 dma_free_coherent(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys);
1267 else
1268 iounmap(gbe_mem);
1269 dma_free_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t), 1265 dma_free_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t),
1270 (void *)gbe_tiles.cpu, gbe_tiles.dma); 1266 (void *)gbe_tiles.cpu, gbe_tiles.dma);
1271 release_mem_region(GBE_BASE, sizeof(struct sgi_gbe)); 1267 release_mem_region(GBE_BASE, sizeof(struct sgi_gbe));
1272 iounmap(gbe);
1273 gbefb_remove_sysfs(&p_dev->dev); 1268 gbefb_remove_sysfs(&p_dev->dev);
1274 framebuffer_release(info); 1269 framebuffer_release(info);
1275 1270
diff --git a/drivers/video/hpfb.c b/drivers/video/hpfb.c
index ebf8495ff198..7324865f965f 100644
--- a/drivers/video/hpfb.c
+++ b/drivers/video/hpfb.c
@@ -210,6 +210,7 @@ static int __devinit hpfb_init_one(unsigned long phys_base,
210 unsigned long virt_base) 210 unsigned long virt_base)
211{ 211{
212 unsigned long fboff, fb_width, fb_height, fb_start; 212 unsigned long fboff, fb_width, fb_height, fb_start;
213 int ret;
213 214
214 fb_regs = virt_base; 215 fb_regs = virt_base;
215 fboff = (in_8(fb_regs + HPFB_FBOMSB) << 8) | in_8(fb_regs + HPFB_FBOLSB); 216 fboff = (in_8(fb_regs + HPFB_FBOMSB) << 8) | in_8(fb_regs + HPFB_FBOLSB);
@@ -290,19 +291,29 @@ static int __devinit hpfb_init_one(unsigned long phys_base,
290 fb_info.var = hpfb_defined; 291 fb_info.var = hpfb_defined;
291 fb_info.screen_base = (char *)fb_start; 292 fb_info.screen_base = (char *)fb_start;
292 293
293 fb_alloc_cmap(&fb_info.cmap, 1 << hpfb_defined.bits_per_pixel, 0); 294 ret = fb_alloc_cmap(&fb_info.cmap, 1 << hpfb_defined.bits_per_pixel, 0);
295 if (ret < 0)
296 goto unmap_screen_base;
294 297
295 if (register_framebuffer(&fb_info) < 0) { 298 ret = register_framebuffer(&fb_info);
296 fb_dealloc_cmap(&fb_info.cmap); 299 if (ret < 0)
297 iounmap(fb_info.screen_base); 300 goto dealloc_cmap;
298 fb_info.screen_base = NULL;
299 return 1;
300 }
301 301
302 printk(KERN_INFO "fb%d: %s frame buffer device\n", 302 printk(KERN_INFO "fb%d: %s frame buffer device\n",
303 fb_info.node, fb_info.fix.id); 303 fb_info.node, fb_info.fix.id);
304 304
305 return 0; 305 return 0;
306
307dealloc_cmap:
308 fb_dealloc_cmap(&fb_info.cmap);
309
310unmap_screen_base:
311 if (fb_info.screen_base) {
312 iounmap(fb_info.screen_base);
313 fb_info.screen_base = NULL;
314 }
315
316 return ret;
306} 317}
307 318
308/* 319/*
@@ -345,6 +356,9 @@ static void __devexit hpfb_remove_one(struct dio_dev *d)
345 if (d->scode >= DIOII_SCBASE) 356 if (d->scode >= DIOII_SCBASE)
346 iounmap((void *)fb_regs); 357 iounmap((void *)fb_regs);
347 release_mem_region(d->resource.start, resource_size(&d->resource)); 358 release_mem_region(d->resource.start, resource_size(&d->resource));
359 fb_dealloc_cmap(&fb_info.cmap);
360 if (fb_info.screen_base)
361 iounmap(fb_info.screen_base);
348} 362}
349 363
350static struct dio_device_id hpfb_dio_tbl[] = { 364static struct dio_device_id hpfb_dio_tbl[] = {
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 53ffdfc82a75..cf2688de0832 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -803,6 +803,7 @@ static int __init imxfb_probe(struct platform_device *pdev)
803 fbi->regs = ioremap(res->start, resource_size(res)); 803 fbi->regs = ioremap(res->start, resource_size(res));
804 if (fbi->regs == NULL) { 804 if (fbi->regs == NULL) {
805 dev_err(&pdev->dev, "Cannot map frame buffer registers\n"); 805 dev_err(&pdev->dev, "Cannot map frame buffer registers\n");
806 ret = -ENOMEM;
806 goto failed_ioremap; 807 goto failed_ioremap;
807 } 808 }
808 809
diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
index 3c63fc24bb1f..4d25711b9982 100644
--- a/drivers/video/jz4740_fb.c
+++ b/drivers/video/jz4740_fb.c
@@ -632,23 +632,10 @@ static int __devinit jzfb_probe(struct platform_device *pdev)
632 return -ENXIO; 632 return -ENXIO;
633 } 633 }
634 634
635 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
636 if (!mem) {
637 dev_err(&pdev->dev, "Failed to get register memory resource\n");
638 return -ENXIO;
639 }
640
641 mem = request_mem_region(mem->start, resource_size(mem), pdev->name);
642 if (!mem) {
643 dev_err(&pdev->dev, "Failed to request register memory region\n");
644 return -EBUSY;
645 }
646
647 fb = framebuffer_alloc(sizeof(struct jzfb), &pdev->dev); 635 fb = framebuffer_alloc(sizeof(struct jzfb), &pdev->dev);
648 if (!fb) { 636 if (!fb) {
649 dev_err(&pdev->dev, "Failed to allocate framebuffer device\n"); 637 dev_err(&pdev->dev, "Failed to allocate framebuffer device\n");
650 ret = -ENOMEM; 638 return -ENOMEM;
651 goto err_release_mem_region;
652 } 639 }
653 640
654 fb->fbops = &jzfb_ops; 641 fb->fbops = &jzfb_ops;
@@ -657,27 +644,26 @@ static int __devinit jzfb_probe(struct platform_device *pdev)
657 jzfb = fb->par; 644 jzfb = fb->par;
658 jzfb->pdev = pdev; 645 jzfb->pdev = pdev;
659 jzfb->pdata = pdata; 646 jzfb->pdata = pdata;
660 jzfb->mem = mem;
661 647
662 jzfb->ldclk = clk_get(&pdev->dev, "lcd"); 648 jzfb->ldclk = devm_clk_get(&pdev->dev, "lcd");
663 if (IS_ERR(jzfb->ldclk)) { 649 if (IS_ERR(jzfb->ldclk)) {
664 ret = PTR_ERR(jzfb->ldclk); 650 ret = PTR_ERR(jzfb->ldclk);
665 dev_err(&pdev->dev, "Failed to get lcd clock: %d\n", ret); 651 dev_err(&pdev->dev, "Failed to get lcd clock: %d\n", ret);
666 goto err_framebuffer_release; 652 goto err_framebuffer_release;
667 } 653 }
668 654
669 jzfb->lpclk = clk_get(&pdev->dev, "lcd_pclk"); 655 jzfb->lpclk = devm_clk_get(&pdev->dev, "lcd_pclk");
670 if (IS_ERR(jzfb->lpclk)) { 656 if (IS_ERR(jzfb->lpclk)) {
671 ret = PTR_ERR(jzfb->lpclk); 657 ret = PTR_ERR(jzfb->lpclk);
672 dev_err(&pdev->dev, "Failed to get lcd pixel clock: %d\n", ret); 658 dev_err(&pdev->dev, "Failed to get lcd pixel clock: %d\n", ret);
673 goto err_put_ldclk; 659 goto err_framebuffer_release;
674 } 660 }
675 661
676 jzfb->base = ioremap(mem->start, resource_size(mem)); 662 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
663 jzfb->base = devm_request_and_ioremap(&pdev->dev, mem);
677 if (!jzfb->base) { 664 if (!jzfb->base) {
678 dev_err(&pdev->dev, "Failed to ioremap register memory region\n");
679 ret = -EBUSY; 665 ret = -EBUSY;
680 goto err_put_lpclk; 666 goto err_framebuffer_release;
681 } 667 }
682 668
683 platform_set_drvdata(pdev, jzfb); 669 platform_set_drvdata(pdev, jzfb);
@@ -693,7 +679,7 @@ static int __devinit jzfb_probe(struct platform_device *pdev)
693 ret = jzfb_alloc_devmem(jzfb); 679 ret = jzfb_alloc_devmem(jzfb);
694 if (ret) { 680 if (ret) {
695 dev_err(&pdev->dev, "Failed to allocate video memory\n"); 681 dev_err(&pdev->dev, "Failed to allocate video memory\n");
696 goto err_iounmap; 682 goto err_framebuffer_release;
697 } 683 }
698 684
699 fb->fix = jzfb_fix; 685 fb->fix = jzfb_fix;
@@ -734,16 +720,8 @@ err_free_devmem:
734 720
735 fb_dealloc_cmap(&fb->cmap); 721 fb_dealloc_cmap(&fb->cmap);
736 jzfb_free_devmem(jzfb); 722 jzfb_free_devmem(jzfb);
737err_iounmap:
738 iounmap(jzfb->base);
739err_put_lpclk:
740 clk_put(jzfb->lpclk);
741err_put_ldclk:
742 clk_put(jzfb->ldclk);
743err_framebuffer_release: 723err_framebuffer_release:
744 framebuffer_release(fb); 724 framebuffer_release(fb);
745err_release_mem_region:
746 release_mem_region(mem->start, resource_size(mem));
747 return ret; 725 return ret;
748} 726}
749 727
@@ -756,17 +734,11 @@ static int __devexit jzfb_remove(struct platform_device *pdev)
756 jz_gpio_bulk_free(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb)); 734 jz_gpio_bulk_free(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
757 jz_gpio_bulk_free(jz_lcd_data_pins, jzfb_num_data_pins(jzfb)); 735 jz_gpio_bulk_free(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
758 736
759 iounmap(jzfb->base);
760 release_mem_region(jzfb->mem->start, resource_size(jzfb->mem));
761
762 fb_dealloc_cmap(&jzfb->fb->cmap); 737 fb_dealloc_cmap(&jzfb->fb->cmap);
763 jzfb_free_devmem(jzfb); 738 jzfb_free_devmem(jzfb);
764 739
765 platform_set_drvdata(pdev, NULL); 740 platform_set_drvdata(pdev, NULL);
766 741
767 clk_put(jzfb->lpclk);
768 clk_put(jzfb->ldclk);
769
770 framebuffer_release(jzfb->fb); 742 framebuffer_release(jzfb->fb);
771 743
772 return 0; 744 return 0;
diff --git a/drivers/video/mb862xx/mb862xxfbdrv.c b/drivers/video/mb862xx/mb862xxfbdrv.c
index 57d940be5f3d..d68e332aa21c 100644
--- a/drivers/video/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/mb862xx/mb862xxfbdrv.c
@@ -1052,12 +1052,14 @@ static int __devinit mb862xx_pci_probe(struct pci_dev *pdev,
1052 break; 1052 break;
1053 default: 1053 default:
1054 /* should never occur */ 1054 /* should never occur */
1055 ret = -EIO;
1055 goto rel_reg; 1056 goto rel_reg;
1056 } 1057 }
1057 1058
1058 par->fb_base = ioremap(par->fb_base_phys, par->mapped_vram); 1059 par->fb_base = ioremap(par->fb_base_phys, par->mapped_vram);
1059 if (par->fb_base == NULL) { 1060 if (par->fb_base == NULL) {
1060 dev_err(dev, "Cannot map framebuffer\n"); 1061 dev_err(dev, "Cannot map framebuffer\n");
1062 ret = -EIO;
1061 goto rel_reg; 1063 goto rel_reg;
1062 } 1064 }
1063 1065
@@ -1073,11 +1075,13 @@ static int __devinit mb862xx_pci_probe(struct pci_dev *pdev,
1073 dev_dbg(dev, "mmio phys 0x%llx 0x%lx\n", 1075 dev_dbg(dev, "mmio phys 0x%llx 0x%lx\n",
1074 (unsigned long long)par->mmio_base_phys, (ulong)par->mmio_len); 1076 (unsigned long long)par->mmio_base_phys, (ulong)par->mmio_len);
1075 1077
1076 if (mb862xx_pci_gdc_init(par)) 1078 ret = mb862xx_pci_gdc_init(par);
1079 if (ret)
1077 goto io_unmap; 1080 goto io_unmap;
1078 1081
1079 if (request_irq(par->irq, mb862xx_intr, IRQF_SHARED, 1082 ret = request_irq(par->irq, mb862xx_intr, IRQF_SHARED,
1080 DRV_NAME, (void *)par)) { 1083 DRV_NAME, (void *)par);
1084 if (ret) {
1081 dev_err(dev, "Cannot request irq\n"); 1085 dev_err(dev, "Cannot request irq\n");
1082 goto io_unmap; 1086 goto io_unmap;
1083 } 1087 }
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c
index 85e4f44bfa61..6563e50413c1 100644
--- a/drivers/video/mbx/mbxfb.c
+++ b/drivers/video/mbx/mbxfb.c
@@ -26,8 +26,7 @@
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/uaccess.h> 28#include <linux/uaccess.h>
29 29#include <linux/io.h>
30#include <asm/io.h>
31 30
32#include <video/mbxfb.h> 31#include <video/mbxfb.h>
33 32
@@ -939,8 +938,9 @@ static int __devinit mbxfb_probe(struct platform_device *dev)
939 } 938 }
940 mfbi->reg_phys_addr = mfbi->reg_res->start; 939 mfbi->reg_phys_addr = mfbi->reg_res->start;
941 940
942 mfbi->reg_virt_addr = ioremap_nocache(mfbi->reg_phys_addr, 941 mfbi->reg_virt_addr = devm_ioremap_nocache(&dev->dev,
943 res_size(mfbi->reg_req)); 942 mfbi->reg_phys_addr,
943 res_size(mfbi->reg_req));
944 if (!mfbi->reg_virt_addr) { 944 if (!mfbi->reg_virt_addr) {
945 dev_err(&dev->dev, "failed to ioremap Marathon registers\n"); 945 dev_err(&dev->dev, "failed to ioremap Marathon registers\n");
946 ret = -EINVAL; 946 ret = -EINVAL;
@@ -948,12 +948,12 @@ static int __devinit mbxfb_probe(struct platform_device *dev)
948 } 948 }
949 virt_base_2700 = mfbi->reg_virt_addr; 949 virt_base_2700 = mfbi->reg_virt_addr;
950 950
951 mfbi->fb_virt_addr = ioremap_nocache(mfbi->fb_phys_addr, 951 mfbi->fb_virt_addr = devm_ioremap_nocache(&dev->dev, mfbi->fb_phys_addr,
952 res_size(mfbi->fb_req)); 952 res_size(mfbi->fb_req));
953 if (!mfbi->fb_virt_addr) { 953 if (!mfbi->fb_virt_addr) {
954 dev_err(&dev->dev, "failed to ioremap frame buffer\n"); 954 dev_err(&dev->dev, "failed to ioremap frame buffer\n");
955 ret = -EINVAL; 955 ret = -EINVAL;
956 goto err4; 956 goto err3;
957 } 957 }
958 958
959 fbi->screen_base = (char __iomem *)(mfbi->fb_virt_addr + 0x60000); 959 fbi->screen_base = (char __iomem *)(mfbi->fb_virt_addr + 0x60000);
@@ -971,7 +971,7 @@ static int __devinit mbxfb_probe(struct platform_device *dev)
971 if (ret < 0) { 971 if (ret < 0) {
972 dev_err(&dev->dev, "fb_alloc_cmap failed\n"); 972 dev_err(&dev->dev, "fb_alloc_cmap failed\n");
973 ret = -EINVAL; 973 ret = -EINVAL;
974 goto err5; 974 goto err3;
975 } 975 }
976 976
977 platform_set_drvdata(dev, fbi); 977 platform_set_drvdata(dev, fbi);
@@ -996,10 +996,6 @@ static int __devinit mbxfb_probe(struct platform_device *dev)
996 996
997err6: 997err6:
998 fb_dealloc_cmap(&fbi->cmap); 998 fb_dealloc_cmap(&fbi->cmap);
999err5:
1000 iounmap(mfbi->fb_virt_addr);
1001err4:
1002 iounmap(mfbi->reg_virt_addr);
1003err3: 999err3:
1004 release_mem_region(mfbi->reg_res->start, res_size(mfbi->reg_res)); 1000 release_mem_region(mfbi->reg_res->start, res_size(mfbi->reg_res));
1005err2: 1001err2:
@@ -1026,10 +1022,7 @@ static int __devexit mbxfb_remove(struct platform_device *dev)
1026 if (mfbi->platform_remove) 1022 if (mfbi->platform_remove)
1027 mfbi->platform_remove(fbi); 1023 mfbi->platform_remove(fbi);
1028 1024
1029 if (mfbi->fb_virt_addr) 1025
1030 iounmap(mfbi->fb_virt_addr);
1031 if (mfbi->reg_virt_addr)
1032 iounmap(mfbi->reg_virt_addr);
1033 if (mfbi->reg_req) 1026 if (mfbi->reg_req)
1034 release_mem_region(mfbi->reg_req->start, 1027 release_mem_region(mfbi->reg_req->start,
1035 res_size(mfbi->reg_req)); 1028 res_size(mfbi->reg_req));
diff --git a/drivers/video/msm/mddi.c b/drivers/video/msm/mddi.c
index bf73f0480061..35ac9e8bee63 100644
--- a/drivers/video/msm/mddi.c
+++ b/drivers/video/msm/mddi.c
@@ -26,9 +26,6 @@
26#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/io.h> 27#include <linux/io.h>
28#include <linux/sched.h> 28#include <linux/sched.h>
29#include <mach/msm_iomap.h>
30#include <mach/irqs.h>
31#include <mach/board.h>
32#include <linux/platform_data/video-msm_fb.h> 29#include <linux/platform_data/video-msm_fb.h>
33#include "mddi_hw.h" 30#include "mddi_hw.h"
34 31
diff --git a/drivers/video/msm/mddi_client_nt35399.c b/drivers/video/msm/mddi_client_nt35399.c
index d7a5bf84fb2a..f96df32e5509 100644
--- a/drivers/video/msm/mddi_client_nt35399.c
+++ b/drivers/video/msm/mddi_client_nt35399.c
@@ -189,8 +189,9 @@ static int mddi_nt35399_probe(struct platform_device *pdev)
189 189
190 int ret; 190 int ret;
191 191
192 struct panel_info *panel = kzalloc(sizeof(struct panel_info), 192 struct panel_info *panel = devm_kzalloc(&pdev->dev,
193 GFP_KERNEL); 193 sizeof(struct panel_info),
194 GFP_KERNEL);
194 195
195 printk(KERN_DEBUG "%s: enter.\n", __func__); 196 printk(KERN_DEBUG "%s: enter.\n", __func__);
196 197
@@ -233,7 +234,6 @@ static int mddi_nt35399_remove(struct platform_device *pdev)
233 struct panel_info *panel = platform_get_drvdata(pdev); 234 struct panel_info *panel = platform_get_drvdata(pdev);
234 235
235 setup_vsync(panel, 0); 236 setup_vsync(panel, 0);
236 kfree(panel);
237 return 0; 237 return 0;
238} 238}
239 239
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index 2e0f3bab6114..f2566c19e71c 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -25,7 +25,6 @@
25#include <linux/major.h> 25#include <linux/major.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27 27
28#include <mach/msm_iomap.h>
29#include <linux/platform_data/video-msm_fb.h> 28#include <linux/platform_data/video-msm_fb.h>
30#include <linux/platform_device.h> 29#include <linux/platform_device.h>
31#include <linux/export.h> 30#include <linux/export.h>
diff --git a/drivers/video/msm/mdp_hw.h b/drivers/video/msm/mdp_hw.h
index a0bacf581b32..35848d741001 100644
--- a/drivers/video/msm/mdp_hw.h
+++ b/drivers/video/msm/mdp_hw.h
@@ -15,7 +15,6 @@
15#ifndef _MDP_HW_H_ 15#ifndef _MDP_HW_H_
16#define _MDP_HW_H_ 16#define _MDP_HW_H_
17 17
18#include <mach/msm_iomap.h>
19#include <linux/platform_data/video-msm_fb.h> 18#include <linux/platform_data/video-msm_fb.h>
20 19
21struct mdp_info { 20struct mdp_info {
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index d7381088a180..ce1d452464ed 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -1568,7 +1568,8 @@ static int mx3fb_remove(struct platform_device *dev)
1568 1568
1569static struct platform_driver mx3fb_driver = { 1569static struct platform_driver mx3fb_driver = {
1570 .driver = { 1570 .driver = {
1571 .name = MX3FB_NAME, 1571 .name = MX3FB_NAME,
1572 .owner = THIS_MODULE,
1572 }, 1573 },
1573 .probe = mx3fb_probe, 1574 .probe = mx3fb_probe,
1574 .remove = mx3fb_remove, 1575 .remove = mx3fb_remove,
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c
index 93387555337e..475dfee82c4a 100644
--- a/drivers/video/nuc900fb.c
+++ b/drivers/video/nuc900fb.c
@@ -387,7 +387,7 @@ static int nuc900fb_init_registers(struct fb_info *info)
387 * The buffer should be a non-cached, non-buffered, memory region 387 * The buffer should be a non-cached, non-buffered, memory region
388 * to allow palette and pixel writes without flushing the cache. 388 * to allow palette and pixel writes without flushing the cache.
389 */ 389 */
390static int __init nuc900fb_map_video_memory(struct fb_info *info) 390static int __devinit nuc900fb_map_video_memory(struct fb_info *info)
391{ 391{
392 struct nuc900fb_info *fbi = info->par; 392 struct nuc900fb_info *fbi = info->par;
393 dma_addr_t map_dma; 393 dma_addr_t map_dma;
diff --git a/drivers/video/omap/hwa742.c b/drivers/video/omap/hwa742.c
index 9f1d23c319cb..f349ee6f0cea 100644
--- a/drivers/video/omap/hwa742.c
+++ b/drivers/video/omap/hwa742.c
@@ -27,7 +27,6 @@
27#include <linux/clk.h> 27#include <linux/clk.h>
28#include <linux/interrupt.h> 28#include <linux/interrupt.h>
29 29
30#include <plat/dma.h>
31#include "omapfb.h" 30#include "omapfb.h"
32 31
33#define HWA742_REV_CODE_REG 0x0 32#define HWA742_REV_CODE_REG 0x0
diff --git a/drivers/video/omap/lcd_palmte.c b/drivers/video/omap/lcd_palmte.c
index 88c31eb0cd6c..ff4fb624b904 100644
--- a/drivers/video/omap/lcd_palmte.c
+++ b/drivers/video/omap/lcd_palmte.c
@@ -23,7 +23,6 @@
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24#include <linux/io.h> 24#include <linux/io.h>
25 25
26#include <plat/fpga.h>
27#include "omapfb.h" 26#include "omapfb.h"
28 27
29static int palmte_panel_init(struct lcd_panel *panel, 28static int palmte_panel_init(struct lcd_panel *panel,
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index f54b463709e9..4351c438b76f 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -131,15 +131,6 @@ static void omapfb_rqueue_unlock(struct omapfb_device *fbdev)
131 * LCD controller and LCD DMA 131 * LCD controller and LCD DMA
132 * --------------------------------------------------------------------------- 132 * ---------------------------------------------------------------------------
133 */ 133 */
134/* Lookup table to map elem size to elem type. */
135static const int dma_elem_type[] = {
136 0,
137 OMAP_DMA_DATA_TYPE_S8,
138 OMAP_DMA_DATA_TYPE_S16,
139 0,
140 OMAP_DMA_DATA_TYPE_S32,
141};
142
143/* 134/*
144 * Allocate resources needed for LCD controller and LCD DMA operations. Video 135 * Allocate resources needed for LCD controller and LCD DMA operations. Video
145 * memory is allocated from system memory according to the virtual display 136 * memory is allocated from system memory according to the virtual display
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
index eaeed4340e04..c835aa70f96f 100644
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -600,6 +600,9 @@ static int acx_panel_power_on(struct omap_dss_device *dssdev)
600 600
601 mutex_lock(&md->mutex); 601 mutex_lock(&md->mutex);
602 602
603 omapdss_sdi_set_timings(dssdev, &dssdev->panel.timings);
604 omapdss_sdi_set_datapairs(dssdev, dssdev->phy.sdi.datapairs);
605
603 r = omapdss_sdi_display_enable(dssdev); 606 r = omapdss_sdi_display_enable(dssdev);
604 if (r) { 607 if (r) {
605 pr_err("%s sdi enable failed\n", __func__); 608 pr_err("%s sdi enable failed\n", __func__);
@@ -731,18 +734,9 @@ static int acx_panel_resume(struct omap_dss_device *dssdev)
731static void acx_panel_set_timings(struct omap_dss_device *dssdev, 734static void acx_panel_set_timings(struct omap_dss_device *dssdev,
732 struct omap_video_timings *timings) 735 struct omap_video_timings *timings)
733{ 736{
734 int r; 737 omapdss_sdi_set_timings(dssdev, timings);
735
736 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
737 omapdss_sdi_display_disable(dssdev);
738 738
739 dssdev->panel.timings = *timings; 739 dssdev->panel.timings = *timings;
740
741 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
742 r = omapdss_sdi_display_enable(dssdev);
743 if (r)
744 dev_err(&dssdev->dev, "%s enable failed\n", __func__);
745 }
746} 740}
747 741
748static int acx_panel_check_timings(struct omap_dss_device *dssdev, 742static int acx_panel_check_timings(struct omap_dss_device *dssdev,
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
index bc5af2500eb9..88295c526815 100644
--- a/drivers/video/omap2/displays/panel-generic-dpi.c
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -545,6 +545,8 @@ struct panel_drv_data {
545 struct omap_dss_device *dssdev; 545 struct omap_dss_device *dssdev;
546 546
547 struct panel_config *panel_config; 547 struct panel_config *panel_config;
548
549 struct mutex lock;
548}; 550};
549 551
550static inline struct panel_generic_dpi_data 552static inline struct panel_generic_dpi_data
@@ -563,6 +565,9 @@ static int generic_dpi_panel_power_on(struct omap_dss_device *dssdev)
563 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) 565 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
564 return 0; 566 return 0;
565 567
568 omapdss_dpi_set_timings(dssdev, &dssdev->panel.timings);
569 omapdss_dpi_set_data_lines(dssdev, dssdev->phy.dpi.data_lines);
570
566 r = omapdss_dpi_display_enable(dssdev); 571 r = omapdss_dpi_display_enable(dssdev);
567 if (r) 572 if (r)
568 goto err0; 573 goto err0;
@@ -634,6 +639,8 @@ static int generic_dpi_panel_probe(struct omap_dss_device *dssdev)
634 drv_data->dssdev = dssdev; 639 drv_data->dssdev = dssdev;
635 drv_data->panel_config = panel_config; 640 drv_data->panel_config = panel_config;
636 641
642 mutex_init(&drv_data->lock);
643
637 dev_set_drvdata(&dssdev->dev, drv_data); 644 dev_set_drvdata(&dssdev->dev, drv_data);
638 645
639 return 0; 646 return 0;
@@ -652,56 +659,108 @@ static void __exit generic_dpi_panel_remove(struct omap_dss_device *dssdev)
652 659
653static int generic_dpi_panel_enable(struct omap_dss_device *dssdev) 660static int generic_dpi_panel_enable(struct omap_dss_device *dssdev)
654{ 661{
655 int r = 0; 662 struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
663 int r;
664
665 mutex_lock(&drv_data->lock);
656 666
657 r = generic_dpi_panel_power_on(dssdev); 667 r = generic_dpi_panel_power_on(dssdev);
658 if (r) 668 if (r)
659 return r; 669 goto err;
660 670
661 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; 671 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
672err:
673 mutex_unlock(&drv_data->lock);
662 674
663 return 0; 675 return r;
664} 676}
665 677
666static void generic_dpi_panel_disable(struct omap_dss_device *dssdev) 678static void generic_dpi_panel_disable(struct omap_dss_device *dssdev)
667{ 679{
680 struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
681
682 mutex_lock(&drv_data->lock);
683
668 generic_dpi_panel_power_off(dssdev); 684 generic_dpi_panel_power_off(dssdev);
669 685
670 dssdev->state = OMAP_DSS_DISPLAY_DISABLED; 686 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
687
688 mutex_unlock(&drv_data->lock);
671} 689}
672 690
673static int generic_dpi_panel_suspend(struct omap_dss_device *dssdev) 691static int generic_dpi_panel_suspend(struct omap_dss_device *dssdev)
674{ 692{
693 struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
694
695 mutex_lock(&drv_data->lock);
696
675 generic_dpi_panel_power_off(dssdev); 697 generic_dpi_panel_power_off(dssdev);
676 698
677 dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED; 699 dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
678 700
701 mutex_unlock(&drv_data->lock);
702
679 return 0; 703 return 0;
680} 704}
681 705
682static int generic_dpi_panel_resume(struct omap_dss_device *dssdev) 706static int generic_dpi_panel_resume(struct omap_dss_device *dssdev)
683{ 707{
684 int r = 0; 708 struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
709 int r;
710
711 mutex_lock(&drv_data->lock);
685 712
686 r = generic_dpi_panel_power_on(dssdev); 713 r = generic_dpi_panel_power_on(dssdev);
687 if (r) 714 if (r)
688 return r; 715 goto err;
689 716
690 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; 717 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
691 718
692 return 0; 719err:
720 mutex_unlock(&drv_data->lock);
721
722 return r;
693} 723}
694 724
695static void generic_dpi_panel_set_timings(struct omap_dss_device *dssdev, 725static void generic_dpi_panel_set_timings(struct omap_dss_device *dssdev,
696 struct omap_video_timings *timings) 726 struct omap_video_timings *timings)
697{ 727{
698 dpi_set_timings(dssdev, timings); 728 struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
729
730 mutex_lock(&drv_data->lock);
731
732 omapdss_dpi_set_timings(dssdev, timings);
733
734 dssdev->panel.timings = *timings;
735
736 mutex_unlock(&drv_data->lock);
737}
738
739static void generic_dpi_panel_get_timings(struct omap_dss_device *dssdev,
740 struct omap_video_timings *timings)
741{
742 struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
743
744 mutex_lock(&drv_data->lock);
745
746 *timings = dssdev->panel.timings;
747
748 mutex_unlock(&drv_data->lock);
699} 749}
700 750
701static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev, 751static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev,
702 struct omap_video_timings *timings) 752 struct omap_video_timings *timings)
703{ 753{
704 return dpi_check_timings(dssdev, timings); 754 struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
755 int r;
756
757 mutex_lock(&drv_data->lock);
758
759 r = dpi_check_timings(dssdev, timings);
760
761 mutex_unlock(&drv_data->lock);
762
763 return r;
705} 764}
706 765
707static struct omap_dss_driver dpi_driver = { 766static struct omap_dss_driver dpi_driver = {
@@ -714,6 +773,7 @@ static struct omap_dss_driver dpi_driver = {
714 .resume = generic_dpi_panel_resume, 773 .resume = generic_dpi_panel_resume,
715 774
716 .set_timings = generic_dpi_panel_set_timings, 775 .set_timings = generic_dpi_panel_set_timings,
776 .get_timings = generic_dpi_panel_get_timings,
717 .check_timings = generic_dpi_panel_check_timings, 777 .check_timings = generic_dpi_panel_check_timings,
718 778
719 .driver = { 779 .driver = {
diff --git a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
index 802807798846..90c1cabf244e 100644
--- a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
@@ -55,6 +55,9 @@ static int lb035q02_panel_power_on(struct omap_dss_device *dssdev)
55 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) 55 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
56 return 0; 56 return 0;
57 57
58 omapdss_dpi_set_timings(dssdev, &dssdev->panel.timings);
59 omapdss_dpi_set_data_lines(dssdev, dssdev->phy.dpi.data_lines);
60
58 r = omapdss_dpi_display_enable(dssdev); 61 r = omapdss_dpi_display_enable(dssdev);
59 if (r) 62 if (r)
60 goto err0; 63 goto err0;
diff --git a/drivers/video/omap2/displays/panel-n8x0.c b/drivers/video/omap2/displays/panel-n8x0.c
index e6c115373c00..3fc5ad081a21 100644
--- a/drivers/video/omap2/displays/panel-n8x0.c
+++ b/drivers/video/omap2/displays/panel-n8x0.c
@@ -150,11 +150,17 @@ static void blizzard_ctrl_setup_update(struct omap_dss_device *dssdev,
150 BLIZZARD_SRC_WRITE_LCD : 150 BLIZZARD_SRC_WRITE_LCD :
151 BLIZZARD_SRC_WRITE_LCD_DESTRUCTIVE; 151 BLIZZARD_SRC_WRITE_LCD_DESTRUCTIVE;
152 152
153 omap_rfbi_configure(dssdev, 16, 8); 153 omapdss_rfbi_set_pixel_size(dssdev, 16);
154 omapdss_rfbi_set_data_lines(dssdev, 8);
155
156 omap_rfbi_configure(dssdev);
154 157
155 blizzard_write(BLIZZARD_INPUT_WIN_X_START_0, tmp, 18); 158 blizzard_write(BLIZZARD_INPUT_WIN_X_START_0, tmp, 18);
156 159
157 omap_rfbi_configure(dssdev, 16, 16); 160 omapdss_rfbi_set_pixel_size(dssdev, 16);
161 omapdss_rfbi_set_data_lines(dssdev, 16);
162
163 omap_rfbi_configure(dssdev);
158} 164}
159 165
160static void mipid_transfer(struct spi_device *spi, int cmd, const u8 *wbuf, 166static void mipid_transfer(struct spi_device *spi, int cmd, const u8 *wbuf,
@@ -297,6 +303,12 @@ static int n8x0_panel_power_on(struct omap_dss_device *dssdev)
297 goto err_plat_en; 303 goto err_plat_en;
298 } 304 }
299 305
306 omapdss_rfbi_set_size(dssdev, dssdev->panel.timings.x_res,
307 dssdev->panel.timings.y_res);
308 omapdss_rfbi_set_pixel_size(dssdev, dssdev->ctrl.pixel_size);
309 omapdss_rfbi_set_data_lines(dssdev, dssdev->phy.rfbi.data_lines);
310 omapdss_rfbi_set_interface_timings(dssdev, &dssdev->ctrl.rfbi_timings);
311
300 r = omapdss_rfbi_display_enable(dssdev); 312 r = omapdss_rfbi_display_enable(dssdev);
301 if (r) 313 if (r)
302 goto err_rfbi_en; 314 goto err_rfbi_en;
@@ -477,6 +489,7 @@ static int n8x0_panel_probe(struct omap_dss_device *dssdev)
477 dssdev->panel.timings.y_res = 480; 489 dssdev->panel.timings.y_res = 480;
478 dssdev->ctrl.pixel_size = 16; 490 dssdev->ctrl.pixel_size = 16;
479 dssdev->ctrl.rfbi_timings = n8x0_panel_timings; 491 dssdev->ctrl.rfbi_timings = n8x0_panel_timings;
492 dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
480 493
481 memset(&props, 0, sizeof(props)); 494 memset(&props, 0, sizeof(props));
482 props.max_brightness = 127; 495 props.max_brightness = 127;
@@ -625,17 +638,25 @@ static int n8x0_panel_update(struct omap_dss_device *dssdev,
625 u16 x, u16 y, u16 w, u16 h) 638 u16 x, u16 y, u16 w, u16 h)
626{ 639{
627 struct panel_drv_data *ddata = get_drv_data(dssdev); 640 struct panel_drv_data *ddata = get_drv_data(dssdev);
641 u16 dw, dh;
628 642
629 dev_dbg(&dssdev->dev, "update\n"); 643 dev_dbg(&dssdev->dev, "update\n");
630 644
645 dw = dssdev->panel.timings.x_res;
646 dh = dssdev->panel.timings.y_res;
647
648 if (x != 0 || y != 0 || w != dw || h != dh) {
649 dev_err(&dssdev->dev, "invaid update region %d, %d, %d, %d\n",
650 x, y, w, h);
651 return -EINVAL;
652 }
653
631 mutex_lock(&ddata->lock); 654 mutex_lock(&ddata->lock);
632 rfbi_bus_lock(); 655 rfbi_bus_lock();
633 656
634 omap_rfbi_prepare_update(dssdev, &x, &y, &w, &h);
635
636 blizzard_ctrl_setup_update(dssdev, x, y, w, h); 657 blizzard_ctrl_setup_update(dssdev, x, y, w, h);
637 658
638 omap_rfbi_update(dssdev, x, y, w, h, update_done, NULL); 659 omap_rfbi_update(dssdev, update_done, NULL);
639 660
640 mutex_unlock(&ddata->lock); 661 mutex_unlock(&ddata->lock);
641 662
diff --git a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
index b122b0f31c43..908fd268f3dc 100644
--- a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
+++ b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
@@ -175,6 +175,9 @@ static int nec_8048_panel_power_on(struct omap_dss_device *dssdev)
175 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) 175 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
176 return 0; 176 return 0;
177 177
178 omapdss_dpi_set_timings(dssdev, &dssdev->panel.timings);
179 omapdss_dpi_set_data_lines(dssdev, dssdev->phy.dpi.data_lines);
180
178 r = omapdss_dpi_display_enable(dssdev); 181 r = omapdss_dpi_display_enable(dssdev);
179 if (r) 182 if (r)
180 goto err0; 183 goto err0;
diff --git a/drivers/video/omap2/displays/panel-picodlp.c b/drivers/video/omap2/displays/panel-picodlp.c
index 2d35bd388860..9df87640ddd2 100644
--- a/drivers/video/omap2/displays/panel-picodlp.c
+++ b/drivers/video/omap2/displays/panel-picodlp.c
@@ -377,6 +377,10 @@ static int picodlp_panel_power_on(struct omap_dss_device *dssdev)
377 * then only i2c commands can be successfully sent to dpp2600 377 * then only i2c commands can be successfully sent to dpp2600
378 */ 378 */
379 msleep(1000); 379 msleep(1000);
380
381 omapdss_dpi_set_timings(dssdev, &dssdev->panel.timings);
382 omapdss_dpi_set_data_lines(dssdev, dssdev->phy.dpi.data_lines);
383
380 r = omapdss_dpi_display_enable(dssdev); 384 r = omapdss_dpi_display_enable(dssdev);
381 if (r) { 385 if (r) {
382 dev_err(&dssdev->dev, "failed to enable DPI\n"); 386 dev_err(&dssdev->dev, "failed to enable DPI\n");
diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
index bd86ba9ccf76..1ec3b277ff15 100644
--- a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
@@ -142,6 +142,9 @@ static int sharp_ls_power_on(struct omap_dss_device *dssdev)
142 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) 142 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
143 return 0; 143 return 0;
144 144
145 omapdss_dpi_set_timings(dssdev, &dssdev->panel.timings);
146 omapdss_dpi_set_data_lines(dssdev, dssdev->phy.dpi.data_lines);
147
145 r = omapdss_dpi_display_enable(dssdev); 148 r = omapdss_dpi_display_enable(dssdev);
146 if (r) 149 if (r)
147 goto err0; 150 goto err0;
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index 6b5e6e0e202f..f2f644680ca8 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -121,6 +121,18 @@ struct taal_data {
121 121
122 struct omap_dss_device *dssdev; 122 struct omap_dss_device *dssdev;
123 123
124 /* panel specific HW info */
125 struct panel_config *panel_config;
126
127 /* panel HW configuration from DT or platform data */
128 int reset_gpio;
129 int ext_te_gpio;
130
131 bool use_dsi_backlight;
132
133 struct omap_dsi_pin_config pin_config;
134
135 /* runtime variables */
124 bool enabled; 136 bool enabled;
125 u8 rotate; 137 u8 rotate;
126 bool mirror; 138 bool mirror;
@@ -145,16 +157,8 @@ struct taal_data {
145 bool ulps_enabled; 157 bool ulps_enabled;
146 unsigned ulps_timeout; 158 unsigned ulps_timeout;
147 struct delayed_work ulps_work; 159 struct delayed_work ulps_work;
148
149 struct panel_config *panel_config;
150}; 160};
151 161
152static inline struct nokia_dsi_panel_data
153*get_panel_data(const struct omap_dss_device *dssdev)
154{
155 return (struct nokia_dsi_panel_data *) dssdev->data;
156}
157
158static void taal_esd_work(struct work_struct *work); 162static void taal_esd_work(struct work_struct *work);
159static void taal_ulps_work(struct work_struct *work); 163static void taal_ulps_work(struct work_struct *work);
160 164
@@ -371,7 +375,6 @@ static void taal_cancel_ulps_work(struct omap_dss_device *dssdev)
371static int taal_enter_ulps(struct omap_dss_device *dssdev) 375static int taal_enter_ulps(struct omap_dss_device *dssdev)
372{ 376{
373 struct taal_data *td = dev_get_drvdata(&dssdev->dev); 377 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
374 struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
375 int r; 378 int r;
376 379
377 if (td->ulps_enabled) 380 if (td->ulps_enabled)
@@ -383,7 +386,8 @@ static int taal_enter_ulps(struct omap_dss_device *dssdev)
383 if (r) 386 if (r)
384 goto err; 387 goto err;
385 388
386 disable_irq(gpio_to_irq(panel_data->ext_te_gpio)); 389 if (gpio_is_valid(td->ext_te_gpio))
390 disable_irq(gpio_to_irq(td->ext_te_gpio));
387 391
388 omapdss_dsi_display_disable(dssdev, false, true); 392 omapdss_dsi_display_disable(dssdev, false, true);
389 393
@@ -405,7 +409,6 @@ err:
405static int taal_exit_ulps(struct omap_dss_device *dssdev) 409static int taal_exit_ulps(struct omap_dss_device *dssdev)
406{ 410{
407 struct taal_data *td = dev_get_drvdata(&dssdev->dev); 411 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
408 struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
409 int r; 412 int r;
410 413
411 if (!td->ulps_enabled) 414 if (!td->ulps_enabled)
@@ -425,7 +428,8 @@ static int taal_exit_ulps(struct omap_dss_device *dssdev)
425 goto err2; 428 goto err2;
426 } 429 }
427 430
428 enable_irq(gpio_to_irq(panel_data->ext_te_gpio)); 431 if (gpio_is_valid(td->ext_te_gpio))
432 enable_irq(gpio_to_irq(td->ext_te_gpio));
429 433
430 taal_queue_ulps_work(dssdev); 434 taal_queue_ulps_work(dssdev);
431 435
@@ -438,7 +442,8 @@ err2:
438 442
439 r = taal_panel_reset(dssdev); 443 r = taal_panel_reset(dssdev);
440 if (!r) { 444 if (!r) {
441 enable_irq(gpio_to_irq(panel_data->ext_te_gpio)); 445 if (gpio_is_valid(td->ext_te_gpio))
446 enable_irq(gpio_to_irq(td->ext_te_gpio));
442 td->ulps_enabled = false; 447 td->ulps_enabled = false;
443 } 448 }
444err1: 449err1:
@@ -835,94 +840,135 @@ static struct attribute_group taal_attr_group = {
835static void taal_hw_reset(struct omap_dss_device *dssdev) 840static void taal_hw_reset(struct omap_dss_device *dssdev)
836{ 841{
837 struct taal_data *td = dev_get_drvdata(&dssdev->dev); 842 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
838 struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
839 843
840 if (panel_data->reset_gpio == -1) 844 if (!gpio_is_valid(td->reset_gpio))
841 return; 845 return;
842 846
843 gpio_set_value(panel_data->reset_gpio, 1); 847 gpio_set_value(td->reset_gpio, 1);
844 if (td->panel_config->reset_sequence.high) 848 if (td->panel_config->reset_sequence.high)
845 udelay(td->panel_config->reset_sequence.high); 849 udelay(td->panel_config->reset_sequence.high);
846 /* reset the panel */ 850 /* reset the panel */
847 gpio_set_value(panel_data->reset_gpio, 0); 851 gpio_set_value(td->reset_gpio, 0);
848 /* assert reset */ 852 /* assert reset */
849 if (td->panel_config->reset_sequence.low) 853 if (td->panel_config->reset_sequence.low)
850 udelay(td->panel_config->reset_sequence.low); 854 udelay(td->panel_config->reset_sequence.low);
851 gpio_set_value(panel_data->reset_gpio, 1); 855 gpio_set_value(td->reset_gpio, 1);
852 /* wait after releasing reset */ 856 /* wait after releasing reset */
853 if (td->panel_config->sleep.hw_reset) 857 if (td->panel_config->sleep.hw_reset)
854 msleep(td->panel_config->sleep.hw_reset); 858 msleep(td->panel_config->sleep.hw_reset);
855} 859}
856 860
861static void taal_probe_pdata(struct taal_data *td,
862 const struct nokia_dsi_panel_data *pdata)
863{
864 td->reset_gpio = pdata->reset_gpio;
865
866 if (pdata->use_ext_te)
867 td->ext_te_gpio = pdata->ext_te_gpio;
868 else
869 td->ext_te_gpio = -1;
870
871 td->esd_interval = pdata->esd_interval;
872 td->ulps_timeout = pdata->ulps_timeout;
873
874 td->use_dsi_backlight = pdata->use_dsi_backlight;
875
876 td->pin_config = pdata->pin_config;
877}
878
857static int taal_probe(struct omap_dss_device *dssdev) 879static int taal_probe(struct omap_dss_device *dssdev)
858{ 880{
859 struct backlight_properties props; 881 struct backlight_properties props;
860 struct taal_data *td; 882 struct taal_data *td;
861 struct backlight_device *bldev = NULL; 883 struct backlight_device *bldev = NULL;
862 struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
863 struct panel_config *panel_config = NULL;
864 int r, i; 884 int r, i;
885 const char *panel_name;
865 886
866 dev_dbg(&dssdev->dev, "probe\n"); 887 dev_dbg(&dssdev->dev, "probe\n");
867 888
868 if (!panel_data || !panel_data->name) { 889 td = devm_kzalloc(&dssdev->dev, sizeof(*td), GFP_KERNEL);
869 r = -EINVAL; 890 if (!td)
870 goto err; 891 return -ENOMEM;
892
893 dev_set_drvdata(&dssdev->dev, td);
894 td->dssdev = dssdev;
895
896 if (dssdev->data) {
897 const struct nokia_dsi_panel_data *pdata = dssdev->data;
898
899 taal_probe_pdata(td, pdata);
900
901 panel_name = pdata->name;
902 } else {
903 return -ENODEV;
871 } 904 }
872 905
906 if (panel_name == NULL)
907 return -EINVAL;
908
873 for (i = 0; i < ARRAY_SIZE(panel_configs); i++) { 909 for (i = 0; i < ARRAY_SIZE(panel_configs); i++) {
874 if (strcmp(panel_data->name, panel_configs[i].name) == 0) { 910 if (strcmp(panel_name, panel_configs[i].name) == 0) {
875 panel_config = &panel_configs[i]; 911 td->panel_config = &panel_configs[i];
876 break; 912 break;
877 } 913 }
878 } 914 }
879 915
880 if (!panel_config) { 916 if (!td->panel_config)
881 r = -EINVAL; 917 return -EINVAL;
882 goto err;
883 }
884 918
885 dssdev->panel.timings = panel_config->timings; 919 dssdev->panel.timings = td->panel_config->timings;
886 dssdev->panel.dsi_pix_fmt = OMAP_DSS_DSI_FMT_RGB888; 920 dssdev->panel.dsi_pix_fmt = OMAP_DSS_DSI_FMT_RGB888;
887 921 dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
888 td = kzalloc(sizeof(*td), GFP_KERNEL); 922 OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
889 if (!td) {
890 r = -ENOMEM;
891 goto err;
892 }
893 td->dssdev = dssdev;
894 td->panel_config = panel_config;
895 td->esd_interval = panel_data->esd_interval;
896 td->ulps_enabled = false;
897 td->ulps_timeout = panel_data->ulps_timeout;
898 923
899 mutex_init(&td->lock); 924 mutex_init(&td->lock);
900 925
901 atomic_set(&td->do_update, 0); 926 atomic_set(&td->do_update, 0);
902 927
903 td->workqueue = create_singlethread_workqueue("taal_esd"); 928 if (gpio_is_valid(td->reset_gpio)) {
904 if (td->workqueue == NULL) { 929 r = devm_gpio_request_one(&dssdev->dev, td->reset_gpio,
905 dev_err(&dssdev->dev, "can't create ESD workqueue\n"); 930 GPIOF_OUT_INIT_LOW, "taal rst");
906 r = -ENOMEM; 931 if (r) {
907 goto err_wq; 932 dev_err(&dssdev->dev, "failed to request reset gpio\n");
933 return r;
934 }
908 } 935 }
909 INIT_DEFERRABLE_WORK(&td->esd_work, taal_esd_work);
910 INIT_DELAYED_WORK(&td->ulps_work, taal_ulps_work);
911 936
912 dev_set_drvdata(&dssdev->dev, td); 937 if (gpio_is_valid(td->ext_te_gpio)) {
938 r = devm_gpio_request_one(&dssdev->dev, td->ext_te_gpio,
939 GPIOF_IN, "taal irq");
940 if (r) {
941 dev_err(&dssdev->dev, "GPIO request failed\n");
942 return r;
943 }
944
945 r = devm_request_irq(&dssdev->dev, gpio_to_irq(td->ext_te_gpio),
946 taal_te_isr,
947 IRQF_TRIGGER_RISING,
948 "taal vsync", dssdev);
913 949
914 if (gpio_is_valid(panel_data->reset_gpio)) {
915 r = gpio_request_one(panel_data->reset_gpio, GPIOF_OUT_INIT_LOW,
916 "taal rst");
917 if (r) { 950 if (r) {
918 dev_err(&dssdev->dev, "failed to request reset gpio\n"); 951 dev_err(&dssdev->dev, "IRQ request failed\n");
919 goto err_rst_gpio; 952 return r;
920 } 953 }
954
955 INIT_DEFERRABLE_WORK(&td->te_timeout_work,
956 taal_te_timeout_work_callback);
957
958 dev_dbg(&dssdev->dev, "Using GPIO TE\n");
921 } 959 }
922 960
961 td->workqueue = create_singlethread_workqueue("taal_esd");
962 if (td->workqueue == NULL) {
963 dev_err(&dssdev->dev, "can't create ESD workqueue\n");
964 return -ENOMEM;
965 }
966 INIT_DEFERRABLE_WORK(&td->esd_work, taal_esd_work);
967 INIT_DELAYED_WORK(&td->ulps_work, taal_ulps_work);
968
923 taal_hw_reset(dssdev); 969 taal_hw_reset(dssdev);
924 970
925 if (panel_data->use_dsi_backlight) { 971 if (td->use_dsi_backlight) {
926 memset(&props, 0, sizeof(struct backlight_properties)); 972 memset(&props, 0, sizeof(struct backlight_properties));
927 props.max_brightness = 255; 973 props.max_brightness = 255;
928 974
@@ -943,31 +989,6 @@ static int taal_probe(struct omap_dss_device *dssdev)
943 taal_bl_update_status(bldev); 989 taal_bl_update_status(bldev);
944 } 990 }
945 991
946 if (panel_data->use_ext_te) {
947 int gpio = panel_data->ext_te_gpio;
948
949 r = gpio_request_one(gpio, GPIOF_IN, "taal irq");
950 if (r) {
951 dev_err(&dssdev->dev, "GPIO request failed\n");
952 goto err_gpio;
953 }
954
955 r = request_irq(gpio_to_irq(gpio), taal_te_isr,
956 IRQF_TRIGGER_RISING,
957 "taal vsync", dssdev);
958
959 if (r) {
960 dev_err(&dssdev->dev, "IRQ request failed\n");
961 gpio_free(gpio);
962 goto err_irq;
963 }
964
965 INIT_DEFERRABLE_WORK(&td->te_timeout_work,
966 taal_te_timeout_work_callback);
967
968 dev_dbg(&dssdev->dev, "Using GPIO TE\n");
969 }
970
971 r = omap_dsi_request_vc(dssdev, &td->channel); 992 r = omap_dsi_request_vc(dssdev, &td->channel);
972 if (r) { 993 if (r) {
973 dev_err(&dssdev->dev, "failed to get virtual channel\n"); 994 dev_err(&dssdev->dev, "failed to get virtual channel\n");
@@ -991,29 +1012,16 @@ static int taal_probe(struct omap_dss_device *dssdev)
991err_vc_id: 1012err_vc_id:
992 omap_dsi_release_vc(dssdev, td->channel); 1013 omap_dsi_release_vc(dssdev, td->channel);
993err_req_vc: 1014err_req_vc:
994 if (panel_data->use_ext_te)
995 free_irq(gpio_to_irq(panel_data->ext_te_gpio), dssdev);
996err_irq:
997 if (panel_data->use_ext_te)
998 gpio_free(panel_data->ext_te_gpio);
999err_gpio:
1000 if (bldev != NULL) 1015 if (bldev != NULL)
1001 backlight_device_unregister(bldev); 1016 backlight_device_unregister(bldev);
1002err_bl: 1017err_bl:
1003 if (gpio_is_valid(panel_data->reset_gpio))
1004 gpio_free(panel_data->reset_gpio);
1005err_rst_gpio:
1006 destroy_workqueue(td->workqueue); 1018 destroy_workqueue(td->workqueue);
1007err_wq:
1008 kfree(td);
1009err:
1010 return r; 1019 return r;
1011} 1020}
1012 1021
1013static void __exit taal_remove(struct omap_dss_device *dssdev) 1022static void __exit taal_remove(struct omap_dss_device *dssdev)
1014{ 1023{
1015 struct taal_data *td = dev_get_drvdata(&dssdev->dev); 1024 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
1016 struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
1017 struct backlight_device *bldev; 1025 struct backlight_device *bldev;
1018 1026
1019 dev_dbg(&dssdev->dev, "remove\n"); 1027 dev_dbg(&dssdev->dev, "remove\n");
@@ -1021,12 +1029,6 @@ static void __exit taal_remove(struct omap_dss_device *dssdev)
1021 sysfs_remove_group(&dssdev->dev.kobj, &taal_attr_group); 1029 sysfs_remove_group(&dssdev->dev.kobj, &taal_attr_group);
1022 omap_dsi_release_vc(dssdev, td->channel); 1030 omap_dsi_release_vc(dssdev, td->channel);
1023 1031
1024 if (panel_data->use_ext_te) {
1025 int gpio = panel_data->ext_te_gpio;
1026 free_irq(gpio_to_irq(gpio), dssdev);
1027 gpio_free(gpio);
1028 }
1029
1030 bldev = td->bldev; 1032 bldev = td->bldev;
1031 if (bldev != NULL) { 1033 if (bldev != NULL) {
1032 bldev->props.power = FB_BLANK_POWERDOWN; 1034 bldev->props.power = FB_BLANK_POWERDOWN;
@@ -1040,26 +1042,31 @@ static void __exit taal_remove(struct omap_dss_device *dssdev)
1040 1042
1041 /* reset, to be sure that the panel is in a valid state */ 1043 /* reset, to be sure that the panel is in a valid state */
1042 taal_hw_reset(dssdev); 1044 taal_hw_reset(dssdev);
1043
1044 if (gpio_is_valid(panel_data->reset_gpio))
1045 gpio_free(panel_data->reset_gpio);
1046
1047 kfree(td);
1048} 1045}
1049 1046
1050static int taal_power_on(struct omap_dss_device *dssdev) 1047static int taal_power_on(struct omap_dss_device *dssdev)
1051{ 1048{
1052 struct taal_data *td = dev_get_drvdata(&dssdev->dev); 1049 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
1053 struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
1054 u8 id1, id2, id3; 1050 u8 id1, id2, id3;
1055 int r; 1051 int r;
1056 1052
1057 r = omapdss_dsi_configure_pins(dssdev, &panel_data->pin_config); 1053 r = omapdss_dsi_configure_pins(dssdev, &td->pin_config);
1058 if (r) { 1054 if (r) {
1059 dev_err(&dssdev->dev, "failed to configure DSI pins\n"); 1055 dev_err(&dssdev->dev, "failed to configure DSI pins\n");
1060 goto err0; 1056 goto err0;
1061 }; 1057 };
1062 1058
1059 omapdss_dsi_set_size(dssdev, dssdev->panel.timings.x_res,
1060 dssdev->panel.timings.y_res);
1061 omapdss_dsi_set_pixel_format(dssdev, OMAP_DSS_DSI_FMT_RGB888);
1062 omapdss_dsi_set_operation_mode(dssdev, OMAP_DSS_DSI_CMD_MODE);
1063
1064 r = omapdss_dsi_set_clocks(dssdev, 216000000, 10000000);
1065 if (r) {
1066 dev_err(&dssdev->dev, "failed to set HS and LP clocks\n");
1067 goto err0;
1068 }
1069
1063 r = omapdss_dsi_display_enable(dssdev); 1070 r = omapdss_dsi_display_enable(dssdev);
1064 if (r) { 1071 if (r) {
1065 dev_err(&dssdev->dev, "failed to enable DSI\n"); 1072 dev_err(&dssdev->dev, "failed to enable DSI\n");
@@ -1356,7 +1363,6 @@ static int taal_update(struct omap_dss_device *dssdev,
1356 u16 x, u16 y, u16 w, u16 h) 1363 u16 x, u16 y, u16 w, u16 h)
1357{ 1364{
1358 struct taal_data *td = dev_get_drvdata(&dssdev->dev); 1365 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
1359 struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
1360 int r; 1366 int r;
1361 1367
1362 dev_dbg(&dssdev->dev, "update %d, %d, %d x %d\n", x, y, w, h); 1368 dev_dbg(&dssdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
@@ -1380,7 +1386,7 @@ static int taal_update(struct omap_dss_device *dssdev,
1380 if (r) 1386 if (r)
1381 goto err; 1387 goto err;
1382 1388
1383 if (td->te_enabled && panel_data->use_ext_te) { 1389 if (td->te_enabled && gpio_is_valid(td->ext_te_gpio)) {
1384 schedule_delayed_work(&td->te_timeout_work, 1390 schedule_delayed_work(&td->te_timeout_work,
1385 msecs_to_jiffies(250)); 1391 msecs_to_jiffies(250));
1386 atomic_set(&td->do_update, 1); 1392 atomic_set(&td->do_update, 1);
@@ -1419,7 +1425,6 @@ static int taal_sync(struct omap_dss_device *dssdev)
1419static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable) 1425static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable)
1420{ 1426{
1421 struct taal_data *td = dev_get_drvdata(&dssdev->dev); 1427 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
1422 struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
1423 int r; 1428 int r;
1424 1429
1425 if (enable) 1430 if (enable)
@@ -1427,7 +1432,7 @@ static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable)
1427 else 1432 else
1428 r = taal_dcs_write_0(td, MIPI_DCS_SET_TEAR_OFF); 1433 r = taal_dcs_write_0(td, MIPI_DCS_SET_TEAR_OFF);
1429 1434
1430 if (!panel_data->use_ext_te) 1435 if (!gpio_is_valid(td->ext_te_gpio))
1431 omapdss_dsi_enable_te(dssdev, enable); 1436 omapdss_dsi_enable_te(dssdev, enable);
1432 1437
1433 if (td->panel_config->sleep.enable_te) 1438 if (td->panel_config->sleep.enable_te)
@@ -1487,6 +1492,7 @@ static int taal_get_te(struct omap_dss_device *dssdev)
1487static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate) 1492static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
1488{ 1493{
1489 struct taal_data *td = dev_get_drvdata(&dssdev->dev); 1494 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
1495 u16 dw, dh;
1490 int r; 1496 int r;
1491 1497
1492 dev_dbg(&dssdev->dev, "rotate %d\n", rotate); 1498 dev_dbg(&dssdev->dev, "rotate %d\n", rotate);
@@ -1508,6 +1514,16 @@ static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
1508 goto err; 1514 goto err;
1509 } 1515 }
1510 1516
1517 if (rotate == 0 || rotate == 2) {
1518 dw = dssdev->panel.timings.x_res;
1519 dh = dssdev->panel.timings.y_res;
1520 } else {
1521 dw = dssdev->panel.timings.y_res;
1522 dh = dssdev->panel.timings.x_res;
1523 }
1524
1525 omapdss_dsi_set_size(dssdev, dw, dh);
1526
1511 td->rotate = rotate; 1527 td->rotate = rotate;
1512 1528
1513 dsi_bus_unlock(dssdev); 1529 dsi_bus_unlock(dssdev);
@@ -1726,7 +1742,6 @@ static void taal_esd_work(struct work_struct *work)
1726 struct taal_data *td = container_of(work, struct taal_data, 1742 struct taal_data *td = container_of(work, struct taal_data,
1727 esd_work.work); 1743 esd_work.work);
1728 struct omap_dss_device *dssdev = td->dssdev; 1744 struct omap_dss_device *dssdev = td->dssdev;
1729 struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
1730 u8 state1, state2; 1745 u8 state1, state2;
1731 int r; 1746 int r;
1732 1747
@@ -1773,7 +1788,7 @@ static void taal_esd_work(struct work_struct *work)
1773 } 1788 }
1774 /* Self-diagnostics result is also shown on TE GPIO line. We need 1789 /* Self-diagnostics result is also shown on TE GPIO line. We need
1775 * to re-enable TE after self diagnostics */ 1790 * to re-enable TE after self diagnostics */
1776 if (td->te_enabled && panel_data->use_ext_te) { 1791 if (td->te_enabled && gpio_is_valid(td->ext_te_gpio)) {
1777 r = taal_dcs_write_1(td, MIPI_DCS_SET_TEAR_ON, 0); 1792 r = taal_dcs_write_1(td, MIPI_DCS_SET_TEAR_ON, 0);
1778 if (r) 1793 if (r)
1779 goto err; 1794 goto err;
diff --git a/drivers/video/omap2/displays/panel-tfp410.c b/drivers/video/omap2/displays/panel-tfp410.c
index 40cc0cfa5d17..383811cf8648 100644
--- a/drivers/video/omap2/displays/panel-tfp410.c
+++ b/drivers/video/omap2/displays/panel-tfp410.c
@@ -65,6 +65,9 @@ static int tfp410_power_on(struct omap_dss_device *dssdev)
65 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) 65 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
66 return 0; 66 return 0;
67 67
68 omapdss_dpi_set_timings(dssdev, &dssdev->panel.timings);
69 omapdss_dpi_set_data_lines(dssdev, dssdev->phy.dpi.data_lines);
70
68 r = omapdss_dpi_display_enable(dssdev); 71 r = omapdss_dpi_display_enable(dssdev);
69 if (r) 72 if (r)
70 goto err0; 73 goto err0;
@@ -116,8 +119,8 @@ static int tfp410_probe(struct omap_dss_device *dssdev)
116 } 119 }
117 120
118 if (gpio_is_valid(ddata->pd_gpio)) { 121 if (gpio_is_valid(ddata->pd_gpio)) {
119 r = gpio_request_one(ddata->pd_gpio, GPIOF_OUT_INIT_LOW, 122 r = devm_gpio_request_one(&dssdev->dev, ddata->pd_gpio,
120 "tfp410 pd"); 123 GPIOF_OUT_INIT_LOW, "tfp410 pd");
121 if (r) { 124 if (r) {
122 dev_err(&dssdev->dev, "Failed to request PD GPIO %d\n", 125 dev_err(&dssdev->dev, "Failed to request PD GPIO %d\n",
123 ddata->pd_gpio); 126 ddata->pd_gpio);
@@ -132,8 +135,7 @@ static int tfp410_probe(struct omap_dss_device *dssdev)
132 if (!adapter) { 135 if (!adapter) {
133 dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n", 136 dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n",
134 i2c_bus_num); 137 i2c_bus_num);
135 r = -EINVAL; 138 return -EINVAL;
136 goto err_i2c;
137 } 139 }
138 140
139 ddata->i2c_adapter = adapter; 141 ddata->i2c_adapter = adapter;
@@ -142,10 +144,6 @@ static int tfp410_probe(struct omap_dss_device *dssdev)
142 dev_set_drvdata(&dssdev->dev, ddata); 144 dev_set_drvdata(&dssdev->dev, ddata);
143 145
144 return 0; 146 return 0;
145err_i2c:
146 if (gpio_is_valid(ddata->pd_gpio))
147 gpio_free(ddata->pd_gpio);
148 return r;
149} 147}
150 148
151static void __exit tfp410_remove(struct omap_dss_device *dssdev) 149static void __exit tfp410_remove(struct omap_dss_device *dssdev)
@@ -157,9 +155,6 @@ static void __exit tfp410_remove(struct omap_dss_device *dssdev)
157 if (ddata->i2c_adapter) 155 if (ddata->i2c_adapter)
158 i2c_put_adapter(ddata->i2c_adapter); 156 i2c_put_adapter(ddata->i2c_adapter);
159 157
160 if (gpio_is_valid(ddata->pd_gpio))
161 gpio_free(ddata->pd_gpio);
162
163 dev_set_drvdata(&dssdev->dev, NULL); 158 dev_set_drvdata(&dssdev->dev, NULL);
164 159
165 mutex_unlock(&ddata->lock); 160 mutex_unlock(&ddata->lock);
@@ -231,7 +226,8 @@ static void tfp410_set_timings(struct omap_dss_device *dssdev,
231 struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev); 226 struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
232 227
233 mutex_lock(&ddata->lock); 228 mutex_lock(&ddata->lock);
234 dpi_set_timings(dssdev, timings); 229 omapdss_dpi_set_timings(dssdev, timings);
230 dssdev->panel.timings = *timings;
235 mutex_unlock(&ddata->lock); 231 mutex_unlock(&ddata->lock);
236} 232}
237 233
diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
index fa7baa650ae0..b5e6dbc59f0a 100644
--- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
@@ -337,6 +337,9 @@ static int tpo_td043_enable_dss(struct omap_dss_device *dssdev)
337 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) 337 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
338 return 0; 338 return 0;
339 339
340 omapdss_dpi_set_timings(dssdev, &dssdev->panel.timings);
341 omapdss_dpi_set_data_lines(dssdev, dssdev->phy.dpi.data_lines);
342
340 r = omapdss_dpi_display_enable(dssdev); 343 r = omapdss_dpi_display_enable(dssdev);
341 if (r) 344 if (r)
342 goto err0; 345 goto err0;
@@ -480,7 +483,9 @@ static void tpo_td043_remove(struct omap_dss_device *dssdev)
480static void tpo_td043_set_timings(struct omap_dss_device *dssdev, 483static void tpo_td043_set_timings(struct omap_dss_device *dssdev,
481 struct omap_video_timings *timings) 484 struct omap_video_timings *timings)
482{ 485{
483 dpi_set_timings(dssdev, timings); 486 omapdss_dpi_set_timings(dssdev, timings);
487
488 dssdev->panel.timings = *timings;
484} 489}
485 490
486static int tpo_td043_check_timings(struct omap_dss_device *dssdev, 491static int tpo_td043_check_timings(struct omap_dss_device *dssdev,
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index b337a8469fd8..80f5390aa136 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -84,7 +84,7 @@ config OMAP2_DSS_SDI
84 84
85config OMAP2_DSS_DSI 85config OMAP2_DSS_DSI
86 bool "DSI support" 86 bool "DSI support"
87 depends on ARCH_OMAP3 || ARCH_OMAP4 87 depends on ARCH_OMAP3 || ARCH_OMAP4 || ARCH_OMAP5
88 default n 88 default n
89 help 89 help
90 MIPI DSI (Display Serial Interface) support. 90 MIPI DSI (Display Serial Interface) support.
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
index 5c450b0f94d0..4549869bfe1a 100644
--- a/drivers/video/omap2/dss/Makefile
+++ b/drivers/video/omap2/dss/Makefile
@@ -1,9 +1,9 @@
1obj-$(CONFIG_OMAP2_DSS) += omapdss.o 1obj-$(CONFIG_OMAP2_DSS) += omapdss.o
2omapdss-y := core.o dss.o dss_features.o dispc.o dispc_coefs.o display.o \ 2omapdss-y := core.o dss.o dss_features.o dispc.o dispc_coefs.o display.o \
3 manager.o overlay.o apply.o 3 manager.o manager-sysfs.o overlay.o overlay-sysfs.o output.o apply.o
4omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o 4omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
5omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o 5omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
6omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o 6omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o venc_panel.o
7omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o 7omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
8omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o 8omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
9omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi.o \ 9omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi.o \
diff --git a/drivers/video/omap2/dss/apply.c b/drivers/video/omap2/dss/apply.c
index 0fefc68372b9..19d66f471b4b 100644
--- a/drivers/video/omap2/dss/apply.c
+++ b/drivers/video/omap2/dss/apply.c
@@ -111,9 +111,6 @@ static struct {
111 struct ovl_priv_data ovl_priv_data_array[MAX_DSS_OVERLAYS]; 111 struct ovl_priv_data ovl_priv_data_array[MAX_DSS_OVERLAYS];
112 struct mgr_priv_data mgr_priv_data_array[MAX_DSS_MANAGERS]; 112 struct mgr_priv_data mgr_priv_data_array[MAX_DSS_MANAGERS];
113 113
114 bool fifo_merge_dirty;
115 bool fifo_merge;
116
117 bool irq_enabled; 114 bool irq_enabled;
118} dss_data; 115} dss_data;
119 116
@@ -424,17 +421,25 @@ static void wait_pending_extra_info_updates(void)
424int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr) 421int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
425{ 422{
426 unsigned long timeout = msecs_to_jiffies(500); 423 unsigned long timeout = msecs_to_jiffies(500);
427 struct mgr_priv_data *mp; 424 struct mgr_priv_data *mp = get_mgr_priv(mgr);
428 u32 irq; 425 u32 irq;
426 unsigned long flags;
429 int r; 427 int r;
430 int i; 428 int i;
431 struct omap_dss_device *dssdev = mgr->device;
432 429
433 if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) 430 spin_lock_irqsave(&data_lock, flags);
431
432 if (mgr_manual_update(mgr)) {
433 spin_unlock_irqrestore(&data_lock, flags);
434 return 0; 434 return 0;
435 }
435 436
436 if (mgr_manual_update(mgr)) 437 if (!mp->enabled) {
438 spin_unlock_irqrestore(&data_lock, flags);
437 return 0; 439 return 0;
440 }
441
442 spin_unlock_irqrestore(&data_lock, flags);
438 443
439 r = dispc_runtime_get(); 444 r = dispc_runtime_get();
440 if (r) 445 if (r)
@@ -442,10 +447,8 @@ int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
442 447
443 irq = dispc_mgr_get_vsync_irq(mgr->id); 448 irq = dispc_mgr_get_vsync_irq(mgr->id);
444 449
445 mp = get_mgr_priv(mgr);
446 i = 0; 450 i = 0;
447 while (1) { 451 while (1) {
448 unsigned long flags;
449 bool shadow_dirty, dirty; 452 bool shadow_dirty, dirty;
450 453
451 spin_lock_irqsave(&data_lock, flags); 454 spin_lock_irqsave(&data_lock, flags);
@@ -489,21 +492,30 @@ int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
489{ 492{
490 unsigned long timeout = msecs_to_jiffies(500); 493 unsigned long timeout = msecs_to_jiffies(500);
491 struct ovl_priv_data *op; 494 struct ovl_priv_data *op;
492 struct omap_dss_device *dssdev; 495 struct mgr_priv_data *mp;
493 u32 irq; 496 u32 irq;
497 unsigned long flags;
494 int r; 498 int r;
495 int i; 499 int i;
496 500
497 if (!ovl->manager) 501 if (!ovl->manager)
498 return 0; 502 return 0;
499 503
500 dssdev = ovl->manager->device; 504 mp = get_mgr_priv(ovl->manager);
505
506 spin_lock_irqsave(&data_lock, flags);
501 507
502 if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) 508 if (ovl_manual_update(ovl)) {
509 spin_unlock_irqrestore(&data_lock, flags);
503 return 0; 510 return 0;
511 }
504 512
505 if (ovl_manual_update(ovl)) 513 if (!mp->enabled) {
514 spin_unlock_irqrestore(&data_lock, flags);
506 return 0; 515 return 0;
516 }
517
518 spin_unlock_irqrestore(&data_lock, flags);
507 519
508 r = dispc_runtime_get(); 520 r = dispc_runtime_get();
509 if (r) 521 if (r)
@@ -514,7 +526,6 @@ int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
514 op = get_ovl_priv(ovl); 526 op = get_ovl_priv(ovl);
515 i = 0; 527 i = 0;
516 while (1) { 528 while (1) {
517 unsigned long flags;
518 bool shadow_dirty, dirty; 529 bool shadow_dirty, dirty;
519 530
520 spin_lock_irqsave(&data_lock, flags); 531 spin_lock_irqsave(&data_lock, flags);
@@ -573,7 +584,7 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl)
573 584
574 replication = dss_ovl_use_replication(mp->lcd_config, oi->color_mode); 585 replication = dss_ovl_use_replication(mp->lcd_config, oi->color_mode);
575 586
576 r = dispc_ovl_setup(ovl->id, oi, replication, &mp->timings); 587 r = dispc_ovl_setup(ovl->id, oi, replication, &mp->timings, false);
577 if (r) { 588 if (r) {
578 /* 589 /*
579 * We can't do much here, as this function can be called from 590 * We can't do much here, as this function can be called from
@@ -677,40 +688,11 @@ static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr)
677 mp->shadow_extra_info_dirty = true; 688 mp->shadow_extra_info_dirty = true;
678} 689}
679 690
680static void dss_write_regs_common(void)
681{
682 const int num_mgrs = omap_dss_get_num_overlay_managers();
683 int i;
684
685 if (!dss_data.fifo_merge_dirty)
686 return;
687
688 for (i = 0; i < num_mgrs; ++i) {
689 struct omap_overlay_manager *mgr;
690 struct mgr_priv_data *mp;
691
692 mgr = omap_dss_get_overlay_manager(i);
693 mp = get_mgr_priv(mgr);
694
695 if (mp->enabled) {
696 if (dss_data.fifo_merge_dirty) {
697 dispc_enable_fifomerge(dss_data.fifo_merge);
698 dss_data.fifo_merge_dirty = false;
699 }
700
701 if (mp->updating)
702 mp->shadow_info_dirty = true;
703 }
704 }
705}
706
707static void dss_write_regs(void) 691static void dss_write_regs(void)
708{ 692{
709 const int num_mgrs = omap_dss_get_num_overlay_managers(); 693 const int num_mgrs = omap_dss_get_num_overlay_managers();
710 int i; 694 int i;
711 695
712 dss_write_regs_common();
713
714 for (i = 0; i < num_mgrs; ++i) { 696 for (i = 0; i < num_mgrs; ++i) {
715 struct omap_overlay_manager *mgr; 697 struct omap_overlay_manager *mgr;
716 struct mgr_priv_data *mp; 698 struct mgr_priv_data *mp;
@@ -799,8 +781,6 @@ void dss_mgr_start_update(struct omap_overlay_manager *mgr)
799 dss_mgr_write_regs(mgr); 781 dss_mgr_write_regs(mgr);
800 dss_mgr_write_regs_extra(mgr); 782 dss_mgr_write_regs_extra(mgr);
801 783
802 dss_write_regs_common();
803
804 mp->updating = true; 784 mp->updating = true;
805 785
806 if (!dss_data.irq_enabled && need_isr()) 786 if (!dss_data.irq_enabled && need_isr())
@@ -984,20 +964,11 @@ static void dss_apply_ovl_fifo_thresholds(struct omap_overlay *ovl,
984 op->extra_info_dirty = true; 964 op->extra_info_dirty = true;
985} 965}
986 966
987static void dss_apply_fifo_merge(bool use_fifo_merge) 967static void dss_ovl_setup_fifo(struct omap_overlay *ovl)
988{
989 if (dss_data.fifo_merge == use_fifo_merge)
990 return;
991
992 dss_data.fifo_merge = use_fifo_merge;
993 dss_data.fifo_merge_dirty = true;
994}
995
996static void dss_ovl_setup_fifo(struct omap_overlay *ovl,
997 bool use_fifo_merge)
998{ 968{
999 struct ovl_priv_data *op = get_ovl_priv(ovl); 969 struct ovl_priv_data *op = get_ovl_priv(ovl);
1000 u32 fifo_low, fifo_high; 970 u32 fifo_low, fifo_high;
971 bool use_fifo_merge = false;
1001 972
1002 if (!op->enabled && !op->enabling) 973 if (!op->enabled && !op->enabling)
1003 return; 974 return;
@@ -1008,8 +979,7 @@ static void dss_ovl_setup_fifo(struct omap_overlay *ovl,
1008 dss_apply_ovl_fifo_thresholds(ovl, fifo_low, fifo_high); 979 dss_apply_ovl_fifo_thresholds(ovl, fifo_low, fifo_high);
1009} 980}
1010 981
1011static void dss_mgr_setup_fifos(struct omap_overlay_manager *mgr, 982static void dss_mgr_setup_fifos(struct omap_overlay_manager *mgr)
1012 bool use_fifo_merge)
1013{ 983{
1014 struct omap_overlay *ovl; 984 struct omap_overlay *ovl;
1015 struct mgr_priv_data *mp; 985 struct mgr_priv_data *mp;
@@ -1020,94 +990,19 @@ static void dss_mgr_setup_fifos(struct omap_overlay_manager *mgr,
1020 return; 990 return;
1021 991
1022 list_for_each_entry(ovl, &mgr->overlays, list) 992 list_for_each_entry(ovl, &mgr->overlays, list)
1023 dss_ovl_setup_fifo(ovl, use_fifo_merge); 993 dss_ovl_setup_fifo(ovl);
1024}
1025
1026static void dss_setup_fifos(bool use_fifo_merge)
1027{
1028 const int num_mgrs = omap_dss_get_num_overlay_managers();
1029 struct omap_overlay_manager *mgr;
1030 int i;
1031
1032 for (i = 0; i < num_mgrs; ++i) {
1033 mgr = omap_dss_get_overlay_manager(i);
1034 dss_mgr_setup_fifos(mgr, use_fifo_merge);
1035 }
1036} 994}
1037 995
1038static int get_num_used_managers(void) 996static void dss_setup_fifos(void)
1039{ 997{
1040 const int num_mgrs = omap_dss_get_num_overlay_managers(); 998 const int num_mgrs = omap_dss_get_num_overlay_managers();
1041 struct omap_overlay_manager *mgr; 999 struct omap_overlay_manager *mgr;
1042 struct mgr_priv_data *mp;
1043 int i; 1000 int i;
1044 int enabled_mgrs;
1045
1046 enabled_mgrs = 0;
1047 1001
1048 for (i = 0; i < num_mgrs; ++i) { 1002 for (i = 0; i < num_mgrs; ++i) {
1049 mgr = omap_dss_get_overlay_manager(i); 1003 mgr = omap_dss_get_overlay_manager(i);
1050 mp = get_mgr_priv(mgr); 1004 dss_mgr_setup_fifos(mgr);
1051
1052 if (!mp->enabled)
1053 continue;
1054
1055 enabled_mgrs++;
1056 } 1005 }
1057
1058 return enabled_mgrs;
1059}
1060
1061static int get_num_used_overlays(void)
1062{
1063 const int num_ovls = omap_dss_get_num_overlays();
1064 struct omap_overlay *ovl;
1065 struct ovl_priv_data *op;
1066 struct mgr_priv_data *mp;
1067 int i;
1068 int enabled_ovls;
1069
1070 enabled_ovls = 0;
1071
1072 for (i = 0; i < num_ovls; ++i) {
1073 ovl = omap_dss_get_overlay(i);
1074 op = get_ovl_priv(ovl);
1075
1076 if (!op->enabled && !op->enabling)
1077 continue;
1078
1079 mp = get_mgr_priv(ovl->manager);
1080
1081 if (!mp->enabled)
1082 continue;
1083
1084 enabled_ovls++;
1085 }
1086
1087 return enabled_ovls;
1088}
1089
1090static bool get_use_fifo_merge(void)
1091{
1092 int enabled_mgrs = get_num_used_managers();
1093 int enabled_ovls = get_num_used_overlays();
1094
1095 if (!dss_has_feature(FEAT_FIFO_MERGE))
1096 return false;
1097
1098 /*
1099 * In theory the only requirement for fifomerge is enabled_ovls <= 1.
1100 * However, if we have two managers enabled and set/unset the fifomerge,
1101 * we need to set the GO bits in particular sequence for the managers,
1102 * and wait in between.
1103 *
1104 * This is rather difficult as new apply calls can happen at any time,
1105 * so we simplify the problem by requiring also that enabled_mgrs <= 1.
1106 * In practice this shouldn't matter, because when only one overlay is
1107 * enabled, most likely only one output is enabled.
1108 */
1109
1110 return enabled_mgrs <= 1 && enabled_ovls <= 1;
1111} 1006}
1112 1007
1113int dss_mgr_enable(struct omap_overlay_manager *mgr) 1008int dss_mgr_enable(struct omap_overlay_manager *mgr)
@@ -1115,7 +1010,6 @@ int dss_mgr_enable(struct omap_overlay_manager *mgr)
1115 struct mgr_priv_data *mp = get_mgr_priv(mgr); 1010 struct mgr_priv_data *mp = get_mgr_priv(mgr);
1116 unsigned long flags; 1011 unsigned long flags;
1117 int r; 1012 int r;
1118 bool fifo_merge;
1119 1013
1120 mutex_lock(&apply_lock); 1014 mutex_lock(&apply_lock);
1121 1015
@@ -1133,23 +1027,11 @@ int dss_mgr_enable(struct omap_overlay_manager *mgr)
1133 goto err; 1027 goto err;
1134 } 1028 }
1135 1029
1136 /* step 1: setup fifos/fifomerge before enabling the manager */ 1030 dss_setup_fifos();
1137
1138 fifo_merge = get_use_fifo_merge();
1139 dss_setup_fifos(fifo_merge);
1140 dss_apply_fifo_merge(fifo_merge);
1141 1031
1142 dss_write_regs(); 1032 dss_write_regs();
1143 dss_set_go_bits(); 1033 dss_set_go_bits();
1144 1034
1145 spin_unlock_irqrestore(&data_lock, flags);
1146
1147 /* wait until fifo config is in */
1148 wait_pending_extra_info_updates();
1149
1150 /* step 2: enable the manager */
1151 spin_lock_irqsave(&data_lock, flags);
1152
1153 if (!mgr_manual_update(mgr)) 1035 if (!mgr_manual_update(mgr))
1154 mp->updating = true; 1036 mp->updating = true;
1155 1037
@@ -1174,7 +1056,6 @@ void dss_mgr_disable(struct omap_overlay_manager *mgr)
1174{ 1056{
1175 struct mgr_priv_data *mp = get_mgr_priv(mgr); 1057 struct mgr_priv_data *mp = get_mgr_priv(mgr);
1176 unsigned long flags; 1058 unsigned long flags;
1177 bool fifo_merge;
1178 1059
1179 mutex_lock(&apply_lock); 1060 mutex_lock(&apply_lock);
1180 1061
@@ -1189,16 +1070,8 @@ void dss_mgr_disable(struct omap_overlay_manager *mgr)
1189 mp->updating = false; 1070 mp->updating = false;
1190 mp->enabled = false; 1071 mp->enabled = false;
1191 1072
1192 fifo_merge = get_use_fifo_merge();
1193 dss_setup_fifos(fifo_merge);
1194 dss_apply_fifo_merge(fifo_merge);
1195
1196 dss_write_regs();
1197 dss_set_go_bits();
1198
1199 spin_unlock_irqrestore(&data_lock, flags); 1073 spin_unlock_irqrestore(&data_lock, flags);
1200 1074
1201 wait_pending_extra_info_updates();
1202out: 1075out:
1203 mutex_unlock(&apply_lock); 1076 mutex_unlock(&apply_lock);
1204} 1077}
@@ -1237,29 +1110,29 @@ void dss_mgr_get_info(struct omap_overlay_manager *mgr,
1237 spin_unlock_irqrestore(&data_lock, flags); 1110 spin_unlock_irqrestore(&data_lock, flags);
1238} 1111}
1239 1112
1240int dss_mgr_set_device(struct omap_overlay_manager *mgr, 1113int dss_mgr_set_output(struct omap_overlay_manager *mgr,
1241 struct omap_dss_device *dssdev) 1114 struct omap_dss_output *output)
1242{ 1115{
1243 int r; 1116 int r;
1244 1117
1245 mutex_lock(&apply_lock); 1118 mutex_lock(&apply_lock);
1246 1119
1247 if (dssdev->manager) { 1120 if (mgr->output) {
1248 DSSERR("display '%s' already has a manager '%s'\n", 1121 DSSERR("manager %s is already connected to an output\n",
1249 dssdev->name, dssdev->manager->name); 1122 mgr->name);
1250 r = -EINVAL; 1123 r = -EINVAL;
1251 goto err; 1124 goto err;
1252 } 1125 }
1253 1126
1254 if ((mgr->supported_displays & dssdev->type) == 0) { 1127 if ((mgr->supported_outputs & output->id) == 0) {
1255 DSSERR("display '%s' does not support manager '%s'\n", 1128 DSSERR("output does not support manager %s\n",
1256 dssdev->name, mgr->name); 1129 mgr->name);
1257 r = -EINVAL; 1130 r = -EINVAL;
1258 goto err; 1131 goto err;
1259 } 1132 }
1260 1133
1261 dssdev->manager = mgr; 1134 output->manager = mgr;
1262 mgr->device = dssdev; 1135 mgr->output = output;
1263 1136
1264 mutex_unlock(&apply_lock); 1137 mutex_unlock(&apply_lock);
1265 1138
@@ -1269,40 +1142,46 @@ err:
1269 return r; 1142 return r;
1270} 1143}
1271 1144
1272int dss_mgr_unset_device(struct omap_overlay_manager *mgr) 1145int dss_mgr_unset_output(struct omap_overlay_manager *mgr)
1273{ 1146{
1274 int r; 1147 int r;
1148 struct mgr_priv_data *mp = get_mgr_priv(mgr);
1149 unsigned long flags;
1275 1150
1276 mutex_lock(&apply_lock); 1151 mutex_lock(&apply_lock);
1277 1152
1278 if (!mgr->device) { 1153 if (!mgr->output) {
1279 DSSERR("failed to unset display, display not set.\n"); 1154 DSSERR("failed to unset output, output not set\n");
1280 r = -EINVAL; 1155 r = -EINVAL;
1281 goto err; 1156 goto err;
1282 } 1157 }
1283 1158
1284 /* 1159 spin_lock_irqsave(&data_lock, flags);
1285 * Don't allow currently enabled displays to have the overlay manager 1160
1286 * pulled out from underneath them 1161 if (mp->enabled) {
1287 */ 1162 DSSERR("output can't be unset when manager is enabled\n");
1288 if (mgr->device->state != OMAP_DSS_DISPLAY_DISABLED) {
1289 r = -EINVAL; 1163 r = -EINVAL;
1290 goto err; 1164 goto err1;
1291 } 1165 }
1292 1166
1293 mgr->device->manager = NULL; 1167 spin_unlock_irqrestore(&data_lock, flags);
1294 mgr->device = NULL; 1168
1169 mgr->output->manager = NULL;
1170 mgr->output = NULL;
1295 1171
1296 mutex_unlock(&apply_lock); 1172 mutex_unlock(&apply_lock);
1297 1173
1298 return 0; 1174 return 0;
1175err1:
1176 spin_unlock_irqrestore(&data_lock, flags);
1299err: 1177err:
1300 mutex_unlock(&apply_lock); 1178 mutex_unlock(&apply_lock);
1179
1301 return r; 1180 return r;
1302} 1181}
1303 1182
1304static void dss_apply_mgr_timings(struct omap_overlay_manager *mgr, 1183static void dss_apply_mgr_timings(struct omap_overlay_manager *mgr,
1305 struct omap_video_timings *timings) 1184 const struct omap_video_timings *timings)
1306{ 1185{
1307 struct mgr_priv_data *mp = get_mgr_priv(mgr); 1186 struct mgr_priv_data *mp = get_mgr_priv(mgr);
1308 1187
@@ -1311,24 +1190,22 @@ static void dss_apply_mgr_timings(struct omap_overlay_manager *mgr,
1311} 1190}
1312 1191
1313void dss_mgr_set_timings(struct omap_overlay_manager *mgr, 1192void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
1314 struct omap_video_timings *timings) 1193 const struct omap_video_timings *timings)
1315{ 1194{
1316 unsigned long flags; 1195 unsigned long flags;
1317 1196 struct mgr_priv_data *mp = get_mgr_priv(mgr);
1318 mutex_lock(&apply_lock);
1319 1197
1320 spin_lock_irqsave(&data_lock, flags); 1198 spin_lock_irqsave(&data_lock, flags);
1321 1199
1322 dss_apply_mgr_timings(mgr, timings); 1200 if (mp->updating) {
1323 1201 DSSERR("cannot set timings for %s: manager needs to be disabled\n",
1324 dss_write_regs(); 1202 mgr->name);
1325 dss_set_go_bits(); 1203 goto out;
1204 }
1326 1205
1206 dss_apply_mgr_timings(mgr, timings);
1207out:
1327 spin_unlock_irqrestore(&data_lock, flags); 1208 spin_unlock_irqrestore(&data_lock, flags);
1328
1329 wait_pending_extra_info_updates();
1330
1331 mutex_unlock(&apply_lock);
1332} 1209}
1333 1210
1334static void dss_apply_mgr_lcd_config(struct omap_overlay_manager *mgr, 1211static void dss_apply_mgr_lcd_config(struct omap_overlay_manager *mgr,
@@ -1346,7 +1223,7 @@ void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr,
1346 unsigned long flags; 1223 unsigned long flags;
1347 struct mgr_priv_data *mp = get_mgr_priv(mgr); 1224 struct mgr_priv_data *mp = get_mgr_priv(mgr);
1348 1225
1349 mutex_lock(&apply_lock); 1226 spin_lock_irqsave(&data_lock, flags);
1350 1227
1351 if (mp->enabled) { 1228 if (mp->enabled) {
1352 DSSERR("cannot apply lcd config for %s: manager needs to be disabled\n", 1229 DSSERR("cannot apply lcd config for %s: manager needs to be disabled\n",
@@ -1354,19 +1231,9 @@ void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr,
1354 goto out; 1231 goto out;
1355 } 1232 }
1356 1233
1357 spin_lock_irqsave(&data_lock, flags);
1358
1359 dss_apply_mgr_lcd_config(mgr, config); 1234 dss_apply_mgr_lcd_config(mgr, config);
1360
1361 dss_write_regs();
1362 dss_set_go_bits();
1363
1364 spin_unlock_irqrestore(&data_lock, flags);
1365
1366 wait_pending_extra_info_updates();
1367
1368out: 1235out:
1369 mutex_unlock(&apply_lock); 1236 spin_unlock_irqrestore(&data_lock, flags);
1370} 1237}
1371 1238
1372int dss_ovl_set_info(struct omap_overlay *ovl, 1239int dss_ovl_set_info(struct omap_overlay *ovl,
@@ -1483,6 +1350,13 @@ int dss_ovl_unset_manager(struct omap_overlay *ovl)
1483 goto err; 1350 goto err;
1484 } 1351 }
1485 1352
1353 spin_unlock_irqrestore(&data_lock, flags);
1354
1355 /* wait for pending extra_info updates to ensure the ovl is disabled */
1356 wait_pending_extra_info_updates();
1357
1358 spin_lock_irqsave(&data_lock, flags);
1359
1486 op->channel = -1; 1360 op->channel = -1;
1487 1361
1488 ovl->manager = NULL; 1362 ovl->manager = NULL;
@@ -1517,7 +1391,6 @@ int dss_ovl_enable(struct omap_overlay *ovl)
1517{ 1391{
1518 struct ovl_priv_data *op = get_ovl_priv(ovl); 1392 struct ovl_priv_data *op = get_ovl_priv(ovl);
1519 unsigned long flags; 1393 unsigned long flags;
1520 bool fifo_merge;
1521 int r; 1394 int r;
1522 1395
1523 mutex_lock(&apply_lock); 1396 mutex_lock(&apply_lock);
@@ -1527,7 +1400,7 @@ int dss_ovl_enable(struct omap_overlay *ovl)
1527 goto err1; 1400 goto err1;
1528 } 1401 }
1529 1402
1530 if (ovl->manager == NULL || ovl->manager->device == NULL) { 1403 if (ovl->manager == NULL || ovl->manager->output == NULL) {
1531 r = -EINVAL; 1404 r = -EINVAL;
1532 goto err1; 1405 goto err1;
1533 } 1406 }
@@ -1543,22 +1416,7 @@ int dss_ovl_enable(struct omap_overlay *ovl)
1543 goto err2; 1416 goto err2;
1544 } 1417 }
1545 1418
1546 /* step 1: configure fifos/fifomerge for currently enabled ovls */ 1419 dss_setup_fifos();
1547
1548 fifo_merge = get_use_fifo_merge();
1549 dss_setup_fifos(fifo_merge);
1550 dss_apply_fifo_merge(fifo_merge);
1551
1552 dss_write_regs();
1553 dss_set_go_bits();
1554
1555 spin_unlock_irqrestore(&data_lock, flags);
1556
1557 /* wait for fifo configs to go in */
1558 wait_pending_extra_info_updates();
1559
1560 /* step 2: enable the overlay */
1561 spin_lock_irqsave(&data_lock, flags);
1562 1420
1563 op->enabling = false; 1421 op->enabling = false;
1564 dss_apply_ovl_enable(ovl, true); 1422 dss_apply_ovl_enable(ovl, true);
@@ -1568,9 +1426,6 @@ int dss_ovl_enable(struct omap_overlay *ovl)
1568 1426
1569 spin_unlock_irqrestore(&data_lock, flags); 1427 spin_unlock_irqrestore(&data_lock, flags);
1570 1428
1571 /* wait for overlay to be enabled */
1572 wait_pending_extra_info_updates();
1573
1574 mutex_unlock(&apply_lock); 1429 mutex_unlock(&apply_lock);
1575 1430
1576 return 0; 1431 return 0;
@@ -1586,7 +1441,6 @@ int dss_ovl_disable(struct omap_overlay *ovl)
1586{ 1441{
1587 struct ovl_priv_data *op = get_ovl_priv(ovl); 1442 struct ovl_priv_data *op = get_ovl_priv(ovl);
1588 unsigned long flags; 1443 unsigned long flags;
1589 bool fifo_merge;
1590 int r; 1444 int r;
1591 1445
1592 mutex_lock(&apply_lock); 1446 mutex_lock(&apply_lock);
@@ -1596,39 +1450,19 @@ int dss_ovl_disable(struct omap_overlay *ovl)
1596 goto err; 1450 goto err;
1597 } 1451 }
1598 1452
1599 if (ovl->manager == NULL || ovl->manager->device == NULL) { 1453 if (ovl->manager == NULL || ovl->manager->output == NULL) {
1600 r = -EINVAL; 1454 r = -EINVAL;
1601 goto err; 1455 goto err;
1602 } 1456 }
1603 1457
1604 /* step 1: disable the overlay */
1605 spin_lock_irqsave(&data_lock, flags); 1458 spin_lock_irqsave(&data_lock, flags);
1606 1459
1607 dss_apply_ovl_enable(ovl, false); 1460 dss_apply_ovl_enable(ovl, false);
1608
1609 dss_write_regs(); 1461 dss_write_regs();
1610 dss_set_go_bits(); 1462 dss_set_go_bits();
1611 1463
1612 spin_unlock_irqrestore(&data_lock, flags); 1464 spin_unlock_irqrestore(&data_lock, flags);
1613 1465
1614 /* wait for the overlay to be disabled */
1615 wait_pending_extra_info_updates();
1616
1617 /* step 2: configure fifos/fifomerge */
1618 spin_lock_irqsave(&data_lock, flags);
1619
1620 fifo_merge = get_use_fifo_merge();
1621 dss_setup_fifos(fifo_merge);
1622 dss_apply_fifo_merge(fifo_merge);
1623
1624 dss_write_regs();
1625 dss_set_go_bits();
1626
1627 spin_unlock_irqrestore(&data_lock, flags);
1628
1629 /* wait for fifo config to go in */
1630 wait_pending_extra_info_updates();
1631
1632 mutex_unlock(&apply_lock); 1466 mutex_unlock(&apply_lock);
1633 1467
1634 return 0; 1468 return 0;
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 58bd9c27369d..b2af72dc20bd 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -33,6 +33,7 @@
33#include <linux/device.h> 33#include <linux/device.h>
34#include <linux/regulator/consumer.h> 34#include <linux/regulator/consumer.h>
35#include <linux/suspend.h> 35#include <linux/suspend.h>
36#include <linux/slab.h>
36 37
37#include <video/omapdss.h> 38#include <video/omapdss.h>
38 39
@@ -57,6 +58,11 @@ bool dss_debug;
57module_param_named(debug, dss_debug, bool, 0644); 58module_param_named(debug, dss_debug, bool, 0644);
58#endif 59#endif
59 60
61const char *dss_get_default_display_name(void)
62{
63 return core.default_display_name;
64}
65
60/* REGULATORS */ 66/* REGULATORS */
61 67
62struct regulator *dss_get_vdds_dsi(void) 68struct regulator *dss_get_vdds_dsi(void)
@@ -347,17 +353,14 @@ static int dss_driver_probe(struct device *dev)
347 int r; 353 int r;
348 struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver); 354 struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver);
349 struct omap_dss_device *dssdev = to_dss_device(dev); 355 struct omap_dss_device *dssdev = to_dss_device(dev);
350 bool force;
351 356
352 DSSDBG("driver_probe: dev %s/%s, drv %s\n", 357 DSSDBG("driver_probe: dev %s/%s, drv %s\n",
353 dev_name(dev), dssdev->driver_name, 358 dev_name(dev), dssdev->driver_name,
354 dssdrv->driver.name); 359 dssdrv->driver.name);
355 360
356 dss_init_device(core.pdev, dssdev); 361 r = dss_init_device(core.pdev, dssdev);
357 362 if (r)
358 force = core.default_display_name && 363 return r;
359 strcmp(core.default_display_name, dssdev->name) == 0;
360 dss_recheck_connections(dssdev, force);
361 364
362 r = dssdrv->probe(dssdev); 365 r = dssdrv->probe(dssdev);
363 366
@@ -416,54 +419,44 @@ void omap_dss_unregister_driver(struct omap_dss_driver *dssdriver)
416EXPORT_SYMBOL(omap_dss_unregister_driver); 419EXPORT_SYMBOL(omap_dss_unregister_driver);
417 420
418/* DEVICE */ 421/* DEVICE */
419static void reset_device(struct device *dev, int check)
420{
421 u8 *dev_p = (u8 *)dev;
422 u8 *dev_end = dev_p + sizeof(*dev);
423 void *saved_pdata;
424
425 saved_pdata = dev->platform_data;
426 if (check) {
427 /*
428 * Check if there is any other setting than platform_data
429 * in struct device; warn that these will be reset by our
430 * init.
431 */
432 dev->platform_data = NULL;
433 while (dev_p < dev_end) {
434 if (*dev_p) {
435 WARN("%s: struct device fields will be "
436 "discarded\n",
437 __func__);
438 break;
439 }
440 dev_p++;
441 }
442 }
443 memset(dev, 0, sizeof(*dev));
444 dev->platform_data = saved_pdata;
445}
446
447 422
448static void omap_dss_dev_release(struct device *dev) 423static void omap_dss_dev_release(struct device *dev)
449{ 424{
450 reset_device(dev, 0); 425 struct omap_dss_device *dssdev = to_dss_device(dev);
426 kfree(dssdev);
451} 427}
452 428
453int omap_dss_register_device(struct omap_dss_device *dssdev, 429static int disp_num_counter;
454 struct device *parent, int disp_num) 430
431struct omap_dss_device *dss_alloc_and_init_device(struct device *parent)
455{ 432{
456 WARN_ON(!dssdev->driver_name); 433 struct omap_dss_device *dssdev;
434
435 dssdev = kzalloc(sizeof(*dssdev), GFP_KERNEL);
436 if (!dssdev)
437 return NULL;
457 438
458 reset_device(&dssdev->dev, 1);
459 dssdev->dev.bus = &dss_bus_type; 439 dssdev->dev.bus = &dss_bus_type;
460 dssdev->dev.parent = parent; 440 dssdev->dev.parent = parent;
461 dssdev->dev.release = omap_dss_dev_release; 441 dssdev->dev.release = omap_dss_dev_release;
462 dev_set_name(&dssdev->dev, "display%d", disp_num); 442 dev_set_name(&dssdev->dev, "display%d", disp_num_counter++);
463 return device_register(&dssdev->dev); 443
444 device_initialize(&dssdev->dev);
445
446 return dssdev;
447}
448
449int dss_add_device(struct omap_dss_device *dssdev)
450{
451 return device_add(&dssdev->dev);
452}
453
454void dss_put_device(struct omap_dss_device *dssdev)
455{
456 put_device(&dssdev->dev);
464} 457}
465 458
466void omap_dss_unregister_device(struct omap_dss_device *dssdev) 459void dss_unregister_device(struct omap_dss_device *dssdev)
467{ 460{
468 device_unregister(&dssdev->dev); 461 device_unregister(&dssdev->dev);
469} 462}
@@ -471,15 +464,25 @@ void omap_dss_unregister_device(struct omap_dss_device *dssdev)
471static int dss_unregister_dss_dev(struct device *dev, void *data) 464static int dss_unregister_dss_dev(struct device *dev, void *data)
472{ 465{
473 struct omap_dss_device *dssdev = to_dss_device(dev); 466 struct omap_dss_device *dssdev = to_dss_device(dev);
474 omap_dss_unregister_device(dssdev); 467 dss_unregister_device(dssdev);
475 return 0; 468 return 0;
476} 469}
477 470
478void omap_dss_unregister_child_devices(struct device *parent) 471void dss_unregister_child_devices(struct device *parent)
479{ 472{
480 device_for_each_child(parent, NULL, dss_unregister_dss_dev); 473 device_for_each_child(parent, NULL, dss_unregister_dss_dev);
481} 474}
482 475
476void dss_copy_device_pdata(struct omap_dss_device *dst,
477 const struct omap_dss_device *src)
478{
479 u8 *d = (u8 *)dst;
480 u8 *s = (u8 *)src;
481 size_t dsize = sizeof(struct device);
482
483 memcpy(d + dsize, s + dsize, sizeof(struct omap_dss_device) - dsize);
484}
485
483/* BUS */ 486/* BUS */
484static int __init omap_dss_bus_register(void) 487static int __init omap_dss_bus_register(void)
485{ 488{
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index ee9e29639dcc..b43477a5fae8 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -38,7 +38,6 @@
38#include <linux/pm_runtime.h> 38#include <linux/pm_runtime.h>
39 39
40#include <plat/cpu.h> 40#include <plat/cpu.h>
41#include <plat/clock.h>
42 41
43#include <video/omapdss.h> 42#include <video/omapdss.h>
44 43
@@ -82,6 +81,30 @@ struct dispc_irq_stats {
82 unsigned irqs[32]; 81 unsigned irqs[32];
83}; 82};
84 83
84struct dispc_features {
85 u8 sw_start;
86 u8 fp_start;
87 u8 bp_start;
88 u16 sw_max;
89 u16 vp_max;
90 u16 hp_max;
91 int (*calc_scaling) (enum omap_plane plane,
92 const struct omap_video_timings *mgr_timings,
93 u16 width, u16 height, u16 out_width, u16 out_height,
94 enum omap_color_mode color_mode, bool *five_taps,
95 int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
96 u16 pos_x, unsigned long *core_clk, bool mem_to_mem);
97 unsigned long (*calc_core_clk) (enum omap_plane plane,
98 u16 width, u16 height, u16 out_width, u16 out_height,
99 bool mem_to_mem);
100 u8 num_fifos;
101
102 /* swap GFX & WB fifos */
103 bool gfx_fifo_workaround:1;
104};
105
106#define DISPC_MAX_NR_FIFOS 5
107
85static struct { 108static struct {
86 struct platform_device *pdev; 109 struct platform_device *pdev;
87 void __iomem *base; 110 void __iomem *base;
@@ -91,7 +114,9 @@ static struct {
91 int irq; 114 int irq;
92 struct clk *dss_clk; 115 struct clk *dss_clk;
93 116
94 u32 fifo_size[MAX_DSS_OVERLAYS]; 117 u32 fifo_size[DISPC_MAX_NR_FIFOS];
118 /* maps which plane is using a fifo. fifo-id -> plane-id */
119 int fifo_assignment[DISPC_MAX_NR_FIFOS];
95 120
96 spinlock_t irq_lock; 121 spinlock_t irq_lock;
97 u32 irq_error_mask; 122 u32 irq_error_mask;
@@ -102,6 +127,8 @@ static struct {
102 bool ctx_valid; 127 bool ctx_valid;
103 u32 ctx[DISPC_SZ_REGS / sizeof(u32)]; 128 u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
104 129
130 const struct dispc_features *feat;
131
105#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 132#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
106 spinlock_t irq_stats_lock; 133 spinlock_t irq_stats_lock;
107 struct dispc_irq_stats irq_stats; 134 struct dispc_irq_stats irq_stats;
@@ -211,7 +238,14 @@ static const struct {
211 }, 238 },
212}; 239};
213 240
241struct color_conv_coef {
242 int ry, rcr, rcb, gy, gcr, gcb, by, bcr, bcb;
243 int full_range;
244};
245
214static void _omap_dispc_set_irqs(void); 246static void _omap_dispc_set_irqs(void);
247static unsigned long dispc_plane_pclk_rate(enum omap_plane plane);
248static unsigned long dispc_plane_lclk_rate(enum omap_plane plane);
215 249
216static inline void dispc_write_reg(const u16 idx, u32 val) 250static inline void dispc_write_reg(const u16 idx, u32 val)
217{ 251{
@@ -509,6 +543,11 @@ u32 dispc_mgr_get_framedone_irq(enum omap_channel channel)
509 return mgr_desc[channel].framedone_irq; 543 return mgr_desc[channel].framedone_irq;
510} 544}
511 545
546u32 dispc_wb_get_framedone_irq(void)
547{
548 return DISPC_IRQ_FRAMEDONEWB;
549}
550
512bool dispc_mgr_go_busy(enum omap_channel channel) 551bool dispc_mgr_go_busy(enum omap_channel channel)
513{ 552{
514 return mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1; 553 return mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1;
@@ -536,6 +575,30 @@ void dispc_mgr_go(enum omap_channel channel)
536 mgr_fld_write(channel, DISPC_MGR_FLD_GO, 1); 575 mgr_fld_write(channel, DISPC_MGR_FLD_GO, 1);
537} 576}
538 577
578bool dispc_wb_go_busy(void)
579{
580 return REG_GET(DISPC_CONTROL2, 6, 6) == 1;
581}
582
583void dispc_wb_go(void)
584{
585 enum omap_plane plane = OMAP_DSS_WB;
586 bool enable, go;
587
588 enable = REG_GET(DISPC_OVL_ATTRIBUTES(plane), 0, 0) == 1;
589
590 if (!enable)
591 return;
592
593 go = REG_GET(DISPC_CONTROL2, 6, 6) == 1;
594 if (go) {
595 DSSERR("GO bit not down for WB\n");
596 return;
597 }
598
599 REG_FLD_MOD(DISPC_CONTROL2, 1, 6, 6);
600}
601
539static void dispc_ovl_write_firh_reg(enum omap_plane plane, int reg, u32 value) 602static void dispc_ovl_write_firh_reg(enum omap_plane plane, int reg, u32 value)
540{ 603{
541 dispc_write_reg(DISPC_OVL_FIR_COEF_H(plane, reg), value); 604 dispc_write_reg(DISPC_OVL_FIR_COEF_H(plane, reg), value);
@@ -618,41 +681,41 @@ static void dispc_ovl_set_scale_coef(enum omap_plane plane, int fir_hinc,
618 } 681 }
619} 682}
620 683
621static void _dispc_setup_color_conv_coef(void)
622{
623 int i;
624 const struct color_conv_coef {
625 int ry, rcr, rcb, gy, gcr, gcb, by, bcr, bcb;
626 int full_range;
627 } ctbl_bt601_5 = {
628 298, 409, 0, 298, -208, -100, 298, 0, 517, 0,
629 };
630
631 const struct color_conv_coef *ct;
632 684
685static void dispc_ovl_write_color_conv_coef(enum omap_plane plane,
686 const struct color_conv_coef *ct)
687{
633#define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0)) 688#define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0))
634 689
635 ct = &ctbl_bt601_5; 690 dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 0), CVAL(ct->rcr, ct->ry));
691 dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 1), CVAL(ct->gy, ct->rcb));
692 dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 2), CVAL(ct->gcb, ct->gcr));
693 dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 3), CVAL(ct->bcr, ct->by));
694 dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 4), CVAL(0, ct->bcb));
636 695
637 for (i = 1; i < dss_feat_get_num_ovls(); i++) { 696 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), ct->full_range, 11, 11);
638 dispc_write_reg(DISPC_OVL_CONV_COEF(i, 0),
639 CVAL(ct->rcr, ct->ry));
640 dispc_write_reg(DISPC_OVL_CONV_COEF(i, 1),
641 CVAL(ct->gy, ct->rcb));
642 dispc_write_reg(DISPC_OVL_CONV_COEF(i, 2),
643 CVAL(ct->gcb, ct->gcr));
644 dispc_write_reg(DISPC_OVL_CONV_COEF(i, 3),
645 CVAL(ct->bcr, ct->by));
646 dispc_write_reg(DISPC_OVL_CONV_COEF(i, 4),
647 CVAL(0, ct->bcb));
648
649 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(i), ct->full_range,
650 11, 11);
651 }
652 697
653#undef CVAL 698#undef CVAL
654} 699}
655 700
701static void dispc_setup_color_conv_coef(void)
702{
703 int i;
704 int num_ovl = dss_feat_get_num_ovls();
705 int num_wb = dss_feat_get_num_wbs();
706 const struct color_conv_coef ctbl_bt601_5_ovl = {
707 298, 409, 0, 298, -208, -100, 298, 0, 517, 0,
708 };
709 const struct color_conv_coef ctbl_bt601_5_wb = {
710 66, 112, -38, 129, -94, -74, 25, -18, 112, 0,
711 };
712
713 for (i = 1; i < num_ovl; i++)
714 dispc_ovl_write_color_conv_coef(i, &ctbl_bt601_5_ovl);
715
716 for (; i < num_wb; i++)
717 dispc_ovl_write_color_conv_coef(i, &ctbl_bt601_5_wb);
718}
656 719
657static void dispc_ovl_set_ba0(enum omap_plane plane, u32 paddr) 720static void dispc_ovl_set_ba0(enum omap_plane plane, u32 paddr)
658{ 721{
@@ -674,24 +737,32 @@ static void dispc_ovl_set_ba1_uv(enum omap_plane plane, u32 paddr)
674 dispc_write_reg(DISPC_OVL_BA1_UV(plane), paddr); 737 dispc_write_reg(DISPC_OVL_BA1_UV(plane), paddr);
675} 738}
676 739
677static void dispc_ovl_set_pos(enum omap_plane plane, int x, int y) 740static void dispc_ovl_set_pos(enum omap_plane plane,
741 enum omap_overlay_caps caps, int x, int y)
678{ 742{
679 u32 val = FLD_VAL(y, 26, 16) | FLD_VAL(x, 10, 0); 743 u32 val;
744
745 if ((caps & OMAP_DSS_OVL_CAP_POS) == 0)
746 return;
747
748 val = FLD_VAL(y, 26, 16) | FLD_VAL(x, 10, 0);
680 749
681 dispc_write_reg(DISPC_OVL_POSITION(plane), val); 750 dispc_write_reg(DISPC_OVL_POSITION(plane), val);
682} 751}
683 752
684static void dispc_ovl_set_pic_size(enum omap_plane plane, int width, int height) 753static void dispc_ovl_set_input_size(enum omap_plane plane, int width,
754 int height)
685{ 755{
686 u32 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); 756 u32 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
687 757
688 if (plane == OMAP_DSS_GFX) 758 if (plane == OMAP_DSS_GFX || plane == OMAP_DSS_WB)
689 dispc_write_reg(DISPC_OVL_SIZE(plane), val); 759 dispc_write_reg(DISPC_OVL_SIZE(plane), val);
690 else 760 else
691 dispc_write_reg(DISPC_OVL_PICTURE_SIZE(plane), val); 761 dispc_write_reg(DISPC_OVL_PICTURE_SIZE(plane), val);
692} 762}
693 763
694static void dispc_ovl_set_vid_size(enum omap_plane plane, int width, int height) 764static void dispc_ovl_set_output_size(enum omap_plane plane, int width,
765 int height)
695{ 766{
696 u32 val; 767 u32 val;
697 768
@@ -699,14 +770,16 @@ static void dispc_ovl_set_vid_size(enum omap_plane plane, int width, int height)
699 770
700 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); 771 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
701 772
702 dispc_write_reg(DISPC_OVL_SIZE(plane), val); 773 if (plane == OMAP_DSS_WB)
774 dispc_write_reg(DISPC_OVL_PICTURE_SIZE(plane), val);
775 else
776 dispc_write_reg(DISPC_OVL_SIZE(plane), val);
703} 777}
704 778
705static void dispc_ovl_set_zorder(enum omap_plane plane, u8 zorder) 779static void dispc_ovl_set_zorder(enum omap_plane plane,
780 enum omap_overlay_caps caps, u8 zorder)
706{ 781{
707 struct omap_overlay *ovl = omap_dss_get_overlay(plane); 782 if ((caps & OMAP_DSS_OVL_CAP_ZORDER) == 0)
708
709 if ((ovl->caps & OMAP_DSS_OVL_CAP_ZORDER) == 0)
710 return; 783 return;
711 784
712 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), zorder, 27, 26); 785 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), zorder, 27, 26);
@@ -723,23 +796,22 @@ static void dispc_ovl_enable_zorder_planes(void)
723 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(i), 1, 25, 25); 796 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(i), 1, 25, 25);
724} 797}
725 798
726static void dispc_ovl_set_pre_mult_alpha(enum omap_plane plane, bool enable) 799static void dispc_ovl_set_pre_mult_alpha(enum omap_plane plane,
800 enum omap_overlay_caps caps, bool enable)
727{ 801{
728 struct omap_overlay *ovl = omap_dss_get_overlay(plane); 802 if ((caps & OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA) == 0)
729
730 if ((ovl->caps & OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA) == 0)
731 return; 803 return;
732 804
733 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 28, 28); 805 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 28, 28);
734} 806}
735 807
736static void dispc_ovl_setup_global_alpha(enum omap_plane plane, u8 global_alpha) 808static void dispc_ovl_setup_global_alpha(enum omap_plane plane,
809 enum omap_overlay_caps caps, u8 global_alpha)
737{ 810{
738 static const unsigned shifts[] = { 0, 8, 16, 24, }; 811 static const unsigned shifts[] = { 0, 8, 16, 24, };
739 int shift; 812 int shift;
740 struct omap_overlay *ovl = omap_dss_get_overlay(plane);
741 813
742 if ((ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0) 814 if ((caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
743 return; 815 return;
744 816
745 shift = shifts[plane]; 817 shift = shifts[plane];
@@ -947,10 +1019,17 @@ static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane plane)
947 return channel; 1019 return channel;
948} 1020}
949 1021
1022void dispc_wb_set_channel_in(enum dss_writeback_channel channel)
1023{
1024 enum omap_plane plane = OMAP_DSS_WB;
1025
1026 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), channel, 18, 16);
1027}
1028
950static void dispc_ovl_set_burst_size(enum omap_plane plane, 1029static void dispc_ovl_set_burst_size(enum omap_plane plane,
951 enum omap_burst_size burst_size) 1030 enum omap_burst_size burst_size)
952{ 1031{
953 static const unsigned shifts[] = { 6, 14, 14, 14, }; 1032 static const unsigned shifts[] = { 6, 14, 14, 14, 14, };
954 int shift; 1033 int shift;
955 1034
956 shift = shifts[plane]; 1035 shift = shifts[plane];
@@ -1027,11 +1106,15 @@ static void dispc_ovl_set_vid_color_conv(enum omap_plane plane, bool enable)
1027 dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val); 1106 dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
1028} 1107}
1029 1108
1030static void dispc_ovl_enable_replication(enum omap_plane plane, bool enable) 1109static void dispc_ovl_enable_replication(enum omap_plane plane,
1110 enum omap_overlay_caps caps, bool enable)
1031{ 1111{
1032 static const unsigned shifts[] = { 5, 10, 10, 10 }; 1112 static const unsigned shifts[] = { 5, 10, 10, 10 };
1033 int shift; 1113 int shift;
1034 1114
1115 if ((caps & OMAP_DSS_OVL_CAP_REPLICATION) == 0)
1116 return;
1117
1035 shift = shifts[plane]; 1118 shift = shifts[plane];
1036 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, shift, shift); 1119 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, shift, shift);
1037} 1120}
@@ -1045,10 +1128,10 @@ static void dispc_mgr_set_size(enum omap_channel channel, u16 width,
1045 dispc_write_reg(DISPC_SIZE_MGR(channel), val); 1128 dispc_write_reg(DISPC_SIZE_MGR(channel), val);
1046} 1129}
1047 1130
1048static void dispc_read_plane_fifo_sizes(void) 1131static void dispc_init_fifos(void)
1049{ 1132{
1050 u32 size; 1133 u32 size;
1051 int plane; 1134 int fifo;
1052 u8 start, end; 1135 u8 start, end;
1053 u32 unit; 1136 u32 unit;
1054 1137
@@ -1056,16 +1139,53 @@ static void dispc_read_plane_fifo_sizes(void)
1056 1139
1057 dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end); 1140 dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end);
1058 1141
1059 for (plane = 0; plane < dss_feat_get_num_ovls(); ++plane) { 1142 for (fifo = 0; fifo < dispc.feat->num_fifos; ++fifo) {
1060 size = REG_GET(DISPC_OVL_FIFO_SIZE_STATUS(plane), start, end); 1143 size = REG_GET(DISPC_OVL_FIFO_SIZE_STATUS(fifo), start, end);
1061 size *= unit; 1144 size *= unit;
1062 dispc.fifo_size[plane] = size; 1145 dispc.fifo_size[fifo] = size;
1146
1147 /*
1148 * By default fifos are mapped directly to overlays, fifo 0 to
1149 * ovl 0, fifo 1 to ovl 1, etc.
1150 */
1151 dispc.fifo_assignment[fifo] = fifo;
1152 }
1153
1154 /*
1155 * The GFX fifo on OMAP4 is smaller than the other fifos. The small fifo
1156 * causes problems with certain use cases, like using the tiler in 2D
1157 * mode. The below hack swaps the fifos of GFX and WB planes, thus
1158 * giving GFX plane a larger fifo. WB but should work fine with a
1159 * smaller fifo.
1160 */
1161 if (dispc.feat->gfx_fifo_workaround) {
1162 u32 v;
1163
1164 v = dispc_read_reg(DISPC_GLOBAL_BUFFER);
1165
1166 v = FLD_MOD(v, 4, 2, 0); /* GFX BUF top to WB */
1167 v = FLD_MOD(v, 4, 5, 3); /* GFX BUF bottom to WB */
1168 v = FLD_MOD(v, 0, 26, 24); /* WB BUF top to GFX */
1169 v = FLD_MOD(v, 0, 29, 27); /* WB BUF bottom to GFX */
1170
1171 dispc_write_reg(DISPC_GLOBAL_BUFFER, v);
1172
1173 dispc.fifo_assignment[OMAP_DSS_GFX] = OMAP_DSS_WB;
1174 dispc.fifo_assignment[OMAP_DSS_WB] = OMAP_DSS_GFX;
1063 } 1175 }
1064} 1176}
1065 1177
1066static u32 dispc_ovl_get_fifo_size(enum omap_plane plane) 1178static u32 dispc_ovl_get_fifo_size(enum omap_plane plane)
1067{ 1179{
1068 return dispc.fifo_size[plane]; 1180 int fifo;
1181 u32 size = 0;
1182
1183 for (fifo = 0; fifo < dispc.feat->num_fifos; ++fifo) {
1184 if (dispc.fifo_assignment[fifo] == plane)
1185 size += dispc.fifo_size[fifo];
1186 }
1187
1188 return size;
1069} 1189}
1070 1190
1071void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high) 1191void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high)
@@ -1141,6 +1261,14 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
1141 if (manual_update && dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) { 1261 if (manual_update && dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) {
1142 *fifo_low = ovl_fifo_size - burst_size * 2; 1262 *fifo_low = ovl_fifo_size - burst_size * 2;
1143 *fifo_high = total_fifo_size - burst_size; 1263 *fifo_high = total_fifo_size - burst_size;
1264 } else if (plane == OMAP_DSS_WB) {
1265 /*
1266 * Most optimal configuration for writeback is to push out data
1267 * to the interconnect the moment writeback pushes enough pixels
1268 * in the FIFO to form a burst
1269 */
1270 *fifo_low = 0;
1271 *fifo_high = burst_size;
1144 } else { 1272 } else {
1145 *fifo_low = ovl_fifo_size - burst_size; 1273 *fifo_low = ovl_fifo_size - burst_size;
1146 *fifo_high = total_fifo_size - buf_unit; 1274 *fifo_high = total_fifo_size - buf_unit;
@@ -1383,6 +1511,7 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
1383{ 1511{
1384 int scale_x = out_width != orig_width; 1512 int scale_x = out_width != orig_width;
1385 int scale_y = out_height != orig_height; 1513 int scale_y = out_height != orig_height;
1514 bool chroma_upscale = plane != OMAP_DSS_WB ? true : false;
1386 1515
1387 if (!dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) 1516 if (!dss_has_feature(FEAT_HANDLE_UV_SEPARATE))
1388 return; 1517 return;
@@ -1390,7 +1519,8 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
1390 color_mode != OMAP_DSS_COLOR_UYVY && 1519 color_mode != OMAP_DSS_COLOR_UYVY &&
1391 color_mode != OMAP_DSS_COLOR_NV12)) { 1520 color_mode != OMAP_DSS_COLOR_NV12)) {
1392 /* reset chroma resampling for RGB formats */ 1521 /* reset chroma resampling for RGB formats */
1393 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 8, 8); 1522 if (plane != OMAP_DSS_WB)
1523 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 8, 8);
1394 return; 1524 return;
1395 } 1525 }
1396 1526
@@ -1399,23 +1529,34 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
1399 1529
1400 switch (color_mode) { 1530 switch (color_mode) {
1401 case OMAP_DSS_COLOR_NV12: 1531 case OMAP_DSS_COLOR_NV12:
1402 /* UV is subsampled by 2 vertically*/ 1532 if (chroma_upscale) {
1403 orig_height >>= 1; 1533 /* UV is subsampled by 2 horizontally and vertically */
1404 /* UV is subsampled by 2 horz.*/ 1534 orig_height >>= 1;
1405 orig_width >>= 1; 1535 orig_width >>= 1;
1536 } else {
1537 /* UV is downsampled by 2 horizontally and vertically */
1538 orig_height <<= 1;
1539 orig_width <<= 1;
1540 }
1541
1406 break; 1542 break;
1407 case OMAP_DSS_COLOR_YUV2: 1543 case OMAP_DSS_COLOR_YUV2:
1408 case OMAP_DSS_COLOR_UYVY: 1544 case OMAP_DSS_COLOR_UYVY:
1409 /*For YUV422 with 90/270 rotation, 1545 /* For YUV422 with 90/270 rotation, we don't upsample chroma */
1410 *we don't upsample chroma
1411 */
1412 if (rotation == OMAP_DSS_ROT_0 || 1546 if (rotation == OMAP_DSS_ROT_0 ||
1413 rotation == OMAP_DSS_ROT_180) 1547 rotation == OMAP_DSS_ROT_180) {
1414 /* UV is subsampled by 2 hrz*/ 1548 if (chroma_upscale)
1415 orig_width >>= 1; 1549 /* UV is subsampled by 2 horizontally */
1550 orig_width >>= 1;
1551 else
1552 /* UV is downsampled by 2 horizontally */
1553 orig_width <<= 1;
1554 }
1555
1416 /* must use FIR for YUV422 if rotated */ 1556 /* must use FIR for YUV422 if rotated */
1417 if (rotation != OMAP_DSS_ROT_0) 1557 if (rotation != OMAP_DSS_ROT_0)
1418 scale_x = scale_y = true; 1558 scale_x = scale_y = true;
1559
1419 break; 1560 break;
1420 default: 1561 default:
1421 BUG(); 1562 BUG();
@@ -1431,8 +1572,10 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
1431 out_width, out_height, five_taps, 1572 out_width, out_height, five_taps,
1432 rotation, DISPC_COLOR_COMPONENT_UV); 1573 rotation, DISPC_COLOR_COMPONENT_UV);
1433 1574
1434 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 1575 if (plane != OMAP_DSS_WB)
1435 (scale_x || scale_y) ? 1 : 0, 8, 8); 1576 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane),
1577 (scale_x || scale_y) ? 1 : 0, 8, 8);
1578
1436 /* set H scaling */ 1579 /* set H scaling */
1437 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5); 1580 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5);
1438 /* set V scaling */ 1581 /* set V scaling */
@@ -1848,22 +1991,19 @@ static void calc_tiler_rotation_offset(u16 screen_width, u16 width,
1848 * This function is used to avoid synclosts in OMAP3, because of some 1991 * This function is used to avoid synclosts in OMAP3, because of some
1849 * undocumented horizontal position and timing related limitations. 1992 * undocumented horizontal position and timing related limitations.
1850 */ 1993 */
1851static int check_horiz_timing_omap3(enum omap_channel channel, 1994static int check_horiz_timing_omap3(enum omap_plane plane,
1852 const struct omap_video_timings *t, u16 pos_x, 1995 const struct omap_video_timings *t, u16 pos_x,
1853 u16 width, u16 height, u16 out_width, u16 out_height) 1996 u16 width, u16 height, u16 out_width, u16 out_height)
1854{ 1997{
1855 int DS = DIV_ROUND_UP(height, out_height); 1998 int DS = DIV_ROUND_UP(height, out_height);
1856 unsigned long nonactive, lclk, pclk; 1999 unsigned long nonactive;
1857 static const u8 limits[3] = { 8, 10, 20 }; 2000 static const u8 limits[3] = { 8, 10, 20 };
1858 u64 val, blank; 2001 u64 val, blank;
2002 unsigned long pclk = dispc_plane_pclk_rate(plane);
2003 unsigned long lclk = dispc_plane_lclk_rate(plane);
1859 int i; 2004 int i;
1860 2005
1861 nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width; 2006 nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width;
1862 pclk = dispc_mgr_pclk_rate(channel);
1863 if (dss_mgr_is_lcd(channel))
1864 lclk = dispc_mgr_lclk_rate(channel);
1865 else
1866 lclk = dispc_fclk_rate();
1867 2007
1868 i = 0; 2008 i = 0;
1869 if (out_height < height) 2009 if (out_height < height)
@@ -1900,13 +2040,14 @@ static int check_horiz_timing_omap3(enum omap_channel channel,
1900 return 0; 2040 return 0;
1901} 2041}
1902 2042
1903static unsigned long calc_core_clk_five_taps(enum omap_channel channel, 2043static unsigned long calc_core_clk_five_taps(enum omap_plane plane,
1904 const struct omap_video_timings *mgr_timings, u16 width, 2044 const struct omap_video_timings *mgr_timings, u16 width,
1905 u16 height, u16 out_width, u16 out_height, 2045 u16 height, u16 out_width, u16 out_height,
1906 enum omap_color_mode color_mode) 2046 enum omap_color_mode color_mode)
1907{ 2047{
1908 u32 core_clk = 0; 2048 u32 core_clk = 0;
1909 u64 tmp, pclk = dispc_mgr_pclk_rate(channel); 2049 u64 tmp;
2050 unsigned long pclk = dispc_plane_pclk_rate(plane);
1910 2051
1911 if (height <= out_height && width <= out_width) 2052 if (height <= out_height && width <= out_width)
1912 return (unsigned long) pclk; 2053 return (unsigned long) pclk;
@@ -1940,11 +2081,22 @@ static unsigned long calc_core_clk_five_taps(enum omap_channel channel,
1940 return core_clk; 2081 return core_clk;
1941} 2082}
1942 2083
1943static unsigned long calc_core_clk(enum omap_channel channel, u16 width, 2084static unsigned long calc_core_clk_24xx(enum omap_plane plane, u16 width,
1944 u16 height, u16 out_width, u16 out_height) 2085 u16 height, u16 out_width, u16 out_height, bool mem_to_mem)
2086{
2087 unsigned long pclk = dispc_plane_pclk_rate(plane);
2088
2089 if (height > out_height && width > out_width)
2090 return pclk * 4;
2091 else
2092 return pclk * 2;
2093}
2094
2095static unsigned long calc_core_clk_34xx(enum omap_plane plane, u16 width,
2096 u16 height, u16 out_width, u16 out_height, bool mem_to_mem)
1945{ 2097{
1946 unsigned int hf, vf; 2098 unsigned int hf, vf;
1947 unsigned long pclk = dispc_mgr_pclk_rate(channel); 2099 unsigned long pclk = dispc_plane_pclk_rate(plane);
1948 2100
1949 /* 2101 /*
1950 * FIXME how to determine the 'A' factor 2102 * FIXME how to determine the 'A' factor
@@ -1959,51 +2111,207 @@ static unsigned long calc_core_clk(enum omap_channel channel, u16 width,
1959 hf = 2; 2111 hf = 2;
1960 else 2112 else
1961 hf = 1; 2113 hf = 1;
1962
1963 if (height > out_height) 2114 if (height > out_height)
1964 vf = 2; 2115 vf = 2;
1965 else 2116 else
1966 vf = 1; 2117 vf = 1;
1967 2118
1968 if (cpu_is_omap24xx()) { 2119 return pclk * vf * hf;
1969 if (vf > 1 && hf > 1) 2120}
1970 return pclk * 4; 2121
1971 else 2122static unsigned long calc_core_clk_44xx(enum omap_plane plane, u16 width,
1972 return pclk * 2; 2123 u16 height, u16 out_width, u16 out_height, bool mem_to_mem)
1973 } else if (cpu_is_omap34xx()) { 2124{
1974 return pclk * vf * hf; 2125 unsigned long pclk;
1975 } else { 2126
1976 if (hf > 1) 2127 /*
1977 return DIV_ROUND_UP(pclk, out_width) * width; 2128 * If the overlay/writeback is in mem to mem mode, there are no
1978 else 2129 * downscaling limitations with respect to pixel clock, return 1 as
1979 return pclk; 2130 * required core clock to represent that we have sufficient enough
2131 * core clock to do maximum downscaling
2132 */
2133 if (mem_to_mem)
2134 return 1;
2135
2136 pclk = dispc_plane_pclk_rate(plane);
2137
2138 if (width > out_width)
2139 return DIV_ROUND_UP(pclk, out_width) * width;
2140 else
2141 return pclk;
2142}
2143
2144static int dispc_ovl_calc_scaling_24xx(enum omap_plane plane,
2145 const struct omap_video_timings *mgr_timings,
2146 u16 width, u16 height, u16 out_width, u16 out_height,
2147 enum omap_color_mode color_mode, bool *five_taps,
2148 int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
2149 u16 pos_x, unsigned long *core_clk, bool mem_to_mem)
2150{
2151 int error;
2152 u16 in_width, in_height;
2153 int min_factor = min(*decim_x, *decim_y);
2154 const int maxsinglelinewidth =
2155 dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
2156
2157 *five_taps = false;
2158
2159 do {
2160 in_height = DIV_ROUND_UP(height, *decim_y);
2161 in_width = DIV_ROUND_UP(width, *decim_x);
2162 *core_clk = dispc.feat->calc_core_clk(plane, in_width,
2163 in_height, out_width, out_height, mem_to_mem);
2164 error = (in_width > maxsinglelinewidth || !*core_clk ||
2165 *core_clk > dispc_core_clk_rate());
2166 if (error) {
2167 if (*decim_x == *decim_y) {
2168 *decim_x = min_factor;
2169 ++*decim_y;
2170 } else {
2171 swap(*decim_x, *decim_y);
2172 if (*decim_x < *decim_y)
2173 ++*decim_x;
2174 }
2175 }
2176 } while (*decim_x <= *x_predecim && *decim_y <= *y_predecim && error);
2177
2178 if (in_width > maxsinglelinewidth) {
2179 DSSERR("Cannot scale max input width exceeded");
2180 return -EINVAL;
1980 } 2181 }
2182 return 0;
1981} 2183}
1982 2184
1983static int dispc_ovl_calc_scaling(enum omap_plane plane, 2185static int dispc_ovl_calc_scaling_34xx(enum omap_plane plane,
1984 enum omap_channel channel,
1985 const struct omap_video_timings *mgr_timings, 2186 const struct omap_video_timings *mgr_timings,
1986 u16 width, u16 height, u16 out_width, u16 out_height, 2187 u16 width, u16 height, u16 out_width, u16 out_height,
1987 enum omap_color_mode color_mode, bool *five_taps, 2188 enum omap_color_mode color_mode, bool *five_taps,
1988 int *x_predecim, int *y_predecim, u16 pos_x) 2189 int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
2190 u16 pos_x, unsigned long *core_clk, bool mem_to_mem)
1989{ 2191{
1990 struct omap_overlay *ovl = omap_dss_get_overlay(plane); 2192 int error;
1991 const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE); 2193 u16 in_width, in_height;
2194 int min_factor = min(*decim_x, *decim_y);
2195 const int maxsinglelinewidth =
2196 dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
2197
2198 do {
2199 in_height = DIV_ROUND_UP(height, *decim_y);
2200 in_width = DIV_ROUND_UP(width, *decim_x);
2201 *core_clk = calc_core_clk_five_taps(plane, mgr_timings,
2202 in_width, in_height, out_width, out_height, color_mode);
2203
2204 error = check_horiz_timing_omap3(plane, mgr_timings,
2205 pos_x, in_width, in_height, out_width,
2206 out_height);
2207
2208 if (in_width > maxsinglelinewidth)
2209 if (in_height > out_height &&
2210 in_height < out_height * 2)
2211 *five_taps = false;
2212 if (!*five_taps)
2213 *core_clk = dispc.feat->calc_core_clk(plane, in_width,
2214 in_height, out_width, out_height,
2215 mem_to_mem);
2216
2217 error = (error || in_width > maxsinglelinewidth * 2 ||
2218 (in_width > maxsinglelinewidth && *five_taps) ||
2219 !*core_clk || *core_clk > dispc_core_clk_rate());
2220 if (error) {
2221 if (*decim_x == *decim_y) {
2222 *decim_x = min_factor;
2223 ++*decim_y;
2224 } else {
2225 swap(*decim_x, *decim_y);
2226 if (*decim_x < *decim_y)
2227 ++*decim_x;
2228 }
2229 }
2230 } while (*decim_x <= *x_predecim && *decim_y <= *y_predecim && error);
2231
2232 if (check_horiz_timing_omap3(plane, mgr_timings, pos_x, width, height,
2233 out_width, out_height)){
2234 DSSERR("horizontal timing too tight\n");
2235 return -EINVAL;
2236 }
2237
2238 if (in_width > (maxsinglelinewidth * 2)) {
2239 DSSERR("Cannot setup scaling");
2240 DSSERR("width exceeds maximum width possible");
2241 return -EINVAL;
2242 }
2243
2244 if (in_width > maxsinglelinewidth && *five_taps) {
2245 DSSERR("cannot setup scaling with five taps");
2246 return -EINVAL;
2247 }
2248 return 0;
2249}
2250
2251static int dispc_ovl_calc_scaling_44xx(enum omap_plane plane,
2252 const struct omap_video_timings *mgr_timings,
2253 u16 width, u16 height, u16 out_width, u16 out_height,
2254 enum omap_color_mode color_mode, bool *five_taps,
2255 int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
2256 u16 pos_x, unsigned long *core_clk, bool mem_to_mem)
2257{
2258 u16 in_width, in_width_max;
2259 int decim_x_min = *decim_x;
2260 u16 in_height = DIV_ROUND_UP(height, *decim_y);
1992 const int maxsinglelinewidth = 2261 const int maxsinglelinewidth =
1993 dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH); 2262 dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
2263 unsigned long pclk = dispc_plane_pclk_rate(plane);
2264 const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
2265
2266 if (mem_to_mem)
2267 in_width_max = DIV_ROUND_UP(out_width, maxdownscale);
2268 else
2269 in_width_max = dispc_core_clk_rate() /
2270 DIV_ROUND_UP(pclk, out_width);
2271
2272 *decim_x = DIV_ROUND_UP(width, in_width_max);
2273
2274 *decim_x = *decim_x > decim_x_min ? *decim_x : decim_x_min;
2275 if (*decim_x > *x_predecim)
2276 return -EINVAL;
2277
2278 do {
2279 in_width = DIV_ROUND_UP(width, *decim_x);
2280 } while (*decim_x <= *x_predecim &&
2281 in_width > maxsinglelinewidth && ++*decim_x);
2282
2283 if (in_width > maxsinglelinewidth) {
2284 DSSERR("Cannot scale width exceeds max line width");
2285 return -EINVAL;
2286 }
2287
2288 *core_clk = dispc.feat->calc_core_clk(plane, in_width, in_height,
2289 out_width, out_height, mem_to_mem);
2290 return 0;
2291}
2292
2293static int dispc_ovl_calc_scaling(enum omap_plane plane,
2294 enum omap_overlay_caps caps,
2295 const struct omap_video_timings *mgr_timings,
2296 u16 width, u16 height, u16 out_width, u16 out_height,
2297 enum omap_color_mode color_mode, bool *five_taps,
2298 int *x_predecim, int *y_predecim, u16 pos_x,
2299 enum omap_dss_rotation_type rotation_type, bool mem_to_mem)
2300{
2301 const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
1994 const int max_decim_limit = 16; 2302 const int max_decim_limit = 16;
1995 unsigned long core_clk = 0; 2303 unsigned long core_clk = 0;
1996 int decim_x, decim_y, error, min_factor; 2304 int decim_x, decim_y, ret;
1997 u16 in_width, in_height, in_width_max = 0;
1998 2305
1999 if (width == out_width && height == out_height) 2306 if (width == out_width && height == out_height)
2000 return 0; 2307 return 0;
2001 2308
2002 if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) 2309 if ((caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
2003 return -EINVAL; 2310 return -EINVAL;
2004 2311
2005 *x_predecim = max_decim_limit; 2312 *x_predecim = max_decim_limit;
2006 *y_predecim = max_decim_limit; 2313 *y_predecim = (rotation_type == OMAP_DSS_ROT_TILER &&
2314 dss_has_feature(FEAT_BURST_2D)) ? 2 : max_decim_limit;
2007 2315
2008 if (color_mode == OMAP_DSS_COLOR_CLUT1 || 2316 if (color_mode == OMAP_DSS_COLOR_CLUT1 ||
2009 color_mode == OMAP_DSS_COLOR_CLUT2 || 2317 color_mode == OMAP_DSS_COLOR_CLUT2 ||
@@ -2018,118 +2326,18 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
2018 decim_x = DIV_ROUND_UP(DIV_ROUND_UP(width, out_width), maxdownscale); 2326 decim_x = DIV_ROUND_UP(DIV_ROUND_UP(width, out_width), maxdownscale);
2019 decim_y = DIV_ROUND_UP(DIV_ROUND_UP(height, out_height), maxdownscale); 2327 decim_y = DIV_ROUND_UP(DIV_ROUND_UP(height, out_height), maxdownscale);
2020 2328
2021 min_factor = min(decim_x, decim_y);
2022
2023 if (decim_x > *x_predecim || out_width > width * 8) 2329 if (decim_x > *x_predecim || out_width > width * 8)
2024 return -EINVAL; 2330 return -EINVAL;
2025 2331
2026 if (decim_y > *y_predecim || out_height > height * 8) 2332 if (decim_y > *y_predecim || out_height > height * 8)
2027 return -EINVAL; 2333 return -EINVAL;
2028 2334
2029 if (cpu_is_omap24xx()) { 2335 ret = dispc.feat->calc_scaling(plane, mgr_timings, width, height,
2030 *five_taps = false; 2336 out_width, out_height, color_mode, five_taps,
2031 2337 x_predecim, y_predecim, &decim_x, &decim_y, pos_x, &core_clk,
2032 do { 2338 mem_to_mem);
2033 in_height = DIV_ROUND_UP(height, decim_y); 2339 if (ret)
2034 in_width = DIV_ROUND_UP(width, decim_x); 2340 return ret;
2035 core_clk = calc_core_clk(channel, in_width, in_height,
2036 out_width, out_height);
2037 error = (in_width > maxsinglelinewidth || !core_clk ||
2038 core_clk > dispc_core_clk_rate());
2039 if (error) {
2040 if (decim_x == decim_y) {
2041 decim_x = min_factor;
2042 decim_y++;
2043 } else {
2044 swap(decim_x, decim_y);
2045 if (decim_x < decim_y)
2046 decim_x++;
2047 }
2048 }
2049 } while (decim_x <= *x_predecim && decim_y <= *y_predecim &&
2050 error);
2051
2052 if (in_width > maxsinglelinewidth) {
2053 DSSERR("Cannot scale max input width exceeded");
2054 return -EINVAL;
2055 }
2056 } else if (cpu_is_omap34xx()) {
2057
2058 do {
2059 in_height = DIV_ROUND_UP(height, decim_y);
2060 in_width = DIV_ROUND_UP(width, decim_x);
2061 core_clk = calc_core_clk_five_taps(channel, mgr_timings,
2062 in_width, in_height, out_width, out_height,
2063 color_mode);
2064
2065 error = check_horiz_timing_omap3(channel, mgr_timings,
2066 pos_x, in_width, in_height, out_width,
2067 out_height);
2068
2069 if (in_width > maxsinglelinewidth)
2070 if (in_height > out_height &&
2071 in_height < out_height * 2)
2072 *five_taps = false;
2073 if (!*five_taps)
2074 core_clk = calc_core_clk(channel, in_width,
2075 in_height, out_width, out_height);
2076 error = (error || in_width > maxsinglelinewidth * 2 ||
2077 (in_width > maxsinglelinewidth && *five_taps) ||
2078 !core_clk || core_clk > dispc_core_clk_rate());
2079 if (error) {
2080 if (decim_x == decim_y) {
2081 decim_x = min_factor;
2082 decim_y++;
2083 } else {
2084 swap(decim_x, decim_y);
2085 if (decim_x < decim_y)
2086 decim_x++;
2087 }
2088 }
2089 } while (decim_x <= *x_predecim && decim_y <= *y_predecim
2090 && error);
2091
2092 if (check_horiz_timing_omap3(channel, mgr_timings, pos_x, width,
2093 height, out_width, out_height)){
2094 DSSERR("horizontal timing too tight\n");
2095 return -EINVAL;
2096 }
2097
2098 if (in_width > (maxsinglelinewidth * 2)) {
2099 DSSERR("Cannot setup scaling");
2100 DSSERR("width exceeds maximum width possible");
2101 return -EINVAL;
2102 }
2103
2104 if (in_width > maxsinglelinewidth && *five_taps) {
2105 DSSERR("cannot setup scaling with five taps");
2106 return -EINVAL;
2107 }
2108 } else {
2109 int decim_x_min = decim_x;
2110 in_height = DIV_ROUND_UP(height, decim_y);
2111 in_width_max = dispc_core_clk_rate() /
2112 DIV_ROUND_UP(dispc_mgr_pclk_rate(channel),
2113 out_width);
2114 decim_x = DIV_ROUND_UP(width, in_width_max);
2115
2116 decim_x = decim_x > decim_x_min ? decim_x : decim_x_min;
2117 if (decim_x > *x_predecim)
2118 return -EINVAL;
2119
2120 do {
2121 in_width = DIV_ROUND_UP(width, decim_x);
2122 } while (decim_x <= *x_predecim &&
2123 in_width > maxsinglelinewidth && decim_x++);
2124
2125 if (in_width > maxsinglelinewidth) {
2126 DSSERR("Cannot scale width exceeds max line width");
2127 return -EINVAL;
2128 }
2129
2130 core_clk = calc_core_clk(channel, in_width, in_height,
2131 out_width, out_height);
2132 }
2133 2341
2134 DSSDBG("required core clk rate = %lu Hz\n", core_clk); 2342 DSSDBG("required core clk rate = %lu Hz\n", core_clk);
2135 DSSDBG("current core clk rate = %lu Hz\n", dispc_core_clk_rate()); 2343 DSSDBG("current core clk rate = %lu Hz\n", dispc_core_clk_rate());
@@ -2147,69 +2355,64 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
2147 return 0; 2355 return 0;
2148} 2356}
2149 2357
2150int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi, 2358static int dispc_ovl_setup_common(enum omap_plane plane,
2151 bool replication, const struct omap_video_timings *mgr_timings) 2359 enum omap_overlay_caps caps, u32 paddr, u32 p_uv_addr,
2360 u16 screen_width, int pos_x, int pos_y, u16 width, u16 height,
2361 u16 out_width, u16 out_height, enum omap_color_mode color_mode,
2362 u8 rotation, bool mirror, u8 zorder, u8 pre_mult_alpha,
2363 u8 global_alpha, enum omap_dss_rotation_type rotation_type,
2364 bool replication, const struct omap_video_timings *mgr_timings,
2365 bool mem_to_mem)
2152{ 2366{
2153 struct omap_overlay *ovl = omap_dss_get_overlay(plane);
2154 bool five_taps = true; 2367 bool five_taps = true;
2155 bool fieldmode = 0; 2368 bool fieldmode = 0;
2156 int r, cconv = 0; 2369 int r, cconv = 0;
2157 unsigned offset0, offset1; 2370 unsigned offset0, offset1;
2158 s32 row_inc; 2371 s32 row_inc;
2159 s32 pix_inc; 2372 s32 pix_inc;
2160 u16 frame_height = oi->height; 2373 u16 frame_height = height;
2161 unsigned int field_offset = 0; 2374 unsigned int field_offset = 0;
2162 u16 in_height = oi->height; 2375 u16 in_height = height;
2163 u16 in_width = oi->width; 2376 u16 in_width = width;
2164 u16 out_width, out_height;
2165 enum omap_channel channel;
2166 int x_predecim = 1, y_predecim = 1; 2377 int x_predecim = 1, y_predecim = 1;
2167 bool ilace = mgr_timings->interlace; 2378 bool ilace = mgr_timings->interlace;
2168 2379
2169 channel = dispc_ovl_get_channel_out(plane); 2380 if (paddr == 0)
2170
2171 DSSDBG("dispc_ovl_setup %d, pa %x, pa_uv %x, sw %d, %d,%d, %dx%d -> "
2172 "%dx%d, cmode %x, rot %d, mir %d, ilace %d chan %d repl %d\n",
2173 plane, oi->paddr, oi->p_uv_addr,
2174 oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
2175 oi->out_width, oi->out_height, oi->color_mode, oi->rotation,
2176 oi->mirror, ilace, channel, replication);
2177
2178 if (oi->paddr == 0)
2179 return -EINVAL; 2381 return -EINVAL;
2180 2382
2181 out_width = oi->out_width == 0 ? oi->width : oi->out_width; 2383 out_width = out_width == 0 ? width : out_width;
2182 out_height = oi->out_height == 0 ? oi->height : oi->out_height; 2384 out_height = out_height == 0 ? height : out_height;
2183 2385
2184 if (ilace && oi->height == out_height) 2386 if (ilace && height == out_height)
2185 fieldmode = 1; 2387 fieldmode = 1;
2186 2388
2187 if (ilace) { 2389 if (ilace) {
2188 if (fieldmode) 2390 if (fieldmode)
2189 in_height /= 2; 2391 in_height /= 2;
2190 oi->pos_y /= 2; 2392 pos_y /= 2;
2191 out_height /= 2; 2393 out_height /= 2;
2192 2394
2193 DSSDBG("adjusting for ilace: height %d, pos_y %d, " 2395 DSSDBG("adjusting for ilace: height %d, pos_y %d, "
2194 "out_height %d\n", 2396 "out_height %d\n", in_height, pos_y,
2195 in_height, oi->pos_y, out_height); 2397 out_height);
2196 } 2398 }
2197 2399
2198 if (!dss_feat_color_mode_supported(plane, oi->color_mode)) 2400 if (!dss_feat_color_mode_supported(plane, color_mode))
2199 return -EINVAL; 2401 return -EINVAL;
2200 2402
2201 r = dispc_ovl_calc_scaling(plane, channel, mgr_timings, in_width, 2403 r = dispc_ovl_calc_scaling(plane, caps, mgr_timings, in_width,
2202 in_height, out_width, out_height, oi->color_mode, 2404 in_height, out_width, out_height, color_mode,
2203 &five_taps, &x_predecim, &y_predecim, oi->pos_x); 2405 &five_taps, &x_predecim, &y_predecim, pos_x,
2406 rotation_type, mem_to_mem);
2204 if (r) 2407 if (r)
2205 return r; 2408 return r;
2206 2409
2207 in_width = DIV_ROUND_UP(in_width, x_predecim); 2410 in_width = DIV_ROUND_UP(in_width, x_predecim);
2208 in_height = DIV_ROUND_UP(in_height, y_predecim); 2411 in_height = DIV_ROUND_UP(in_height, y_predecim);
2209 2412
2210 if (oi->color_mode == OMAP_DSS_COLOR_YUV2 || 2413 if (color_mode == OMAP_DSS_COLOR_YUV2 ||
2211 oi->color_mode == OMAP_DSS_COLOR_UYVY || 2414 color_mode == OMAP_DSS_COLOR_UYVY ||
2212 oi->color_mode == OMAP_DSS_COLOR_NV12) 2415 color_mode == OMAP_DSS_COLOR_NV12)
2213 cconv = 1; 2416 cconv = 1;
2214 2417
2215 if (ilace && !fieldmode) { 2418 if (ilace && !fieldmode) {
@@ -2235,70 +2438,144 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
2235 row_inc = 0; 2438 row_inc = 0;
2236 pix_inc = 0; 2439 pix_inc = 0;
2237 2440
2238 if (oi->rotation_type == OMAP_DSS_ROT_TILER) 2441 if (rotation_type == OMAP_DSS_ROT_TILER)
2239 calc_tiler_rotation_offset(oi->screen_width, in_width, 2442 calc_tiler_rotation_offset(screen_width, in_width,
2240 oi->color_mode, fieldmode, field_offset, 2443 color_mode, fieldmode, field_offset,
2241 &offset0, &offset1, &row_inc, &pix_inc, 2444 &offset0, &offset1, &row_inc, &pix_inc,
2242 x_predecim, y_predecim); 2445 x_predecim, y_predecim);
2243 else if (oi->rotation_type == OMAP_DSS_ROT_DMA) 2446 else if (rotation_type == OMAP_DSS_ROT_DMA)
2244 calc_dma_rotation_offset(oi->rotation, oi->mirror, 2447 calc_dma_rotation_offset(rotation, mirror,
2245 oi->screen_width, in_width, frame_height, 2448 screen_width, in_width, frame_height,
2246 oi->color_mode, fieldmode, field_offset, 2449 color_mode, fieldmode, field_offset,
2247 &offset0, &offset1, &row_inc, &pix_inc, 2450 &offset0, &offset1, &row_inc, &pix_inc,
2248 x_predecim, y_predecim); 2451 x_predecim, y_predecim);
2249 else 2452 else
2250 calc_vrfb_rotation_offset(oi->rotation, oi->mirror, 2453 calc_vrfb_rotation_offset(rotation, mirror,
2251 oi->screen_width, in_width, frame_height, 2454 screen_width, in_width, frame_height,
2252 oi->color_mode, fieldmode, field_offset, 2455 color_mode, fieldmode, field_offset,
2253 &offset0, &offset1, &row_inc, &pix_inc, 2456 &offset0, &offset1, &row_inc, &pix_inc,
2254 x_predecim, y_predecim); 2457 x_predecim, y_predecim);
2255 2458
2256 DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n", 2459 DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n",
2257 offset0, offset1, row_inc, pix_inc); 2460 offset0, offset1, row_inc, pix_inc);
2258 2461
2259 dispc_ovl_set_color_mode(plane, oi->color_mode); 2462 dispc_ovl_set_color_mode(plane, color_mode);
2260 2463
2261 dispc_ovl_configure_burst_type(plane, oi->rotation_type); 2464 dispc_ovl_configure_burst_type(plane, rotation_type);
2262 2465
2263 dispc_ovl_set_ba0(plane, oi->paddr + offset0); 2466 dispc_ovl_set_ba0(plane, paddr + offset0);
2264 dispc_ovl_set_ba1(plane, oi->paddr + offset1); 2467 dispc_ovl_set_ba1(plane, paddr + offset1);
2265 2468
2266 if (OMAP_DSS_COLOR_NV12 == oi->color_mode) { 2469 if (OMAP_DSS_COLOR_NV12 == color_mode) {
2267 dispc_ovl_set_ba0_uv(plane, oi->p_uv_addr + offset0); 2470 dispc_ovl_set_ba0_uv(plane, p_uv_addr + offset0);
2268 dispc_ovl_set_ba1_uv(plane, oi->p_uv_addr + offset1); 2471 dispc_ovl_set_ba1_uv(plane, p_uv_addr + offset1);
2269 } 2472 }
2270 2473
2271
2272 dispc_ovl_set_row_inc(plane, row_inc); 2474 dispc_ovl_set_row_inc(plane, row_inc);
2273 dispc_ovl_set_pix_inc(plane, pix_inc); 2475 dispc_ovl_set_pix_inc(plane, pix_inc);
2274 2476
2275 DSSDBG("%d,%d %dx%d -> %dx%d\n", oi->pos_x, oi->pos_y, in_width, 2477 DSSDBG("%d,%d %dx%d -> %dx%d\n", pos_x, pos_y, in_width,
2276 in_height, out_width, out_height); 2478 in_height, out_width, out_height);
2277 2479
2278 dispc_ovl_set_pos(plane, oi->pos_x, oi->pos_y); 2480 dispc_ovl_set_pos(plane, caps, pos_x, pos_y);
2279 2481
2280 dispc_ovl_set_pic_size(plane, in_width, in_height); 2482 dispc_ovl_set_input_size(plane, in_width, in_height);
2281 2483
2282 if (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) { 2484 if (caps & OMAP_DSS_OVL_CAP_SCALE) {
2283 dispc_ovl_set_scaling(plane, in_width, in_height, out_width, 2485 dispc_ovl_set_scaling(plane, in_width, in_height, out_width,
2284 out_height, ilace, five_taps, fieldmode, 2486 out_height, ilace, five_taps, fieldmode,
2285 oi->color_mode, oi->rotation); 2487 color_mode, rotation);
2286 dispc_ovl_set_vid_size(plane, out_width, out_height); 2488 dispc_ovl_set_output_size(plane, out_width, out_height);
2287 dispc_ovl_set_vid_color_conv(plane, cconv); 2489 dispc_ovl_set_vid_color_conv(plane, cconv);
2288 } 2490 }
2289 2491
2290 dispc_ovl_set_rotation_attrs(plane, oi->rotation, oi->mirror, 2492 dispc_ovl_set_rotation_attrs(plane, rotation, mirror, color_mode);
2291 oi->color_mode);
2292 2493
2293 dispc_ovl_set_zorder(plane, oi->zorder); 2494 dispc_ovl_set_zorder(plane, caps, zorder);
2294 dispc_ovl_set_pre_mult_alpha(plane, oi->pre_mult_alpha); 2495 dispc_ovl_set_pre_mult_alpha(plane, caps, pre_mult_alpha);
2295 dispc_ovl_setup_global_alpha(plane, oi->global_alpha); 2496 dispc_ovl_setup_global_alpha(plane, caps, global_alpha);
2296 2497
2297 dispc_ovl_enable_replication(plane, replication); 2498 dispc_ovl_enable_replication(plane, caps, replication);
2298 2499
2299 return 0; 2500 return 0;
2300} 2501}
2301 2502
2503int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
2504 bool replication, const struct omap_video_timings *mgr_timings,
2505 bool mem_to_mem)
2506{
2507 int r;
2508 struct omap_overlay *ovl = omap_dss_get_overlay(plane);
2509 enum omap_channel channel;
2510
2511 channel = dispc_ovl_get_channel_out(plane);
2512
2513 DSSDBG("dispc_ovl_setup %d, pa %x, pa_uv %x, sw %d, %d,%d, %dx%d -> "
2514 "%dx%d, cmode %x, rot %d, mir %d, chan %d repl %d\n",
2515 plane, oi->paddr, oi->p_uv_addr, oi->screen_width, oi->pos_x,
2516 oi->pos_y, oi->width, oi->height, oi->out_width, oi->out_height,
2517 oi->color_mode, oi->rotation, oi->mirror, channel, replication);
2518
2519 r = dispc_ovl_setup_common(plane, ovl->caps, oi->paddr, oi->p_uv_addr,
2520 oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
2521 oi->out_width, oi->out_height, oi->color_mode, oi->rotation,
2522 oi->mirror, oi->zorder, oi->pre_mult_alpha, oi->global_alpha,
2523 oi->rotation_type, replication, mgr_timings, mem_to_mem);
2524
2525 return r;
2526}
2527
2528int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
2529 bool mem_to_mem, const struct omap_video_timings *mgr_timings)
2530{
2531 int r;
2532 u32 l;
2533 enum omap_plane plane = OMAP_DSS_WB;
2534 const int pos_x = 0, pos_y = 0;
2535 const u8 zorder = 0, global_alpha = 0;
2536 const bool replication = false;
2537 bool truncation;
2538 int in_width = mgr_timings->x_res;
2539 int in_height = mgr_timings->y_res;
2540 enum omap_overlay_caps caps =
2541 OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA;
2542
2543 DSSDBG("dispc_wb_setup, pa %x, pa_uv %x, %d,%d -> %dx%d, cmode %x, "
2544 "rot %d, mir %d\n", wi->paddr, wi->p_uv_addr, in_width,
2545 in_height, wi->width, wi->height, wi->color_mode, wi->rotation,
2546 wi->mirror);
2547
2548 r = dispc_ovl_setup_common(plane, caps, wi->paddr, wi->p_uv_addr,
2549 wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width,
2550 wi->height, wi->color_mode, wi->rotation, wi->mirror, zorder,
2551 wi->pre_mult_alpha, global_alpha, wi->rotation_type,
2552 replication, mgr_timings, mem_to_mem);
2553
2554 switch (wi->color_mode) {
2555 case OMAP_DSS_COLOR_RGB16:
2556 case OMAP_DSS_COLOR_RGB24P:
2557 case OMAP_DSS_COLOR_ARGB16:
2558 case OMAP_DSS_COLOR_RGBA16:
2559 case OMAP_DSS_COLOR_RGB12U:
2560 case OMAP_DSS_COLOR_ARGB16_1555:
2561 case OMAP_DSS_COLOR_XRGB16_1555:
2562 case OMAP_DSS_COLOR_RGBX16:
2563 truncation = true;
2564 break;
2565 default:
2566 truncation = false;
2567 break;
2568 }
2569
2570 /* setup extra DISPC_WB_ATTRIBUTES */
2571 l = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
2572 l = FLD_MOD(l, truncation, 10, 10); /* TRUNCATIONENABLE */
2573 l = FLD_MOD(l, mem_to_mem, 19, 19); /* WRITEBACKMODE */
2574 dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), l);
2575
2576 return r;
2577}
2578
2302int dispc_ovl_enable(enum omap_plane plane, bool enable) 2579int dispc_ovl_enable(enum omap_plane plane, bool enable)
2303{ 2580{
2304 DSSDBG("dispc_enable_plane %d, %d\n", plane, enable); 2581 DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
@@ -2451,6 +2728,47 @@ void dispc_mgr_enable(enum omap_channel channel, bool enable)
2451 BUG(); 2728 BUG();
2452} 2729}
2453 2730
2731void dispc_wb_enable(bool enable)
2732{
2733 enum omap_plane plane = OMAP_DSS_WB;
2734 struct completion frame_done_completion;
2735 bool is_on;
2736 int r;
2737 u32 irq;
2738
2739 is_on = REG_GET(DISPC_OVL_ATTRIBUTES(plane), 0, 0);
2740 irq = DISPC_IRQ_FRAMEDONEWB;
2741
2742 if (!enable && is_on) {
2743 init_completion(&frame_done_completion);
2744
2745 r = omap_dispc_register_isr(dispc_disable_isr,
2746 &frame_done_completion, irq);
2747 if (r)
2748 DSSERR("failed to register FRAMEDONEWB isr\n");
2749 }
2750
2751 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 0, 0);
2752
2753 if (!enable && is_on) {
2754 if (!wait_for_completion_timeout(&frame_done_completion,
2755 msecs_to_jiffies(100)))
2756 DSSERR("timeout waiting for FRAMEDONEWB\n");
2757
2758 r = omap_dispc_unregister_isr(dispc_disable_isr,
2759 &frame_done_completion, irq);
2760 if (r)
2761 DSSERR("failed to unregister FRAMEDONEWB isr\n");
2762 }
2763}
2764
2765bool dispc_wb_is_enabled(void)
2766{
2767 enum omap_plane plane = OMAP_DSS_WB;
2768
2769 return REG_GET(DISPC_OVL_ATTRIBUTES(plane), 0, 0);
2770}
2771
2454void dispc_lcd_enable_signal_polarity(bool act_high) 2772void dispc_lcd_enable_signal_polarity(bool act_high)
2455{ 2773{
2456 if (!dss_has_feature(FEAT_LCDENABLEPOL)) 2774 if (!dss_has_feature(FEAT_LCDENABLEPOL))
@@ -2605,24 +2923,13 @@ static bool _dispc_mgr_size_ok(u16 width, u16 height)
2605static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp, 2923static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
2606 int vsw, int vfp, int vbp) 2924 int vsw, int vfp, int vbp)
2607{ 2925{
2608 if (cpu_is_omap24xx() || omap_rev() < OMAP3430_REV_ES3_0) { 2926 if (hsw < 1 || hsw > dispc.feat->sw_max ||
2609 if (hsw < 1 || hsw > 64 || 2927 hfp < 1 || hfp > dispc.feat->hp_max ||
2610 hfp < 1 || hfp > 256 || 2928 hbp < 1 || hbp > dispc.feat->hp_max ||
2611 hbp < 1 || hbp > 256 || 2929 vsw < 1 || vsw > dispc.feat->sw_max ||
2612 vsw < 1 || vsw > 64 || 2930 vfp < 0 || vfp > dispc.feat->vp_max ||
2613 vfp < 0 || vfp > 255 || 2931 vbp < 0 || vbp > dispc.feat->vp_max)
2614 vbp < 0 || vbp > 255) 2932 return false;
2615 return false;
2616 } else {
2617 if (hsw < 1 || hsw > 256 ||
2618 hfp < 1 || hfp > 4096 ||
2619 hbp < 1 || hbp > 4096 ||
2620 vsw < 1 || vsw > 256 ||
2621 vfp < 0 || vfp > 4095 ||
2622 vbp < 0 || vbp > 4095)
2623 return false;
2624 }
2625
2626 return true; 2933 return true;
2627} 2934}
2628 2935
@@ -2654,19 +2961,12 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
2654 u32 timing_h, timing_v, l; 2961 u32 timing_h, timing_v, l;
2655 bool onoff, rf, ipc; 2962 bool onoff, rf, ipc;
2656 2963
2657 if (cpu_is_omap24xx() || omap_rev() < OMAP3430_REV_ES3_0) { 2964 timing_h = FLD_VAL(hsw-1, dispc.feat->sw_start, 0) |
2658 timing_h = FLD_VAL(hsw-1, 5, 0) | FLD_VAL(hfp-1, 15, 8) | 2965 FLD_VAL(hfp-1, dispc.feat->fp_start, 8) |
2659 FLD_VAL(hbp-1, 27, 20); 2966 FLD_VAL(hbp-1, dispc.feat->bp_start, 20);
2660 2967 timing_v = FLD_VAL(vsw-1, dispc.feat->sw_start, 0) |
2661 timing_v = FLD_VAL(vsw-1, 5, 0) | FLD_VAL(vfp, 15, 8) | 2968 FLD_VAL(vfp, dispc.feat->fp_start, 8) |
2662 FLD_VAL(vbp, 27, 20); 2969 FLD_VAL(vbp, dispc.feat->bp_start, 20);
2663 } else {
2664 timing_h = FLD_VAL(hsw-1, 7, 0) | FLD_VAL(hfp-1, 19, 8) |
2665 FLD_VAL(hbp-1, 31, 20);
2666
2667 timing_v = FLD_VAL(vsw-1, 7, 0) | FLD_VAL(vfp, 19, 8) |
2668 FLD_VAL(vbp, 31, 20);
2669 }
2670 2970
2671 dispc_write_reg(DISPC_TIMING_H(channel), timing_h); 2971 dispc_write_reg(DISPC_TIMING_H(channel), timing_h);
2672 dispc_write_reg(DISPC_TIMING_V(channel), timing_v); 2972 dispc_write_reg(DISPC_TIMING_V(channel), timing_v);
@@ -2872,6 +3172,23 @@ unsigned long dispc_core_clk_rate(void)
2872 return fclk / lcd; 3172 return fclk / lcd;
2873} 3173}
2874 3174
3175static unsigned long dispc_plane_pclk_rate(enum omap_plane plane)
3176{
3177 enum omap_channel channel = dispc_ovl_get_channel_out(plane);
3178
3179 return dispc_mgr_pclk_rate(channel);
3180}
3181
3182static unsigned long dispc_plane_lclk_rate(enum omap_plane plane)
3183{
3184 enum omap_channel channel = dispc_ovl_get_channel_out(plane);
3185
3186 if (dss_mgr_is_lcd(channel))
3187 return dispc_mgr_lclk_rate(channel);
3188 else
3189 return dispc_fclk_rate();
3190
3191}
2875static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel) 3192static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel)
2876{ 3193{
2877 int lcd, pcd; 3194 int lcd, pcd;
@@ -3492,7 +3809,7 @@ static void dispc_error_worker(struct work_struct *work)
3492 ovl->name); 3809 ovl->name);
3493 dispc_ovl_enable(ovl->id, false); 3810 dispc_ovl_enable(ovl->id, false);
3494 dispc_mgr_go(ovl->manager->id); 3811 dispc_mgr_go(ovl->manager->id);
3495 mdelay(50); 3812 msleep(50);
3496 } 3813 }
3497 } 3814 }
3498 3815
@@ -3504,7 +3821,7 @@ static void dispc_error_worker(struct work_struct *work)
3504 bit = mgr_desc[i].sync_lost_irq; 3821 bit = mgr_desc[i].sync_lost_irq;
3505 3822
3506 if (bit & errors) { 3823 if (bit & errors) {
3507 struct omap_dss_device *dssdev = mgr->device; 3824 struct omap_dss_device *dssdev = mgr->get_device(mgr);
3508 bool enable; 3825 bool enable;
3509 3826
3510 DSSERR("SYNC_LOST on channel %s, restarting the output " 3827 DSSERR("SYNC_LOST on channel %s, restarting the output "
@@ -3524,7 +3841,7 @@ static void dispc_error_worker(struct work_struct *work)
3524 } 3841 }
3525 3842
3526 dispc_mgr_go(mgr->id); 3843 dispc_mgr_go(mgr->id);
3527 mdelay(50); 3844 msleep(50);
3528 3845
3529 if (enable) 3846 if (enable)
3530 dssdev->driver->enable(dssdev); 3847 dssdev->driver->enable(dssdev);
@@ -3535,9 +3852,13 @@ static void dispc_error_worker(struct work_struct *work)
3535 DSSERR("OCP_ERR\n"); 3852 DSSERR("OCP_ERR\n");
3536 for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) { 3853 for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
3537 struct omap_overlay_manager *mgr; 3854 struct omap_overlay_manager *mgr;
3855 struct omap_dss_device *dssdev;
3856
3538 mgr = omap_dss_get_overlay_manager(i); 3857 mgr = omap_dss_get_overlay_manager(i);
3539 if (mgr->device && mgr->device->driver) 3858 dssdev = mgr->get_device(mgr);
3540 mgr->device->driver->disable(mgr->device); 3859
3860 if (dssdev && dssdev->driver)
3861 dssdev->driver->disable(dssdev);
3541 } 3862 }
3542 } 3863 }
3543 3864
@@ -3661,17 +3982,98 @@ static void _omap_dispc_initial_config(void)
3661 if (dss_has_feature(FEAT_FUNCGATED)) 3982 if (dss_has_feature(FEAT_FUNCGATED))
3662 REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9); 3983 REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
3663 3984
3664 _dispc_setup_color_conv_coef(); 3985 dispc_setup_color_conv_coef();
3665 3986
3666 dispc_set_loadmode(OMAP_DSS_LOAD_FRAME_ONLY); 3987 dispc_set_loadmode(OMAP_DSS_LOAD_FRAME_ONLY);
3667 3988
3668 dispc_read_plane_fifo_sizes(); 3989 dispc_init_fifos();
3669 3990
3670 dispc_configure_burst_sizes(); 3991 dispc_configure_burst_sizes();
3671 3992
3672 dispc_ovl_enable_zorder_planes(); 3993 dispc_ovl_enable_zorder_planes();
3673} 3994}
3674 3995
3996static const struct dispc_features omap24xx_dispc_feats __initconst = {
3997 .sw_start = 5,
3998 .fp_start = 15,
3999 .bp_start = 27,
4000 .sw_max = 64,
4001 .vp_max = 255,
4002 .hp_max = 256,
4003 .calc_scaling = dispc_ovl_calc_scaling_24xx,
4004 .calc_core_clk = calc_core_clk_24xx,
4005 .num_fifos = 3,
4006};
4007
4008static const struct dispc_features omap34xx_rev1_0_dispc_feats __initconst = {
4009 .sw_start = 5,
4010 .fp_start = 15,
4011 .bp_start = 27,
4012 .sw_max = 64,
4013 .vp_max = 255,
4014 .hp_max = 256,
4015 .calc_scaling = dispc_ovl_calc_scaling_34xx,
4016 .calc_core_clk = calc_core_clk_34xx,
4017 .num_fifos = 3,
4018};
4019
4020static const struct dispc_features omap34xx_rev3_0_dispc_feats __initconst = {
4021 .sw_start = 7,
4022 .fp_start = 19,
4023 .bp_start = 31,
4024 .sw_max = 256,
4025 .vp_max = 4095,
4026 .hp_max = 4096,
4027 .calc_scaling = dispc_ovl_calc_scaling_34xx,
4028 .calc_core_clk = calc_core_clk_34xx,
4029 .num_fifos = 3,
4030};
4031
4032static const struct dispc_features omap44xx_dispc_feats __initconst = {
4033 .sw_start = 7,
4034 .fp_start = 19,
4035 .bp_start = 31,
4036 .sw_max = 256,
4037 .vp_max = 4095,
4038 .hp_max = 4096,
4039 .calc_scaling = dispc_ovl_calc_scaling_44xx,
4040 .calc_core_clk = calc_core_clk_44xx,
4041 .num_fifos = 5,
4042 .gfx_fifo_workaround = true,
4043};
4044
4045static int __init dispc_init_features(struct device *dev)
4046{
4047 const struct dispc_features *src;
4048 struct dispc_features *dst;
4049
4050 dst = devm_kzalloc(dev, sizeof(*dst), GFP_KERNEL);
4051 if (!dst) {
4052 dev_err(dev, "Failed to allocate DISPC Features\n");
4053 return -ENOMEM;
4054 }
4055
4056 if (cpu_is_omap24xx()) {
4057 src = &omap24xx_dispc_feats;
4058 } else if (cpu_is_omap34xx()) {
4059 if (omap_rev() < OMAP3430_REV_ES3_0)
4060 src = &omap34xx_rev1_0_dispc_feats;
4061 else
4062 src = &omap34xx_rev3_0_dispc_feats;
4063 } else if (cpu_is_omap44xx()) {
4064 src = &omap44xx_dispc_feats;
4065 } else if (soc_is_omap54xx()) {
4066 src = &omap44xx_dispc_feats;
4067 } else {
4068 return -ENODEV;
4069 }
4070
4071 memcpy(dst, src, sizeof(*dst));
4072 dispc.feat = dst;
4073
4074 return 0;
4075}
4076
3675/* DISPC HW IP initialisation */ 4077/* DISPC HW IP initialisation */
3676static int __init omap_dispchw_probe(struct platform_device *pdev) 4078static int __init omap_dispchw_probe(struct platform_device *pdev)
3677{ 4079{
@@ -3682,6 +4084,10 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
3682 4084
3683 dispc.pdev = pdev; 4085 dispc.pdev = pdev;
3684 4086
4087 r = dispc_init_features(&dispc.pdev->dev);
4088 if (r)
4089 return r;
4090
3685 spin_lock_init(&dispc.irq_lock); 4091 spin_lock_init(&dispc.irq_lock);
3686 4092
3687#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 4093#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/omap2/dss/dispc.h
index 92d8a9be86fc..222363c6e623 100644
--- a/drivers/video/omap2/dss/dispc.h
+++ b/drivers/video/omap2/dss/dispc.h
@@ -36,6 +36,7 @@
36#define DISPC_CONTROL2 0x0238 36#define DISPC_CONTROL2 0x0238
37#define DISPC_CONFIG2 0x0620 37#define DISPC_CONFIG2 0x0620
38#define DISPC_DIVISOR 0x0804 38#define DISPC_DIVISOR 0x0804
39#define DISPC_GLOBAL_BUFFER 0x0800
39#define DISPC_CONTROL3 0x0848 40#define DISPC_CONTROL3 0x0848
40#define DISPC_CONFIG3 0x084C 41#define DISPC_CONFIG3 0x084C
41 42
@@ -355,6 +356,8 @@ static inline u16 DISPC_OVL_BASE(enum omap_plane plane)
355 return 0x014C; 356 return 0x014C;
356 case OMAP_DSS_VIDEO3: 357 case OMAP_DSS_VIDEO3:
357 return 0x0300; 358 return 0x0300;
359 case OMAP_DSS_WB:
360 return 0x0500;
358 default: 361 default:
359 BUG(); 362 BUG();
360 return 0; 363 return 0;
@@ -370,6 +373,7 @@ static inline u16 DISPC_BA0_OFFSET(enum omap_plane plane)
370 case OMAP_DSS_VIDEO2: 373 case OMAP_DSS_VIDEO2:
371 return 0x0000; 374 return 0x0000;
372 case OMAP_DSS_VIDEO3: 375 case OMAP_DSS_VIDEO3:
376 case OMAP_DSS_WB:
373 return 0x0008; 377 return 0x0008;
374 default: 378 default:
375 BUG(); 379 BUG();
@@ -385,6 +389,7 @@ static inline u16 DISPC_BA1_OFFSET(enum omap_plane plane)
385 case OMAP_DSS_VIDEO2: 389 case OMAP_DSS_VIDEO2:
386 return 0x0004; 390 return 0x0004;
387 case OMAP_DSS_VIDEO3: 391 case OMAP_DSS_VIDEO3:
392 case OMAP_DSS_WB:
388 return 0x000C; 393 return 0x000C;
389 default: 394 default:
390 BUG(); 395 BUG();
@@ -404,6 +409,8 @@ static inline u16 DISPC_BA0_UV_OFFSET(enum omap_plane plane)
404 return 0x04BC; 409 return 0x04BC;
405 case OMAP_DSS_VIDEO3: 410 case OMAP_DSS_VIDEO3:
406 return 0x0310; 411 return 0x0310;
412 case OMAP_DSS_WB:
413 return 0x0118;
407 default: 414 default:
408 BUG(); 415 BUG();
409 return 0; 416 return 0;
@@ -422,6 +429,8 @@ static inline u16 DISPC_BA1_UV_OFFSET(enum omap_plane plane)
422 return 0x04C0; 429 return 0x04C0;
423 case OMAP_DSS_VIDEO3: 430 case OMAP_DSS_VIDEO3:
424 return 0x0314; 431 return 0x0314;
432 case OMAP_DSS_WB:
433 return 0x011C;
425 default: 434 default:
426 BUG(); 435 BUG();
427 return 0; 436 return 0;
@@ -451,6 +460,7 @@ static inline u16 DISPC_SIZE_OFFSET(enum omap_plane plane)
451 case OMAP_DSS_VIDEO2: 460 case OMAP_DSS_VIDEO2:
452 return 0x000C; 461 return 0x000C;
453 case OMAP_DSS_VIDEO3: 462 case OMAP_DSS_VIDEO3:
463 case OMAP_DSS_WB:
454 return 0x00A8; 464 return 0x00A8;
455 default: 465 default:
456 BUG(); 466 BUG();
@@ -467,6 +477,7 @@ static inline u16 DISPC_ATTR_OFFSET(enum omap_plane plane)
467 case OMAP_DSS_VIDEO2: 477 case OMAP_DSS_VIDEO2:
468 return 0x0010; 478 return 0x0010;
469 case OMAP_DSS_VIDEO3: 479 case OMAP_DSS_VIDEO3:
480 case OMAP_DSS_WB:
470 return 0x0070; 481 return 0x0070;
471 default: 482 default:
472 BUG(); 483 BUG();
@@ -486,6 +497,8 @@ static inline u16 DISPC_ATTR2_OFFSET(enum omap_plane plane)
486 return 0x04DC; 497 return 0x04DC;
487 case OMAP_DSS_VIDEO3: 498 case OMAP_DSS_VIDEO3:
488 return 0x032C; 499 return 0x032C;
500 case OMAP_DSS_WB:
501 return 0x0310;
489 default: 502 default:
490 BUG(); 503 BUG();
491 return 0; 504 return 0;
@@ -501,6 +514,7 @@ static inline u16 DISPC_FIFO_THRESH_OFFSET(enum omap_plane plane)
501 case OMAP_DSS_VIDEO2: 514 case OMAP_DSS_VIDEO2:
502 return 0x0014; 515 return 0x0014;
503 case OMAP_DSS_VIDEO3: 516 case OMAP_DSS_VIDEO3:
517 case OMAP_DSS_WB:
504 return 0x008C; 518 return 0x008C;
505 default: 519 default:
506 BUG(); 520 BUG();
@@ -517,6 +531,7 @@ static inline u16 DISPC_FIFO_SIZE_STATUS_OFFSET(enum omap_plane plane)
517 case OMAP_DSS_VIDEO2: 531 case OMAP_DSS_VIDEO2:
518 return 0x0018; 532 return 0x0018;
519 case OMAP_DSS_VIDEO3: 533 case OMAP_DSS_VIDEO3:
534 case OMAP_DSS_WB:
520 return 0x0088; 535 return 0x0088;
521 default: 536 default:
522 BUG(); 537 BUG();
@@ -533,6 +548,7 @@ static inline u16 DISPC_ROW_INC_OFFSET(enum omap_plane plane)
533 case OMAP_DSS_VIDEO2: 548 case OMAP_DSS_VIDEO2:
534 return 0x001C; 549 return 0x001C;
535 case OMAP_DSS_VIDEO3: 550 case OMAP_DSS_VIDEO3:
551 case OMAP_DSS_WB:
536 return 0x00A4; 552 return 0x00A4;
537 default: 553 default:
538 BUG(); 554 BUG();
@@ -549,6 +565,7 @@ static inline u16 DISPC_PIX_INC_OFFSET(enum omap_plane plane)
549 case OMAP_DSS_VIDEO2: 565 case OMAP_DSS_VIDEO2:
550 return 0x0020; 566 return 0x0020;
551 case OMAP_DSS_VIDEO3: 567 case OMAP_DSS_VIDEO3:
568 case OMAP_DSS_WB:
552 return 0x0098; 569 return 0x0098;
553 default: 570 default:
554 BUG(); 571 BUG();
@@ -598,6 +615,7 @@ static inline u16 DISPC_FIR_OFFSET(enum omap_plane plane)
598 case OMAP_DSS_VIDEO2: 615 case OMAP_DSS_VIDEO2:
599 return 0x0024; 616 return 0x0024;
600 case OMAP_DSS_VIDEO3: 617 case OMAP_DSS_VIDEO3:
618 case OMAP_DSS_WB:
601 return 0x0090; 619 return 0x0090;
602 default: 620 default:
603 BUG(); 621 BUG();
@@ -617,6 +635,8 @@ static inline u16 DISPC_FIR2_OFFSET(enum omap_plane plane)
617 return 0x055C; 635 return 0x055C;
618 case OMAP_DSS_VIDEO3: 636 case OMAP_DSS_VIDEO3:
619 return 0x0424; 637 return 0x0424;
638 case OMAP_DSS_WB:
639 return 0x290;
620 default: 640 default:
621 BUG(); 641 BUG();
622 return 0; 642 return 0;
@@ -633,6 +653,7 @@ static inline u16 DISPC_PIC_SIZE_OFFSET(enum omap_plane plane)
633 case OMAP_DSS_VIDEO2: 653 case OMAP_DSS_VIDEO2:
634 return 0x0028; 654 return 0x0028;
635 case OMAP_DSS_VIDEO3: 655 case OMAP_DSS_VIDEO3:
656 case OMAP_DSS_WB:
636 return 0x0094; 657 return 0x0094;
637 default: 658 default:
638 BUG(); 659 BUG();
@@ -651,6 +672,7 @@ static inline u16 DISPC_ACCU0_OFFSET(enum omap_plane plane)
651 case OMAP_DSS_VIDEO2: 672 case OMAP_DSS_VIDEO2:
652 return 0x002C; 673 return 0x002C;
653 case OMAP_DSS_VIDEO3: 674 case OMAP_DSS_VIDEO3:
675 case OMAP_DSS_WB:
654 return 0x0000; 676 return 0x0000;
655 default: 677 default:
656 BUG(); 678 BUG();
@@ -670,6 +692,8 @@ static inline u16 DISPC_ACCU2_0_OFFSET(enum omap_plane plane)
670 return 0x0560; 692 return 0x0560;
671 case OMAP_DSS_VIDEO3: 693 case OMAP_DSS_VIDEO3:
672 return 0x0428; 694 return 0x0428;
695 case OMAP_DSS_WB:
696 return 0x0294;
673 default: 697 default:
674 BUG(); 698 BUG();
675 return 0; 699 return 0;
@@ -686,6 +710,7 @@ static inline u16 DISPC_ACCU1_OFFSET(enum omap_plane plane)
686 case OMAP_DSS_VIDEO2: 710 case OMAP_DSS_VIDEO2:
687 return 0x0030; 711 return 0x0030;
688 case OMAP_DSS_VIDEO3: 712 case OMAP_DSS_VIDEO3:
713 case OMAP_DSS_WB:
689 return 0x0004; 714 return 0x0004;
690 default: 715 default:
691 BUG(); 716 BUG();
@@ -705,6 +730,8 @@ static inline u16 DISPC_ACCU2_1_OFFSET(enum omap_plane plane)
705 return 0x0564; 730 return 0x0564;
706 case OMAP_DSS_VIDEO3: 731 case OMAP_DSS_VIDEO3:
707 return 0x042C; 732 return 0x042C;
733 case OMAP_DSS_WB:
734 return 0x0298;
708 default: 735 default:
709 BUG(); 736 BUG();
710 return 0; 737 return 0;
@@ -722,6 +749,7 @@ static inline u16 DISPC_FIR_COEF_H_OFFSET(enum omap_plane plane, u16 i)
722 case OMAP_DSS_VIDEO2: 749 case OMAP_DSS_VIDEO2:
723 return 0x0034 + i * 0x8; 750 return 0x0034 + i * 0x8;
724 case OMAP_DSS_VIDEO3: 751 case OMAP_DSS_VIDEO3:
752 case OMAP_DSS_WB:
725 return 0x0010 + i * 0x8; 753 return 0x0010 + i * 0x8;
726 default: 754 default:
727 BUG(); 755 BUG();
@@ -742,6 +770,8 @@ static inline u16 DISPC_FIR_COEF_H2_OFFSET(enum omap_plane plane, u16 i)
742 return 0x0568 + i * 0x8; 770 return 0x0568 + i * 0x8;
743 case OMAP_DSS_VIDEO3: 771 case OMAP_DSS_VIDEO3:
744 return 0x0430 + i * 0x8; 772 return 0x0430 + i * 0x8;
773 case OMAP_DSS_WB:
774 return 0x02A0 + i * 0x8;
745 default: 775 default:
746 BUG(); 776 BUG();
747 return 0; 777 return 0;
@@ -759,6 +789,7 @@ static inline u16 DISPC_FIR_COEF_HV_OFFSET(enum omap_plane plane, u16 i)
759 case OMAP_DSS_VIDEO2: 789 case OMAP_DSS_VIDEO2:
760 return 0x0038 + i * 0x8; 790 return 0x0038 + i * 0x8;
761 case OMAP_DSS_VIDEO3: 791 case OMAP_DSS_VIDEO3:
792 case OMAP_DSS_WB:
762 return 0x0014 + i * 0x8; 793 return 0x0014 + i * 0x8;
763 default: 794 default:
764 BUG(); 795 BUG();
@@ -779,6 +810,8 @@ static inline u16 DISPC_FIR_COEF_HV2_OFFSET(enum omap_plane plane, u16 i)
779 return 0x056C + i * 0x8; 810 return 0x056C + i * 0x8;
780 case OMAP_DSS_VIDEO3: 811 case OMAP_DSS_VIDEO3:
781 return 0x0434 + i * 0x8; 812 return 0x0434 + i * 0x8;
813 case OMAP_DSS_WB:
814 return 0x02A4 + i * 0x8;
782 default: 815 default:
783 BUG(); 816 BUG();
784 return 0; 817 return 0;
@@ -795,6 +828,7 @@ static inline u16 DISPC_CONV_COEF_OFFSET(enum omap_plane plane, u16 i)
795 case OMAP_DSS_VIDEO1: 828 case OMAP_DSS_VIDEO1:
796 case OMAP_DSS_VIDEO2: 829 case OMAP_DSS_VIDEO2:
797 case OMAP_DSS_VIDEO3: 830 case OMAP_DSS_VIDEO3:
831 case OMAP_DSS_WB:
798 return 0x0074 + i * 0x4; 832 return 0x0074 + i * 0x4;
799 default: 833 default:
800 BUG(); 834 BUG();
@@ -814,6 +848,7 @@ static inline u16 DISPC_FIR_COEF_V_OFFSET(enum omap_plane plane, u16 i)
814 case OMAP_DSS_VIDEO2: 848 case OMAP_DSS_VIDEO2:
815 return 0x00B4 + i * 0x4; 849 return 0x00B4 + i * 0x4;
816 case OMAP_DSS_VIDEO3: 850 case OMAP_DSS_VIDEO3:
851 case OMAP_DSS_WB:
817 return 0x0050 + i * 0x4; 852 return 0x0050 + i * 0x4;
818 default: 853 default:
819 BUG(); 854 BUG();
@@ -834,6 +869,8 @@ static inline u16 DISPC_FIR_COEF_V2_OFFSET(enum omap_plane plane, u16 i)
834 return 0x05A8 + i * 0x4; 869 return 0x05A8 + i * 0x4;
835 case OMAP_DSS_VIDEO3: 870 case OMAP_DSS_VIDEO3:
836 return 0x0470 + i * 0x4; 871 return 0x0470 + i * 0x4;
872 case OMAP_DSS_WB:
873 return 0x02E0 + i * 0x4;
837 default: 874 default:
838 BUG(); 875 BUG();
839 return 0; 876 return 0;
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index 5bd957e85505..ccf8550fafde 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -142,7 +142,11 @@ static ssize_t display_timings_store(struct device *dev,
142 if (r) 142 if (r)
143 return r; 143 return r;
144 144
145 dssdev->driver->disable(dssdev);
145 dssdev->driver->set_timings(dssdev, &t); 146 dssdev->driver->set_timings(dssdev, &t);
147 r = dssdev->driver->enable(dssdev);
148 if (r)
149 return r;
146 150
147 return size; 151 return size;
148} 152}
@@ -316,26 +320,117 @@ void omapdss_default_get_timings(struct omap_dss_device *dssdev,
316} 320}
317EXPORT_SYMBOL(omapdss_default_get_timings); 321EXPORT_SYMBOL(omapdss_default_get_timings);
318 322
319void dss_init_device(struct platform_device *pdev, 323/*
324 * Connect dssdev to a manager if the manager is free or if force is specified.
325 * Connect all overlays to that manager if they are free or if force is
326 * specified.
327 */
328static int dss_init_connections(struct omap_dss_device *dssdev, bool force)
329{
330 struct omap_dss_output *out;
331 struct omap_overlay_manager *mgr;
332 int i, r;
333
334 out = omapdss_get_output_from_dssdev(dssdev);
335
336 WARN_ON(dssdev->output);
337 WARN_ON(out->device);
338
339 r = omapdss_output_set_device(out, dssdev);
340 if (r) {
341 DSSERR("failed to connect output to new device\n");
342 return r;
343 }
344
345 mgr = omap_dss_get_overlay_manager(dssdev->channel);
346
347 if (mgr->output && !force)
348 return 0;
349
350 if (mgr->output)
351 mgr->unset_output(mgr);
352
353 r = mgr->set_output(mgr, out);
354 if (r) {
355 DSSERR("failed to connect manager to output of new device\n");
356
357 /* remove the output-device connection we just made */
358 omapdss_output_unset_device(out);
359 return r;
360 }
361
362 for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
363 struct omap_overlay *ovl = omap_dss_get_overlay(i);
364
365 if (!ovl->manager || force) {
366 if (ovl->manager)
367 ovl->unset_manager(ovl);
368
369 r = ovl->set_manager(ovl, mgr);
370 if (r) {
371 DSSERR("failed to set initial overlay\n");
372 return r;
373 }
374 }
375 }
376
377 return 0;
378}
379
380static void dss_uninit_connections(struct omap_dss_device *dssdev)
381{
382 if (dssdev->output) {
383 struct omap_overlay_manager *mgr = dssdev->output->manager;
384
385 if (mgr)
386 mgr->unset_output(mgr);
387
388 omapdss_output_unset_device(dssdev->output);
389 }
390}
391
392int dss_init_device(struct platform_device *pdev,
320 struct omap_dss_device *dssdev) 393 struct omap_dss_device *dssdev)
321{ 394{
322 struct device_attribute *attr; 395 struct device_attribute *attr;
323 int i; 396 int i, r;
324 int r; 397 const char *def_disp_name = dss_get_default_display_name();
398 bool force;
399
400 force = def_disp_name && strcmp(def_disp_name, dssdev->name) == 0;
401 dss_init_connections(dssdev, force);
325 402
326 /* create device sysfs files */ 403 /* create device sysfs files */
327 i = 0; 404 i = 0;
328 while ((attr = display_sysfs_attrs[i++]) != NULL) { 405 while ((attr = display_sysfs_attrs[i++]) != NULL) {
329 r = device_create_file(&dssdev->dev, attr); 406 r = device_create_file(&dssdev->dev, attr);
330 if (r) 407 if (r) {
408 for (i = i - 2; i >= 0; i--) {
409 attr = display_sysfs_attrs[i];
410 device_remove_file(&dssdev->dev, attr);
411 }
412
413 dss_uninit_connections(dssdev);
414
331 DSSERR("failed to create sysfs file\n"); 415 DSSERR("failed to create sysfs file\n");
416 return r;
417 }
332 } 418 }
333 419
334 /* create display? sysfs links */ 420 /* create display? sysfs links */
335 r = sysfs_create_link(&pdev->dev.kobj, &dssdev->dev.kobj, 421 r = sysfs_create_link(&pdev->dev.kobj, &dssdev->dev.kobj,
336 dev_name(&dssdev->dev)); 422 dev_name(&dssdev->dev));
337 if (r) 423 if (r) {
424 while ((attr = display_sysfs_attrs[i++]) != NULL)
425 device_remove_file(&dssdev->dev, attr);
426
427 dss_uninit_connections(dssdev);
428
338 DSSERR("failed to create sysfs display link\n"); 429 DSSERR("failed to create sysfs display link\n");
430 return r;
431 }
432
433 return 0;
339} 434}
340 435
341void dss_uninit_device(struct platform_device *pdev, 436void dss_uninit_device(struct platform_device *pdev,
@@ -349,8 +444,7 @@ void dss_uninit_device(struct platform_device *pdev,
349 while ((attr = display_sysfs_attrs[i++]) != NULL) 444 while ((attr = display_sysfs_attrs[i++]) != NULL)
350 device_remove_file(&dssdev->dev, attr); 445 device_remove_file(&dssdev->dev, attr);
351 446
352 if (dssdev->manager) 447 dss_uninit_connections(dssdev);
353 dssdev->manager->unset_device(dssdev->manager);
354} 448}
355 449
356static int dss_suspend_device(struct device *dev, void *data) 450static int dss_suspend_device(struct device *dev, void *data)
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 3266be23fc0d..56748cf8760e 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -29,17 +29,24 @@
29#include <linux/errno.h> 29#include <linux/errno.h>
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31#include <linux/regulator/consumer.h> 31#include <linux/regulator/consumer.h>
32#include <linux/string.h>
32 33
33#include <video/omapdss.h> 34#include <video/omapdss.h>
34#include <plat/cpu.h>
35 35
36#include "dss.h" 36#include "dss.h"
37#include "dss_features.h"
37 38
38static struct { 39static struct {
39 struct regulator *vdds_dsi_reg; 40 struct regulator *vdds_dsi_reg;
40 struct platform_device *dsidev; 41 struct platform_device *dsidev;
41 42
43 struct mutex lock;
44
45 struct omap_video_timings timings;
42 struct dss_lcd_mgr_config mgr_config; 46 struct dss_lcd_mgr_config mgr_config;
47 int data_lines;
48
49 struct omap_dss_output output;
43} dpi; 50} dpi;
44 51
45static struct platform_device *dpi_get_dsidev(enum omap_dss_clk_source clk) 52static struct platform_device *dpi_get_dsidev(enum omap_dss_clk_source clk)
@@ -121,7 +128,8 @@ static int dpi_set_dispc_clk(struct omap_dss_device *dssdev,
121 128
122static int dpi_set_mode(struct omap_dss_device *dssdev) 129static int dpi_set_mode(struct omap_dss_device *dssdev)
123{ 130{
124 struct omap_video_timings *t = &dssdev->panel.timings; 131 struct omap_video_timings *t = &dpi.timings;
132 struct omap_overlay_manager *mgr = dssdev->output->manager;
125 int lck_div = 0, pck_div = 0; 133 int lck_div = 0, pck_div = 0;
126 unsigned long fck = 0; 134 unsigned long fck = 0;
127 unsigned long pck; 135 unsigned long pck;
@@ -146,37 +154,44 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
146 t->pixel_clock = pck; 154 t->pixel_clock = pck;
147 } 155 }
148 156
149 dss_mgr_set_timings(dssdev->manager, t); 157 dss_mgr_set_timings(mgr, t);
150 158
151 return 0; 159 return 0;
152} 160}
153 161
154static void dpi_config_lcd_manager(struct omap_dss_device *dssdev) 162static void dpi_config_lcd_manager(struct omap_dss_device *dssdev)
155{ 163{
164 struct omap_overlay_manager *mgr = dssdev->output->manager;
165
156 dpi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS; 166 dpi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
157 167
158 dpi.mgr_config.stallmode = false; 168 dpi.mgr_config.stallmode = false;
159 dpi.mgr_config.fifohandcheck = false; 169 dpi.mgr_config.fifohandcheck = false;
160 170
161 dpi.mgr_config.video_port_width = dssdev->phy.dpi.data_lines; 171 dpi.mgr_config.video_port_width = dpi.data_lines;
162 172
163 dpi.mgr_config.lcden_sig_polarity = 0; 173 dpi.mgr_config.lcden_sig_polarity = 0;
164 174
165 dss_mgr_set_lcd_config(dssdev->manager, &dpi.mgr_config); 175 dss_mgr_set_lcd_config(mgr, &dpi.mgr_config);
166} 176}
167 177
168int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) 178int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
169{ 179{
180 struct omap_dss_output *out = dssdev->output;
170 int r; 181 int r;
171 182
172 if (cpu_is_omap34xx() && !dpi.vdds_dsi_reg) { 183 mutex_lock(&dpi.lock);
184
185 if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI) && !dpi.vdds_dsi_reg) {
173 DSSERR("no VDSS_DSI regulator\n"); 186 DSSERR("no VDSS_DSI regulator\n");
174 return -ENODEV; 187 r = -ENODEV;
188 goto err_no_reg;
175 } 189 }
176 190
177 if (dssdev->manager == NULL) { 191 if (out == NULL || out->manager == NULL) {
178 DSSERR("failed to enable display: no manager\n"); 192 DSSERR("failed to enable display: no output/manager\n");
179 return -ENODEV; 193 r = -ENODEV;
194 goto err_no_out_mgr;
180 } 195 }
181 196
182 r = omap_dss_start_device(dssdev); 197 r = omap_dss_start_device(dssdev);
@@ -185,7 +200,7 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
185 goto err_start_dev; 200 goto err_start_dev;
186 } 201 }
187 202
188 if (cpu_is_omap34xx()) { 203 if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI)) {
189 r = regulator_enable(dpi.vdds_dsi_reg); 204 r = regulator_enable(dpi.vdds_dsi_reg);
190 if (r) 205 if (r)
191 goto err_reg_enable; 206 goto err_reg_enable;
@@ -195,6 +210,10 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
195 if (r) 210 if (r)
196 goto err_get_dispc; 211 goto err_get_dispc;
197 212
213 r = dss_dpi_select_source(dssdev->channel);
214 if (r)
215 goto err_src_sel;
216
198 if (dpi_use_dsi_pll(dssdev)) { 217 if (dpi_use_dsi_pll(dssdev)) {
199 r = dsi_runtime_get(dpi.dsidev); 218 r = dsi_runtime_get(dpi.dsidev);
200 if (r) 219 if (r)
@@ -213,10 +232,12 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
213 232
214 mdelay(2); 233 mdelay(2);
215 234
216 r = dss_mgr_enable(dssdev->manager); 235 r = dss_mgr_enable(out->manager);
217 if (r) 236 if (r)
218 goto err_mgr_enable; 237 goto err_mgr_enable;
219 238
239 mutex_unlock(&dpi.lock);
240
220 return 0; 241 return 0;
221 242
222err_mgr_enable: 243err_mgr_enable:
@@ -227,20 +248,28 @@ err_dsi_pll_init:
227 if (dpi_use_dsi_pll(dssdev)) 248 if (dpi_use_dsi_pll(dssdev))
228 dsi_runtime_put(dpi.dsidev); 249 dsi_runtime_put(dpi.dsidev);
229err_get_dsi: 250err_get_dsi:
251err_src_sel:
230 dispc_runtime_put(); 252 dispc_runtime_put();
231err_get_dispc: 253err_get_dispc:
232 if (cpu_is_omap34xx()) 254 if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI))
233 regulator_disable(dpi.vdds_dsi_reg); 255 regulator_disable(dpi.vdds_dsi_reg);
234err_reg_enable: 256err_reg_enable:
235 omap_dss_stop_device(dssdev); 257 omap_dss_stop_device(dssdev);
236err_start_dev: 258err_start_dev:
259err_no_out_mgr:
260err_no_reg:
261 mutex_unlock(&dpi.lock);
237 return r; 262 return r;
238} 263}
239EXPORT_SYMBOL(omapdss_dpi_display_enable); 264EXPORT_SYMBOL(omapdss_dpi_display_enable);
240 265
241void omapdss_dpi_display_disable(struct omap_dss_device *dssdev) 266void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
242{ 267{
243 dss_mgr_disable(dssdev->manager); 268 struct omap_overlay_manager *mgr = dssdev->output->manager;
269
270 mutex_lock(&dpi.lock);
271
272 dss_mgr_disable(mgr);
244 273
245 if (dpi_use_dsi_pll(dssdev)) { 274 if (dpi_use_dsi_pll(dssdev)) {
246 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); 275 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
@@ -250,44 +279,39 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
250 279
251 dispc_runtime_put(); 280 dispc_runtime_put();
252 281
253 if (cpu_is_omap34xx()) 282 if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI))
254 regulator_disable(dpi.vdds_dsi_reg); 283 regulator_disable(dpi.vdds_dsi_reg);
255 284
256 omap_dss_stop_device(dssdev); 285 omap_dss_stop_device(dssdev);
286
287 mutex_unlock(&dpi.lock);
257} 288}
258EXPORT_SYMBOL(omapdss_dpi_display_disable); 289EXPORT_SYMBOL(omapdss_dpi_display_disable);
259 290
260void dpi_set_timings(struct omap_dss_device *dssdev, 291void omapdss_dpi_set_timings(struct omap_dss_device *dssdev,
261 struct omap_video_timings *timings) 292 struct omap_video_timings *timings)
262{ 293{
263 int r;
264
265 DSSDBG("dpi_set_timings\n"); 294 DSSDBG("dpi_set_timings\n");
266 dssdev->panel.timings = *timings;
267 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
268 r = dispc_runtime_get();
269 if (r)
270 return;
271 295
272 dpi_set_mode(dssdev); 296 mutex_lock(&dpi.lock);
273 297
274 dispc_runtime_put(); 298 dpi.timings = *timings;
275 } else { 299
276 dss_mgr_set_timings(dssdev->manager, timings); 300 mutex_unlock(&dpi.lock);
277 }
278} 301}
279EXPORT_SYMBOL(dpi_set_timings); 302EXPORT_SYMBOL(omapdss_dpi_set_timings);
280 303
281int dpi_check_timings(struct omap_dss_device *dssdev, 304int dpi_check_timings(struct omap_dss_device *dssdev,
282 struct omap_video_timings *timings) 305 struct omap_video_timings *timings)
283{ 306{
284 int r; 307 int r;
308 struct omap_overlay_manager *mgr = dssdev->output->manager;
285 int lck_div, pck_div; 309 int lck_div, pck_div;
286 unsigned long fck; 310 unsigned long fck;
287 unsigned long pck; 311 unsigned long pck;
288 struct dispc_clock_info dispc_cinfo; 312 struct dispc_clock_info dispc_cinfo;
289 313
290 if (dss_mgr_check_timings(dssdev->manager, timings)) 314 if (dss_mgr_check_timings(mgr, timings))
291 return -EINVAL; 315 return -EINVAL;
292 316
293 if (timings->pixel_clock == 0) 317 if (timings->pixel_clock == 0)
@@ -325,11 +349,22 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
325} 349}
326EXPORT_SYMBOL(dpi_check_timings); 350EXPORT_SYMBOL(dpi_check_timings);
327 351
352void omapdss_dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
353{
354 mutex_lock(&dpi.lock);
355
356 dpi.data_lines = data_lines;
357
358 mutex_unlock(&dpi.lock);
359}
360EXPORT_SYMBOL(omapdss_dpi_set_data_lines);
361
328static int __init dpi_init_display(struct omap_dss_device *dssdev) 362static int __init dpi_init_display(struct omap_dss_device *dssdev)
329{ 363{
330 DSSDBG("init_display\n"); 364 DSSDBG("init_display\n");
331 365
332 if (cpu_is_omap34xx() && dpi.vdds_dsi_reg == NULL) { 366 if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI) &&
367 dpi.vdds_dsi_reg == NULL) {
333 struct regulator *vdds_dsi; 368 struct regulator *vdds_dsi;
334 369
335 vdds_dsi = dss_get_vdds_dsi(); 370 vdds_dsi = dss_get_vdds_dsi();
@@ -351,10 +386,14 @@ static int __init dpi_init_display(struct omap_dss_device *dssdev)
351 return 0; 386 return 0;
352} 387}
353 388
354static void __init dpi_probe_pdata(struct platform_device *pdev) 389static struct omap_dss_device * __init dpi_find_dssdev(struct platform_device *pdev)
355{ 390{
356 struct omap_dss_board_info *pdata = pdev->dev.platform_data; 391 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
357 int i, r; 392 const char *def_disp_name = dss_get_default_display_name();
393 struct omap_dss_device *def_dssdev;
394 int i;
395
396 def_dssdev = NULL;
358 397
359 for (i = 0; i < pdata->num_devices; ++i) { 398 for (i = 0; i < pdata->num_devices; ++i) {
360 struct omap_dss_device *dssdev = pdata->devices[i]; 399 struct omap_dss_device *dssdev = pdata->devices[i];
@@ -362,21 +401,75 @@ static void __init dpi_probe_pdata(struct platform_device *pdev)
362 if (dssdev->type != OMAP_DISPLAY_TYPE_DPI) 401 if (dssdev->type != OMAP_DISPLAY_TYPE_DPI)
363 continue; 402 continue;
364 403
365 r = dpi_init_display(dssdev); 404 if (def_dssdev == NULL)
366 if (r) { 405 def_dssdev = dssdev;
367 DSSERR("device %s init failed: %d\n", dssdev->name, r); 406
368 continue; 407 if (def_disp_name != NULL &&
408 strcmp(dssdev->name, def_disp_name) == 0) {
409 def_dssdev = dssdev;
410 break;
369 } 411 }
412 }
370 413
371 r = omap_dss_register_device(dssdev, &pdev->dev, i); 414 return def_dssdev;
372 if (r) 415}
373 DSSERR("device %s register failed: %d\n", 416
374 dssdev->name, r); 417static void __init dpi_probe_pdata(struct platform_device *dpidev)
418{
419 struct omap_dss_device *plat_dssdev;
420 struct omap_dss_device *dssdev;
421 int r;
422
423 plat_dssdev = dpi_find_dssdev(dpidev);
424
425 if (!plat_dssdev)
426 return;
427
428 dssdev = dss_alloc_and_init_device(&dpidev->dev);
429 if (!dssdev)
430 return;
431
432 dss_copy_device_pdata(dssdev, plat_dssdev);
433
434 r = dpi_init_display(dssdev);
435 if (r) {
436 DSSERR("device %s init failed: %d\n", dssdev->name, r);
437 dss_put_device(dssdev);
438 return;
439 }
440
441 r = dss_add_device(dssdev);
442 if (r) {
443 DSSERR("device %s register failed: %d\n", dssdev->name, r);
444 dss_put_device(dssdev);
445 return;
375 } 446 }
376} 447}
377 448
449static void __init dpi_init_output(struct platform_device *pdev)
450{
451 struct omap_dss_output *out = &dpi.output;
452
453 out->pdev = pdev;
454 out->id = OMAP_DSS_OUTPUT_DPI;
455 out->type = OMAP_DISPLAY_TYPE_DPI;
456
457 dss_register_output(out);
458}
459
460static void __exit dpi_uninit_output(struct platform_device *pdev)
461{
462 struct omap_dss_output *out = &dpi.output;
463
464 dss_unregister_output(out);
465}
466
378static int __init omap_dpi_probe(struct platform_device *pdev) 467static int __init omap_dpi_probe(struct platform_device *pdev)
379{ 468{
469 mutex_init(&dpi.lock);
470
471 dpi_init_output(pdev);
472
380 dpi_probe_pdata(pdev); 473 dpi_probe_pdata(pdev);
381 474
382 return 0; 475 return 0;
@@ -384,7 +477,9 @@ static int __init omap_dpi_probe(struct platform_device *pdev)
384 477
385static int __exit omap_dpi_remove(struct platform_device *pdev) 478static int __exit omap_dpi_remove(struct platform_device *pdev)
386{ 479{
387 omap_dss_unregister_child_devices(&pdev->dev); 480 dss_unregister_child_devices(&pdev->dev);
481
482 dpi_uninit_output(pdev);
388 483
389 return 0; 484 return 0;
390} 485}
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 05ee04667af1..d64ac3842884 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -41,7 +41,6 @@
41 41
42#include <video/omapdss.h> 42#include <video/omapdss.h>
43#include <video/mipi_display.h> 43#include <video/mipi_display.h>
44#include <plat/clock.h>
45 44
46#include "dss.h" 45#include "dss.h"
47#include "dss_features.h" 46#include "dss_features.h"
@@ -333,6 +332,12 @@ struct dsi_data {
333 unsigned scp_clk_refcount; 332 unsigned scp_clk_refcount;
334 333
335 struct dss_lcd_mgr_config mgr_config; 334 struct dss_lcd_mgr_config mgr_config;
335 struct omap_video_timings timings;
336 enum omap_dss_dsi_pixel_format pix_fmt;
337 enum omap_dss_dsi_mode mode;
338 struct omap_dss_dsi_videomode_timings vm_timings;
339
340 struct omap_dss_output output;
336}; 341};
337 342
338struct dsi_packet_sent_handler_data { 343struct dsi_packet_sent_handler_data {
@@ -340,8 +345,6 @@ struct dsi_packet_sent_handler_data {
340 struct completion *completion; 345 struct completion *completion;
341}; 346};
342 347
343static struct platform_device *dsi_pdev_map[MAX_NUM_DSI];
344
345#ifdef DEBUG 348#ifdef DEBUG
346static bool dsi_perf; 349static bool dsi_perf;
347module_param(dsi_perf, bool, 0644); 350module_param(dsi_perf, bool, 0644);
@@ -354,12 +357,19 @@ static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dside
354 357
355static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev) 358static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev)
356{ 359{
357 return dsi_pdev_map[dssdev->phy.dsi.module]; 360 return dssdev->output->pdev;
358} 361}
359 362
360struct platform_device *dsi_get_dsidev_from_id(int module) 363struct platform_device *dsi_get_dsidev_from_id(int module)
361{ 364{
362 return dsi_pdev_map[module]; 365 struct omap_dss_output *out;
366 enum omap_dss_output_id id;
367
368 id = module == 0 ? OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
369
370 out = omap_dss_get_output(id);
371
372 return out->pdev;
363} 373}
364 374
365static inline void dsi_write_reg(struct platform_device *dsidev, 375static inline void dsi_write_reg(struct platform_device *dsidev,
@@ -1450,6 +1460,148 @@ found:
1450 return 0; 1460 return 0;
1451} 1461}
1452 1462
1463static int dsi_pll_calc_ddrfreq(struct platform_device *dsidev,
1464 unsigned long req_clkin4ddr, struct dsi_clock_info *cinfo)
1465{
1466 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1467 struct dsi_clock_info cur, best;
1468
1469 DSSDBG("dsi_pll_calc_ddrfreq\n");
1470
1471 memset(&best, 0, sizeof(best));
1472 memset(&cur, 0, sizeof(cur));
1473
1474 cur.clkin = clk_get_rate(dsi->sys_clk);
1475
1476 for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
1477 cur.fint = cur.clkin / cur.regn;
1478
1479 if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
1480 continue;
1481
1482 /* DSIPHY(MHz) = (2 * regm / regn) * clkin */
1483 for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
1484 unsigned long a, b;
1485
1486 a = 2 * cur.regm * (cur.clkin/1000);
1487 b = cur.regn;
1488 cur.clkin4ddr = a / b * 1000;
1489
1490 if (cur.clkin4ddr > 1800 * 1000 * 1000)
1491 break;
1492
1493 if (abs(cur.clkin4ddr - req_clkin4ddr) <
1494 abs(best.clkin4ddr - req_clkin4ddr)) {
1495 best = cur;
1496 DSSDBG("best %ld\n", best.clkin4ddr);
1497 }
1498
1499 if (cur.clkin4ddr == req_clkin4ddr)
1500 goto found;
1501 }
1502 }
1503found:
1504 if (cinfo)
1505 *cinfo = best;
1506
1507 return 0;
1508}
1509
1510static void dsi_pll_calc_dsi_fck(struct platform_device *dsidev,
1511 struct dsi_clock_info *cinfo)
1512{
1513 unsigned long max_dsi_fck;
1514
1515 max_dsi_fck = dss_feat_get_param_max(FEAT_PARAM_DSI_FCK);
1516
1517 cinfo->regm_dsi = DIV_ROUND_UP(cinfo->clkin4ddr, max_dsi_fck);
1518 cinfo->dsi_pll_hsdiv_dsi_clk = cinfo->clkin4ddr / cinfo->regm_dsi;
1519}
1520
1521static int dsi_pll_calc_dispc_fck(struct platform_device *dsidev,
1522 unsigned long req_pck, struct dsi_clock_info *cinfo,
1523 struct dispc_clock_info *dispc_cinfo)
1524{
1525 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1526 unsigned regm_dispc, best_regm_dispc;
1527 unsigned long dispc_clk, best_dispc_clk;
1528 int min_fck_per_pck;
1529 unsigned long max_dss_fck;
1530 struct dispc_clock_info best_dispc;
1531 bool match;
1532
1533 max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
1534
1535 min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
1536
1537 if (min_fck_per_pck &&
1538 req_pck * min_fck_per_pck > max_dss_fck) {
1539 DSSERR("Requested pixel clock not possible with the current "
1540 "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
1541 "the constraint off.\n");
1542 min_fck_per_pck = 0;
1543 }
1544
1545retry:
1546 best_regm_dispc = 0;
1547 best_dispc_clk = 0;
1548 memset(&best_dispc, 0, sizeof(best_dispc));
1549 match = false;
1550
1551 for (regm_dispc = 1; regm_dispc < dsi->regm_dispc_max; ++regm_dispc) {
1552 struct dispc_clock_info cur_dispc;
1553
1554 dispc_clk = cinfo->clkin4ddr / regm_dispc;
1555
1556 /* this will narrow down the search a bit,
1557 * but still give pixclocks below what was
1558 * requested */
1559 if (dispc_clk < req_pck)
1560 break;
1561
1562 if (dispc_clk > max_dss_fck)
1563 continue;
1564
1565 if (min_fck_per_pck && dispc_clk < req_pck * min_fck_per_pck)
1566 continue;
1567
1568 match = true;
1569
1570 dispc_find_clk_divs(req_pck, dispc_clk, &cur_dispc);
1571
1572 if (abs(cur_dispc.pck - req_pck) <
1573 abs(best_dispc.pck - req_pck)) {
1574 best_regm_dispc = regm_dispc;
1575 best_dispc_clk = dispc_clk;
1576 best_dispc = cur_dispc;
1577
1578 if (cur_dispc.pck == req_pck)
1579 goto found;
1580 }
1581 }
1582
1583 if (!match) {
1584 if (min_fck_per_pck) {
1585 DSSERR("Could not find suitable clock settings.\n"
1586 "Turning FCK/PCK constraint off and"
1587 "trying again.\n");
1588 min_fck_per_pck = 0;
1589 goto retry;
1590 }
1591
1592 DSSERR("Could not find suitable clock settings.\n");
1593
1594 return -EINVAL;
1595 }
1596found:
1597 cinfo->regm_dispc = best_regm_dispc;
1598 cinfo->dsi_pll_hsdiv_dispc_clk = best_dispc_clk;
1599
1600 *dispc_cinfo = best_dispc;
1601
1602 return 0;
1603}
1604
1453int dsi_pll_set_clock_div(struct platform_device *dsidev, 1605int dsi_pll_set_clock_div(struct platform_device *dsidev,
1454 struct dsi_clock_info *cinfo) 1606 struct dsi_clock_info *cinfo)
1455{ 1607{
@@ -1526,21 +1678,27 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
1526 1678
1527 BUG_ON(cinfo->fint < dsi->fint_min || cinfo->fint > dsi->fint_max); 1679 BUG_ON(cinfo->fint < dsi->fint_min || cinfo->fint > dsi->fint_max);
1528 1680
1681 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
1682
1529 if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) { 1683 if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) {
1530 f = cinfo->fint < 1000000 ? 0x3 : 1684 f = cinfo->fint < 1000000 ? 0x3 :
1531 cinfo->fint < 1250000 ? 0x4 : 1685 cinfo->fint < 1250000 ? 0x4 :
1532 cinfo->fint < 1500000 ? 0x5 : 1686 cinfo->fint < 1500000 ? 0x5 :
1533 cinfo->fint < 1750000 ? 0x6 : 1687 cinfo->fint < 1750000 ? 0x6 :
1534 0x7; 1688 0x7;
1535 }
1536 1689
1537 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
1538
1539 if (dss_has_feature(FEAT_DSI_PLL_FREQSEL))
1540 l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */ 1690 l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
1691 } else if (dss_has_feature(FEAT_DSI_PLL_SELFREQDCO)) {
1692 f = cinfo->clkin4ddr < 1000000000 ? 0x2 : 0x4;
1693
1694 l = FLD_MOD(l, f, 4, 1); /* PLL_SELFREQDCO */
1695 }
1696
1541 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */ 1697 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1542 l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */ 1698 l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */
1543 l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */ 1699 l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */
1700 if (dss_has_feature(FEAT_DSI_PLL_REFSEL))
1701 l = FLD_MOD(l, 3, 22, 21); /* REF_SYSCLK = sysclk */
1544 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l); 1702 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);
1545 1703
1546 REG_FLD_MOD(dsidev, DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */ 1704 REG_FLD_MOD(dsidev, DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */
@@ -2004,15 +2162,16 @@ static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
2004 return 1194 * 3; /* 1194x24 bits */ 2162 return 1194 * 3; /* 1194x24 bits */
2005 case 6: 2163 case 6:
2006 return 1365 * 3; /* 1365x24 bits */ 2164 return 1365 * 3; /* 1365x24 bits */
2165 case 7:
2166 return 1920 * 3; /* 1920x24 bits */
2007 default: 2167 default:
2008 BUG(); 2168 BUG();
2009 return 0; 2169 return 0;
2010 } 2170 }
2011} 2171}
2012 2172
2013static int dsi_set_lane_config(struct omap_dss_device *dssdev) 2173static int dsi_set_lane_config(struct platform_device *dsidev)
2014{ 2174{
2015 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2016 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 2175 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2017 static const u8 offsets[] = { 0, 4, 8, 12, 16 }; 2176 static const u8 offsets[] = { 0, 4, 8, 12, 16 };
2018 static const enum dsi_lane_function functions[] = { 2177 static const enum dsi_lane_function functions[] = {
@@ -2136,9 +2295,16 @@ static void dsi_cio_timings(struct platform_device *dsidev)
2136 dsi_write_reg(dsidev, DSI_DSIPHY_CFG0, r); 2295 dsi_write_reg(dsidev, DSI_DSIPHY_CFG0, r);
2137 2296
2138 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1); 2297 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
2139 r = FLD_MOD(r, tlpx_half, 22, 16); 2298 r = FLD_MOD(r, tlpx_half, 20, 16);
2140 r = FLD_MOD(r, tclk_trail, 15, 8); 2299 r = FLD_MOD(r, tclk_trail, 15, 8);
2141 r = FLD_MOD(r, tclk_zero, 7, 0); 2300 r = FLD_MOD(r, tclk_zero, 7, 0);
2301
2302 if (dss_has_feature(FEAT_DSI_PHY_DCC)) {
2303 r = FLD_MOD(r, 0, 21, 21); /* DCCEN = disable */
2304 r = FLD_MOD(r, 1, 22, 22); /* CLKINP_DIVBY2EN = enable */
2305 r = FLD_MOD(r, 1, 23, 23); /* CLKINP_SEL = enable */
2306 }
2307
2142 dsi_write_reg(dsidev, DSI_DSIPHY_CFG1, r); 2308 dsi_write_reg(dsidev, DSI_DSIPHY_CFG1, r);
2143 2309
2144 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2); 2310 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
@@ -2147,10 +2313,9 @@ static void dsi_cio_timings(struct platform_device *dsidev)
2147} 2313}
2148 2314
2149/* lane masks have lane 0 at lsb. mask_p for positive lines, n for negative */ 2315/* lane masks have lane 0 at lsb. mask_p for positive lines, n for negative */
2150static void dsi_cio_enable_lane_override(struct omap_dss_device *dssdev, 2316static void dsi_cio_enable_lane_override(struct platform_device *dsidev,
2151 unsigned mask_p, unsigned mask_n) 2317 unsigned mask_p, unsigned mask_n)
2152{ 2318{
2153 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2154 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 2319 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2155 int i; 2320 int i;
2156 u32 l; 2321 u32 l;
@@ -2197,9 +2362,8 @@ static void dsi_cio_disable_lane_override(struct platform_device *dsidev)
2197 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 22, 17); 2362 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 22, 17);
2198} 2363}
2199 2364
2200static int dsi_cio_wait_tx_clk_esc_reset(struct omap_dss_device *dssdev) 2365static int dsi_cio_wait_tx_clk_esc_reset(struct platform_device *dsidev)
2201{ 2366{
2202 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2203 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 2367 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2204 int t, i; 2368 int t, i;
2205 bool in_use[DSI_MAX_NR_LANES]; 2369 bool in_use[DSI_MAX_NR_LANES];
@@ -2247,9 +2411,8 @@ static int dsi_cio_wait_tx_clk_esc_reset(struct omap_dss_device *dssdev)
2247} 2411}
2248 2412
2249/* return bitmask of enabled lanes, lane0 being the lsb */ 2413/* return bitmask of enabled lanes, lane0 being the lsb */
2250static unsigned dsi_get_lane_mask(struct omap_dss_device *dssdev) 2414static unsigned dsi_get_lane_mask(struct platform_device *dsidev)
2251{ 2415{
2252 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2253 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 2416 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2254 unsigned mask = 0; 2417 unsigned mask = 0;
2255 int i; 2418 int i;
@@ -2262,16 +2425,15 @@ static unsigned dsi_get_lane_mask(struct omap_dss_device *dssdev)
2262 return mask; 2425 return mask;
2263} 2426}
2264 2427
2265static int dsi_cio_init(struct omap_dss_device *dssdev) 2428static int dsi_cio_init(struct platform_device *dsidev)
2266{ 2429{
2267 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2268 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 2430 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2269 int r; 2431 int r;
2270 u32 l; 2432 u32 l;
2271 2433
2272 DSSDBGF(); 2434 DSSDBGF();
2273 2435
2274 r = dss_dsi_enable_pads(dsi->module_id, dsi_get_lane_mask(dssdev)); 2436 r = dss_dsi_enable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
2275 if (r) 2437 if (r)
2276 return r; 2438 return r;
2277 2439
@@ -2288,7 +2450,7 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
2288 goto err_scp_clk_dom; 2450 goto err_scp_clk_dom;
2289 } 2451 }
2290 2452
2291 r = dsi_set_lane_config(dssdev); 2453 r = dsi_set_lane_config(dsidev);
2292 if (r) 2454 if (r)
2293 goto err_scp_clk_dom; 2455 goto err_scp_clk_dom;
2294 2456
@@ -2323,7 +2485,7 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
2323 mask_p |= 1 << i; 2485 mask_p |= 1 << i;
2324 } 2486 }
2325 2487
2326 dsi_cio_enable_lane_override(dssdev, mask_p, 0); 2488 dsi_cio_enable_lane_override(dsidev, mask_p, 0);
2327 } 2489 }
2328 2490
2329 r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON); 2491 r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON);
@@ -2340,7 +2502,7 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
2340 dsi_if_enable(dsidev, false); 2502 dsi_if_enable(dsidev, false);
2341 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */ 2503 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
2342 2504
2343 r = dsi_cio_wait_tx_clk_esc_reset(dssdev); 2505 r = dsi_cio_wait_tx_clk_esc_reset(dsidev);
2344 if (r) 2506 if (r)
2345 goto err_tx_clk_esc_rst; 2507 goto err_tx_clk_esc_rst;
2346 2508
@@ -2360,10 +2522,10 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
2360 2522
2361 dsi_cio_timings(dsidev); 2523 dsi_cio_timings(dsidev);
2362 2524
2363 if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) { 2525 if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
2364 /* DDR_CLK_ALWAYS_ON */ 2526 /* DDR_CLK_ALWAYS_ON */
2365 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 2527 REG_FLD_MOD(dsidev, DSI_CLK_CTRL,
2366 dssdev->panel.dsi_vm_data.ddr_clk_always_on, 13, 13); 2528 dsi->vm_timings.ddr_clk_always_on, 13, 13);
2367 } 2529 }
2368 2530
2369 dsi->ulps_enabled = false; 2531 dsi->ulps_enabled = false;
@@ -2381,13 +2543,12 @@ err_cio_pwr:
2381 dsi_cio_disable_lane_override(dsidev); 2543 dsi_cio_disable_lane_override(dsidev);
2382err_scp_clk_dom: 2544err_scp_clk_dom:
2383 dsi_disable_scp_clk(dsidev); 2545 dsi_disable_scp_clk(dsidev);
2384 dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dssdev)); 2546 dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
2385 return r; 2547 return r;
2386} 2548}
2387 2549
2388static void dsi_cio_uninit(struct omap_dss_device *dssdev) 2550static void dsi_cio_uninit(struct platform_device *dsidev)
2389{ 2551{
2390 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2391 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 2552 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2392 2553
2393 /* DDR_CLK_ALWAYS_ON */ 2554 /* DDR_CLK_ALWAYS_ON */
@@ -2395,7 +2556,7 @@ static void dsi_cio_uninit(struct omap_dss_device *dssdev)
2395 2556
2396 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF); 2557 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2397 dsi_disable_scp_clk(dsidev); 2558 dsi_disable_scp_clk(dsidev);
2398 dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dssdev)); 2559 dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
2399} 2560}
2400 2561
2401static void dsi_config_tx_fifo(struct platform_device *dsidev, 2562static void dsi_config_tx_fifo(struct platform_device *dsidev,
@@ -2685,6 +2846,7 @@ void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
2685 bool enable) 2846 bool enable)
2686{ 2847{
2687 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 2848 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2849 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2688 2850
2689 DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable); 2851 DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
2690 2852
@@ -2701,7 +2863,7 @@ void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
2701 dsi_force_tx_stop_mode_io(dsidev); 2863 dsi_force_tx_stop_mode_io(dsidev);
2702 2864
2703 /* start the DDR clock by sending a NULL packet */ 2865 /* start the DDR clock by sending a NULL packet */
2704 if (dssdev->panel.dsi_vm_data.ddr_clk_always_on && enable) 2866 if (dsi->vm_timings.ddr_clk_always_on && enable)
2705 dsi_vc_send_null(dssdev, channel); 2867 dsi_vc_send_null(dssdev, channel);
2706} 2868}
2707EXPORT_SYMBOL(omapdss_dsi_vc_enable_hs); 2869EXPORT_SYMBOL(omapdss_dsi_vc_enable_hs);
@@ -2987,10 +3149,9 @@ int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel)
2987} 3149}
2988EXPORT_SYMBOL(dsi_vc_send_null); 3150EXPORT_SYMBOL(dsi_vc_send_null);
2989 3151
2990static int dsi_vc_write_nosync_common(struct omap_dss_device *dssdev, 3152static int dsi_vc_write_nosync_common(struct platform_device *dsidev,
2991 int channel, u8 *data, int len, enum dss_dsi_content_type type) 3153 int channel, u8 *data, int len, enum dss_dsi_content_type type)
2992{ 3154{
2993 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2994 int r; 3155 int r;
2995 3156
2996 if (len == 0) { 3157 if (len == 0) {
@@ -3021,7 +3182,9 @@ static int dsi_vc_write_nosync_common(struct omap_dss_device *dssdev,
3021int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel, 3182int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
3022 u8 *data, int len) 3183 u8 *data, int len)
3023{ 3184{
3024 return dsi_vc_write_nosync_common(dssdev, channel, data, len, 3185 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3186
3187 return dsi_vc_write_nosync_common(dsidev, channel, data, len,
3025 DSS_DSI_CONTENT_DCS); 3188 DSS_DSI_CONTENT_DCS);
3026} 3189}
3027EXPORT_SYMBOL(dsi_vc_dcs_write_nosync); 3190EXPORT_SYMBOL(dsi_vc_dcs_write_nosync);
@@ -3029,7 +3192,9 @@ EXPORT_SYMBOL(dsi_vc_dcs_write_nosync);
3029int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel, 3192int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel,
3030 u8 *data, int len) 3193 u8 *data, int len)
3031{ 3194{
3032 return dsi_vc_write_nosync_common(dssdev, channel, data, len, 3195 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3196
3197 return dsi_vc_write_nosync_common(dsidev, channel, data, len,
3033 DSS_DSI_CONTENT_GENERIC); 3198 DSS_DSI_CONTENT_GENERIC);
3034} 3199}
3035EXPORT_SYMBOL(dsi_vc_generic_write_nosync); 3200EXPORT_SYMBOL(dsi_vc_generic_write_nosync);
@@ -3040,7 +3205,7 @@ static int dsi_vc_write_common(struct omap_dss_device *dssdev, int channel,
3040 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 3205 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3041 int r; 3206 int r;
3042 3207
3043 r = dsi_vc_write_nosync_common(dssdev, channel, data, len, type); 3208 r = dsi_vc_write_nosync_common(dsidev, channel, data, len, type);
3044 if (r) 3209 if (r)
3045 goto err; 3210 goto err;
3046 3211
@@ -3118,10 +3283,9 @@ int dsi_vc_generic_write_2(struct omap_dss_device *dssdev, int channel,
3118} 3283}
3119EXPORT_SYMBOL(dsi_vc_generic_write_2); 3284EXPORT_SYMBOL(dsi_vc_generic_write_2);
3120 3285
3121static int dsi_vc_dcs_send_read_request(struct omap_dss_device *dssdev, 3286static int dsi_vc_dcs_send_read_request(struct platform_device *dsidev,
3122 int channel, u8 dcs_cmd) 3287 int channel, u8 dcs_cmd)
3123{ 3288{
3124 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3125 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 3289 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3126 int r; 3290 int r;
3127 3291
@@ -3139,10 +3303,9 @@ static int dsi_vc_dcs_send_read_request(struct omap_dss_device *dssdev,
3139 return 0; 3303 return 0;
3140} 3304}
3141 3305
3142static int dsi_vc_generic_send_read_request(struct omap_dss_device *dssdev, 3306static int dsi_vc_generic_send_read_request(struct platform_device *dsidev,
3143 int channel, u8 *reqdata, int reqlen) 3307 int channel, u8 *reqdata, int reqlen)
3144{ 3308{
3145 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3146 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 3309 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3147 u16 data; 3310 u16 data;
3148 u8 data_type; 3311 u8 data_type;
@@ -3291,7 +3454,7 @@ int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3291 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 3454 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3292 int r; 3455 int r;
3293 3456
3294 r = dsi_vc_dcs_send_read_request(dssdev, channel, dcs_cmd); 3457 r = dsi_vc_dcs_send_read_request(dsidev, channel, dcs_cmd);
3295 if (r) 3458 if (r)
3296 goto err; 3459 goto err;
3297 3460
@@ -3322,7 +3485,7 @@ static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
3322 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 3485 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3323 int r; 3486 int r;
3324 3487
3325 r = dsi_vc_generic_send_read_request(dssdev, channel, reqdata, reqlen); 3488 r = dsi_vc_generic_send_read_request(dsidev, channel, reqdata, reqlen);
3326 if (r) 3489 if (r)
3327 return r; 3490 return r;
3328 3491
@@ -3604,15 +3767,15 @@ static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
3604 (total_ticks * 1000) / (fck / 1000 / 1000)); 3767 (total_ticks * 1000) / (fck / 1000 / 1000));
3605} 3768}
3606 3769
3607static void dsi_config_vp_num_line_buffers(struct omap_dss_device *dssdev) 3770static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
3608{ 3771{
3609 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 3772 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3610 int num_line_buffers; 3773 int num_line_buffers;
3611 3774
3612 if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) { 3775 if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
3613 int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt); 3776 int bpp = dsi_get_pixel_size(dsi->pix_fmt);
3614 unsigned line_buf_size = dsi_get_line_buf_size(dsidev); 3777 unsigned line_buf_size = dsi_get_line_buf_size(dsidev);
3615 struct omap_video_timings *timings = &dssdev->panel.timings; 3778 struct omap_video_timings *timings = &dsi->timings;
3616 /* 3779 /*
3617 * Don't use line buffers if width is greater than the video 3780 * Don't use line buffers if width is greater than the video
3618 * port's line buffer size 3781 * port's line buffer size
@@ -3630,11 +3793,11 @@ static void dsi_config_vp_num_line_buffers(struct omap_dss_device *dssdev)
3630 REG_FLD_MOD(dsidev, DSI_CTRL, num_line_buffers, 13, 12); 3793 REG_FLD_MOD(dsidev, DSI_CTRL, num_line_buffers, 13, 12);
3631} 3794}
3632 3795
3633static void dsi_config_vp_sync_events(struct omap_dss_device *dssdev) 3796static void dsi_config_vp_sync_events(struct platform_device *dsidev)
3634{ 3797{
3635 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 3798 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3636 bool vsync_end = dssdev->panel.dsi_vm_data.vp_vsync_end; 3799 bool vsync_end = dsi->vm_timings.vp_vsync_end;
3637 bool hsync_end = dssdev->panel.dsi_vm_data.vp_hsync_end; 3800 bool hsync_end = dsi->vm_timings.vp_hsync_end;
3638 u32 r; 3801 u32 r;
3639 3802
3640 r = dsi_read_reg(dsidev, DSI_CTRL); 3803 r = dsi_read_reg(dsidev, DSI_CTRL);
@@ -3648,13 +3811,13 @@ static void dsi_config_vp_sync_events(struct omap_dss_device *dssdev)
3648 dsi_write_reg(dsidev, DSI_CTRL, r); 3811 dsi_write_reg(dsidev, DSI_CTRL, r);
3649} 3812}
3650 3813
3651static void dsi_config_blanking_modes(struct omap_dss_device *dssdev) 3814static void dsi_config_blanking_modes(struct platform_device *dsidev)
3652{ 3815{
3653 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 3816 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3654 int blanking_mode = dssdev->panel.dsi_vm_data.blanking_mode; 3817 int blanking_mode = dsi->vm_timings.blanking_mode;
3655 int hfp_blanking_mode = dssdev->panel.dsi_vm_data.hfp_blanking_mode; 3818 int hfp_blanking_mode = dsi->vm_timings.hfp_blanking_mode;
3656 int hbp_blanking_mode = dssdev->panel.dsi_vm_data.hbp_blanking_mode; 3819 int hbp_blanking_mode = dsi->vm_timings.hbp_blanking_mode;
3657 int hsa_blanking_mode = dssdev->panel.dsi_vm_data.hsa_blanking_mode; 3820 int hsa_blanking_mode = dsi->vm_timings.hsa_blanking_mode;
3658 u32 r; 3821 u32 r;
3659 3822
3660 /* 3823 /*
@@ -3741,8 +3904,8 @@ static void dsi_config_cmd_mode_interleaving(struct omap_dss_device *dssdev)
3741 int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat; 3904 int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
3742 int tclk_trail, ths_exit, exiths_clk; 3905 int tclk_trail, ths_exit, exiths_clk;
3743 bool ddr_alwon; 3906 bool ddr_alwon;
3744 struct omap_video_timings *timings = &dssdev->panel.timings; 3907 struct omap_video_timings *timings = &dsi->timings;
3745 int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt); 3908 int bpp = dsi_get_pixel_size(dsi->pix_fmt);
3746 int ndl = dsi->num_lanes_used - 1; 3909 int ndl = dsi->num_lanes_used - 1;
3747 int dsi_fclk_hsdiv = dssdev->clocks.dsi.regm_dsi + 1; 3910 int dsi_fclk_hsdiv = dssdev->clocks.dsi.regm_dsi + 1;
3748 int hsa_interleave_hs = 0, hsa_interleave_lp = 0; 3911 int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
@@ -3852,6 +4015,7 @@ static void dsi_config_cmd_mode_interleaving(struct omap_dss_device *dssdev)
3852static int dsi_proto_config(struct omap_dss_device *dssdev) 4015static int dsi_proto_config(struct omap_dss_device *dssdev)
3853{ 4016{
3854 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 4017 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4018 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3855 u32 r; 4019 u32 r;
3856 int buswidth = 0; 4020 int buswidth = 0;
3857 4021
@@ -3871,7 +4035,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
3871 dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true); 4035 dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true);
3872 dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true); 4036 dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true);
3873 4037
3874 switch (dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt)) { 4038 switch (dsi_get_pixel_size(dsi->pix_fmt)) {
3875 case 16: 4039 case 16:
3876 buswidth = 0; 4040 buswidth = 0;
3877 break; 4041 break;
@@ -3903,11 +4067,11 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
3903 4067
3904 dsi_write_reg(dsidev, DSI_CTRL, r); 4068 dsi_write_reg(dsidev, DSI_CTRL, r);
3905 4069
3906 dsi_config_vp_num_line_buffers(dssdev); 4070 dsi_config_vp_num_line_buffers(dsidev);
3907 4071
3908 if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) { 4072 if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
3909 dsi_config_vp_sync_events(dssdev); 4073 dsi_config_vp_sync_events(dsidev);
3910 dsi_config_blanking_modes(dssdev); 4074 dsi_config_blanking_modes(dsidev);
3911 dsi_config_cmd_mode_interleaving(dssdev); 4075 dsi_config_cmd_mode_interleaving(dssdev);
3912 } 4076 }
3913 4077
@@ -3919,9 +4083,8 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
3919 return 0; 4083 return 0;
3920} 4084}
3921 4085
3922static void dsi_proto_timings(struct omap_dss_device *dssdev) 4086static void dsi_proto_timings(struct platform_device *dsidev)
3923{ 4087{
3924 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3925 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 4088 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3926 unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail; 4089 unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
3927 unsigned tclk_pre, tclk_post; 4090 unsigned tclk_pre, tclk_post;
@@ -3941,7 +4104,7 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev)
3941 ths_exit = FLD_GET(r, 7, 0); 4104 ths_exit = FLD_GET(r, 7, 0);
3942 4105
3943 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1); 4106 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
3944 tlpx = FLD_GET(r, 22, 16) * 2; 4107 tlpx = FLD_GET(r, 20, 16) * 2;
3945 tclk_trail = FLD_GET(r, 15, 8); 4108 tclk_trail = FLD_GET(r, 15, 8);
3946 tclk_zero = FLD_GET(r, 7, 0); 4109 tclk_zero = FLD_GET(r, 7, 0);
3947 4110
@@ -3984,18 +4147,18 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev)
3984 DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n", 4147 DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
3985 enter_hs_mode_lat, exit_hs_mode_lat); 4148 enter_hs_mode_lat, exit_hs_mode_lat);
3986 4149
3987 if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) { 4150 if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
3988 /* TODO: Implement a video mode check_timings function */ 4151 /* TODO: Implement a video mode check_timings function */
3989 int hsa = dssdev->panel.dsi_vm_data.hsa; 4152 int hsa = dsi->vm_timings.hsa;
3990 int hfp = dssdev->panel.dsi_vm_data.hfp; 4153 int hfp = dsi->vm_timings.hfp;
3991 int hbp = dssdev->panel.dsi_vm_data.hbp; 4154 int hbp = dsi->vm_timings.hbp;
3992 int vsa = dssdev->panel.dsi_vm_data.vsa; 4155 int vsa = dsi->vm_timings.vsa;
3993 int vfp = dssdev->panel.dsi_vm_data.vfp; 4156 int vfp = dsi->vm_timings.vfp;
3994 int vbp = dssdev->panel.dsi_vm_data.vbp; 4157 int vbp = dsi->vm_timings.vbp;
3995 int window_sync = dssdev->panel.dsi_vm_data.window_sync; 4158 int window_sync = dsi->vm_timings.window_sync;
3996 bool hsync_end = dssdev->panel.dsi_vm_data.vp_hsync_end; 4159 bool hsync_end = dsi->vm_timings.vp_hsync_end;
3997 struct omap_video_timings *timings = &dssdev->panel.timings; 4160 struct omap_video_timings *timings = &dsi->timings;
3998 int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt); 4161 int bpp = dsi_get_pixel_size(dsi->pix_fmt);
3999 int tl, t_he, width_bytes; 4162 int tl, t_he, width_bytes;
4000 4163
4001 t_he = hsync_end ? 4164 t_he = hsync_end ?
@@ -4100,16 +4263,84 @@ int omapdss_dsi_configure_pins(struct omap_dss_device *dssdev,
4100} 4263}
4101EXPORT_SYMBOL(omapdss_dsi_configure_pins); 4264EXPORT_SYMBOL(omapdss_dsi_configure_pins);
4102 4265
4103int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel) 4266int omapdss_dsi_set_clocks(struct omap_dss_device *dssdev,
4267 unsigned long ddr_clk, unsigned long lp_clk)
4104{ 4268{
4105 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 4269 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4270 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4271 struct dsi_clock_info cinfo;
4272 struct dispc_clock_info dispc_cinfo;
4273 unsigned lp_clk_div;
4274 unsigned long dsi_fclk;
4106 int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt); 4275 int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt);
4276 unsigned long pck;
4277 int r;
4278
4279 DSSDBGF("ddr_clk %lu, lp_clk %lu", ddr_clk, lp_clk);
4280
4281 mutex_lock(&dsi->lock);
4282
4283 /* Calculate PLL output clock */
4284 r = dsi_pll_calc_ddrfreq(dsidev, ddr_clk * 4, &cinfo);
4285 if (r)
4286 goto err;
4287
4288 /* Calculate PLL's DSI clock */
4289 dsi_pll_calc_dsi_fck(dsidev, &cinfo);
4290
4291 /* Calculate PLL's DISPC clock and pck & lck divs */
4292 pck = cinfo.clkin4ddr / 16 * (dsi->num_lanes_used - 1) * 8 / bpp;
4293 DSSDBG("finding dispc dividers for pck %lu\n", pck);
4294 r = dsi_pll_calc_dispc_fck(dsidev, pck, &cinfo, &dispc_cinfo);
4295 if (r)
4296 goto err;
4297
4298 /* Calculate LP clock */
4299 dsi_fclk = cinfo.dsi_pll_hsdiv_dsi_clk;
4300 lp_clk_div = DIV_ROUND_UP(dsi_fclk, lp_clk * 2);
4301
4302 dssdev->clocks.dsi.regn = cinfo.regn;
4303 dssdev->clocks.dsi.regm = cinfo.regm;
4304 dssdev->clocks.dsi.regm_dispc = cinfo.regm_dispc;
4305 dssdev->clocks.dsi.regm_dsi = cinfo.regm_dsi;
4306
4307 dssdev->clocks.dsi.lp_clk_div = lp_clk_div;
4308
4309 dssdev->clocks.dispc.channel.lck_div = dispc_cinfo.lck_div;
4310 dssdev->clocks.dispc.channel.pck_div = dispc_cinfo.pck_div;
4311
4312 dssdev->clocks.dispc.dispc_fclk_src = OMAP_DSS_CLK_SRC_FCK;
4313
4314 dssdev->clocks.dispc.channel.lcd_clk_src =
4315 dsi->module_id == 0 ?
4316 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
4317 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC;
4318
4319 dssdev->clocks.dsi.dsi_fclk_src =
4320 dsi->module_id == 0 ?
4321 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
4322 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI;
4323
4324 mutex_unlock(&dsi->lock);
4325 return 0;
4326err:
4327 mutex_unlock(&dsi->lock);
4328 return r;
4329}
4330EXPORT_SYMBOL(omapdss_dsi_set_clocks);
4331
4332int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
4333{
4334 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4335 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4336 struct omap_overlay_manager *mgr = dssdev->output->manager;
4337 int bpp = dsi_get_pixel_size(dsi->pix_fmt);
4107 u8 data_type; 4338 u8 data_type;
4108 u16 word_count; 4339 u16 word_count;
4109 int r; 4340 int r;
4110 4341
4111 if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) { 4342 if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
4112 switch (dssdev->panel.dsi_pix_fmt) { 4343 switch (dsi->pix_fmt) {
4113 case OMAP_DSS_DSI_FMT_RGB888: 4344 case OMAP_DSS_DSI_FMT_RGB888:
4114 data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24; 4345 data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
4115 break; 4346 break;
@@ -4133,7 +4364,7 @@ int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
4133 /* MODE, 1 = video mode */ 4364 /* MODE, 1 = video mode */
4134 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4); 4365 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4);
4135 4366
4136 word_count = DIV_ROUND_UP(dssdev->panel.timings.x_res * bpp, 8); 4367 word_count = DIV_ROUND_UP(dsi->timings.x_res * bpp, 8);
4137 4368
4138 dsi_vc_write_long_header(dsidev, channel, data_type, 4369 dsi_vc_write_long_header(dsidev, channel, data_type,
4139 word_count, 0); 4370 word_count, 0);
@@ -4142,9 +4373,9 @@ int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
4142 dsi_if_enable(dsidev, true); 4373 dsi_if_enable(dsidev, true);
4143 } 4374 }
4144 4375
4145 r = dss_mgr_enable(dssdev->manager); 4376 r = dss_mgr_enable(mgr);
4146 if (r) { 4377 if (r) {
4147 if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) { 4378 if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
4148 dsi_if_enable(dsidev, false); 4379 dsi_if_enable(dsidev, false);
4149 dsi_vc_enable(dsidev, channel, false); 4380 dsi_vc_enable(dsidev, channel, false);
4150 } 4381 }
@@ -4159,8 +4390,10 @@ EXPORT_SYMBOL(dsi_enable_video_output);
4159void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel) 4390void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
4160{ 4391{
4161 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 4392 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4393 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4394 struct omap_overlay_manager *mgr = dssdev->output->manager;
4162 4395
4163 if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) { 4396 if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
4164 dsi_if_enable(dsidev, false); 4397 dsi_if_enable(dsidev, false);
4165 dsi_vc_enable(dsidev, channel, false); 4398 dsi_vc_enable(dsidev, channel, false);
4166 4399
@@ -4171,15 +4404,15 @@ void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
4171 dsi_if_enable(dsidev, true); 4404 dsi_if_enable(dsidev, true);
4172 } 4405 }
4173 4406
4174 dss_mgr_disable(dssdev->manager); 4407 dss_mgr_disable(mgr);
4175} 4408}
4176EXPORT_SYMBOL(dsi_disable_video_output); 4409EXPORT_SYMBOL(dsi_disable_video_output);
4177 4410
4178static void dsi_update_screen_dispc(struct omap_dss_device *dssdev, 4411static void dsi_update_screen_dispc(struct omap_dss_device *dssdev)
4179 u16 w, u16 h)
4180{ 4412{
4181 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 4413 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4182 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 4414 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4415 struct omap_overlay_manager *mgr = dssdev->output->manager;
4183 unsigned bytespp; 4416 unsigned bytespp;
4184 unsigned bytespl; 4417 unsigned bytespl;
4185 unsigned bytespf; 4418 unsigned bytespf;
@@ -4190,12 +4423,14 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
4190 int r; 4423 int r;
4191 const unsigned channel = dsi->update_channel; 4424 const unsigned channel = dsi->update_channel;
4192 const unsigned line_buf_size = dsi_get_line_buf_size(dsidev); 4425 const unsigned line_buf_size = dsi_get_line_buf_size(dsidev);
4426 u16 w = dsi->timings.x_res;
4427 u16 h = dsi->timings.y_res;
4193 4428
4194 DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h); 4429 DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);
4195 4430
4196 dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_VP); 4431 dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_VP);
4197 4432
4198 bytespp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt) / 8; 4433 bytespp = dsi_get_pixel_size(dsi->pix_fmt) / 8;
4199 bytespl = w * bytespp; 4434 bytespl = w * bytespp;
4200 bytespf = bytespl * h; 4435 bytespf = bytespl * h;
4201 4436
@@ -4239,7 +4474,9 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
4239 msecs_to_jiffies(250)); 4474 msecs_to_jiffies(250));
4240 BUG_ON(r == 0); 4475 BUG_ON(r == 0);
4241 4476
4242 dss_mgr_start_update(dssdev->manager); 4477 dss_mgr_set_timings(mgr, &dsi->timings);
4478
4479 dss_mgr_start_update(mgr);
4243 4480
4244 if (dsi->te_enabled) { 4481 if (dsi->te_enabled) {
4245 /* disable LP_RX_TO, so that we can receive TE. Time to wait 4482 /* disable LP_RX_TO, so that we can receive TE. Time to wait
@@ -4297,8 +4534,7 @@ static void dsi_framedone_timeout_work_callback(struct work_struct *work)
4297 4534
4298static void dsi_framedone_irq_callback(void *data, u32 mask) 4535static void dsi_framedone_irq_callback(void *data, u32 mask)
4299{ 4536{
4300 struct omap_dss_device *dssdev = (struct omap_dss_device *) data; 4537 struct platform_device *dsidev = (struct platform_device *) data;
4301 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4302 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 4538 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4303 4539
4304 /* Note: We get FRAMEDONE when DISPC has finished sending pixels and 4540 /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
@@ -4325,13 +4561,14 @@ int omap_dsi_update(struct omap_dss_device *dssdev, int channel,
4325 dsi->framedone_callback = callback; 4561 dsi->framedone_callback = callback;
4326 dsi->framedone_data = data; 4562 dsi->framedone_data = data;
4327 4563
4328 dssdev->driver->get_resolution(dssdev, &dw, &dh); 4564 dw = dsi->timings.x_res;
4565 dh = dsi->timings.y_res;
4329 4566
4330#ifdef DEBUG 4567#ifdef DEBUG
4331 dsi->update_bytes = dw * dh * 4568 dsi->update_bytes = dw * dh *
4332 dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt) / 8; 4569 dsi_get_pixel_size(dsi->pix_fmt) / 8;
4333#endif 4570#endif
4334 dsi_update_screen_dispc(dssdev, dw, dh); 4571 dsi_update_screen_dispc(dssdev);
4335 4572
4336 return 0; 4573 return 0;
4337} 4574}
@@ -4367,28 +4604,22 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
4367{ 4604{
4368 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 4605 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4369 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 4606 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4370 struct omap_video_timings timings; 4607 struct omap_overlay_manager *mgr = dssdev->output->manager;
4371 int r; 4608 int r;
4372 u32 irq = 0; 4609 u32 irq = 0;
4373 4610
4374 if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE) { 4611 if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
4375 u16 dw, dh; 4612 dsi->timings.hsw = 1;
4376 4613 dsi->timings.hfp = 1;
4377 dssdev->driver->get_resolution(dssdev, &dw, &dh); 4614 dsi->timings.hbp = 1;
4378 4615 dsi->timings.vsw = 1;
4379 timings.x_res = dw; 4616 dsi->timings.vfp = 0;
4380 timings.y_res = dh; 4617 dsi->timings.vbp = 0;
4381 timings.hsw = 1;
4382 timings.hfp = 1;
4383 timings.hbp = 1;
4384 timings.vsw = 1;
4385 timings.vfp = 0;
4386 timings.vbp = 0;
4387 4618
4388 irq = dispc_mgr_get_framedone_irq(dssdev->manager->id); 4619 irq = dispc_mgr_get_framedone_irq(mgr->id);
4389 4620
4390 r = omap_dispc_register_isr(dsi_framedone_irq_callback, 4621 r = omap_dispc_register_isr(dsi_framedone_irq_callback,
4391 (void *) dssdev, irq); 4622 (void *) dsidev, irq);
4392 if (r) { 4623 if (r) {
4393 DSSERR("can't get FRAMEDONE irq\n"); 4624 DSSERR("can't get FRAMEDONE irq\n");
4394 goto err; 4625 goto err;
@@ -4397,8 +4628,6 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
4397 dsi->mgr_config.stallmode = true; 4628 dsi->mgr_config.stallmode = true;
4398 dsi->mgr_config.fifohandcheck = true; 4629 dsi->mgr_config.fifohandcheck = true;
4399 } else { 4630 } else {
4400 timings = dssdev->panel.timings;
4401
4402 dsi->mgr_config.stallmode = false; 4631 dsi->mgr_config.stallmode = false;
4403 dsi->mgr_config.fifohandcheck = false; 4632 dsi->mgr_config.fifohandcheck = false;
4404 } 4633 }
@@ -4407,14 +4636,14 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
4407 * override interlace, logic level and edge related parameters in 4636 * override interlace, logic level and edge related parameters in
4408 * omap_video_timings with default values 4637 * omap_video_timings with default values
4409 */ 4638 */
4410 timings.interlace = false; 4639 dsi->timings.interlace = false;
4411 timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH; 4640 dsi->timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
4412 timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH; 4641 dsi->timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
4413 timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; 4642 dsi->timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
4414 timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH; 4643 dsi->timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
4415 timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES; 4644 dsi->timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
4416 4645
4417 dss_mgr_set_timings(dssdev->manager, &timings); 4646 dss_mgr_set_timings(mgr, &dsi->timings);
4418 4647
4419 r = dsi_configure_dispc_clocks(dssdev); 4648 r = dsi_configure_dispc_clocks(dssdev);
4420 if (r) 4649 if (r)
@@ -4422,29 +4651,33 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
4422 4651
4423 dsi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS; 4652 dsi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
4424 dsi->mgr_config.video_port_width = 4653 dsi->mgr_config.video_port_width =
4425 dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt); 4654 dsi_get_pixel_size(dsi->pix_fmt);
4426 dsi->mgr_config.lcden_sig_polarity = 0; 4655 dsi->mgr_config.lcden_sig_polarity = 0;
4427 4656
4428 dss_mgr_set_lcd_config(dssdev->manager, &dsi->mgr_config); 4657 dss_mgr_set_lcd_config(mgr, &dsi->mgr_config);
4429 4658
4430 return 0; 4659 return 0;
4431err1: 4660err1:
4432 if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE) 4661 if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
4433 omap_dispc_unregister_isr(dsi_framedone_irq_callback, 4662 omap_dispc_unregister_isr(dsi_framedone_irq_callback,
4434 (void *) dssdev, irq); 4663 (void *) dsidev, irq);
4435err: 4664err:
4436 return r; 4665 return r;
4437} 4666}
4438 4667
4439static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev) 4668static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
4440{ 4669{
4441 if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE) { 4670 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4671 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4672 struct omap_overlay_manager *mgr = dssdev->output->manager;
4673
4674 if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
4442 u32 irq; 4675 u32 irq;
4443 4676
4444 irq = dispc_mgr_get_framedone_irq(dssdev->manager->id); 4677 irq = dispc_mgr_get_framedone_irq(mgr->id);
4445 4678
4446 omap_dispc_unregister_isr(dsi_framedone_irq_callback, 4679 omap_dispc_unregister_isr(dsi_framedone_irq_callback,
4447 (void *) dssdev, irq); 4680 (void *) dsidev, irq);
4448 } 4681 }
4449} 4682}
4450 4683
@@ -4477,6 +4710,7 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
4477{ 4710{
4478 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 4711 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4479 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 4712 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4713 struct omap_overlay_manager *mgr = dssdev->output->manager;
4480 int r; 4714 int r;
4481 4715
4482 r = dsi_pll_init(dsidev, true, true); 4716 r = dsi_pll_init(dsidev, true, true);
@@ -4489,18 +4723,18 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
4489 4723
4490 dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src); 4724 dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
4491 dss_select_dsi_clk_source(dsi->module_id, dssdev->clocks.dsi.dsi_fclk_src); 4725 dss_select_dsi_clk_source(dsi->module_id, dssdev->clocks.dsi.dsi_fclk_src);
4492 dss_select_lcd_clk_source(dssdev->manager->id, 4726 dss_select_lcd_clk_source(mgr->id,
4493 dssdev->clocks.dispc.channel.lcd_clk_src); 4727 dssdev->clocks.dispc.channel.lcd_clk_src);
4494 4728
4495 DSSDBG("PLL OK\n"); 4729 DSSDBG("PLL OK\n");
4496 4730
4497 r = dsi_cio_init(dssdev); 4731 r = dsi_cio_init(dsidev);
4498 if (r) 4732 if (r)
4499 goto err2; 4733 goto err2;
4500 4734
4501 _dsi_print_reset_status(dsidev); 4735 _dsi_print_reset_status(dsidev);
4502 4736
4503 dsi_proto_timings(dssdev); 4737 dsi_proto_timings(dsidev);
4504 dsi_set_lp_clk_divisor(dssdev); 4738 dsi_set_lp_clk_divisor(dssdev);
4505 4739
4506 if (1) 4740 if (1)
@@ -4520,11 +4754,11 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
4520 4754
4521 return 0; 4755 return 0;
4522err3: 4756err3:
4523 dsi_cio_uninit(dssdev); 4757 dsi_cio_uninit(dsidev);
4524err2: 4758err2:
4525 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); 4759 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
4526 dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK); 4760 dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
4527 dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK); 4761 dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
4528 4762
4529err1: 4763err1:
4530 dsi_pll_uninit(dsidev, true); 4764 dsi_pll_uninit(dsidev, true);
@@ -4537,6 +4771,7 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
4537{ 4771{
4538 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 4772 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4539 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 4773 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4774 struct omap_overlay_manager *mgr = dssdev->output->manager;
4540 4775
4541 if (enter_ulps && !dsi->ulps_enabled) 4776 if (enter_ulps && !dsi->ulps_enabled)
4542 dsi_enter_ulps(dsidev); 4777 dsi_enter_ulps(dsidev);
@@ -4550,8 +4785,8 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
4550 4785
4551 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); 4786 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
4552 dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK); 4787 dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
4553 dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK); 4788 dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
4554 dsi_cio_uninit(dssdev); 4789 dsi_cio_uninit(dsidev);
4555 dsi_pll_uninit(dsidev, disconnect_lanes); 4790 dsi_pll_uninit(dsidev, disconnect_lanes);
4556} 4791}
4557 4792
@@ -4559,6 +4794,7 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
4559{ 4794{
4560 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 4795 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4561 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 4796 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4797 struct omap_dss_output *out = dssdev->output;
4562 int r = 0; 4798 int r = 0;
4563 4799
4564 DSSDBG("dsi_display_enable\n"); 4800 DSSDBG("dsi_display_enable\n");
@@ -4567,8 +4803,8 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
4567 4803
4568 mutex_lock(&dsi->lock); 4804 mutex_lock(&dsi->lock);
4569 4805
4570 if (dssdev->manager == NULL) { 4806 if (out == NULL || out->manager == NULL) {
4571 DSSERR("failed to enable display: no manager\n"); 4807 DSSERR("failed to enable display: no output/manager\n");
4572 r = -ENODEV; 4808 r = -ENODEV;
4573 goto err_start_dev; 4809 goto err_start_dev;
4574 } 4810 }
@@ -4653,17 +4889,83 @@ int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
4653} 4889}
4654EXPORT_SYMBOL(omapdss_dsi_enable_te); 4890EXPORT_SYMBOL(omapdss_dsi_enable_te);
4655 4891
4656static int __init dsi_init_display(struct omap_dss_device *dssdev) 4892void omapdss_dsi_set_timings(struct omap_dss_device *dssdev,
4893 struct omap_video_timings *timings)
4657{ 4894{
4658 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 4895 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4659 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 4896 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4660 4897
4661 DSSDBG("DSI init\n"); 4898 mutex_lock(&dsi->lock);
4662 4899
4663 if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE) { 4900 dsi->timings = *timings;
4664 dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE | 4901
4665 OMAP_DSS_DISPLAY_CAP_TEAR_ELIM; 4902 mutex_unlock(&dsi->lock);
4666 } 4903}
4904EXPORT_SYMBOL(omapdss_dsi_set_timings);
4905
4906void omapdss_dsi_set_size(struct omap_dss_device *dssdev, u16 w, u16 h)
4907{
4908 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4909 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4910
4911 mutex_lock(&dsi->lock);
4912
4913 dsi->timings.x_res = w;
4914 dsi->timings.y_res = h;
4915
4916 mutex_unlock(&dsi->lock);
4917}
4918EXPORT_SYMBOL(omapdss_dsi_set_size);
4919
4920void omapdss_dsi_set_pixel_format(struct omap_dss_device *dssdev,
4921 enum omap_dss_dsi_pixel_format fmt)
4922{
4923 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4924 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4925
4926 mutex_lock(&dsi->lock);
4927
4928 dsi->pix_fmt = fmt;
4929
4930 mutex_unlock(&dsi->lock);
4931}
4932EXPORT_SYMBOL(omapdss_dsi_set_pixel_format);
4933
4934void omapdss_dsi_set_operation_mode(struct omap_dss_device *dssdev,
4935 enum omap_dss_dsi_mode mode)
4936{
4937 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4938 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4939
4940 mutex_lock(&dsi->lock);
4941
4942 dsi->mode = mode;
4943
4944 mutex_unlock(&dsi->lock);
4945}
4946EXPORT_SYMBOL(omapdss_dsi_set_operation_mode);
4947
4948void omapdss_dsi_set_videomode_timings(struct omap_dss_device *dssdev,
4949 struct omap_dss_dsi_videomode_timings *timings)
4950{
4951 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4952 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4953
4954 mutex_lock(&dsi->lock);
4955
4956 dsi->vm_timings = *timings;
4957
4958 mutex_unlock(&dsi->lock);
4959}
4960EXPORT_SYMBOL(omapdss_dsi_set_videomode_timings);
4961
4962static int __init dsi_init_display(struct omap_dss_device *dssdev)
4963{
4964 struct platform_device *dsidev =
4965 dsi_get_dsidev_from_id(dssdev->phy.dsi.module);
4966 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4967
4968 DSSDBG("DSI init\n");
4667 4969
4668 if (dsi->vdds_dsi_reg == NULL) { 4970 if (dsi->vdds_dsi_reg == NULL) {
4669 struct regulator *vdds_dsi; 4971 struct regulator *vdds_dsi;
@@ -4806,11 +5108,15 @@ static void dsi_put_clocks(struct platform_device *dsidev)
4806 clk_put(dsi->sys_clk); 5108 clk_put(dsi->sys_clk);
4807} 5109}
4808 5110
4809static void __init dsi_probe_pdata(struct platform_device *dsidev) 5111static struct omap_dss_device * __init dsi_find_dssdev(struct platform_device *pdev)
4810{ 5112{
4811 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 5113 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
4812 struct omap_dss_board_info *pdata = dsidev->dev.platform_data; 5114 struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
4813 int i, r; 5115 const char *def_disp_name = dss_get_default_display_name();
5116 struct omap_dss_device *def_dssdev;
5117 int i;
5118
5119 def_dssdev = NULL;
4814 5120
4815 for (i = 0; i < pdata->num_devices; ++i) { 5121 for (i = 0; i < pdata->num_devices; ++i) {
4816 struct omap_dss_device *dssdev = pdata->devices[i]; 5122 struct omap_dss_device *dssdev = pdata->devices[i];
@@ -4821,19 +5127,73 @@ static void __init dsi_probe_pdata(struct platform_device *dsidev)
4821 if (dssdev->phy.dsi.module != dsi->module_id) 5127 if (dssdev->phy.dsi.module != dsi->module_id)
4822 continue; 5128 continue;
4823 5129
4824 r = dsi_init_display(dssdev); 5130 if (def_dssdev == NULL)
4825 if (r) { 5131 def_dssdev = dssdev;
4826 DSSERR("device %s init failed: %d\n", dssdev->name, r); 5132
4827 continue; 5133 if (def_disp_name != NULL &&
5134 strcmp(dssdev->name, def_disp_name) == 0) {
5135 def_dssdev = dssdev;
5136 break;
4828 } 5137 }
5138 }
4829 5139
4830 r = omap_dss_register_device(dssdev, &dsidev->dev, i); 5140 return def_dssdev;
4831 if (r) 5141}
4832 DSSERR("device %s register failed: %d\n", 5142
4833 dssdev->name, r); 5143static void __init dsi_probe_pdata(struct platform_device *dsidev)
5144{
5145 struct omap_dss_device *plat_dssdev;
5146 struct omap_dss_device *dssdev;
5147 int r;
5148
5149 plat_dssdev = dsi_find_dssdev(dsidev);
5150
5151 if (!plat_dssdev)
5152 return;
5153
5154 dssdev = dss_alloc_and_init_device(&dsidev->dev);
5155 if (!dssdev)
5156 return;
5157
5158 dss_copy_device_pdata(dssdev, plat_dssdev);
5159
5160 r = dsi_init_display(dssdev);
5161 if (r) {
5162 DSSERR("device %s init failed: %d\n", dssdev->name, r);
5163 dss_put_device(dssdev);
5164 return;
5165 }
5166
5167 r = dss_add_device(dssdev);
5168 if (r) {
5169 DSSERR("device %s register failed: %d\n", dssdev->name, r);
5170 dss_put_device(dssdev);
5171 return;
4834 } 5172 }
4835} 5173}
4836 5174
5175static void __init dsi_init_output(struct platform_device *dsidev)
5176{
5177 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5178 struct omap_dss_output *out = &dsi->output;
5179
5180 out->pdev = dsidev;
5181 out->id = dsi->module_id == 0 ?
5182 OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
5183
5184 out->type = OMAP_DISPLAY_TYPE_DSI;
5185
5186 dss_register_output(out);
5187}
5188
5189static void __exit dsi_uninit_output(struct platform_device *dsidev)
5190{
5191 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5192 struct omap_dss_output *out = &dsi->output;
5193
5194 dss_unregister_output(out);
5195}
5196
4837/* DSI1 HW IP initialisation */ 5197/* DSI1 HW IP initialisation */
4838static int __init omap_dsihw_probe(struct platform_device *dsidev) 5198static int __init omap_dsihw_probe(struct platform_device *dsidev)
4839{ 5199{
@@ -4848,7 +5208,6 @@ static int __init omap_dsihw_probe(struct platform_device *dsidev)
4848 5208
4849 dsi->module_id = dsidev->id; 5209 dsi->module_id = dsidev->id;
4850 dsi->pdev = dsidev; 5210 dsi->pdev = dsidev;
4851 dsi_pdev_map[dsi->module_id] = dsidev;
4852 dev_set_drvdata(&dsidev->dev, dsi); 5211 dev_set_drvdata(&dsidev->dev, dsi);
4853 5212
4854 spin_lock_init(&dsi->irq_lock); 5213 spin_lock_init(&dsi->irq_lock);
@@ -4928,6 +5287,8 @@ static int __init omap_dsihw_probe(struct platform_device *dsidev)
4928 else 5287 else
4929 dsi->num_lanes_supported = 3; 5288 dsi->num_lanes_supported = 3;
4930 5289
5290 dsi_init_output(dsidev);
5291
4931 dsi_probe_pdata(dsidev); 5292 dsi_probe_pdata(dsidev);
4932 5293
4933 dsi_runtime_put(dsidev); 5294 dsi_runtime_put(dsidev);
@@ -4957,7 +5318,9 @@ static int __exit omap_dsihw_remove(struct platform_device *dsidev)
4957 5318
4958 WARN_ON(dsi->scp_clk_refcount > 0); 5319 WARN_ON(dsi->scp_clk_refcount > 0);
4959 5320
4960 omap_dss_unregister_child_devices(&dsidev->dev); 5321 dss_unregister_child_devices(&dsidev->dev);
5322
5323 dsi_uninit_output(dsidev);
4961 5324
4962 pm_runtime_disable(&dsidev->dev); 5325 pm_runtime_disable(&dsidev->dev);
4963 5326
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 04b4586113e3..2ab1c3e96553 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -31,11 +31,11 @@
31#include <linux/clk.h> 31#include <linux/clk.h>
32#include <linux/platform_device.h> 32#include <linux/platform_device.h>
33#include <linux/pm_runtime.h> 33#include <linux/pm_runtime.h>
34#include <linux/gfp.h>
34 35
35#include <video/omapdss.h> 36#include <video/omapdss.h>
36 37
37#include <plat/cpu.h> 38#include <plat/cpu.h>
38#include <plat/clock.h>
39 39
40#include "dss.h" 40#include "dss.h"
41#include "dss_features.h" 41#include "dss_features.h"
@@ -65,6 +65,13 @@ struct dss_reg {
65static int dss_runtime_get(void); 65static int dss_runtime_get(void);
66static void dss_runtime_put(void); 66static void dss_runtime_put(void);
67 67
68struct dss_features {
69 u8 fck_div_max;
70 u8 dss_fck_multiplier;
71 const char *clk_name;
72 int (*dpi_select_source)(enum omap_channel channel);
73};
74
68static struct { 75static struct {
69 struct platform_device *pdev; 76 struct platform_device *pdev;
70 void __iomem *base; 77 void __iomem *base;
@@ -83,6 +90,8 @@ static struct {
83 90
84 bool ctx_valid; 91 bool ctx_valid;
85 u32 ctx[DSS_SZ_REGS / sizeof(u32)]; 92 u32 ctx[DSS_SZ_REGS / sizeof(u32)];
93
94 const struct dss_features *feat;
86} dss; 95} dss;
87 96
88static const char * const dss_generic_clk_source_names[] = { 97static const char * const dss_generic_clk_source_names[] = {
@@ -144,7 +153,7 @@ static void dss_restore_context(void)
144#undef SR 153#undef SR
145#undef RR 154#undef RR
146 155
147void dss_sdi_init(u8 datapairs) 156void dss_sdi_init(int datapairs)
148{ 157{
149 u32 l; 158 u32 l;
150 159
@@ -236,7 +245,6 @@ const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src)
236 return dss_generic_clk_source_names[clk_src]; 245 return dss_generic_clk_source_names[clk_src];
237} 246}
238 247
239
240void dss_dump_clocks(struct seq_file *s) 248void dss_dump_clocks(struct seq_file *s)
241{ 249{
242 unsigned long dpll4_ck_rate; 250 unsigned long dpll4_ck_rate;
@@ -259,18 +267,10 @@ void dss_dump_clocks(struct seq_file *s)
259 267
260 seq_printf(s, "dpll4_ck %lu\n", dpll4_ck_rate); 268 seq_printf(s, "dpll4_ck %lu\n", dpll4_ck_rate);
261 269
262 if (cpu_is_omap3630() || cpu_is_omap44xx()) 270 seq_printf(s, "%s (%s) = %lu / %lu * %d = %lu\n",
263 seq_printf(s, "%s (%s) = %lu / %lu = %lu\n", 271 fclk_name, fclk_real_name, dpll4_ck_rate,
264 fclk_name, fclk_real_name, 272 dpll4_ck_rate / dpll4_m4_ck_rate,
265 dpll4_ck_rate, 273 dss.feat->dss_fck_multiplier, fclk_rate);
266 dpll4_ck_rate / dpll4_m4_ck_rate,
267 fclk_rate);
268 else
269 seq_printf(s, "%s (%s) = %lu / %lu * 2 = %lu\n",
270 fclk_name, fclk_real_name,
271 dpll4_ck_rate,
272 dpll4_ck_rate / dpll4_m4_ck_rate,
273 fclk_rate);
274 } else { 274 } else {
275 seq_printf(s, "%s (%s) = %lu\n", 275 seq_printf(s, "%s (%s) = %lu\n",
276 fclk_name, fclk_real_name, 276 fclk_name, fclk_real_name,
@@ -431,31 +431,6 @@ enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
431 } 431 }
432} 432}
433 433
434/* calculate clock rates using dividers in cinfo */
435int dss_calc_clock_rates(struct dss_clock_info *cinfo)
436{
437 if (dss.dpll4_m4_ck) {
438 unsigned long prate;
439 u16 fck_div_max = 16;
440
441 if (cpu_is_omap3630() || cpu_is_omap44xx())
442 fck_div_max = 32;
443
444 if (cinfo->fck_div > fck_div_max || cinfo->fck_div == 0)
445 return -EINVAL;
446
447 prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
448
449 cinfo->fck = prate / cinfo->fck_div;
450 } else {
451 if (cinfo->fck_div != 0)
452 return -EINVAL;
453 cinfo->fck = clk_get_rate(dss.dss_clk);
454 }
455
456 return 0;
457}
458
459int dss_set_clock_div(struct dss_clock_info *cinfo) 434int dss_set_clock_div(struct dss_clock_info *cinfo)
460{ 435{
461 if (dss.dpll4_m4_ck) { 436 if (dss.dpll4_m4_ck) {
@@ -478,26 +453,6 @@ int dss_set_clock_div(struct dss_clock_info *cinfo)
478 return 0; 453 return 0;
479} 454}
480 455
481int dss_get_clock_div(struct dss_clock_info *cinfo)
482{
483 cinfo->fck = clk_get_rate(dss.dss_clk);
484
485 if (dss.dpll4_m4_ck) {
486 unsigned long prate;
487
488 prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
489
490 if (cpu_is_omap3630() || cpu_is_omap44xx())
491 cinfo->fck_div = prate / (cinfo->fck);
492 else
493 cinfo->fck_div = prate / (cinfo->fck / 2);
494 } else {
495 cinfo->fck_div = 0;
496 }
497
498 return 0;
499}
500
501unsigned long dss_get_dpll4_rate(void) 456unsigned long dss_get_dpll4_rate(void)
502{ 457{
503 if (dss.dpll4_m4_ck) 458 if (dss.dpll4_m4_ck)
@@ -515,7 +470,7 @@ int dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo,
515 470
516 unsigned long fck, max_dss_fck; 471 unsigned long fck, max_dss_fck;
517 472
518 u16 fck_div, fck_div_max = 16; 473 u16 fck_div;
519 474
520 int match = 0; 475 int match = 0;
521 int min_fck_per_pck; 476 int min_fck_per_pck;
@@ -525,9 +480,8 @@ int dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo,
525 max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK); 480 max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
526 481
527 fck = clk_get_rate(dss.dss_clk); 482 fck = clk_get_rate(dss.dss_clk);
528 if (req_pck == dss.cache_req_pck && 483 if (req_pck == dss.cache_req_pck && prate == dss.cache_prate &&
529 ((cpu_is_omap34xx() && prate == dss.cache_prate) || 484 dss.cache_dss_cinfo.fck == fck) {
530 dss.cache_dss_cinfo.fck == fck)) {
531 DSSDBG("dispc clock info found from cache.\n"); 485 DSSDBG("dispc clock info found from cache.\n");
532 *dss_cinfo = dss.cache_dss_cinfo; 486 *dss_cinfo = dss.cache_dss_cinfo;
533 *dispc_cinfo = dss.cache_dispc_cinfo; 487 *dispc_cinfo = dss.cache_dispc_cinfo;
@@ -564,16 +518,10 @@ retry:
564 518
565 goto found; 519 goto found;
566 } else { 520 } else {
567 if (cpu_is_omap3630() || cpu_is_omap44xx()) 521 for (fck_div = dss.feat->fck_div_max; fck_div > 0; --fck_div) {
568 fck_div_max = 32;
569
570 for (fck_div = fck_div_max; fck_div > 0; --fck_div) {
571 struct dispc_clock_info cur_dispc; 522 struct dispc_clock_info cur_dispc;
572 523
573 if (fck_div_max == 32) 524 fck = prate / fck_div * dss.feat->dss_fck_multiplier;
574 fck = prate / fck_div;
575 else
576 fck = prate / fck_div * 2;
577 525
578 if (fck > max_dss_fck) 526 if (fck > max_dss_fck)
579 continue; 527 continue;
@@ -648,9 +596,18 @@ void dss_set_dac_pwrdn_bgz(bool enable)
648 REG_FLD_MOD(DSS_CONTROL, enable, 5, 5); /* DAC Power-Down Control */ 596 REG_FLD_MOD(DSS_CONTROL, enable, 5, 5); /* DAC Power-Down Control */
649} 597}
650 598
651void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select hdmi) 599void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select src)
652{ 600{
653 REG_FLD_MOD(DSS_CONTROL, hdmi, 15, 15); /* VENC_HDMI_SWITCH */ 601 enum omap_display_type dp;
602 dp = dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_DIGIT);
603
604 /* Complain about invalid selections */
605 WARN_ON((src == DSS_VENC_TV_CLK) && !(dp & OMAP_DISPLAY_TYPE_VENC));
606 WARN_ON((src == DSS_HDMI_M_PCLK) && !(dp & OMAP_DISPLAY_TYPE_HDMI));
607
608 /* Select only if we have options */
609 if ((dp & OMAP_DISPLAY_TYPE_VENC) && (dp & OMAP_DISPLAY_TYPE_HDMI))
610 REG_FLD_MOD(DSS_CONTROL, src, 15, 15); /* VENC_HDMI_SWITCH */
654} 611}
655 612
656enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void) 613enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void)
@@ -661,9 +618,71 @@ enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void)
661 if ((displays & OMAP_DISPLAY_TYPE_HDMI) == 0) 618 if ((displays & OMAP_DISPLAY_TYPE_HDMI) == 0)
662 return DSS_VENC_TV_CLK; 619 return DSS_VENC_TV_CLK;
663 620
621 if ((displays & OMAP_DISPLAY_TYPE_VENC) == 0)
622 return DSS_HDMI_M_PCLK;
623
664 return REG_GET(DSS_CONTROL, 15, 15); 624 return REG_GET(DSS_CONTROL, 15, 15);
665} 625}
666 626
627static int dss_dpi_select_source_omap2_omap3(enum omap_channel channel)
628{
629 if (channel != OMAP_DSS_CHANNEL_LCD)
630 return -EINVAL;
631
632 return 0;
633}
634
635static int dss_dpi_select_source_omap4(enum omap_channel channel)
636{
637 int val;
638
639 switch (channel) {
640 case OMAP_DSS_CHANNEL_LCD2:
641 val = 0;
642 break;
643 case OMAP_DSS_CHANNEL_DIGIT:
644 val = 1;
645 break;
646 default:
647 return -EINVAL;
648 }
649
650 REG_FLD_MOD(DSS_CONTROL, val, 17, 17);
651
652 return 0;
653}
654
655static int dss_dpi_select_source_omap5(enum omap_channel channel)
656{
657 int val;
658
659 switch (channel) {
660 case OMAP_DSS_CHANNEL_LCD:
661 val = 1;
662 break;
663 case OMAP_DSS_CHANNEL_LCD2:
664 val = 2;
665 break;
666 case OMAP_DSS_CHANNEL_LCD3:
667 val = 3;
668 break;
669 case OMAP_DSS_CHANNEL_DIGIT:
670 val = 0;
671 break;
672 default:
673 return -EINVAL;
674 }
675
676 REG_FLD_MOD(DSS_CONTROL, val, 17, 16);
677
678 return 0;
679}
680
681int dss_dpi_select_source(enum omap_channel channel)
682{
683 return dss.feat->dpi_select_source(channel);
684}
685
667static int dss_get_clocks(void) 686static int dss_get_clocks(void)
668{ 687{
669 struct clk *clk; 688 struct clk *clk;
@@ -678,22 +697,11 @@ static int dss_get_clocks(void)
678 697
679 dss.dss_clk = clk; 698 dss.dss_clk = clk;
680 699
681 if (cpu_is_omap34xx()) { 700 clk = clk_get(NULL, dss.feat->clk_name);
682 clk = clk_get(NULL, "dpll4_m4_ck"); 701 if (IS_ERR(clk)) {
683 if (IS_ERR(clk)) { 702 DSSERR("Failed to get %s\n", dss.feat->clk_name);
684 DSSERR("Failed to get dpll4_m4_ck\n"); 703 r = PTR_ERR(clk);
685 r = PTR_ERR(clk); 704 goto err;
686 goto err;
687 }
688 } else if (cpu_is_omap44xx()) {
689 clk = clk_get(NULL, "dpll_per_m5x2_ck");
690 if (IS_ERR(clk)) {
691 DSSERR("Failed to get dpll_per_m5x2_ck\n");
692 r = PTR_ERR(clk);
693 goto err;
694 }
695 } else { /* omap24xx */
696 clk = NULL;
697 } 705 }
698 706
699 dss.dpll4_m4_ck = clk; 707 dss.dpll4_m4_ck = clk;
@@ -749,6 +757,71 @@ void dss_debug_dump_clocks(struct seq_file *s)
749} 757}
750#endif 758#endif
751 759
760static const struct dss_features omap24xx_dss_feats __initconst = {
761 .fck_div_max = 16,
762 .dss_fck_multiplier = 2,
763 .clk_name = NULL,
764 .dpi_select_source = &dss_dpi_select_source_omap2_omap3,
765};
766
767static const struct dss_features omap34xx_dss_feats __initconst = {
768 .fck_div_max = 16,
769 .dss_fck_multiplier = 2,
770 .clk_name = "dpll4_m4_ck",
771 .dpi_select_source = &dss_dpi_select_source_omap2_omap3,
772};
773
774static const struct dss_features omap3630_dss_feats __initconst = {
775 .fck_div_max = 32,
776 .dss_fck_multiplier = 1,
777 .clk_name = "dpll4_m4_ck",
778 .dpi_select_source = &dss_dpi_select_source_omap2_omap3,
779};
780
781static const struct dss_features omap44xx_dss_feats __initconst = {
782 .fck_div_max = 32,
783 .dss_fck_multiplier = 1,
784 .clk_name = "dpll_per_m5x2_ck",
785 .dpi_select_source = &dss_dpi_select_source_omap4,
786};
787
788static const struct dss_features omap54xx_dss_feats __initconst = {
789 .fck_div_max = 64,
790 .dss_fck_multiplier = 1,
791 .clk_name = "dpll_per_h12x2_ck",
792 .dpi_select_source = &dss_dpi_select_source_omap5,
793};
794
795static int __init dss_init_features(struct device *dev)
796{
797 const struct dss_features *src;
798 struct dss_features *dst;
799
800 dst = devm_kzalloc(dev, sizeof(*dst), GFP_KERNEL);
801 if (!dst) {
802 dev_err(dev, "Failed to allocate local DSS Features\n");
803 return -ENOMEM;
804 }
805
806 if (cpu_is_omap24xx())
807 src = &omap24xx_dss_feats;
808 else if (cpu_is_omap34xx())
809 src = &omap34xx_dss_feats;
810 else if (cpu_is_omap3630())
811 src = &omap3630_dss_feats;
812 else if (cpu_is_omap44xx())
813 src = &omap44xx_dss_feats;
814 else if (soc_is_omap54xx())
815 src = &omap54xx_dss_feats;
816 else
817 return -ENODEV;
818
819 memcpy(dst, src, sizeof(*dst));
820 dss.feat = dst;
821
822 return 0;
823}
824
752/* DSS HW IP initialisation */ 825/* DSS HW IP initialisation */
753static int __init omap_dsshw_probe(struct platform_device *pdev) 826static int __init omap_dsshw_probe(struct platform_device *pdev)
754{ 827{
@@ -758,6 +831,10 @@ static int __init omap_dsshw_probe(struct platform_device *pdev)
758 831
759 dss.pdev = pdev; 832 dss.pdev = pdev;
760 833
834 r = dss_init_features(&dss.pdev->dev);
835 if (r)
836 return r;
837
761 dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0); 838 dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
762 if (!dss_mem) { 839 if (!dss_mem) {
763 DSSERR("can't get IORESOURCE_MEM DSS\n"); 840 DSSERR("can't get IORESOURCE_MEM DSS\n");
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index f67afe76f217..6728892f9dad 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -113,6 +113,17 @@ enum dss_dsi_content_type {
113 DSS_DSI_CONTENT_GENERIC, 113 DSS_DSI_CONTENT_GENERIC,
114}; 114};
115 115
116enum dss_writeback_channel {
117 DSS_WB_LCD1_MGR = 0,
118 DSS_WB_LCD2_MGR = 1,
119 DSS_WB_TV_MGR = 2,
120 DSS_WB_OVL0 = 3,
121 DSS_WB_OVL1 = 4,
122 DSS_WB_OVL2 = 5,
123 DSS_WB_OVL3 = 6,
124 DSS_WB_LCD3_MGR = 7,
125};
126
116struct dss_clock_info { 127struct dss_clock_info {
117 /* rates that we get with dividers below */ 128 /* rates that we get with dividers below */
118 unsigned long fck; 129 unsigned long fck;
@@ -175,6 +186,7 @@ struct seq_file;
175struct platform_device; 186struct platform_device;
176 187
177/* core */ 188/* core */
189const char *dss_get_default_display_name(void);
178struct bus_type *dss_get_bus(void); 190struct bus_type *dss_get_bus(void);
179struct regulator *dss_get_vdds_dsi(void); 191struct regulator *dss_get_vdds_dsi(void);
180struct regulator *dss_get_vdds_sdi(void); 192struct regulator *dss_get_vdds_sdi(void);
@@ -184,10 +196,13 @@ void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask);
184int dss_set_min_bus_tput(struct device *dev, unsigned long tput); 196int dss_set_min_bus_tput(struct device *dev, unsigned long tput);
185int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *)); 197int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *));
186 198
187int omap_dss_register_device(struct omap_dss_device *dssdev, 199struct omap_dss_device *dss_alloc_and_init_device(struct device *parent);
188 struct device *parent, int disp_num); 200int dss_add_device(struct omap_dss_device *dssdev);
189void omap_dss_unregister_device(struct omap_dss_device *dssdev); 201void dss_unregister_device(struct omap_dss_device *dssdev);
190void omap_dss_unregister_child_devices(struct device *parent); 202void dss_unregister_child_devices(struct device *parent);
203void dss_put_device(struct omap_dss_device *dssdev);
204void dss_copy_device_pdata(struct omap_dss_device *dst,
205 const struct omap_dss_device *src);
191 206
192/* apply */ 207/* apply */
193void dss_apply_init(void); 208void dss_apply_init(void);
@@ -205,8 +220,11 @@ void dss_mgr_get_info(struct omap_overlay_manager *mgr,
205int dss_mgr_set_device(struct omap_overlay_manager *mgr, 220int dss_mgr_set_device(struct omap_overlay_manager *mgr,
206 struct omap_dss_device *dssdev); 221 struct omap_dss_device *dssdev);
207int dss_mgr_unset_device(struct omap_overlay_manager *mgr); 222int dss_mgr_unset_device(struct omap_overlay_manager *mgr);
223int dss_mgr_set_output(struct omap_overlay_manager *mgr,
224 struct omap_dss_output *output);
225int dss_mgr_unset_output(struct omap_overlay_manager *mgr);
208void dss_mgr_set_timings(struct omap_overlay_manager *mgr, 226void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
209 struct omap_video_timings *timings); 227 const struct omap_video_timings *timings);
210void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr, 228void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr,
211 const struct dss_lcd_mgr_config *config); 229 const struct dss_lcd_mgr_config *config);
212const struct omap_video_timings *dss_mgr_get_timings(struct omap_overlay_manager *mgr); 230const struct omap_video_timings *dss_mgr_get_timings(struct omap_overlay_manager *mgr);
@@ -222,12 +240,17 @@ int dss_ovl_set_manager(struct omap_overlay *ovl,
222 struct omap_overlay_manager *mgr); 240 struct omap_overlay_manager *mgr);
223int dss_ovl_unset_manager(struct omap_overlay *ovl); 241int dss_ovl_unset_manager(struct omap_overlay *ovl);
224 242
243/* output */
244void dss_register_output(struct omap_dss_output *out);
245void dss_unregister_output(struct omap_dss_output *out);
246struct omap_dss_output *omapdss_get_output_from_dssdev(struct omap_dss_device *dssdev);
247
225/* display */ 248/* display */
226int dss_suspend_all_devices(void); 249int dss_suspend_all_devices(void);
227int dss_resume_all_devices(void); 250int dss_resume_all_devices(void);
228void dss_disable_all_devices(void); 251void dss_disable_all_devices(void);
229 252
230void dss_init_device(struct platform_device *pdev, 253int dss_init_device(struct platform_device *pdev,
231 struct omap_dss_device *dssdev); 254 struct omap_dss_device *dssdev);
232void dss_uninit_device(struct platform_device *pdev, 255void dss_uninit_device(struct platform_device *pdev,
233 struct omap_dss_device *dssdev); 256 struct omap_dss_device *dssdev);
@@ -254,22 +277,29 @@ static inline bool dss_mgr_is_lcd(enum omap_channel id)
254 return false; 277 return false;
255} 278}
256 279
280int dss_manager_kobj_init(struct omap_overlay_manager *mgr,
281 struct platform_device *pdev);
282void dss_manager_kobj_uninit(struct omap_overlay_manager *mgr);
283
257/* overlay */ 284/* overlay */
258void dss_init_overlays(struct platform_device *pdev); 285void dss_init_overlays(struct platform_device *pdev);
259void dss_uninit_overlays(struct platform_device *pdev); 286void dss_uninit_overlays(struct platform_device *pdev);
260void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr); 287void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr);
261void dss_recheck_connections(struct omap_dss_device *dssdev, bool force);
262int dss_ovl_simple_check(struct omap_overlay *ovl, 288int dss_ovl_simple_check(struct omap_overlay *ovl,
263 const struct omap_overlay_info *info); 289 const struct omap_overlay_info *info);
264int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info, 290int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info,
265 const struct omap_video_timings *mgr_timings); 291 const struct omap_video_timings *mgr_timings);
266bool dss_ovl_use_replication(struct dss_lcd_mgr_config config, 292bool dss_ovl_use_replication(struct dss_lcd_mgr_config config,
267 enum omap_color_mode mode); 293 enum omap_color_mode mode);
294int dss_overlay_kobj_init(struct omap_overlay *ovl,
295 struct platform_device *pdev);
296void dss_overlay_kobj_uninit(struct omap_overlay *ovl);
268 297
269/* DSS */ 298/* DSS */
270int dss_init_platform_driver(void) __init; 299int dss_init_platform_driver(void) __init;
271void dss_uninit_platform_driver(void); 300void dss_uninit_platform_driver(void);
272 301
302int dss_dpi_select_source(enum omap_channel channel);
273void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select); 303void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
274enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void); 304enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
275const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src); 305const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
@@ -279,7 +309,7 @@ void dss_dump_clocks(struct seq_file *s);
279void dss_debug_dump_clocks(struct seq_file *s); 309void dss_debug_dump_clocks(struct seq_file *s);
280#endif 310#endif
281 311
282void dss_sdi_init(u8 datapairs); 312void dss_sdi_init(int datapairs);
283int dss_sdi_enable(void); 313int dss_sdi_enable(void);
284void dss_sdi_disable(void); 314void dss_sdi_disable(void);
285 315
@@ -296,9 +326,7 @@ void dss_set_venc_output(enum omap_dss_venc_type type);
296void dss_set_dac_pwrdn_bgz(bool enable); 326void dss_set_dac_pwrdn_bgz(bool enable);
297 327
298unsigned long dss_get_dpll4_rate(void); 328unsigned long dss_get_dpll4_rate(void);
299int dss_calc_clock_rates(struct dss_clock_info *cinfo);
300int dss_set_clock_div(struct dss_clock_info *cinfo); 329int dss_set_clock_div(struct dss_clock_info *cinfo);
301int dss_get_clock_div(struct dss_clock_info *cinfo);
302int dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo, 330int dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo,
303 struct dispc_clock_info *dispc_cinfo); 331 struct dispc_clock_info *dispc_cinfo);
304 332
@@ -427,8 +455,9 @@ void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high);
427void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane, 455void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
428 u32 *fifo_low, u32 *fifo_high, bool use_fifomerge, 456 u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
429 bool manual_update); 457 bool manual_update);
430int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi, 458int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
431 bool replication, const struct omap_video_timings *mgr_timings); 459 bool replication, const struct omap_video_timings *mgr_timings,
460 bool mem_to_mem);
432int dispc_ovl_enable(enum omap_plane plane, bool enable); 461int dispc_ovl_enable(enum omap_plane plane, bool enable);
433void dispc_ovl_set_channel_out(enum omap_plane plane, 462void dispc_ovl_set_channel_out(enum omap_plane plane,
434 enum omap_channel channel); 463 enum omap_channel channel);
@@ -457,6 +486,15 @@ int dispc_mgr_get_clock_div(enum omap_channel channel,
457void dispc_mgr_setup(enum omap_channel channel, 486void dispc_mgr_setup(enum omap_channel channel,
458 struct omap_overlay_manager_info *info); 487 struct omap_overlay_manager_info *info);
459 488
489u32 dispc_wb_get_framedone_irq(void);
490bool dispc_wb_go_busy(void);
491void dispc_wb_go(void);
492void dispc_wb_enable(bool enable);
493bool dispc_wb_is_enabled(void);
494void dispc_wb_set_channel_in(enum dss_writeback_channel channel);
495int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
496 bool mem_to_mem, const struct omap_video_timings *timings);
497
460/* VENC */ 498/* VENC */
461#ifdef CONFIG_OMAP2_DSS_VENC 499#ifdef CONFIG_OMAP2_DSS_VENC
462int venc_init_platform_driver(void) __init; 500int venc_init_platform_driver(void) __init;
@@ -469,6 +507,20 @@ static inline unsigned long venc_get_pixel_clock(void)
469 return 0; 507 return 0;
470} 508}
471#endif 509#endif
510int omapdss_venc_display_enable(struct omap_dss_device *dssdev);
511void omapdss_venc_display_disable(struct omap_dss_device *dssdev);
512void omapdss_venc_set_timings(struct omap_dss_device *dssdev,
513 struct omap_video_timings *timings);
514int omapdss_venc_check_timings(struct omap_dss_device *dssdev,
515 struct omap_video_timings *timings);
516u32 omapdss_venc_get_wss(struct omap_dss_device *dssdev);
517int omapdss_venc_set_wss(struct omap_dss_device *dssdev, u32 wss);
518void omapdss_venc_set_type(struct omap_dss_device *dssdev,
519 enum omap_dss_venc_type type);
520void omapdss_venc_invert_vid_out_polarity(struct omap_dss_device *dssdev,
521 bool invert_polarity);
522int venc_panel_init(void);
523void venc_panel_exit(void);
472 524
473/* HDMI */ 525/* HDMI */
474#ifdef CONFIG_OMAP4_DSS_HDMI 526#ifdef CONFIG_OMAP4_DSS_HDMI
@@ -484,7 +536,8 @@ static inline unsigned long hdmi_get_pixel_clock(void)
484#endif 536#endif
485int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev); 537int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev);
486void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev); 538void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev);
487void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev); 539void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev,
540 struct omap_video_timings *timings);
488int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev, 541int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev,
489 struct omap_video_timings *timings); 542 struct omap_video_timings *timings);
490int omapdss_hdmi_read_edid(u8 *buf, int len); 543int omapdss_hdmi_read_edid(u8 *buf, int len);
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index 938709724f0c..acbc1e1efba3 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -46,7 +46,9 @@ struct omap_dss_features {
46 46
47 const int num_mgrs; 47 const int num_mgrs;
48 const int num_ovls; 48 const int num_ovls;
49 const int num_wbs;
49 const enum omap_display_type *supported_displays; 50 const enum omap_display_type *supported_displays;
51 const enum omap_dss_output_id *supported_outputs;
50 const enum omap_color_mode *supported_color_modes; 52 const enum omap_color_mode *supported_color_modes;
51 const enum omap_overlay_caps *overlay_caps; 53 const enum omap_overlay_caps *overlay_caps;
52 const char * const *clksrc_names; 54 const char * const *clksrc_names;
@@ -106,6 +108,21 @@ static const struct dss_reg_field omap4_dss_reg_fields[] = {
106 [FEAT_REG_DSIPLL_REGM_DSI] = { 30, 26 }, 108 [FEAT_REG_DSIPLL_REGM_DSI] = { 30, 26 },
107}; 109};
108 110
111static const struct dss_reg_field omap5_dss_reg_fields[] = {
112 [FEAT_REG_FIRHINC] = { 12, 0 },
113 [FEAT_REG_FIRVINC] = { 28, 16 },
114 [FEAT_REG_FIFOLOWTHRESHOLD] = { 15, 0 },
115 [FEAT_REG_FIFOHIGHTHRESHOLD] = { 31, 16 },
116 [FEAT_REG_FIFOSIZE] = { 15, 0 },
117 [FEAT_REG_HORIZONTALACCU] = { 10, 0 },
118 [FEAT_REG_VERTICALACCU] = { 26, 16 },
119 [FEAT_REG_DISPC_CLK_SWITCH] = { 9, 7 },
120 [FEAT_REG_DSIPLL_REGN] = { 8, 1 },
121 [FEAT_REG_DSIPLL_REGM] = { 20, 9 },
122 [FEAT_REG_DSIPLL_REGM_DISPC] = { 25, 21 },
123 [FEAT_REG_DSIPLL_REGM_DSI] = { 30, 26 },
124};
125
109static const enum omap_display_type omap2_dss_supported_displays[] = { 126static const enum omap_display_type omap2_dss_supported_displays[] = {
110 /* OMAP_DSS_CHANNEL_LCD */ 127 /* OMAP_DSS_CHANNEL_LCD */
111 OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI, 128 OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI,
@@ -144,6 +161,76 @@ static const enum omap_display_type omap4_dss_supported_displays[] = {
144 OMAP_DISPLAY_TYPE_DSI, 161 OMAP_DISPLAY_TYPE_DSI,
145}; 162};
146 163
164static const enum omap_display_type omap5_dss_supported_displays[] = {
165 /* OMAP_DSS_CHANNEL_LCD */
166 OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
167 OMAP_DISPLAY_TYPE_DSI,
168
169 /* OMAP_DSS_CHANNEL_DIGIT */
170 OMAP_DISPLAY_TYPE_HDMI | OMAP_DISPLAY_TYPE_DPI,
171
172 /* OMAP_DSS_CHANNEL_LCD2 */
173 OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
174 OMAP_DISPLAY_TYPE_DSI,
175};
176
177static const enum omap_dss_output_id omap2_dss_supported_outputs[] = {
178 /* OMAP_DSS_CHANNEL_LCD */
179 OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI,
180
181 /* OMAP_DSS_CHANNEL_DIGIT */
182 OMAP_DSS_OUTPUT_VENC,
183};
184
185static const enum omap_dss_output_id omap3430_dss_supported_outputs[] = {
186 /* OMAP_DSS_CHANNEL_LCD */
187 OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
188 OMAP_DSS_OUTPUT_SDI | OMAP_DSS_OUTPUT_DSI1,
189
190 /* OMAP_DSS_CHANNEL_DIGIT */
191 OMAP_DSS_OUTPUT_VENC,
192};
193
194static const enum omap_dss_output_id omap3630_dss_supported_outputs[] = {
195 /* OMAP_DSS_CHANNEL_LCD */
196 OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
197 OMAP_DSS_OUTPUT_DSI1,
198
199 /* OMAP_DSS_CHANNEL_DIGIT */
200 OMAP_DSS_OUTPUT_VENC,
201};
202
203static const enum omap_dss_output_id omap4_dss_supported_outputs[] = {
204 /* OMAP_DSS_CHANNEL_LCD */
205 OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
206 OMAP_DSS_OUTPUT_DSI1,
207
208 /* OMAP_DSS_CHANNEL_DIGIT */
209 OMAP_DSS_OUTPUT_VENC | OMAP_DSS_OUTPUT_HDMI |
210 OMAP_DSS_OUTPUT_DPI,
211
212 /* OMAP_DSS_CHANNEL_LCD2 */
213 OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
214 OMAP_DSS_OUTPUT_DSI2,
215};
216
217static const enum omap_dss_output_id omap5_dss_supported_outputs[] = {
218 /* OMAP_DSS_CHANNEL_LCD */
219 OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
220 OMAP_DSS_OUTPUT_DSI1 | OMAP_DSS_OUTPUT_DSI2,
221
222 /* OMAP_DSS_CHANNEL_DIGIT */
223 OMAP_DSS_OUTPUT_HDMI | OMAP_DSS_OUTPUT_DPI,
224
225 /* OMAP_DSS_CHANNEL_LCD2 */
226 OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
227 OMAP_DSS_OUTPUT_DSI1,
228
229 /* OMAP_DSS_CHANNEL_LCD3 */
230 OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
231 OMAP_DSS_OUTPUT_DSI2,
232};
233
147static const enum omap_color_mode omap2_dss_supported_color_modes[] = { 234static const enum omap_color_mode omap2_dss_supported_color_modes[] = {
148 /* OMAP_DSS_GFX */ 235 /* OMAP_DSS_GFX */
149 OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 | 236 OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
@@ -224,58 +311,80 @@ static const enum omap_color_mode omap4_dss_supported_color_modes[] = {
224 OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 | 311 OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 |
225 OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 | 312 OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 |
226 OMAP_DSS_COLOR_RGBX32, 313 OMAP_DSS_COLOR_RGBX32,
314
315 /* OMAP_DSS_WB */
316 OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB12U |
317 OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_ARGB16_1555 |
318 OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_NV12 |
319 OMAP_DSS_COLOR_RGBA16 | OMAP_DSS_COLOR_RGB24U |
320 OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_UYVY |
321 OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 |
322 OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 |
323 OMAP_DSS_COLOR_RGBX32,
227}; 324};
228 325
229static const enum omap_overlay_caps omap2_dss_overlay_caps[] = { 326static const enum omap_overlay_caps omap2_dss_overlay_caps[] = {
230 /* OMAP_DSS_GFX */ 327 /* OMAP_DSS_GFX */
231 0, 328 OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
232 329
233 /* OMAP_DSS_VIDEO1 */ 330 /* OMAP_DSS_VIDEO1 */
234 OMAP_DSS_OVL_CAP_SCALE, 331 OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
332 OMAP_DSS_OVL_CAP_REPLICATION,
235 333
236 /* OMAP_DSS_VIDEO2 */ 334 /* OMAP_DSS_VIDEO2 */
237 OMAP_DSS_OVL_CAP_SCALE, 335 OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
336 OMAP_DSS_OVL_CAP_REPLICATION,
238}; 337};
239 338
240static const enum omap_overlay_caps omap3430_dss_overlay_caps[] = { 339static const enum omap_overlay_caps omap3430_dss_overlay_caps[] = {
241 /* OMAP_DSS_GFX */ 340 /* OMAP_DSS_GFX */
242 OMAP_DSS_OVL_CAP_GLOBAL_ALPHA, 341 OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_POS |
342 OMAP_DSS_OVL_CAP_REPLICATION,
243 343
244 /* OMAP_DSS_VIDEO1 */ 344 /* OMAP_DSS_VIDEO1 */
245 OMAP_DSS_OVL_CAP_SCALE, 345 OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
346 OMAP_DSS_OVL_CAP_REPLICATION,
246 347
247 /* OMAP_DSS_VIDEO2 */ 348 /* OMAP_DSS_VIDEO2 */
248 OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA, 349 OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
350 OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
249}; 351};
250 352
251static const enum omap_overlay_caps omap3630_dss_overlay_caps[] = { 353static const enum omap_overlay_caps omap3630_dss_overlay_caps[] = {
252 /* OMAP_DSS_GFX */ 354 /* OMAP_DSS_GFX */
253 OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA, 355 OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA |
356 OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
254 357
255 /* OMAP_DSS_VIDEO1 */ 358 /* OMAP_DSS_VIDEO1 */
256 OMAP_DSS_OVL_CAP_SCALE, 359 OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
360 OMAP_DSS_OVL_CAP_REPLICATION,
257 361
258 /* OMAP_DSS_VIDEO2 */ 362 /* OMAP_DSS_VIDEO2 */
259 OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | 363 OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
260 OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA, 364 OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_POS |
365 OMAP_DSS_OVL_CAP_REPLICATION,
261}; 366};
262 367
263static const enum omap_overlay_caps omap4_dss_overlay_caps[] = { 368static const enum omap_overlay_caps omap4_dss_overlay_caps[] = {
264 /* OMAP_DSS_GFX */ 369 /* OMAP_DSS_GFX */
265 OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | 370 OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA |
266 OMAP_DSS_OVL_CAP_ZORDER, 371 OMAP_DSS_OVL_CAP_ZORDER | OMAP_DSS_OVL_CAP_POS |
372 OMAP_DSS_OVL_CAP_REPLICATION,
267 373
268 /* OMAP_DSS_VIDEO1 */ 374 /* OMAP_DSS_VIDEO1 */
269 OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | 375 OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
270 OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER, 376 OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER |
377 OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
271 378
272 /* OMAP_DSS_VIDEO2 */ 379 /* OMAP_DSS_VIDEO2 */
273 OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | 380 OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
274 OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER, 381 OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER |
382 OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
275 383
276 /* OMAP_DSS_VIDEO3 */ 384 /* OMAP_DSS_VIDEO3 */
277 OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | 385 OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
278 OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER, 386 OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER |
387 OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
279}; 388};
280 389
281static const char * const omap2_dss_clk_source_names[] = { 390static const char * const omap2_dss_clk_source_names[] = {
@@ -298,6 +407,14 @@ static const char * const omap4_dss_clk_source_names[] = {
298 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "PLL2_CLK2", 407 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "PLL2_CLK2",
299}; 408};
300 409
410static const char * const omap5_dss_clk_source_names[] = {
411 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DPLL_DSI1_A_CLK1",
412 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DPLL_DSI1_A_CLK2",
413 [OMAP_DSS_CLK_SRC_FCK] = "DSS_CLK",
414 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DPLL_DSI1_C_CLK1",
415 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "DPLL_DSI1_C_CLK2",
416};
417
301static const struct dss_param_range omap2_dss_param_range[] = { 418static const struct dss_param_range omap2_dss_param_range[] = {
302 [FEAT_PARAM_DSS_FCK] = { 0, 173000000 }, 419 [FEAT_PARAM_DSS_FCK] = { 0, 173000000 },
303 [FEAT_PARAM_DSS_PCD] = { 2, 255 }, 420 [FEAT_PARAM_DSS_PCD] = { 2, 255 },
@@ -326,6 +443,7 @@ static const struct dss_param_range omap3_dss_param_range[] = {
326 [FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, (1 << 4) - 1 }, 443 [FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, (1 << 4) - 1 },
327 [FEAT_PARAM_DSIPLL_FINT] = { 750000, 2100000 }, 444 [FEAT_PARAM_DSIPLL_FINT] = { 750000, 2100000 },
328 [FEAT_PARAM_DSIPLL_LPDIV] = { 1, (1 << 13) - 1}, 445 [FEAT_PARAM_DSIPLL_LPDIV] = { 1, (1 << 13) - 1},
446 [FEAT_PARAM_DSI_FCK] = { 0, 173000000 },
329 [FEAT_PARAM_DOWNSCALE] = { 1, 4 }, 447 [FEAT_PARAM_DOWNSCALE] = { 1, 4 },
330 [FEAT_PARAM_LINEWIDTH] = { 1, 1024 }, 448 [FEAT_PARAM_LINEWIDTH] = { 1, 1024 },
331 [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 }, 449 [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
@@ -341,6 +459,23 @@ static const struct dss_param_range omap4_dss_param_range[] = {
341 [FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, (1 << 5) - 1 }, 459 [FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, (1 << 5) - 1 },
342 [FEAT_PARAM_DSIPLL_FINT] = { 500000, 2500000 }, 460 [FEAT_PARAM_DSIPLL_FINT] = { 500000, 2500000 },
343 [FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 }, 461 [FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 },
462 [FEAT_PARAM_DSI_FCK] = { 0, 170000000 },
463 [FEAT_PARAM_DOWNSCALE] = { 1, 4 },
464 [FEAT_PARAM_LINEWIDTH] = { 1, 2048 },
465 [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
466 [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 },
467};
468
469static const struct dss_param_range omap5_dss_param_range[] = {
470 [FEAT_PARAM_DSS_FCK] = { 0, 200000000 },
471 [FEAT_PARAM_DSS_PCD] = { 1, 255 },
472 [FEAT_PARAM_DSIPLL_REGN] = { 0, (1 << 8) - 1 },
473 [FEAT_PARAM_DSIPLL_REGM] = { 0, (1 << 12) - 1 },
474 [FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, (1 << 5) - 1 },
475 [FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, (1 << 5) - 1 },
476 [FEAT_PARAM_DSIPLL_FINT] = { 500000, 2500000 },
477 [FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 },
478 [FEAT_PARAM_DSI_FCK] = { 0, 170000000 },
344 [FEAT_PARAM_DOWNSCALE] = { 1, 4 }, 479 [FEAT_PARAM_DOWNSCALE] = { 1, 4 },
345 [FEAT_PARAM_LINEWIDTH] = { 1, 2048 }, 480 [FEAT_PARAM_LINEWIDTH] = { 1, 2048 },
346 [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 }, 481 [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
@@ -373,6 +508,26 @@ static const enum dss_feat_id omap3430_dss_feat_list[] = {
373 FEAT_ALPHA_FIXED_ZORDER, 508 FEAT_ALPHA_FIXED_ZORDER,
374 FEAT_FIFO_MERGE, 509 FEAT_FIFO_MERGE,
375 FEAT_OMAP3_DSI_FIFO_BUG, 510 FEAT_OMAP3_DSI_FIFO_BUG,
511 FEAT_DPI_USES_VDDS_DSI,
512};
513
514static const enum dss_feat_id am35xx_dss_feat_list[] = {
515 FEAT_LCDENABLEPOL,
516 FEAT_LCDENABLESIGNAL,
517 FEAT_PCKFREEENABLE,
518 FEAT_FUNCGATED,
519 FEAT_LINEBUFFERSPLIT,
520 FEAT_ROWREPEATENABLE,
521 FEAT_RESIZECONF,
522 FEAT_DSI_PLL_FREQSEL,
523 FEAT_DSI_REVERSE_TXCLKESC,
524 FEAT_VENC_REQUIRES_TV_DAC_CLK,
525 FEAT_CPR,
526 FEAT_PRELOAD,
527 FEAT_FIR_COEF_V,
528 FEAT_ALPHA_FIXED_ZORDER,
529 FEAT_FIFO_MERGE,
530 FEAT_OMAP3_DSI_FIFO_BUG,
376}; 531};
377 532
378static const enum dss_feat_id omap3630_dss_feat_list[] = { 533static const enum dss_feat_id omap3630_dss_feat_list[] = {
@@ -447,6 +602,28 @@ static const enum dss_feat_id omap4_dss_feat_list[] = {
447 FEAT_BURST_2D, 602 FEAT_BURST_2D,
448}; 603};
449 604
605static const enum dss_feat_id omap5_dss_feat_list[] = {
606 FEAT_MGR_LCD2,
607 FEAT_CORE_CLK_DIV,
608 FEAT_LCD_CLK_SRC,
609 FEAT_DSI_DCS_CMD_CONFIG_VC,
610 FEAT_DSI_VC_OCP_WIDTH,
611 FEAT_DSI_GNQ,
612 FEAT_HDMI_CTS_SWMODE,
613 FEAT_HDMI_AUDIO_USE_MCLK,
614 FEAT_HANDLE_UV_SEPARATE,
615 FEAT_ATTR2,
616 FEAT_CPR,
617 FEAT_PRELOAD,
618 FEAT_FIR_COEF_V,
619 FEAT_ALPHA_FREE_ZORDER,
620 FEAT_FIFO_MERGE,
621 FEAT_BURST_2D,
622 FEAT_DSI_PLL_SELFREQDCO,
623 FEAT_DSI_PLL_REFSEL,
624 FEAT_DSI_PHY_DCC,
625};
626
450/* OMAP2 DSS Features */ 627/* OMAP2 DSS Features */
451static const struct omap_dss_features omap2_dss_features = { 628static const struct omap_dss_features omap2_dss_features = {
452 .reg_fields = omap2_dss_reg_fields, 629 .reg_fields = omap2_dss_reg_fields,
@@ -458,6 +635,7 @@ static const struct omap_dss_features omap2_dss_features = {
458 .num_mgrs = 2, 635 .num_mgrs = 2,
459 .num_ovls = 3, 636 .num_ovls = 3,
460 .supported_displays = omap2_dss_supported_displays, 637 .supported_displays = omap2_dss_supported_displays,
638 .supported_outputs = omap2_dss_supported_outputs,
461 .supported_color_modes = omap2_dss_supported_color_modes, 639 .supported_color_modes = omap2_dss_supported_color_modes,
462 .overlay_caps = omap2_dss_overlay_caps, 640 .overlay_caps = omap2_dss_overlay_caps,
463 .clksrc_names = omap2_dss_clk_source_names, 641 .clksrc_names = omap2_dss_clk_source_names,
@@ -478,6 +656,31 @@ static const struct omap_dss_features omap3430_dss_features = {
478 .num_mgrs = 2, 656 .num_mgrs = 2,
479 .num_ovls = 3, 657 .num_ovls = 3,
480 .supported_displays = omap3430_dss_supported_displays, 658 .supported_displays = omap3430_dss_supported_displays,
659 .supported_outputs = omap3430_dss_supported_outputs,
660 .supported_color_modes = omap3_dss_supported_color_modes,
661 .overlay_caps = omap3430_dss_overlay_caps,
662 .clksrc_names = omap3_dss_clk_source_names,
663 .dss_params = omap3_dss_param_range,
664 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
665 .buffer_size_unit = 1,
666 .burst_size_unit = 8,
667};
668
669/*
670 * AM35xx DSS Features. This is basically OMAP3 DSS Features without the
671 * vdds_dsi regulator.
672 */
673static const struct omap_dss_features am35xx_dss_features = {
674 .reg_fields = omap3_dss_reg_fields,
675 .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
676
677 .features = am35xx_dss_feat_list,
678 .num_features = ARRAY_SIZE(am35xx_dss_feat_list),
679
680 .num_mgrs = 2,
681 .num_ovls = 3,
682 .supported_displays = omap3430_dss_supported_displays,
683 .supported_outputs = omap3430_dss_supported_outputs,
481 .supported_color_modes = omap3_dss_supported_color_modes, 684 .supported_color_modes = omap3_dss_supported_color_modes,
482 .overlay_caps = omap3430_dss_overlay_caps, 685 .overlay_caps = omap3430_dss_overlay_caps,
483 .clksrc_names = omap3_dss_clk_source_names, 686 .clksrc_names = omap3_dss_clk_source_names,
@@ -497,6 +700,7 @@ static const struct omap_dss_features omap3630_dss_features = {
497 .num_mgrs = 2, 700 .num_mgrs = 2,
498 .num_ovls = 3, 701 .num_ovls = 3,
499 .supported_displays = omap3630_dss_supported_displays, 702 .supported_displays = omap3630_dss_supported_displays,
703 .supported_outputs = omap3630_dss_supported_outputs,
500 .supported_color_modes = omap3_dss_supported_color_modes, 704 .supported_color_modes = omap3_dss_supported_color_modes,
501 .overlay_caps = omap3630_dss_overlay_caps, 705 .overlay_caps = omap3630_dss_overlay_caps,
502 .clksrc_names = omap3_dss_clk_source_names, 706 .clksrc_names = omap3_dss_clk_source_names,
@@ -517,7 +721,9 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = {
517 721
518 .num_mgrs = 3, 722 .num_mgrs = 3,
519 .num_ovls = 4, 723 .num_ovls = 4,
724 .num_wbs = 1,
520 .supported_displays = omap4_dss_supported_displays, 725 .supported_displays = omap4_dss_supported_displays,
726 .supported_outputs = omap4_dss_supported_outputs,
521 .supported_color_modes = omap4_dss_supported_color_modes, 727 .supported_color_modes = omap4_dss_supported_color_modes,
522 .overlay_caps = omap4_dss_overlay_caps, 728 .overlay_caps = omap4_dss_overlay_caps,
523 .clksrc_names = omap4_dss_clk_source_names, 729 .clksrc_names = omap4_dss_clk_source_names,
@@ -537,7 +743,9 @@ static const struct omap_dss_features omap4430_es2_0_1_2_dss_features = {
537 743
538 .num_mgrs = 3, 744 .num_mgrs = 3,
539 .num_ovls = 4, 745 .num_ovls = 4,
746 .num_wbs = 1,
540 .supported_displays = omap4_dss_supported_displays, 747 .supported_displays = omap4_dss_supported_displays,
748 .supported_outputs = omap4_dss_supported_outputs,
541 .supported_color_modes = omap4_dss_supported_color_modes, 749 .supported_color_modes = omap4_dss_supported_color_modes,
542 .overlay_caps = omap4_dss_overlay_caps, 750 .overlay_caps = omap4_dss_overlay_caps,
543 .clksrc_names = omap4_dss_clk_source_names, 751 .clksrc_names = omap4_dss_clk_source_names,
@@ -557,7 +765,9 @@ static const struct omap_dss_features omap4_dss_features = {
557 765
558 .num_mgrs = 3, 766 .num_mgrs = 3,
559 .num_ovls = 4, 767 .num_ovls = 4,
768 .num_wbs = 1,
560 .supported_displays = omap4_dss_supported_displays, 769 .supported_displays = omap4_dss_supported_displays,
770 .supported_outputs = omap4_dss_supported_outputs,
561 .supported_color_modes = omap4_dss_supported_color_modes, 771 .supported_color_modes = omap4_dss_supported_color_modes,
562 .overlay_caps = omap4_dss_overlay_caps, 772 .overlay_caps = omap4_dss_overlay_caps,
563 .clksrc_names = omap4_dss_clk_source_names, 773 .clksrc_names = omap4_dss_clk_source_names,
@@ -567,6 +777,27 @@ static const struct omap_dss_features omap4_dss_features = {
567 .burst_size_unit = 16, 777 .burst_size_unit = 16,
568}; 778};
569 779
780/* OMAP5 DSS Features */
781static const struct omap_dss_features omap5_dss_features = {
782 .reg_fields = omap5_dss_reg_fields,
783 .num_reg_fields = ARRAY_SIZE(omap5_dss_reg_fields),
784
785 .features = omap5_dss_feat_list,
786 .num_features = ARRAY_SIZE(omap5_dss_feat_list),
787
788 .num_mgrs = 3,
789 .num_ovls = 4,
790 .supported_displays = omap5_dss_supported_displays,
791 .supported_outputs = omap5_dss_supported_outputs,
792 .supported_color_modes = omap4_dss_supported_color_modes,
793 .overlay_caps = omap4_dss_overlay_caps,
794 .clksrc_names = omap5_dss_clk_source_names,
795 .dss_params = omap5_dss_param_range,
796 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
797 .buffer_size_unit = 16,
798 .burst_size_unit = 16,
799};
800
570#if defined(CONFIG_OMAP4_DSS_HDMI) 801#if defined(CONFIG_OMAP4_DSS_HDMI)
571/* HDMI OMAP4 Functions*/ 802/* HDMI OMAP4 Functions*/
572static const struct ti_hdmi_ip_ops omap4_hdmi_functions = { 803static const struct ti_hdmi_ip_ops omap4_hdmi_functions = {
@@ -612,6 +843,11 @@ int dss_feat_get_num_ovls(void)
612 return omap_current_dss_features->num_ovls; 843 return omap_current_dss_features->num_ovls;
613} 844}
614 845
846int dss_feat_get_num_wbs(void)
847{
848 return omap_current_dss_features->num_wbs;
849}
850
615unsigned long dss_feat_get_param_min(enum dss_range_param param) 851unsigned long dss_feat_get_param_min(enum dss_range_param param)
616{ 852{
617 return omap_current_dss_features->dss_params[param].min; 853 return omap_current_dss_features->dss_params[param].min;
@@ -627,6 +863,11 @@ enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel
627 return omap_current_dss_features->supported_displays[channel]; 863 return omap_current_dss_features->supported_displays[channel];
628} 864}
629 865
866enum omap_dss_output_id dss_feat_get_supported_outputs(enum omap_channel channel)
867{
868 return omap_current_dss_features->supported_outputs[channel];
869}
870
630enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane) 871enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane)
631{ 872{
632 return omap_current_dss_features->supported_color_modes[plane]; 873 return omap_current_dss_features->supported_color_modes[plane];
@@ -694,8 +935,13 @@ void dss_features_init(void)
694 omap_current_dss_features = &omap2_dss_features; 935 omap_current_dss_features = &omap2_dss_features;
695 else if (cpu_is_omap3630()) 936 else if (cpu_is_omap3630())
696 omap_current_dss_features = &omap3630_dss_features; 937 omap_current_dss_features = &omap3630_dss_features;
697 else if (cpu_is_omap34xx()) 938 else if (cpu_is_omap34xx()) {
698 omap_current_dss_features = &omap3430_dss_features; 939 if (soc_is_am35xx()) {
940 omap_current_dss_features = &am35xx_dss_features;
941 } else {
942 omap_current_dss_features = &omap3430_dss_features;
943 }
944 }
699 else if (omap_rev() == OMAP4430_REV_ES1_0) 945 else if (omap_rev() == OMAP4430_REV_ES1_0)
700 omap_current_dss_features = &omap4430_es1_0_dss_features; 946 omap_current_dss_features = &omap4430_es1_0_dss_features;
701 else if (omap_rev() == OMAP4430_REV_ES2_0 || 947 else if (omap_rev() == OMAP4430_REV_ES2_0 ||
@@ -704,6 +950,8 @@ void dss_features_init(void)
704 omap_current_dss_features = &omap4430_es2_0_1_2_dss_features; 950 omap_current_dss_features = &omap4430_es2_0_1_2_dss_features;
705 else if (cpu_is_omap44xx()) 951 else if (cpu_is_omap44xx())
706 omap_current_dss_features = &omap4_dss_features; 952 omap_current_dss_features = &omap4_dss_features;
953 else if (soc_is_omap54xx())
954 omap_current_dss_features = &omap5_dss_features;
707 else 955 else
708 DSSWARN("Unsupported OMAP version"); 956 DSSWARN("Unsupported OMAP version");
709} 957}
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index 996ffcbfed58..9218113b5e88 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -50,6 +50,7 @@ enum dss_feat_id {
50 FEAT_DSI_VC_OCP_WIDTH, 50 FEAT_DSI_VC_OCP_WIDTH,
51 FEAT_DSI_REVERSE_TXCLKESC, 51 FEAT_DSI_REVERSE_TXCLKESC,
52 FEAT_DSI_GNQ, 52 FEAT_DSI_GNQ,
53 FEAT_DPI_USES_VDDS_DSI,
53 FEAT_HDMI_CTS_SWMODE, 54 FEAT_HDMI_CTS_SWMODE,
54 FEAT_HDMI_AUDIO_USE_MCLK, 55 FEAT_HDMI_AUDIO_USE_MCLK,
55 FEAT_HANDLE_UV_SEPARATE, 56 FEAT_HANDLE_UV_SEPARATE,
@@ -64,6 +65,9 @@ enum dss_feat_id {
64 /* An unknown HW bug causing the normal FIFO thresholds not to work */ 65 /* An unknown HW bug causing the normal FIFO thresholds not to work */
65 FEAT_OMAP3_DSI_FIFO_BUG, 66 FEAT_OMAP3_DSI_FIFO_BUG,
66 FEAT_BURST_2D, 67 FEAT_BURST_2D,
68 FEAT_DSI_PLL_SELFREQDCO,
69 FEAT_DSI_PLL_REFSEL,
70 FEAT_DSI_PHY_DCC,
67}; 71};
68 72
69/* DSS register field id */ 73/* DSS register field id */
@@ -91,6 +95,7 @@ enum dss_range_param {
91 FEAT_PARAM_DSIPLL_REGM_DSI, 95 FEAT_PARAM_DSIPLL_REGM_DSI,
92 FEAT_PARAM_DSIPLL_FINT, 96 FEAT_PARAM_DSIPLL_FINT,
93 FEAT_PARAM_DSIPLL_LPDIV, 97 FEAT_PARAM_DSIPLL_LPDIV,
98 FEAT_PARAM_DSI_FCK,
94 FEAT_PARAM_DOWNSCALE, 99 FEAT_PARAM_DOWNSCALE,
95 FEAT_PARAM_LINEWIDTH, 100 FEAT_PARAM_LINEWIDTH,
96 FEAT_PARAM_MGR_WIDTH, 101 FEAT_PARAM_MGR_WIDTH,
@@ -100,9 +105,11 @@ enum dss_range_param {
100/* DSS Feature Functions */ 105/* DSS Feature Functions */
101int dss_feat_get_num_mgrs(void); 106int dss_feat_get_num_mgrs(void);
102int dss_feat_get_num_ovls(void); 107int dss_feat_get_num_ovls(void);
108int dss_feat_get_num_wbs(void);
103unsigned long dss_feat_get_param_min(enum dss_range_param param); 109unsigned long dss_feat_get_param_min(enum dss_range_param param);
104unsigned long dss_feat_get_param_max(enum dss_range_param param); 110unsigned long dss_feat_get_param_max(enum dss_range_param param);
105enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel); 111enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel);
112enum omap_dss_output_id dss_feat_get_supported_outputs(enum omap_channel channel);
106enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane); 113enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
107enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane plane); 114enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane plane);
108bool dss_feat_color_mode_supported(enum omap_plane plane, 115bool dss_feat_color_mode_supported(enum omap_plane plane,
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index 060216fdc578..a48a7dd75b33 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -32,6 +32,8 @@
32#include <linux/platform_device.h> 32#include <linux/platform_device.h>
33#include <linux/pm_runtime.h> 33#include <linux/pm_runtime.h>
34#include <linux/clk.h> 34#include <linux/clk.h>
35#include <linux/gpio.h>
36#include <linux/regulator/consumer.h>
35#include <video/omapdss.h> 37#include <video/omapdss.h>
36 38
37#include "ti_hdmi.h" 39#include "ti_hdmi.h"
@@ -61,6 +63,13 @@ static struct {
61 struct hdmi_ip_data ip_data; 63 struct hdmi_ip_data ip_data;
62 64
63 struct clk *sys_clk; 65 struct clk *sys_clk;
66 struct regulator *vdda_hdmi_dac_reg;
67
68 int ct_cp_hpd_gpio;
69 int ls_oe_gpio;
70 int hpd_gpio;
71
72 struct omap_dss_output output;
64} hdmi; 73} hdmi;
65 74
66/* 75/*
@@ -314,12 +323,47 @@ static void hdmi_runtime_put(void)
314 323
315static int __init hdmi_init_display(struct omap_dss_device *dssdev) 324static int __init hdmi_init_display(struct omap_dss_device *dssdev)
316{ 325{
326 int r;
327
328 struct gpio gpios[] = {
329 { hdmi.ct_cp_hpd_gpio, GPIOF_OUT_INIT_LOW, "hdmi_ct_cp_hpd" },
330 { hdmi.ls_oe_gpio, GPIOF_OUT_INIT_LOW, "hdmi_ls_oe" },
331 { hdmi.hpd_gpio, GPIOF_DIR_IN, "hdmi_hpd" },
332 };
333
317 DSSDBG("init_display\n"); 334 DSSDBG("init_display\n");
318 335
319 dss_init_hdmi_ip_ops(&hdmi.ip_data); 336 dss_init_hdmi_ip_ops(&hdmi.ip_data);
337
338 if (hdmi.vdda_hdmi_dac_reg == NULL) {
339 struct regulator *reg;
340
341 reg = devm_regulator_get(&hdmi.pdev->dev, "vdda_hdmi_dac");
342
343 if (IS_ERR(reg)) {
344 DSSERR("can't get VDDA_HDMI_DAC regulator\n");
345 return PTR_ERR(reg);
346 }
347
348 hdmi.vdda_hdmi_dac_reg = reg;
349 }
350
351 r = gpio_request_array(gpios, ARRAY_SIZE(gpios));
352 if (r)
353 return r;
354
320 return 0; 355 return 0;
321} 356}
322 357
358static void __exit hdmi_uninit_display(struct omap_dss_device *dssdev)
359{
360 DSSDBG("uninit_display\n");
361
362 gpio_free(hdmi.ct_cp_hpd_gpio);
363 gpio_free(hdmi.ls_oe_gpio);
364 gpio_free(hdmi.hpd_gpio);
365}
366
323static const struct hdmi_config *hdmi_find_timing( 367static const struct hdmi_config *hdmi_find_timing(
324 const struct hdmi_config *timings_arr, 368 const struct hdmi_config *timings_arr,
325 int len) 369 int len)
@@ -459,32 +503,30 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
459static int hdmi_power_on(struct omap_dss_device *dssdev) 503static int hdmi_power_on(struct omap_dss_device *dssdev)
460{ 504{
461 int r; 505 int r;
462 const struct hdmi_config *timing;
463 struct omap_video_timings *p; 506 struct omap_video_timings *p;
507 struct omap_overlay_manager *mgr = dssdev->output->manager;
464 unsigned long phy; 508 unsigned long phy;
465 509
510 gpio_set_value(hdmi.ct_cp_hpd_gpio, 1);
511 gpio_set_value(hdmi.ls_oe_gpio, 1);
512
513 /* wait 300us after CT_CP_HPD for the 5V power output to reach 90% */
514 udelay(300);
515
516 r = regulator_enable(hdmi.vdda_hdmi_dac_reg);
517 if (r)
518 goto err_vdac_enable;
519
466 r = hdmi_runtime_get(); 520 r = hdmi_runtime_get();
467 if (r) 521 if (r)
468 return r; 522 goto err_runtime_get;
469 523
470 dss_mgr_disable(dssdev->manager); 524 dss_mgr_disable(mgr);
471 525
472 p = &dssdev->panel.timings; 526 p = &hdmi.ip_data.cfg.timings;
473 527
474 DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", 528 DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
475 dssdev->panel.timings.x_res,
476 dssdev->panel.timings.y_res);
477 529
478 timing = hdmi_get_timings();
479 if (timing == NULL) {
480 /* HDMI code 4 corresponds to 640 * 480 VGA */
481 hdmi.ip_data.cfg.cm.code = 4;
482 /* DVI mode 1 corresponds to HDMI 0 to DVI */
483 hdmi.ip_data.cfg.cm.mode = HDMI_DVI;
484 hdmi.ip_data.cfg = vesa_timings[0];
485 } else {
486 hdmi.ip_data.cfg = *timing;
487 }
488 phy = p->pixel_clock; 530 phy = p->pixel_clock;
489 531
490 hdmi_compute_pll(dssdev, phy, &hdmi.ip_data.pll_data); 532 hdmi_compute_pll(dssdev, phy, &hdmi.ip_data.pll_data);
@@ -495,13 +537,13 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
495 r = hdmi.ip_data.ops->pll_enable(&hdmi.ip_data); 537 r = hdmi.ip_data.ops->pll_enable(&hdmi.ip_data);
496 if (r) { 538 if (r) {
497 DSSDBG("Failed to lock PLL\n"); 539 DSSDBG("Failed to lock PLL\n");
498 goto err; 540 goto err_pll_enable;
499 } 541 }
500 542
501 r = hdmi.ip_data.ops->phy_enable(&hdmi.ip_data); 543 r = hdmi.ip_data.ops->phy_enable(&hdmi.ip_data);
502 if (r) { 544 if (r) {
503 DSSDBG("Failed to start PHY\n"); 545 DSSDBG("Failed to start PHY\n");
504 goto err; 546 goto err_phy_enable;
505 } 547 }
506 548
507 hdmi.ip_data.ops->video_configure(&hdmi.ip_data); 549 hdmi.ip_data.ops->video_configure(&hdmi.ip_data);
@@ -521,13 +563,13 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
521 dispc_enable_gamma_table(0); 563 dispc_enable_gamma_table(0);
522 564
523 /* tv size */ 565 /* tv size */
524 dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings); 566 dss_mgr_set_timings(mgr, p);
525 567
526 r = hdmi.ip_data.ops->video_enable(&hdmi.ip_data); 568 r = hdmi.ip_data.ops->video_enable(&hdmi.ip_data);
527 if (r) 569 if (r)
528 goto err_vid_enable; 570 goto err_vid_enable;
529 571
530 r = dss_mgr_enable(dssdev->manager); 572 r = dss_mgr_enable(mgr);
531 if (r) 573 if (r)
532 goto err_mgr_enable; 574 goto err_mgr_enable;
533 575
@@ -537,20 +579,33 @@ err_mgr_enable:
537 hdmi.ip_data.ops->video_disable(&hdmi.ip_data); 579 hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
538err_vid_enable: 580err_vid_enable:
539 hdmi.ip_data.ops->phy_disable(&hdmi.ip_data); 581 hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
582err_phy_enable:
540 hdmi.ip_data.ops->pll_disable(&hdmi.ip_data); 583 hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
541err: 584err_pll_enable:
542 hdmi_runtime_put(); 585 hdmi_runtime_put();
586err_runtime_get:
587 regulator_disable(hdmi.vdda_hdmi_dac_reg);
588err_vdac_enable:
589 gpio_set_value(hdmi.ct_cp_hpd_gpio, 0);
590 gpio_set_value(hdmi.ls_oe_gpio, 0);
543 return -EIO; 591 return -EIO;
544} 592}
545 593
546static void hdmi_power_off(struct omap_dss_device *dssdev) 594static void hdmi_power_off(struct omap_dss_device *dssdev)
547{ 595{
548 dss_mgr_disable(dssdev->manager); 596 struct omap_overlay_manager *mgr = dssdev->output->manager;
597
598 dss_mgr_disable(mgr);
549 599
550 hdmi.ip_data.ops->video_disable(&hdmi.ip_data); 600 hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
551 hdmi.ip_data.ops->phy_disable(&hdmi.ip_data); 601 hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
552 hdmi.ip_data.ops->pll_disable(&hdmi.ip_data); 602 hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
553 hdmi_runtime_put(); 603 hdmi_runtime_put();
604
605 regulator_disable(hdmi.vdda_hdmi_dac_reg);
606
607 gpio_set_value(hdmi.ct_cp_hpd_gpio, 0);
608 gpio_set_value(hdmi.ls_oe_gpio, 0);
554} 609}
555 610
556int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev, 611int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev,
@@ -567,25 +622,22 @@ int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev,
567 622
568} 623}
569 624
570void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev) 625void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev,
626 struct omap_video_timings *timings)
571{ 627{
572 struct hdmi_cm cm; 628 struct hdmi_cm cm;
629 const struct hdmi_config *t;
573 630
574 cm = hdmi_get_code(&dssdev->panel.timings); 631 mutex_lock(&hdmi.lock);
575 hdmi.ip_data.cfg.cm.code = cm.code;
576 hdmi.ip_data.cfg.cm.mode = cm.mode;
577 632
578 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { 633 cm = hdmi_get_code(timings);
579 int r; 634 hdmi.ip_data.cfg.cm = cm;
580 635
581 hdmi_power_off(dssdev); 636 t = hdmi_get_timings();
637 if (t != NULL)
638 hdmi.ip_data.cfg = *t;
582 639
583 r = hdmi_power_on(dssdev); 640 mutex_unlock(&hdmi.lock);
584 if (r)
585 DSSERR("failed to power on device\n");
586 } else {
587 dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
588 }
589} 641}
590 642
591static void hdmi_dump_regs(struct seq_file *s) 643static void hdmi_dump_regs(struct seq_file *s)
@@ -640,20 +692,20 @@ bool omapdss_hdmi_detect(void)
640 692
641int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev) 693int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev)
642{ 694{
643 struct omap_dss_hdmi_data *priv = dssdev->data; 695 struct omap_dss_output *out = dssdev->output;
644 int r = 0; 696 int r = 0;
645 697
646 DSSDBG("ENTER hdmi_display_enable\n"); 698 DSSDBG("ENTER hdmi_display_enable\n");
647 699
648 mutex_lock(&hdmi.lock); 700 mutex_lock(&hdmi.lock);
649 701
650 if (dssdev->manager == NULL) { 702 if (out == NULL || out->manager == NULL) {
651 DSSERR("failed to enable display: no manager\n"); 703 DSSERR("failed to enable display: no output/manager\n");
652 r = -ENODEV; 704 r = -ENODEV;
653 goto err0; 705 goto err0;
654 } 706 }
655 707
656 hdmi.ip_data.hpd_gpio = priv->hpd_gpio; 708 hdmi.ip_data.hpd_gpio = hdmi.hpd_gpio;
657 709
658 r = omap_dss_start_device(dssdev); 710 r = omap_dss_start_device(dssdev);
659 if (r) { 711 if (r) {
@@ -661,26 +713,15 @@ int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev)
661 goto err0; 713 goto err0;
662 } 714 }
663 715
664 if (dssdev->platform_enable) {
665 r = dssdev->platform_enable(dssdev);
666 if (r) {
667 DSSERR("failed to enable GPIO's\n");
668 goto err1;
669 }
670 }
671
672 r = hdmi_power_on(dssdev); 716 r = hdmi_power_on(dssdev);
673 if (r) { 717 if (r) {
674 DSSERR("failed to power on device\n"); 718 DSSERR("failed to power on device\n");
675 goto err2; 719 goto err1;
676 } 720 }
677 721
678 mutex_unlock(&hdmi.lock); 722 mutex_unlock(&hdmi.lock);
679 return 0; 723 return 0;
680 724
681err2:
682 if (dssdev->platform_disable)
683 dssdev->platform_disable(dssdev);
684err1: 725err1:
685 omap_dss_stop_device(dssdev); 726 omap_dss_stop_device(dssdev);
686err0: 727err0:
@@ -696,9 +737,6 @@ void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev)
696 737
697 hdmi_power_off(dssdev); 738 hdmi_power_off(dssdev);
698 739
699 if (dssdev->platform_disable)
700 dssdev->platform_disable(dssdev);
701
702 omap_dss_stop_device(dssdev); 740 omap_dss_stop_device(dssdev);
703 741
704 mutex_unlock(&hdmi.lock); 742 mutex_unlock(&hdmi.lock);
@@ -869,10 +907,14 @@ int hdmi_audio_config(struct omap_dss_audio *audio)
869 907
870#endif 908#endif
871 909
872static void __init hdmi_probe_pdata(struct platform_device *pdev) 910static struct omap_dss_device * __init hdmi_find_dssdev(struct platform_device *pdev)
873{ 911{
874 struct omap_dss_board_info *pdata = pdev->dev.platform_data; 912 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
875 int r, i; 913 const char *def_disp_name = dss_get_default_display_name();
914 struct omap_dss_device *def_dssdev;
915 int i;
916
917 def_dssdev = NULL;
876 918
877 for (i = 0; i < pdata->num_devices; ++i) { 919 for (i = 0; i < pdata->num_devices; ++i) {
878 struct omap_dss_device *dssdev = pdata->devices[i]; 920 struct omap_dss_device *dssdev = pdata->devices[i];
@@ -880,17 +922,76 @@ static void __init hdmi_probe_pdata(struct platform_device *pdev)
880 if (dssdev->type != OMAP_DISPLAY_TYPE_HDMI) 922 if (dssdev->type != OMAP_DISPLAY_TYPE_HDMI)
881 continue; 923 continue;
882 924
883 r = hdmi_init_display(dssdev); 925 if (def_dssdev == NULL)
884 if (r) { 926 def_dssdev = dssdev;
885 DSSERR("device %s init failed: %d\n", dssdev->name, r); 927
886 continue; 928 if (def_disp_name != NULL &&
929 strcmp(dssdev->name, def_disp_name) == 0) {
930 def_dssdev = dssdev;
931 break;
887 } 932 }
933 }
888 934
889 r = omap_dss_register_device(dssdev, &pdev->dev, i); 935 return def_dssdev;
890 if (r) 936}
891 DSSERR("device %s register failed: %d\n", 937
892 dssdev->name, r); 938static void __init hdmi_probe_pdata(struct platform_device *pdev)
939{
940 struct omap_dss_device *plat_dssdev;
941 struct omap_dss_device *dssdev;
942 struct omap_dss_hdmi_data *priv;
943 int r;
944
945 plat_dssdev = hdmi_find_dssdev(pdev);
946
947 if (!plat_dssdev)
948 return;
949
950 dssdev = dss_alloc_and_init_device(&pdev->dev);
951 if (!dssdev)
952 return;
953
954 dss_copy_device_pdata(dssdev, plat_dssdev);
955
956 priv = dssdev->data;
957
958 hdmi.ct_cp_hpd_gpio = priv->ct_cp_hpd_gpio;
959 hdmi.ls_oe_gpio = priv->ls_oe_gpio;
960 hdmi.hpd_gpio = priv->hpd_gpio;
961
962 dssdev->channel = OMAP_DSS_CHANNEL_DIGIT;
963
964 r = hdmi_init_display(dssdev);
965 if (r) {
966 DSSERR("device %s init failed: %d\n", dssdev->name, r);
967 dss_put_device(dssdev);
968 return;
893 } 969 }
970
971 r = dss_add_device(dssdev);
972 if (r) {
973 DSSERR("device %s register failed: %d\n", dssdev->name, r);
974 dss_put_device(dssdev);
975 return;
976 }
977}
978
979static void __init hdmi_init_output(struct platform_device *pdev)
980{
981 struct omap_dss_output *out = &hdmi.output;
982
983 out->pdev = pdev;
984 out->id = OMAP_DSS_OUTPUT_HDMI;
985 out->type = OMAP_DISPLAY_TYPE_HDMI;
986
987 dss_register_output(out);
988}
989
990static void __exit hdmi_uninit_output(struct platform_device *pdev)
991{
992 struct omap_dss_output *out = &hdmi.output;
993
994 dss_unregister_output(out);
894} 995}
895 996
896/* HDMI HW IP initialisation */ 997/* HDMI HW IP initialisation */
@@ -929,23 +1030,37 @@ static int __init omapdss_hdmihw_probe(struct platform_device *pdev)
929 hdmi.ip_data.core_av_offset = HDMI_CORE_AV; 1030 hdmi.ip_data.core_av_offset = HDMI_CORE_AV;
930 hdmi.ip_data.pll_offset = HDMI_PLLCTRL; 1031 hdmi.ip_data.pll_offset = HDMI_PLLCTRL;
931 hdmi.ip_data.phy_offset = HDMI_PHY; 1032 hdmi.ip_data.phy_offset = HDMI_PHY;
1033
932 mutex_init(&hdmi.ip_data.lock); 1034 mutex_init(&hdmi.ip_data.lock);
933 1035
934 hdmi_panel_init(); 1036 hdmi_panel_init();
935 1037
936 dss_debugfs_create_file("hdmi", hdmi_dump_regs); 1038 dss_debugfs_create_file("hdmi", hdmi_dump_regs);
937 1039
1040 hdmi_init_output(pdev);
1041
938 hdmi_probe_pdata(pdev); 1042 hdmi_probe_pdata(pdev);
939 1043
940 return 0; 1044 return 0;
941} 1045}
942 1046
1047static int __exit hdmi_remove_child(struct device *dev, void *data)
1048{
1049 struct omap_dss_device *dssdev = to_dss_device(dev);
1050 hdmi_uninit_display(dssdev);
1051 return 0;
1052}
1053
943static int __exit omapdss_hdmihw_remove(struct platform_device *pdev) 1054static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
944{ 1055{
945 omap_dss_unregister_child_devices(&pdev->dev); 1056 device_for_each_child(&pdev->dev, NULL, hdmi_remove_child);
1057
1058 dss_unregister_child_devices(&pdev->dev);
946 1059
947 hdmi_panel_exit(); 1060 hdmi_panel_exit();
948 1061
1062 hdmi_uninit_output(pdev);
1063
949 pm_runtime_disable(&pdev->dev); 1064 pm_runtime_disable(&pdev->dev);
950 1065
951 hdmi_put_clocks(); 1066 hdmi_put_clocks();
diff --git a/drivers/video/omap2/dss/hdmi_panel.c b/drivers/video/omap2/dss/hdmi_panel.c
index e10844faadf9..69fb115bab32 100644
--- a/drivers/video/omap2/dss/hdmi_panel.c
+++ b/drivers/video/omap2/dss/hdmi_panel.c
@@ -41,17 +41,34 @@ static struct {
41 41
42static int hdmi_panel_probe(struct omap_dss_device *dssdev) 42static int hdmi_panel_probe(struct omap_dss_device *dssdev)
43{ 43{
44 /* Initialize default timings to VGA in DVI mode */
45 const struct omap_video_timings default_timings = {
46 .x_res = 640,
47 .y_res = 480,
48 .pixel_clock = 25175,
49 .hsw = 96,
50 .hfp = 16,
51 .hbp = 48,
52 .vsw = 2,
53 .vfp = 11,
54 .vbp = 31,
55
56 .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
57 .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
58
59 .interlace = false,
60 };
61
44 DSSDBG("ENTER hdmi_panel_probe\n"); 62 DSSDBG("ENTER hdmi_panel_probe\n");
45 63
46 dssdev->panel.timings = (struct omap_video_timings) 64 dssdev->panel.timings = default_timings;
47 { 640, 480, 25175, 96, 16, 48, 2, 11, 31,
48 OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
49 false,
50 };
51 65
52 DSSDBG("hdmi_panel_probe x_res= %d y_res = %d\n", 66 DSSDBG("hdmi_panel_probe x_res= %d y_res = %d\n",
53 dssdev->panel.timings.x_res, 67 dssdev->panel.timings.x_res,
54 dssdev->panel.timings.y_res); 68 dssdev->panel.timings.y_res);
69
70 omapdss_hdmi_display_set_timing(dssdev, &dssdev->panel.timings);
71
55 return 0; 72 return 0;
56} 73}
57 74
@@ -228,6 +245,8 @@ static int hdmi_panel_enable(struct omap_dss_device *dssdev)
228 goto err; 245 goto err;
229 } 246 }
230 247
248 omapdss_hdmi_display_set_timing(dssdev, &dssdev->panel.timings);
249
231 r = omapdss_hdmi_display_enable(dssdev); 250 r = omapdss_hdmi_display_enable(dssdev);
232 if (r) { 251 if (r) {
233 DSSERR("failed to power on\n"); 252 DSSERR("failed to power on\n");
@@ -336,8 +355,8 @@ static void hdmi_set_timings(struct omap_dss_device *dssdev,
336 */ 355 */
337 hdmi_panel_audio_disable(dssdev); 356 hdmi_panel_audio_disable(dssdev);
338 357
358 omapdss_hdmi_display_set_timing(dssdev, timings);
339 dssdev->panel.timings = *timings; 359 dssdev->panel.timings = *timings;
340 omapdss_hdmi_display_set_timing(dssdev);
341 360
342 mutex_unlock(&hdmi.lock); 361 mutex_unlock(&hdmi.lock);
343} 362}
diff --git a/drivers/video/omap2/dss/manager-sysfs.c b/drivers/video/omap2/dss/manager-sysfs.c
new file mode 100644
index 000000000000..9a2fb59b6f89
--- /dev/null
+++ b/drivers/video/omap2/dss/manager-sysfs.c
@@ -0,0 +1,512 @@
1/*
2 * Copyright (C) 2009 Nokia Corporation
3 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
4 *
5 * Some code and ideas taken from drivers/video/omap/ driver
6 * by Imre Deak.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published by
10 * the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#define DSS_SUBSYS_NAME "MANAGER"
22
23#include <linux/kernel.h>
24#include <linux/slab.h>
25#include <linux/module.h>
26#include <linux/platform_device.h>
27#include <linux/jiffies.h>
28
29#include <video/omapdss.h>
30
31#include "dss.h"
32#include "dss_features.h"
33
34static ssize_t manager_name_show(struct omap_overlay_manager *mgr, char *buf)
35{
36 return snprintf(buf, PAGE_SIZE, "%s\n", mgr->name);
37}
38
39static ssize_t manager_display_show(struct omap_overlay_manager *mgr, char *buf)
40{
41 struct omap_dss_device *dssdev = mgr->get_device(mgr);
42
43 return snprintf(buf, PAGE_SIZE, "%s\n", dssdev ?
44 dssdev->name : "<none>");
45}
46
47static ssize_t manager_display_store(struct omap_overlay_manager *mgr,
48 const char *buf, size_t size)
49{
50 int r = 0;
51 size_t len = size;
52 struct omap_dss_device *dssdev = NULL;
53
54 int match(struct omap_dss_device *dssdev, void *data)
55 {
56 const char *str = data;
57 return sysfs_streq(dssdev->name, str);
58 }
59
60 if (buf[size-1] == '\n')
61 --len;
62
63 if (len > 0)
64 dssdev = omap_dss_find_device((void *)buf, match);
65
66 if (len > 0 && dssdev == NULL)
67 return -EINVAL;
68
69 if (dssdev)
70 DSSDBG("display %s found\n", dssdev->name);
71
72 if (mgr->output) {
73 r = mgr->unset_output(mgr);
74 if (r) {
75 DSSERR("failed to unset current output\n");
76 goto put_device;
77 }
78 }
79
80 if (dssdev) {
81 struct omap_dss_output *out = dssdev->output;
82
83 /*
84 * a registered device should have an output connected to it
85 * already
86 */
87 if (!out) {
88 DSSERR("device has no output connected to it\n");
89 goto put_device;
90 }
91
92 r = mgr->set_output(mgr, out);
93 if (r) {
94 DSSERR("failed to set manager output\n");
95 goto put_device;
96 }
97
98 r = mgr->apply(mgr);
99 if (r) {
100 DSSERR("failed to apply dispc config\n");
101 goto put_device;
102 }
103 }
104
105put_device:
106 if (dssdev)
107 omap_dss_put_device(dssdev);
108
109 return r ? r : size;
110}
111
/* sysfs "default_color" show: current default color, printed as hex. */
static ssize_t manager_default_color_show(struct omap_overlay_manager *mgr,
		char *buf)
{
	struct omap_overlay_manager_info info;

	mgr->get_manager_info(mgr, &info);

	return snprintf(buf, PAGE_SIZE, "%#x\n", info.default_color);
}
121
122static ssize_t manager_default_color_store(struct omap_overlay_manager *mgr,
123 const char *buf, size_t size)
124{
125 struct omap_overlay_manager_info info;
126 u32 color;
127 int r;
128
129 r = kstrtouint(buf, 0, &color);
130 if (r)
131 return r;
132
133 mgr->get_manager_info(mgr, &info);
134
135 info.default_color = color;
136
137 r = mgr->set_manager_info(mgr, &info);
138 if (r)
139 return r;
140
141 r = mgr->apply(mgr);
142 if (r)
143 return r;
144
145 return size;
146}
147
/* Names for enum omap_dss_trans_key_type; indices must match the enum. */
static const char *trans_key_type_str[] = {
	"gfx-destination",
	"video-source",
};
152
/* sysfs "trans_key_type" show: current transparency key type as a string. */
static ssize_t manager_trans_key_type_show(struct omap_overlay_manager *mgr,
		char *buf)
{
	enum omap_dss_trans_key_type key_type;
	struct omap_overlay_manager_info info;

	mgr->get_manager_info(mgr, &info);

	key_type = info.trans_key_type;
	/* info comes from the driver itself, so an out-of-range type is a bug */
	BUG_ON(key_type >= ARRAY_SIZE(trans_key_type_str));

	return snprintf(buf, PAGE_SIZE, "%s\n", trans_key_type_str[key_type]);
}
166
167static ssize_t manager_trans_key_type_store(struct omap_overlay_manager *mgr,
168 const char *buf, size_t size)
169{
170 enum omap_dss_trans_key_type key_type;
171 struct omap_overlay_manager_info info;
172 int r;
173
174 for (key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
175 key_type < ARRAY_SIZE(trans_key_type_str); key_type++) {
176 if (sysfs_streq(buf, trans_key_type_str[key_type]))
177 break;
178 }
179
180 if (key_type == ARRAY_SIZE(trans_key_type_str))
181 return -EINVAL;
182
183 mgr->get_manager_info(mgr, &info);
184
185 info.trans_key_type = key_type;
186
187 r = mgr->set_manager_info(mgr, &info);
188 if (r)
189 return r;
190
191 r = mgr->apply(mgr);
192 if (r)
193 return r;
194
195 return size;
196}
197
/* sysfs "trans_key_value" show: current transparency key color, as hex. */
static ssize_t manager_trans_key_value_show(struct omap_overlay_manager *mgr,
		char *buf)
{
	struct omap_overlay_manager_info info;

	mgr->get_manager_info(mgr, &info);

	return snprintf(buf, PAGE_SIZE, "%#x\n", info.trans_key);
}
207
/*
 * sysfs "trans_key_value" store: parse a color value (any base kstrtouint
 * accepts), set it as the transparency key and apply the configuration.
 * Returns @size on success, a negative errno on parse or apply failure.
 */
static ssize_t manager_trans_key_value_store(struct omap_overlay_manager *mgr,
		const char *buf, size_t size)
{
	struct omap_overlay_manager_info info;
	u32 key_value;
	int r;

	r = kstrtouint(buf, 0, &key_value);
	if (r)
		return r;

	mgr->get_manager_info(mgr, &info);

	info.trans_key = key_value;

	r = mgr->set_manager_info(mgr, &info);
	if (r)
		return r;

	r = mgr->apply(mgr);
	if (r)
		return r;

	return size;
}
233
/* sysfs "trans_key_enabled" show: 1 if transparency keying is on, else 0. */
static ssize_t manager_trans_key_enabled_show(struct omap_overlay_manager *mgr,
		char *buf)
{
	struct omap_overlay_manager_info info;

	mgr->get_manager_info(mgr, &info);

	return snprintf(buf, PAGE_SIZE, "%d\n", info.trans_enabled);
}
243
/*
 * sysfs "trans_key_enabled" store: accepts the usual boolean spellings
 * (strtobool: "0"/"1"/"y"/"n"/...), toggles transparency keying and applies.
 */
static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr,
		const char *buf, size_t size)
{
	struct omap_overlay_manager_info info;
	bool enable;
	int r;

	r = strtobool(buf, &enable);
	if (r)
		return r;

	mgr->get_manager_info(mgr, &info);

	info.trans_enabled = enable;

	r = mgr->set_manager_info(mgr, &info);
	if (r)
		return r;

	r = mgr->apply(mgr);
	if (r)
		return r;

	return size;
}
269
/*
 * sysfs "alpha_blending_enabled" show.  This attribute is only meaningful on
 * hardware with fixed-zorder alpha (FEAT_ALPHA_FIXED_ZORDER); warn otherwise.
 */
static ssize_t manager_alpha_blending_enabled_show(
		struct omap_overlay_manager *mgr, char *buf)
{
	struct omap_overlay_manager_info info;

	mgr->get_manager_info(mgr, &info);

	WARN_ON(!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER));

	return snprintf(buf, PAGE_SIZE, "%d\n",
		info.partial_alpha_enabled);
}
282
/*
 * sysfs "alpha_blending_enabled" store: boolean input (strtobool), toggles
 * partial alpha and applies.  Warns when the hardware lacks
 * FEAT_ALPHA_FIXED_ZORDER but still performs the write.
 */
static ssize_t manager_alpha_blending_enabled_store(
		struct omap_overlay_manager *mgr,
		const char *buf, size_t size)
{
	struct omap_overlay_manager_info info;
	bool enable;
	int r;

	WARN_ON(!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER));

	r = strtobool(buf, &enable);
	if (r)
		return r;

	mgr->get_manager_info(mgr, &info);

	info.partial_alpha_enabled = enable;

	r = mgr->set_manager_info(mgr, &info);
	if (r)
		return r;

	r = mgr->apply(mgr);
	if (r)
		return r;

	return size;
}
311
/* sysfs "cpr_enable" show: 1 if color phase rotation is enabled, else 0. */
static ssize_t manager_cpr_enable_show(struct omap_overlay_manager *mgr,
		char *buf)
{
	struct omap_overlay_manager_info info;

	mgr->get_manager_info(mgr, &info);

	return snprintf(buf, PAGE_SIZE, "%d\n", info.cpr_enable);
}
321
322static ssize_t manager_cpr_enable_store(struct omap_overlay_manager *mgr,
323 const char *buf, size_t size)
324{
325 struct omap_overlay_manager_info info;
326 int r;
327 bool enable;
328
329 if (!dss_has_feature(FEAT_CPR))
330 return -ENODEV;
331
332 r = strtobool(buf, &enable);
333 if (r)
334 return r;
335
336 mgr->get_manager_info(mgr, &info);
337
338 if (info.cpr_enable == enable)
339 return size;
340
341 info.cpr_enable = enable;
342
343 r = mgr->set_manager_info(mgr, &info);
344 if (r)
345 return r;
346
347 r = mgr->apply(mgr);
348 if (r)
349 return r;
350
351 return size;
352}
353
354static ssize_t manager_cpr_coef_show(struct omap_overlay_manager *mgr,
355 char *buf)
356{
357 struct omap_overlay_manager_info info;
358
359 mgr->get_manager_info(mgr, &info);
360
361 return snprintf(buf, PAGE_SIZE,
362 "%d %d %d %d %d %d %d %d %d\n",
363 info.cpr_coefs.rr,
364 info.cpr_coefs.rg,
365 info.cpr_coefs.rb,
366 info.cpr_coefs.gr,
367 info.cpr_coefs.gg,
368 info.cpr_coefs.gb,
369 info.cpr_coefs.br,
370 info.cpr_coefs.bg,
371 info.cpr_coefs.bb);
372}
373
374static ssize_t manager_cpr_coef_store(struct omap_overlay_manager *mgr,
375 const char *buf, size_t size)
376{
377 struct omap_overlay_manager_info info;
378 struct omap_dss_cpr_coefs coefs;
379 int r, i;
380 s16 *arr;
381
382 if (!dss_has_feature(FEAT_CPR))
383 return -ENODEV;
384
385 if (sscanf(buf, "%hd %hd %hd %hd %hd %hd %hd %hd %hd",
386 &coefs.rr, &coefs.rg, &coefs.rb,
387 &coefs.gr, &coefs.gg, &coefs.gb,
388 &coefs.br, &coefs.bg, &coefs.bb) != 9)
389 return -EINVAL;
390
391 arr = (s16[]){ coefs.rr, coefs.rg, coefs.rb,
392 coefs.gr, coefs.gg, coefs.gb,
393 coefs.br, coefs.bg, coefs.bb };
394
395 for (i = 0; i < 9; ++i) {
396 if (arr[i] < -512 || arr[i] > 511)
397 return -EINVAL;
398 }
399
400 mgr->get_manager_info(mgr, &info);
401
402 info.cpr_coefs = coefs;
403
404 r = mgr->set_manager_info(mgr, &info);
405 if (r)
406 return r;
407
408 r = mgr->apply(mgr);
409 if (r)
410 return r;
411
412 return size;
413}
414
/* Glue between a raw sysfs attribute and manager-typed show/store callbacks. */
struct manager_attribute {
	struct attribute attr;
	ssize_t (*show)(struct omap_overlay_manager *, char *);
	ssize_t (*store)(struct omap_overlay_manager *, const char *, size_t);
};

/* Declare a manager_attribute named manager_attr_<_name>. */
#define MANAGER_ATTR(_name, _mode, _show, _store) \
	struct manager_attribute manager_attr_##_name = \
	__ATTR(_name, _mode, _show, _store)

static MANAGER_ATTR(name, S_IRUGO, manager_name_show, NULL);
static MANAGER_ATTR(display, S_IRUGO|S_IWUSR,
		manager_display_show, manager_display_store);
static MANAGER_ATTR(default_color, S_IRUGO|S_IWUSR,
		manager_default_color_show, manager_default_color_store);
static MANAGER_ATTR(trans_key_type, S_IRUGO|S_IWUSR,
		manager_trans_key_type_show, manager_trans_key_type_store);
static MANAGER_ATTR(trans_key_value, S_IRUGO|S_IWUSR,
		manager_trans_key_value_show, manager_trans_key_value_store);
static MANAGER_ATTR(trans_key_enabled, S_IRUGO|S_IWUSR,
		manager_trans_key_enabled_show,
		manager_trans_key_enabled_store);
static MANAGER_ATTR(alpha_blending_enabled, S_IRUGO|S_IWUSR,
		manager_alpha_blending_enabled_show,
		manager_alpha_blending_enabled_store);
static MANAGER_ATTR(cpr_enable, S_IRUGO|S_IWUSR,
		manager_cpr_enable_show,
		manager_cpr_enable_store);
static MANAGER_ATTR(cpr_coef, S_IRUGO|S_IWUSR,
		manager_cpr_coef_show,
		manager_cpr_coef_store);


/* Attribute set registered for every manager kobject; NULL-terminated. */
static struct attribute *manager_sysfs_attrs[] = {
	&manager_attr_name.attr,
	&manager_attr_display.attr,
	&manager_attr_default_color.attr,
	&manager_attr_trans_key_type.attr,
	&manager_attr_trans_key_value.attr,
	&manager_attr_trans_key_enabled.attr,
	&manager_attr_alpha_blending_enabled.attr,
	&manager_attr_cpr_enable.attr,
	&manager_attr_cpr_coef.attr,
	NULL
};
460
461static ssize_t manager_attr_show(struct kobject *kobj, struct attribute *attr,
462 char *buf)
463{
464 struct omap_overlay_manager *manager;
465 struct manager_attribute *manager_attr;
466
467 manager = container_of(kobj, struct omap_overlay_manager, kobj);
468 manager_attr = container_of(attr, struct manager_attribute, attr);
469
470 if (!manager_attr->show)
471 return -ENOENT;
472
473 return manager_attr->show(manager, buf);
474}
475
476static ssize_t manager_attr_store(struct kobject *kobj, struct attribute *attr,
477 const char *buf, size_t size)
478{
479 struct omap_overlay_manager *manager;
480 struct manager_attribute *manager_attr;
481
482 manager = container_of(kobj, struct omap_overlay_manager, kobj);
483 manager_attr = container_of(attr, struct manager_attribute, attr);
484
485 if (!manager_attr->store)
486 return -ENOENT;
487
488 return manager_attr->store(manager, buf, size);
489}
490
/* sysfs dispatch table for manager kobjects. */
static const struct sysfs_ops manager_sysfs_ops = {
	.show = manager_attr_show,
	.store = manager_attr_store,
};

/* kobject type: attaches the attribute set to every registered manager. */
static struct kobj_type manager_ktype = {
	.sysfs_ops = &manager_sysfs_ops,
	.default_attrs = manager_sysfs_attrs,
};
500
/*
 * Register the manager's kobject as "manager<id>" under the DSS platform
 * device, creating its sysfs attribute files.  Returns 0 or a negative errno.
 */
int dss_manager_kobj_init(struct omap_overlay_manager *mgr,
		struct platform_device *pdev)
{
	return kobject_init_and_add(&mgr->kobj, &manager_ktype,
			&pdev->dev.kobj, "manager%d", mgr->id);
}
507
/* Remove the manager's sysfs entry and drop the kobject reference. */
void dss_manager_kobj_uninit(struct omap_overlay_manager *mgr)
{
	kobject_del(&mgr->kobj);
	kobject_put(&mgr->kobj);
}
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 53710fadc82d..c54d2f620ce3 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -36,463 +36,15 @@
36static int num_managers; 36static int num_managers;
37static struct omap_overlay_manager *managers; 37static struct omap_overlay_manager *managers;
38 38
39static ssize_t manager_name_show(struct omap_overlay_manager *mgr, char *buf) 39static inline struct omap_dss_device *dss_mgr_get_device(struct omap_overlay_manager *mgr)
40{ 40{
41 return snprintf(buf, PAGE_SIZE, "%s\n", mgr->name); 41 return mgr->output ? mgr->output->device : NULL;
42} 42}
43 43
44static ssize_t manager_display_show(struct omap_overlay_manager *mgr, char *buf)
45{
46 return snprintf(buf, PAGE_SIZE, "%s\n",
47 mgr->device ? mgr->device->name : "<none>");
48}
49
50static ssize_t manager_display_store(struct omap_overlay_manager *mgr,
51 const char *buf, size_t size)
52{
53 int r = 0;
54 size_t len = size;
55 struct omap_dss_device *dssdev = NULL;
56
57 int match(struct omap_dss_device *dssdev, void *data)
58 {
59 const char *str = data;
60 return sysfs_streq(dssdev->name, str);
61 }
62
63 if (buf[size-1] == '\n')
64 --len;
65
66 if (len > 0)
67 dssdev = omap_dss_find_device((void *)buf, match);
68
69 if (len > 0 && dssdev == NULL)
70 return -EINVAL;
71
72 if (dssdev)
73 DSSDBG("display %s found\n", dssdev->name);
74
75 if (mgr->device) {
76 r = mgr->unset_device(mgr);
77 if (r) {
78 DSSERR("failed to unset display\n");
79 goto put_device;
80 }
81 }
82
83 if (dssdev) {
84 r = mgr->set_device(mgr, dssdev);
85 if (r) {
86 DSSERR("failed to set manager\n");
87 goto put_device;
88 }
89
90 r = mgr->apply(mgr);
91 if (r) {
92 DSSERR("failed to apply dispc config\n");
93 goto put_device;
94 }
95 }
96
97put_device:
98 if (dssdev)
99 omap_dss_put_device(dssdev);
100
101 return r ? r : size;
102}
103
104static ssize_t manager_default_color_show(struct omap_overlay_manager *mgr,
105 char *buf)
106{
107 struct omap_overlay_manager_info info;
108
109 mgr->get_manager_info(mgr, &info);
110
111 return snprintf(buf, PAGE_SIZE, "%#x\n", info.default_color);
112}
113
114static ssize_t manager_default_color_store(struct omap_overlay_manager *mgr,
115 const char *buf, size_t size)
116{
117 struct omap_overlay_manager_info info;
118 u32 color;
119 int r;
120
121 r = kstrtouint(buf, 0, &color);
122 if (r)
123 return r;
124
125 mgr->get_manager_info(mgr, &info);
126
127 info.default_color = color;
128
129 r = mgr->set_manager_info(mgr, &info);
130 if (r)
131 return r;
132
133 r = mgr->apply(mgr);
134 if (r)
135 return r;
136
137 return size;
138}
139
140static const char *trans_key_type_str[] = {
141 "gfx-destination",
142 "video-source",
143};
144
145static ssize_t manager_trans_key_type_show(struct omap_overlay_manager *mgr,
146 char *buf)
147{
148 enum omap_dss_trans_key_type key_type;
149 struct omap_overlay_manager_info info;
150
151 mgr->get_manager_info(mgr, &info);
152
153 key_type = info.trans_key_type;
154 BUG_ON(key_type >= ARRAY_SIZE(trans_key_type_str));
155
156 return snprintf(buf, PAGE_SIZE, "%s\n", trans_key_type_str[key_type]);
157}
158
159static ssize_t manager_trans_key_type_store(struct omap_overlay_manager *mgr,
160 const char *buf, size_t size)
161{
162 enum omap_dss_trans_key_type key_type;
163 struct omap_overlay_manager_info info;
164 int r;
165
166 for (key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
167 key_type < ARRAY_SIZE(trans_key_type_str); key_type++) {
168 if (sysfs_streq(buf, trans_key_type_str[key_type]))
169 break;
170 }
171
172 if (key_type == ARRAY_SIZE(trans_key_type_str))
173 return -EINVAL;
174
175 mgr->get_manager_info(mgr, &info);
176
177 info.trans_key_type = key_type;
178
179 r = mgr->set_manager_info(mgr, &info);
180 if (r)
181 return r;
182
183 r = mgr->apply(mgr);
184 if (r)
185 return r;
186
187 return size;
188}
189
190static ssize_t manager_trans_key_value_show(struct omap_overlay_manager *mgr,
191 char *buf)
192{
193 struct omap_overlay_manager_info info;
194
195 mgr->get_manager_info(mgr, &info);
196
197 return snprintf(buf, PAGE_SIZE, "%#x\n", info.trans_key);
198}
199
200static ssize_t manager_trans_key_value_store(struct omap_overlay_manager *mgr,
201 const char *buf, size_t size)
202{
203 struct omap_overlay_manager_info info;
204 u32 key_value;
205 int r;
206
207 r = kstrtouint(buf, 0, &key_value);
208 if (r)
209 return r;
210
211 mgr->get_manager_info(mgr, &info);
212
213 info.trans_key = key_value;
214
215 r = mgr->set_manager_info(mgr, &info);
216 if (r)
217 return r;
218
219 r = mgr->apply(mgr);
220 if (r)
221 return r;
222
223 return size;
224}
225
226static ssize_t manager_trans_key_enabled_show(struct omap_overlay_manager *mgr,
227 char *buf)
228{
229 struct omap_overlay_manager_info info;
230
231 mgr->get_manager_info(mgr, &info);
232
233 return snprintf(buf, PAGE_SIZE, "%d\n", info.trans_enabled);
234}
235
236static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr,
237 const char *buf, size_t size)
238{
239 struct omap_overlay_manager_info info;
240 bool enable;
241 int r;
242
243 r = strtobool(buf, &enable);
244 if (r)
245 return r;
246
247 mgr->get_manager_info(mgr, &info);
248
249 info.trans_enabled = enable;
250
251 r = mgr->set_manager_info(mgr, &info);
252 if (r)
253 return r;
254
255 r = mgr->apply(mgr);
256 if (r)
257 return r;
258
259 return size;
260}
261
262static ssize_t manager_alpha_blending_enabled_show(
263 struct omap_overlay_manager *mgr, char *buf)
264{
265 struct omap_overlay_manager_info info;
266
267 mgr->get_manager_info(mgr, &info);
268
269 WARN_ON(!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER));
270
271 return snprintf(buf, PAGE_SIZE, "%d\n",
272 info.partial_alpha_enabled);
273}
274
275static ssize_t manager_alpha_blending_enabled_store(
276 struct omap_overlay_manager *mgr,
277 const char *buf, size_t size)
278{
279 struct omap_overlay_manager_info info;
280 bool enable;
281 int r;
282
283 WARN_ON(!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER));
284
285 r = strtobool(buf, &enable);
286 if (r)
287 return r;
288
289 mgr->get_manager_info(mgr, &info);
290
291 info.partial_alpha_enabled = enable;
292
293 r = mgr->set_manager_info(mgr, &info);
294 if (r)
295 return r;
296
297 r = mgr->apply(mgr);
298 if (r)
299 return r;
300
301 return size;
302}
303
304static ssize_t manager_cpr_enable_show(struct omap_overlay_manager *mgr,
305 char *buf)
306{
307 struct omap_overlay_manager_info info;
308
309 mgr->get_manager_info(mgr, &info);
310
311 return snprintf(buf, PAGE_SIZE, "%d\n", info.cpr_enable);
312}
313
314static ssize_t manager_cpr_enable_store(struct omap_overlay_manager *mgr,
315 const char *buf, size_t size)
316{
317 struct omap_overlay_manager_info info;
318 int r;
319 bool enable;
320
321 if (!dss_has_feature(FEAT_CPR))
322 return -ENODEV;
323
324 r = strtobool(buf, &enable);
325 if (r)
326 return r;
327
328 mgr->get_manager_info(mgr, &info);
329
330 if (info.cpr_enable == enable)
331 return size;
332
333 info.cpr_enable = enable;
334
335 r = mgr->set_manager_info(mgr, &info);
336 if (r)
337 return r;
338
339 r = mgr->apply(mgr);
340 if (r)
341 return r;
342
343 return size;
344}
345
346static ssize_t manager_cpr_coef_show(struct omap_overlay_manager *mgr,
347 char *buf)
348{
349 struct omap_overlay_manager_info info;
350
351 mgr->get_manager_info(mgr, &info);
352
353 return snprintf(buf, PAGE_SIZE,
354 "%d %d %d %d %d %d %d %d %d\n",
355 info.cpr_coefs.rr,
356 info.cpr_coefs.rg,
357 info.cpr_coefs.rb,
358 info.cpr_coefs.gr,
359 info.cpr_coefs.gg,
360 info.cpr_coefs.gb,
361 info.cpr_coefs.br,
362 info.cpr_coefs.bg,
363 info.cpr_coefs.bb);
364}
365
366static ssize_t manager_cpr_coef_store(struct omap_overlay_manager *mgr,
367 const char *buf, size_t size)
368{
369 struct omap_overlay_manager_info info;
370 struct omap_dss_cpr_coefs coefs;
371 int r, i;
372 s16 *arr;
373
374 if (!dss_has_feature(FEAT_CPR))
375 return -ENODEV;
376
377 if (sscanf(buf, "%hd %hd %hd %hd %hd %hd %hd %hd %hd",
378 &coefs.rr, &coefs.rg, &coefs.rb,
379 &coefs.gr, &coefs.gg, &coefs.gb,
380 &coefs.br, &coefs.bg, &coefs.bb) != 9)
381 return -EINVAL;
382
383 arr = (s16[]){ coefs.rr, coefs.rg, coefs.rb,
384 coefs.gr, coefs.gg, coefs.gb,
385 coefs.br, coefs.bg, coefs.bb };
386
387 for (i = 0; i < 9; ++i) {
388 if (arr[i] < -512 || arr[i] > 511)
389 return -EINVAL;
390 }
391
392 mgr->get_manager_info(mgr, &info);
393
394 info.cpr_coefs = coefs;
395
396 r = mgr->set_manager_info(mgr, &info);
397 if (r)
398 return r;
399
400 r = mgr->apply(mgr);
401 if (r)
402 return r;
403
404 return size;
405}
406
407struct manager_attribute {
408 struct attribute attr;
409 ssize_t (*show)(struct omap_overlay_manager *, char *);
410 ssize_t (*store)(struct omap_overlay_manager *, const char *, size_t);
411};
412
413#define MANAGER_ATTR(_name, _mode, _show, _store) \
414 struct manager_attribute manager_attr_##_name = \
415 __ATTR(_name, _mode, _show, _store)
416
417static MANAGER_ATTR(name, S_IRUGO, manager_name_show, NULL);
418static MANAGER_ATTR(display, S_IRUGO|S_IWUSR,
419 manager_display_show, manager_display_store);
420static MANAGER_ATTR(default_color, S_IRUGO|S_IWUSR,
421 manager_default_color_show, manager_default_color_store);
422static MANAGER_ATTR(trans_key_type, S_IRUGO|S_IWUSR,
423 manager_trans_key_type_show, manager_trans_key_type_store);
424static MANAGER_ATTR(trans_key_value, S_IRUGO|S_IWUSR,
425 manager_trans_key_value_show, manager_trans_key_value_store);
426static MANAGER_ATTR(trans_key_enabled, S_IRUGO|S_IWUSR,
427 manager_trans_key_enabled_show,
428 manager_trans_key_enabled_store);
429static MANAGER_ATTR(alpha_blending_enabled, S_IRUGO|S_IWUSR,
430 manager_alpha_blending_enabled_show,
431 manager_alpha_blending_enabled_store);
432static MANAGER_ATTR(cpr_enable, S_IRUGO|S_IWUSR,
433 manager_cpr_enable_show,
434 manager_cpr_enable_store);
435static MANAGER_ATTR(cpr_coef, S_IRUGO|S_IWUSR,
436 manager_cpr_coef_show,
437 manager_cpr_coef_store);
438
439
440static struct attribute *manager_sysfs_attrs[] = {
441 &manager_attr_name.attr,
442 &manager_attr_display.attr,
443 &manager_attr_default_color.attr,
444 &manager_attr_trans_key_type.attr,
445 &manager_attr_trans_key_value.attr,
446 &manager_attr_trans_key_enabled.attr,
447 &manager_attr_alpha_blending_enabled.attr,
448 &manager_attr_cpr_enable.attr,
449 &manager_attr_cpr_coef.attr,
450 NULL
451};
452
453static ssize_t manager_attr_show(struct kobject *kobj, struct attribute *attr,
454 char *buf)
455{
456 struct omap_overlay_manager *manager;
457 struct manager_attribute *manager_attr;
458
459 manager = container_of(kobj, struct omap_overlay_manager, kobj);
460 manager_attr = container_of(attr, struct manager_attribute, attr);
461
462 if (!manager_attr->show)
463 return -ENOENT;
464
465 return manager_attr->show(manager, buf);
466}
467
468static ssize_t manager_attr_store(struct kobject *kobj, struct attribute *attr,
469 const char *buf, size_t size)
470{
471 struct omap_overlay_manager *manager;
472 struct manager_attribute *manager_attr;
473
474 manager = container_of(kobj, struct omap_overlay_manager, kobj);
475 manager_attr = container_of(attr, struct manager_attribute, attr);
476
477 if (!manager_attr->store)
478 return -ENOENT;
479
480 return manager_attr->store(manager, buf, size);
481}
482
483static const struct sysfs_ops manager_sysfs_ops = {
484 .show = manager_attr_show,
485 .store = manager_attr_store,
486};
487
488static struct kobj_type manager_ktype = {
489 .sysfs_ops = &manager_sysfs_ops,
490 .default_attrs = manager_sysfs_attrs,
491};
492
493static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr) 44static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
494{ 45{
495 unsigned long timeout = msecs_to_jiffies(500); 46 unsigned long timeout = msecs_to_jiffies(500);
47 struct omap_dss_device *dssdev = mgr->get_device(mgr);
496 u32 irq; 48 u32 irq;
497 int r; 49 int r;
498 50
@@ -500,9 +52,9 @@ static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
500 if (r) 52 if (r)
501 return r; 53 return r;
502 54
503 if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) 55 if (dssdev->type == OMAP_DISPLAY_TYPE_VENC)
504 irq = DISPC_IRQ_EVSYNC_ODD; 56 irq = DISPC_IRQ_EVSYNC_ODD;
505 else if (mgr->device->type == OMAP_DISPLAY_TYPE_HDMI) 57 else if (dssdev->type == OMAP_DISPLAY_TYPE_HDMI)
506 irq = DISPC_IRQ_EVSYNC_EVEN; 58 irq = DISPC_IRQ_EVSYNC_EVEN;
507 else 59 else
508 irq = dispc_mgr_get_vsync_irq(mgr->id); 60 irq = dispc_mgr_get_vsync_irq(mgr->id);
@@ -547,23 +99,24 @@ int dss_init_overlay_managers(struct platform_device *pdev)
547 break; 99 break;
548 } 100 }
549 101
550 mgr->set_device = &dss_mgr_set_device; 102 mgr->set_output = &dss_mgr_set_output;
551 mgr->unset_device = &dss_mgr_unset_device; 103 mgr->unset_output = &dss_mgr_unset_output;
552 mgr->apply = &omap_dss_mgr_apply; 104 mgr->apply = &omap_dss_mgr_apply;
553 mgr->set_manager_info = &dss_mgr_set_info; 105 mgr->set_manager_info = &dss_mgr_set_info;
554 mgr->get_manager_info = &dss_mgr_get_info; 106 mgr->get_manager_info = &dss_mgr_get_info;
555 mgr->wait_for_go = &dss_mgr_wait_for_go; 107 mgr->wait_for_go = &dss_mgr_wait_for_go;
556 mgr->wait_for_vsync = &dss_mgr_wait_for_vsync; 108 mgr->wait_for_vsync = &dss_mgr_wait_for_vsync;
109 mgr->get_device = &dss_mgr_get_device;
557 110
558 mgr->caps = 0; 111 mgr->caps = 0;
559 mgr->supported_displays = 112 mgr->supported_displays =
560 dss_feat_get_supported_displays(mgr->id); 113 dss_feat_get_supported_displays(mgr->id);
114 mgr->supported_outputs =
115 dss_feat_get_supported_outputs(mgr->id);
561 116
562 INIT_LIST_HEAD(&mgr->overlays); 117 INIT_LIST_HEAD(&mgr->overlays);
563 118
564 r = kobject_init_and_add(&mgr->kobj, &manager_ktype, 119 r = dss_manager_kobj_init(mgr, pdev);
565 &pdev->dev.kobj, "manager%d", i);
566
567 if (r) 120 if (r)
568 DSSERR("failed to create sysfs file\n"); 121 DSSERR("failed to create sysfs file\n");
569 } 122 }
@@ -577,9 +130,7 @@ void dss_uninit_overlay_managers(struct platform_device *pdev)
577 130
578 for (i = 0; i < num_managers; ++i) { 131 for (i = 0; i < num_managers; ++i) {
579 struct omap_overlay_manager *mgr = &managers[i]; 132 struct omap_overlay_manager *mgr = &managers[i];
580 133 dss_manager_kobj_uninit(mgr);
581 kobject_del(&mgr->kobj);
582 kobject_put(&mgr->kobj);
583 } 134 }
584 135
585 kfree(managers); 136 kfree(managers);
diff --git a/drivers/video/omap2/dss/output.c b/drivers/video/omap2/dss/output.c
new file mode 100644
index 000000000000..813f26682b7a
--- /dev/null
+++ b/drivers/video/omap2/dss/output.c
@@ -0,0 +1,148 @@
1/*
2 * Copyright (C) 2012 Texas Instruments Ltd
3 * Author: Archit Taneja <archit@ti.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/platform_device.h>
21#include <linux/slab.h>
22
23#include <video/omapdss.h>
24
25#include "dss.h"
26
27static LIST_HEAD(output_list);
28static DEFINE_MUTEX(output_lock);
29
30int omapdss_output_set_device(struct omap_dss_output *out,
31 struct omap_dss_device *dssdev)
32{
33 int r;
34
35 mutex_lock(&output_lock);
36
37 if (out->device) {
38 DSSERR("output already has device %s connected to it\n",
39 out->device->name);
40 r = -EINVAL;
41 goto err;
42 }
43
44 if (out->type != dssdev->type) {
45 DSSERR("output type and display type don't match\n");
46 r = -EINVAL;
47 goto err;
48 }
49
50 out->device = dssdev;
51 dssdev->output = out;
52
53 mutex_unlock(&output_lock);
54
55 return 0;
56err:
57 mutex_unlock(&output_lock);
58
59 return r;
60}
61EXPORT_SYMBOL(omapdss_output_set_device);
62
63int omapdss_output_unset_device(struct omap_dss_output *out)
64{
65 int r;
66
67 mutex_lock(&output_lock);
68
69 if (!out->device) {
70 DSSERR("output doesn't have a device connected to it\n");
71 r = -EINVAL;
72 goto err;
73 }
74
75 if (out->device->state != OMAP_DSS_DISPLAY_DISABLED) {
76 DSSERR("device %s is not disabled, cannot unset device\n",
77 out->device->name);
78 r = -EINVAL;
79 goto err;
80 }
81
82 out->device->output = NULL;
83 out->device = NULL;
84
85 mutex_unlock(&output_lock);
86
87 return 0;
88err:
89 mutex_unlock(&output_lock);
90
91 return r;
92}
93EXPORT_SYMBOL(omapdss_output_unset_device);
94
/* Add an output to the global list; no locking, callers run at init time. */
void dss_register_output(struct omap_dss_output *out)
{
	list_add_tail(&out->list, &output_list);
}
99
/* Remove an output from the global list. */
void dss_unregister_output(struct omap_dss_output *out)
{
	list_del(&out->list);
}
104
105struct omap_dss_output *omap_dss_get_output(enum omap_dss_output_id id)
106{
107 struct omap_dss_output *out;
108
109 list_for_each_entry(out, &output_list, list) {
110 if (out->id == id)
111 return out;
112 }
113
114 return NULL;
115}
116
117struct omap_dss_output *omapdss_get_output_from_dssdev(struct omap_dss_device *dssdev)
118{
119 struct omap_dss_output *out = NULL;
120 enum omap_dss_output_id id;
121
122 switch (dssdev->type) {
123 case OMAP_DISPLAY_TYPE_DPI:
124 out = omap_dss_get_output(OMAP_DSS_OUTPUT_DPI);
125 break;
126 case OMAP_DISPLAY_TYPE_DBI:
127 out = omap_dss_get_output(OMAP_DSS_OUTPUT_DBI);
128 break;
129 case OMAP_DISPLAY_TYPE_SDI:
130 out = omap_dss_get_output(OMAP_DSS_OUTPUT_SDI);
131 break;
132 case OMAP_DISPLAY_TYPE_VENC:
133 out = omap_dss_get_output(OMAP_DSS_OUTPUT_VENC);
134 break;
135 case OMAP_DISPLAY_TYPE_HDMI:
136 out = omap_dss_get_output(OMAP_DSS_OUTPUT_HDMI);
137 break;
138 case OMAP_DISPLAY_TYPE_DSI:
139 id = dssdev->phy.dsi.module == 0 ? OMAP_DSS_OUTPUT_DSI1 :
140 OMAP_DSS_OUTPUT_DSI2;
141 out = omap_dss_get_output(id);
142 break;
143 default:
144 break;
145 }
146
147 return out;
148}
diff --git a/drivers/video/omap2/dss/overlay-sysfs.c b/drivers/video/omap2/dss/overlay-sysfs.c
new file mode 100644
index 000000000000..4cc5ddebfb34
--- /dev/null
+++ b/drivers/video/omap2/dss/overlay-sysfs.c
@@ -0,0 +1,456 @@
1/*
2 * Copyright (C) 2009 Nokia Corporation
3 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
4 *
5 * Some code and ideas taken from drivers/video/omap/ driver
6 * by Imre Deak.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published by
10 * the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#define DSS_SUBSYS_NAME "OVERLAY"
22
23#include <linux/module.h>
24#include <linux/err.h>
25#include <linux/sysfs.h>
26#include <linux/kobject.h>
27#include <linux/platform_device.h>
28
29#include <video/omapdss.h>
30
31#include "dss.h"
32#include "dss_features.h"
33
34static ssize_t overlay_name_show(struct omap_overlay *ovl, char *buf)
35{
36 return snprintf(buf, PAGE_SIZE, "%s\n", ovl->name);
37}
38
39static ssize_t overlay_manager_show(struct omap_overlay *ovl, char *buf)
40{
41 return snprintf(buf, PAGE_SIZE, "%s\n",
42 ovl->manager ? ovl->manager->name : "<none>");
43}
44
45static ssize_t overlay_manager_store(struct omap_overlay *ovl, const char *buf,
46 size_t size)
47{
48 int i, r;
49 struct omap_overlay_manager *mgr = NULL;
50 struct omap_overlay_manager *old_mgr;
51 int len = size;
52
53 if (buf[size-1] == '\n')
54 --len;
55
56 if (len > 0) {
57 for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
58 mgr = omap_dss_get_overlay_manager(i);
59
60 if (sysfs_streq(buf, mgr->name))
61 break;
62
63 mgr = NULL;
64 }
65 }
66
67 if (len > 0 && mgr == NULL)
68 return -EINVAL;
69
70 if (mgr)
71 DSSDBG("manager %s found\n", mgr->name);
72
73 if (mgr == ovl->manager)
74 return size;
75
76 old_mgr = ovl->manager;
77
78 r = dispc_runtime_get();
79 if (r)
80 return r;
81
82 /* detach old manager */
83 if (old_mgr) {
84 r = ovl->unset_manager(ovl);
85 if (r) {
86 DSSERR("detach failed\n");
87 goto err;
88 }
89
90 r = old_mgr->apply(old_mgr);
91 if (r)
92 goto err;
93 }
94
95 if (mgr) {
96 r = ovl->set_manager(ovl, mgr);
97 if (r) {
98 DSSERR("Failed to attach overlay\n");
99 goto err;
100 }
101
102 r = mgr->apply(mgr);
103 if (r)
104 goto err;
105 }
106
107 dispc_runtime_put();
108
109 return size;
110
111err:
112 dispc_runtime_put();
113 return r;
114}
115
116static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf)
117{
118 struct omap_overlay_info info;
119
120 ovl->get_overlay_info(ovl, &info);
121
122 return snprintf(buf, PAGE_SIZE, "%d,%d\n",
123 info.width, info.height);
124}
125
126static ssize_t overlay_screen_width_show(struct omap_overlay *ovl, char *buf)
127{
128 struct omap_overlay_info info;
129
130 ovl->get_overlay_info(ovl, &info);
131
132 return snprintf(buf, PAGE_SIZE, "%d\n", info.screen_width);
133}
134
135static ssize_t overlay_position_show(struct omap_overlay *ovl, char *buf)
136{
137 struct omap_overlay_info info;
138
139 ovl->get_overlay_info(ovl, &info);
140
141 return snprintf(buf, PAGE_SIZE, "%d,%d\n",
142 info.pos_x, info.pos_y);
143}
144
145static ssize_t overlay_position_store(struct omap_overlay *ovl,
146 const char *buf, size_t size)
147{
148 int r;
149 char *last;
150 struct omap_overlay_info info;
151
152 ovl->get_overlay_info(ovl, &info);
153
154 info.pos_x = simple_strtoul(buf, &last, 10);
155 ++last;
156 if (last - buf >= size)
157 return -EINVAL;
158
159 info.pos_y = simple_strtoul(last, &last, 10);
160
161 r = ovl->set_overlay_info(ovl, &info);
162 if (r)
163 return r;
164
165 if (ovl->manager) {
166 r = ovl->manager->apply(ovl->manager);
167 if (r)
168 return r;
169 }
170
171 return size;
172}
173
174static ssize_t overlay_output_size_show(struct omap_overlay *ovl, char *buf)
175{
176 struct omap_overlay_info info;
177
178 ovl->get_overlay_info(ovl, &info);
179
180 return snprintf(buf, PAGE_SIZE, "%d,%d\n",
181 info.out_width, info.out_height);
182}
183
184static ssize_t overlay_output_size_store(struct omap_overlay *ovl,
185 const char *buf, size_t size)
186{
187 int r;
188 char *last;
189 struct omap_overlay_info info;
190
191 ovl->get_overlay_info(ovl, &info);
192
193 info.out_width = simple_strtoul(buf, &last, 10);
194 ++last;
195 if (last - buf >= size)
196 return -EINVAL;
197
198 info.out_height = simple_strtoul(last, &last, 10);
199
200 r = ovl->set_overlay_info(ovl, &info);
201 if (r)
202 return r;
203
204 if (ovl->manager) {
205 r = ovl->manager->apply(ovl->manager);
206 if (r)
207 return r;
208 }
209
210 return size;
211}
212
213static ssize_t overlay_enabled_show(struct omap_overlay *ovl, char *buf)
214{
215 return snprintf(buf, PAGE_SIZE, "%d\n", ovl->is_enabled(ovl));
216}
217
218static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf,
219 size_t size)
220{
221 int r;
222 bool enable;
223
224 r = strtobool(buf, &enable);
225 if (r)
226 return r;
227
228 if (enable)
229 r = ovl->enable(ovl);
230 else
231 r = ovl->disable(ovl);
232
233 if (r)
234 return r;
235
236 return size;
237}
238
239static ssize_t overlay_global_alpha_show(struct omap_overlay *ovl, char *buf)
240{
241 struct omap_overlay_info info;
242
243 ovl->get_overlay_info(ovl, &info);
244
245 return snprintf(buf, PAGE_SIZE, "%d\n",
246 info.global_alpha);
247}
248
249static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
250 const char *buf, size_t size)
251{
252 int r;
253 u8 alpha;
254 struct omap_overlay_info info;
255
256 if ((ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
257 return -ENODEV;
258
259 r = kstrtou8(buf, 0, &alpha);
260 if (r)
261 return r;
262
263 ovl->get_overlay_info(ovl, &info);
264
265 info.global_alpha = alpha;
266
267 r = ovl->set_overlay_info(ovl, &info);
268 if (r)
269 return r;
270
271 if (ovl->manager) {
272 r = ovl->manager->apply(ovl->manager);
273 if (r)
274 return r;
275 }
276
277 return size;
278}
279
280static ssize_t overlay_pre_mult_alpha_show(struct omap_overlay *ovl,
281 char *buf)
282{
283 struct omap_overlay_info info;
284
285 ovl->get_overlay_info(ovl, &info);
286
287 return snprintf(buf, PAGE_SIZE, "%d\n",
288 info.pre_mult_alpha);
289}
290
291static ssize_t overlay_pre_mult_alpha_store(struct omap_overlay *ovl,
292 const char *buf, size_t size)
293{
294 int r;
295 u8 alpha;
296 struct omap_overlay_info info;
297
298 if ((ovl->caps & OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA) == 0)
299 return -ENODEV;
300
301 r = kstrtou8(buf, 0, &alpha);
302 if (r)
303 return r;
304
305 ovl->get_overlay_info(ovl, &info);
306
307 info.pre_mult_alpha = alpha;
308
309 r = ovl->set_overlay_info(ovl, &info);
310 if (r)
311 return r;
312
313 if (ovl->manager) {
314 r = ovl->manager->apply(ovl->manager);
315 if (r)
316 return r;
317 }
318
319 return size;
320}
321
322static ssize_t overlay_zorder_show(struct omap_overlay *ovl, char *buf)
323{
324 struct omap_overlay_info info;
325
326 ovl->get_overlay_info(ovl, &info);
327
328 return snprintf(buf, PAGE_SIZE, "%d\n", info.zorder);
329}
330
331static ssize_t overlay_zorder_store(struct omap_overlay *ovl,
332 const char *buf, size_t size)
333{
334 int r;
335 u8 zorder;
336 struct omap_overlay_info info;
337
338 if ((ovl->caps & OMAP_DSS_OVL_CAP_ZORDER) == 0)
339 return -ENODEV;
340
341 r = kstrtou8(buf, 0, &zorder);
342 if (r)
343 return r;
344
345 ovl->get_overlay_info(ovl, &info);
346
347 info.zorder = zorder;
348
349 r = ovl->set_overlay_info(ovl, &info);
350 if (r)
351 return r;
352
353 if (ovl->manager) {
354 r = ovl->manager->apply(ovl->manager);
355 if (r)
356 return r;
357 }
358
359 return size;
360}
361
362struct overlay_attribute {
363 struct attribute attr;
364 ssize_t (*show)(struct omap_overlay *, char *);
365 ssize_t (*store)(struct omap_overlay *, const char *, size_t);
366};
367
368#define OVERLAY_ATTR(_name, _mode, _show, _store) \
369 struct overlay_attribute overlay_attr_##_name = \
370 __ATTR(_name, _mode, _show, _store)
371
372static OVERLAY_ATTR(name, S_IRUGO, overlay_name_show, NULL);
373static OVERLAY_ATTR(manager, S_IRUGO|S_IWUSR,
374 overlay_manager_show, overlay_manager_store);
375static OVERLAY_ATTR(input_size, S_IRUGO, overlay_input_size_show, NULL);
376static OVERLAY_ATTR(screen_width, S_IRUGO, overlay_screen_width_show, NULL);
377static OVERLAY_ATTR(position, S_IRUGO|S_IWUSR,
378 overlay_position_show, overlay_position_store);
379static OVERLAY_ATTR(output_size, S_IRUGO|S_IWUSR,
380 overlay_output_size_show, overlay_output_size_store);
381static OVERLAY_ATTR(enabled, S_IRUGO|S_IWUSR,
382 overlay_enabled_show, overlay_enabled_store);
383static OVERLAY_ATTR(global_alpha, S_IRUGO|S_IWUSR,
384 overlay_global_alpha_show, overlay_global_alpha_store);
385static OVERLAY_ATTR(pre_mult_alpha, S_IRUGO|S_IWUSR,
386 overlay_pre_mult_alpha_show,
387 overlay_pre_mult_alpha_store);
388static OVERLAY_ATTR(zorder, S_IRUGO|S_IWUSR,
389 overlay_zorder_show, overlay_zorder_store);
390
391static struct attribute *overlay_sysfs_attrs[] = {
392 &overlay_attr_name.attr,
393 &overlay_attr_manager.attr,
394 &overlay_attr_input_size.attr,
395 &overlay_attr_screen_width.attr,
396 &overlay_attr_position.attr,
397 &overlay_attr_output_size.attr,
398 &overlay_attr_enabled.attr,
399 &overlay_attr_global_alpha.attr,
400 &overlay_attr_pre_mult_alpha.attr,
401 &overlay_attr_zorder.attr,
402 NULL
403};
404
405static ssize_t overlay_attr_show(struct kobject *kobj, struct attribute *attr,
406 char *buf)
407{
408 struct omap_overlay *overlay;
409 struct overlay_attribute *overlay_attr;
410
411 overlay = container_of(kobj, struct omap_overlay, kobj);
412 overlay_attr = container_of(attr, struct overlay_attribute, attr);
413
414 if (!overlay_attr->show)
415 return -ENOENT;
416
417 return overlay_attr->show(overlay, buf);
418}
419
420static ssize_t overlay_attr_store(struct kobject *kobj, struct attribute *attr,
421 const char *buf, size_t size)
422{
423 struct omap_overlay *overlay;
424 struct overlay_attribute *overlay_attr;
425
426 overlay = container_of(kobj, struct omap_overlay, kobj);
427 overlay_attr = container_of(attr, struct overlay_attribute, attr);
428
429 if (!overlay_attr->store)
430 return -ENOENT;
431
432 return overlay_attr->store(overlay, buf, size);
433}
434
435static const struct sysfs_ops overlay_sysfs_ops = {
436 .show = overlay_attr_show,
437 .store = overlay_attr_store,
438};
439
440static struct kobj_type overlay_ktype = {
441 .sysfs_ops = &overlay_sysfs_ops,
442 .default_attrs = overlay_sysfs_attrs,
443};
444
445int dss_overlay_kobj_init(struct omap_overlay *ovl,
446 struct platform_device *pdev)
447{
448 return kobject_init_and_add(&ovl->kobj, &overlay_ktype,
449 &pdev->dev.kobj, "overlay%d", ovl->id);
450}
451
452void dss_overlay_kobj_uninit(struct omap_overlay *ovl)
453{
454 kobject_del(&ovl->kobj);
455 kobject_put(&ovl->kobj);
456}
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index 952c6fad9a81..45f4994bc6b0 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -26,13 +26,11 @@
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/err.h> 27#include <linux/err.h>
28#include <linux/sysfs.h> 28#include <linux/sysfs.h>
29#include <linux/kobject.h>
30#include <linux/platform_device.h> 29#include <linux/platform_device.h>
31#include <linux/delay.h> 30#include <linux/delay.h>
32#include <linux/slab.h> 31#include <linux/slab.h>
33 32
34#include <video/omapdss.h> 33#include <video/omapdss.h>
35#include <plat/cpu.h>
36 34
37#include "dss.h" 35#include "dss.h"
38#include "dss_features.h" 36#include "dss_features.h"
@@ -40,417 +38,13 @@
40static int num_overlays; 38static int num_overlays;
41static struct omap_overlay *overlays; 39static struct omap_overlay *overlays;
42 40
43static ssize_t overlay_name_show(struct omap_overlay *ovl, char *buf) 41static inline struct omap_dss_device *dss_ovl_get_device(struct omap_overlay *ovl)
44{ 42{
45 return snprintf(buf, PAGE_SIZE, "%s\n", ovl->name); 43 return ovl->manager ?
44 (ovl->manager->output ? ovl->manager->output->device : NULL) :
45 NULL;
46} 46}
47 47
48static ssize_t overlay_manager_show(struct omap_overlay *ovl, char *buf)
49{
50 return snprintf(buf, PAGE_SIZE, "%s\n",
51 ovl->manager ? ovl->manager->name : "<none>");
52}
53
54static ssize_t overlay_manager_store(struct omap_overlay *ovl, const char *buf,
55 size_t size)
56{
57 int i, r;
58 struct omap_overlay_manager *mgr = NULL;
59 struct omap_overlay_manager *old_mgr;
60 int len = size;
61
62 if (buf[size-1] == '\n')
63 --len;
64
65 if (len > 0) {
66 for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
67 mgr = omap_dss_get_overlay_manager(i);
68
69 if (sysfs_streq(buf, mgr->name))
70 break;
71
72 mgr = NULL;
73 }
74 }
75
76 if (len > 0 && mgr == NULL)
77 return -EINVAL;
78
79 if (mgr)
80 DSSDBG("manager %s found\n", mgr->name);
81
82 if (mgr == ovl->manager)
83 return size;
84
85 old_mgr = ovl->manager;
86
87 r = dispc_runtime_get();
88 if (r)
89 return r;
90
91 /* detach old manager */
92 if (old_mgr) {
93 r = ovl->unset_manager(ovl);
94 if (r) {
95 DSSERR("detach failed\n");
96 goto err;
97 }
98
99 r = old_mgr->apply(old_mgr);
100 if (r)
101 goto err;
102 }
103
104 if (mgr) {
105 r = ovl->set_manager(ovl, mgr);
106 if (r) {
107 DSSERR("Failed to attach overlay\n");
108 goto err;
109 }
110
111 r = mgr->apply(mgr);
112 if (r)
113 goto err;
114 }
115
116 dispc_runtime_put();
117
118 return size;
119
120err:
121 dispc_runtime_put();
122 return r;
123}
124
125static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf)
126{
127 struct omap_overlay_info info;
128
129 ovl->get_overlay_info(ovl, &info);
130
131 return snprintf(buf, PAGE_SIZE, "%d,%d\n",
132 info.width, info.height);
133}
134
135static ssize_t overlay_screen_width_show(struct omap_overlay *ovl, char *buf)
136{
137 struct omap_overlay_info info;
138
139 ovl->get_overlay_info(ovl, &info);
140
141 return snprintf(buf, PAGE_SIZE, "%d\n", info.screen_width);
142}
143
144static ssize_t overlay_position_show(struct omap_overlay *ovl, char *buf)
145{
146 struct omap_overlay_info info;
147
148 ovl->get_overlay_info(ovl, &info);
149
150 return snprintf(buf, PAGE_SIZE, "%d,%d\n",
151 info.pos_x, info.pos_y);
152}
153
154static ssize_t overlay_position_store(struct omap_overlay *ovl,
155 const char *buf, size_t size)
156{
157 int r;
158 char *last;
159 struct omap_overlay_info info;
160
161 ovl->get_overlay_info(ovl, &info);
162
163 info.pos_x = simple_strtoul(buf, &last, 10);
164 ++last;
165 if (last - buf >= size)
166 return -EINVAL;
167
168 info.pos_y = simple_strtoul(last, &last, 10);
169
170 r = ovl->set_overlay_info(ovl, &info);
171 if (r)
172 return r;
173
174 if (ovl->manager) {
175 r = ovl->manager->apply(ovl->manager);
176 if (r)
177 return r;
178 }
179
180 return size;
181}
182
183static ssize_t overlay_output_size_show(struct omap_overlay *ovl, char *buf)
184{
185 struct omap_overlay_info info;
186
187 ovl->get_overlay_info(ovl, &info);
188
189 return snprintf(buf, PAGE_SIZE, "%d,%d\n",
190 info.out_width, info.out_height);
191}
192
193static ssize_t overlay_output_size_store(struct omap_overlay *ovl,
194 const char *buf, size_t size)
195{
196 int r;
197 char *last;
198 struct omap_overlay_info info;
199
200 ovl->get_overlay_info(ovl, &info);
201
202 info.out_width = simple_strtoul(buf, &last, 10);
203 ++last;
204 if (last - buf >= size)
205 return -EINVAL;
206
207 info.out_height = simple_strtoul(last, &last, 10);
208
209 r = ovl->set_overlay_info(ovl, &info);
210 if (r)
211 return r;
212
213 if (ovl->manager) {
214 r = ovl->manager->apply(ovl->manager);
215 if (r)
216 return r;
217 }
218
219 return size;
220}
221
222static ssize_t overlay_enabled_show(struct omap_overlay *ovl, char *buf)
223{
224 return snprintf(buf, PAGE_SIZE, "%d\n", ovl->is_enabled(ovl));
225}
226
227static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf,
228 size_t size)
229{
230 int r;
231 bool enable;
232
233 r = strtobool(buf, &enable);
234 if (r)
235 return r;
236
237 if (enable)
238 r = ovl->enable(ovl);
239 else
240 r = ovl->disable(ovl);
241
242 if (r)
243 return r;
244
245 return size;
246}
247
248static ssize_t overlay_global_alpha_show(struct omap_overlay *ovl, char *buf)
249{
250 struct omap_overlay_info info;
251
252 ovl->get_overlay_info(ovl, &info);
253
254 return snprintf(buf, PAGE_SIZE, "%d\n",
255 info.global_alpha);
256}
257
258static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
259 const char *buf, size_t size)
260{
261 int r;
262 u8 alpha;
263 struct omap_overlay_info info;
264
265 if ((ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
266 return -ENODEV;
267
268 r = kstrtou8(buf, 0, &alpha);
269 if (r)
270 return r;
271
272 ovl->get_overlay_info(ovl, &info);
273
274 info.global_alpha = alpha;
275
276 r = ovl->set_overlay_info(ovl, &info);
277 if (r)
278 return r;
279
280 if (ovl->manager) {
281 r = ovl->manager->apply(ovl->manager);
282 if (r)
283 return r;
284 }
285
286 return size;
287}
288
289static ssize_t overlay_pre_mult_alpha_show(struct omap_overlay *ovl,
290 char *buf)
291{
292 struct omap_overlay_info info;
293
294 ovl->get_overlay_info(ovl, &info);
295
296 return snprintf(buf, PAGE_SIZE, "%d\n",
297 info.pre_mult_alpha);
298}
299
300static ssize_t overlay_pre_mult_alpha_store(struct omap_overlay *ovl,
301 const char *buf, size_t size)
302{
303 int r;
304 u8 alpha;
305 struct omap_overlay_info info;
306
307 if ((ovl->caps & OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA) == 0)
308 return -ENODEV;
309
310 r = kstrtou8(buf, 0, &alpha);
311 if (r)
312 return r;
313
314 ovl->get_overlay_info(ovl, &info);
315
316 info.pre_mult_alpha = alpha;
317
318 r = ovl->set_overlay_info(ovl, &info);
319 if (r)
320 return r;
321
322 if (ovl->manager) {
323 r = ovl->manager->apply(ovl->manager);
324 if (r)
325 return r;
326 }
327
328 return size;
329}
330
331static ssize_t overlay_zorder_show(struct omap_overlay *ovl, char *buf)
332{
333 struct omap_overlay_info info;
334
335 ovl->get_overlay_info(ovl, &info);
336
337 return snprintf(buf, PAGE_SIZE, "%d\n", info.zorder);
338}
339
340static ssize_t overlay_zorder_store(struct omap_overlay *ovl,
341 const char *buf, size_t size)
342{
343 int r;
344 u8 zorder;
345 struct omap_overlay_info info;
346
347 if ((ovl->caps & OMAP_DSS_OVL_CAP_ZORDER) == 0)
348 return -ENODEV;
349
350 r = kstrtou8(buf, 0, &zorder);
351 if (r)
352 return r;
353
354 ovl->get_overlay_info(ovl, &info);
355
356 info.zorder = zorder;
357
358 r = ovl->set_overlay_info(ovl, &info);
359 if (r)
360 return r;
361
362 if (ovl->manager) {
363 r = ovl->manager->apply(ovl->manager);
364 if (r)
365 return r;
366 }
367
368 return size;
369}
370
371struct overlay_attribute {
372 struct attribute attr;
373 ssize_t (*show)(struct omap_overlay *, char *);
374 ssize_t (*store)(struct omap_overlay *, const char *, size_t);
375};
376
377#define OVERLAY_ATTR(_name, _mode, _show, _store) \
378 struct overlay_attribute overlay_attr_##_name = \
379 __ATTR(_name, _mode, _show, _store)
380
381static OVERLAY_ATTR(name, S_IRUGO, overlay_name_show, NULL);
382static OVERLAY_ATTR(manager, S_IRUGO|S_IWUSR,
383 overlay_manager_show, overlay_manager_store);
384static OVERLAY_ATTR(input_size, S_IRUGO, overlay_input_size_show, NULL);
385static OVERLAY_ATTR(screen_width, S_IRUGO, overlay_screen_width_show, NULL);
386static OVERLAY_ATTR(position, S_IRUGO|S_IWUSR,
387 overlay_position_show, overlay_position_store);
388static OVERLAY_ATTR(output_size, S_IRUGO|S_IWUSR,
389 overlay_output_size_show, overlay_output_size_store);
390static OVERLAY_ATTR(enabled, S_IRUGO|S_IWUSR,
391 overlay_enabled_show, overlay_enabled_store);
392static OVERLAY_ATTR(global_alpha, S_IRUGO|S_IWUSR,
393 overlay_global_alpha_show, overlay_global_alpha_store);
394static OVERLAY_ATTR(pre_mult_alpha, S_IRUGO|S_IWUSR,
395 overlay_pre_mult_alpha_show,
396 overlay_pre_mult_alpha_store);
397static OVERLAY_ATTR(zorder, S_IRUGO|S_IWUSR,
398 overlay_zorder_show, overlay_zorder_store);
399
400static struct attribute *overlay_sysfs_attrs[] = {
401 &overlay_attr_name.attr,
402 &overlay_attr_manager.attr,
403 &overlay_attr_input_size.attr,
404 &overlay_attr_screen_width.attr,
405 &overlay_attr_position.attr,
406 &overlay_attr_output_size.attr,
407 &overlay_attr_enabled.attr,
408 &overlay_attr_global_alpha.attr,
409 &overlay_attr_pre_mult_alpha.attr,
410 &overlay_attr_zorder.attr,
411 NULL
412};
413
414static ssize_t overlay_attr_show(struct kobject *kobj, struct attribute *attr,
415 char *buf)
416{
417 struct omap_overlay *overlay;
418 struct overlay_attribute *overlay_attr;
419
420 overlay = container_of(kobj, struct omap_overlay, kobj);
421 overlay_attr = container_of(attr, struct overlay_attribute, attr);
422
423 if (!overlay_attr->show)
424 return -ENOENT;
425
426 return overlay_attr->show(overlay, buf);
427}
428
429static ssize_t overlay_attr_store(struct kobject *kobj, struct attribute *attr,
430 const char *buf, size_t size)
431{
432 struct omap_overlay *overlay;
433 struct overlay_attribute *overlay_attr;
434
435 overlay = container_of(kobj, struct omap_overlay, kobj);
436 overlay_attr = container_of(attr, struct overlay_attribute, attr);
437
438 if (!overlay_attr->store)
439 return -ENOENT;
440
441 return overlay_attr->store(overlay, buf, size);
442}
443
444static const struct sysfs_ops overlay_sysfs_ops = {
445 .show = overlay_attr_show,
446 .store = overlay_attr_store,
447};
448
449static struct kobj_type overlay_ktype = {
450 .sysfs_ops = &overlay_sysfs_ops,
451 .default_attrs = overlay_sysfs_attrs,
452};
453
454int omap_dss_get_num_overlays(void) 48int omap_dss_get_num_overlays(void)
455{ 49{
456 return num_overlays; 50 return num_overlays;
@@ -507,97 +101,25 @@ void dss_init_overlays(struct platform_device *pdev)
507 ovl->set_overlay_info = &dss_ovl_set_info; 101 ovl->set_overlay_info = &dss_ovl_set_info;
508 ovl->get_overlay_info = &dss_ovl_get_info; 102 ovl->get_overlay_info = &dss_ovl_get_info;
509 ovl->wait_for_go = &dss_mgr_wait_for_go_ovl; 103 ovl->wait_for_go = &dss_mgr_wait_for_go_ovl;
104 ovl->get_device = &dss_ovl_get_device;
510 105
511 ovl->caps = dss_feat_get_overlay_caps(ovl->id); 106 ovl->caps = dss_feat_get_overlay_caps(ovl->id);
512 ovl->supported_modes = 107 ovl->supported_modes =
513 dss_feat_get_supported_color_modes(ovl->id); 108 dss_feat_get_supported_color_modes(ovl->id);
514 109
515 r = kobject_init_and_add(&ovl->kobj, &overlay_ktype, 110 r = dss_overlay_kobj_init(ovl, pdev);
516 &pdev->dev.kobj, "overlay%d", i);
517
518 if (r) 111 if (r)
519 DSSERR("failed to create sysfs file\n"); 112 DSSERR("failed to create sysfs file\n");
520 } 113 }
521} 114}
522 115
523/* connect overlays to the new device, if not already connected. if force
524 * selected, connect always. */
525void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
526{
527 int i;
528 struct omap_overlay_manager *lcd_mgr;
529 struct omap_overlay_manager *tv_mgr;
530 struct omap_overlay_manager *lcd2_mgr = NULL;
531 struct omap_overlay_manager *lcd3_mgr = NULL;
532 struct omap_overlay_manager *mgr = NULL;
533
534 lcd_mgr = omap_dss_get_overlay_manager(OMAP_DSS_CHANNEL_LCD);
535 tv_mgr = omap_dss_get_overlay_manager(OMAP_DSS_CHANNEL_DIGIT);
536 if (dss_has_feature(FEAT_MGR_LCD3))
537 lcd3_mgr = omap_dss_get_overlay_manager(OMAP_DSS_CHANNEL_LCD3);
538 if (dss_has_feature(FEAT_MGR_LCD2))
539 lcd2_mgr = omap_dss_get_overlay_manager(OMAP_DSS_CHANNEL_LCD2);
540
541 if (dssdev->channel == OMAP_DSS_CHANNEL_LCD3) {
542 if (!lcd3_mgr->device || force) {
543 if (lcd3_mgr->device)
544 lcd3_mgr->unset_device(lcd3_mgr);
545 lcd3_mgr->set_device(lcd3_mgr, dssdev);
546 mgr = lcd3_mgr;
547 }
548 } else if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2) {
549 if (!lcd2_mgr->device || force) {
550 if (lcd2_mgr->device)
551 lcd2_mgr->unset_device(lcd2_mgr);
552 lcd2_mgr->set_device(lcd2_mgr, dssdev);
553 mgr = lcd2_mgr;
554 }
555 } else if (dssdev->type != OMAP_DISPLAY_TYPE_VENC
556 && dssdev->type != OMAP_DISPLAY_TYPE_HDMI) {
557 if (!lcd_mgr->device || force) {
558 if (lcd_mgr->device)
559 lcd_mgr->unset_device(lcd_mgr);
560 lcd_mgr->set_device(lcd_mgr, dssdev);
561 mgr = lcd_mgr;
562 }
563 }
564
565 if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
566 || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
567 if (!tv_mgr->device || force) {
568 if (tv_mgr->device)
569 tv_mgr->unset_device(tv_mgr);
570 tv_mgr->set_device(tv_mgr, dssdev);
571 mgr = tv_mgr;
572 }
573 }
574
575 if (mgr) {
576 dispc_runtime_get();
577
578 for (i = 0; i < dss_feat_get_num_ovls(); i++) {
579 struct omap_overlay *ovl;
580 ovl = omap_dss_get_overlay(i);
581 if (!ovl->manager || force) {
582 if (ovl->manager)
583 ovl->unset_manager(ovl);
584 ovl->set_manager(ovl, mgr);
585 }
586 }
587
588 dispc_runtime_put();
589 }
590}
591
592void dss_uninit_overlays(struct platform_device *pdev) 116void dss_uninit_overlays(struct platform_device *pdev)
593{ 117{
594 int i; 118 int i;
595 119
596 for (i = 0; i < num_overlays; ++i) { 120 for (i = 0; i < num_overlays; ++i) {
597 struct omap_overlay *ovl = &overlays[i]; 121 struct omap_overlay *ovl = &overlays[i];
598 122 dss_overlay_kobj_uninit(ovl);
599 kobject_del(&ovl->kobj);
600 kobject_put(&ovl->kobj);
601 } 123 }
602 124
603 kfree(overlays); 125 kfree(overlays);
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index 7c087424b634..7282e5af3e1a 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -111,6 +111,13 @@ static struct {
111 struct omap_dss_device *dssdev[2]; 111 struct omap_dss_device *dssdev[2];
112 112
113 struct semaphore bus_lock; 113 struct semaphore bus_lock;
114
115 struct omap_video_timings timings;
116 int pixel_size;
117 int data_lines;
118 struct rfbi_timings intf_timings;
119
120 struct omap_dss_output output;
114} rfbi; 121} rfbi;
115 122
116static inline void rfbi_write_reg(const struct rfbi_reg idx, u32 val) 123static inline void rfbi_write_reg(const struct rfbi_reg idx, u32 val)
@@ -300,30 +307,23 @@ void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width,
300} 307}
301EXPORT_SYMBOL(omap_rfbi_write_pixels); 308EXPORT_SYMBOL(omap_rfbi_write_pixels);
302 309
303static int rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, 310static int rfbi_transfer_area(struct omap_dss_device *dssdev,
304 u16 height, void (*callback)(void *data), void *data) 311 void (*callback)(void *data), void *data)
305{ 312{
306 u32 l; 313 u32 l;
307 int r; 314 int r;
308 struct omap_video_timings timings = { 315 struct omap_overlay_manager *mgr = dssdev->output->manager;
309 .hsw = 1, 316 u16 width = rfbi.timings.x_res;
310 .hfp = 1, 317 u16 height = rfbi.timings.y_res;
311 .hbp = 1,
312 .vsw = 1,
313 .vfp = 0,
314 .vbp = 0,
315 .x_res = width,
316 .y_res = height,
317 };
318 318
319 /*BUG_ON(callback == 0);*/ 319 /*BUG_ON(callback == 0);*/
320 BUG_ON(rfbi.framedone_callback != NULL); 320 BUG_ON(rfbi.framedone_callback != NULL);
321 321
322 DSSDBG("rfbi_transfer_area %dx%d\n", width, height); 322 DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
323 323
324 dss_mgr_set_timings(dssdev->manager, &timings); 324 dss_mgr_set_timings(mgr, &rfbi.timings);
325 325
326 r = dss_mgr_enable(dssdev->manager); 326 r = dss_mgr_enable(mgr);
327 if (r) 327 if (r)
328 return r; 328 return r;
329 329
@@ -770,62 +770,45 @@ static int rfbi_configure(int rfbi_module, int bpp, int lines)
770 return 0; 770 return 0;
771} 771}
772 772
773int omap_rfbi_configure(struct omap_dss_device *dssdev, int pixel_size, 773int omap_rfbi_configure(struct omap_dss_device *dssdev)
774 int data_lines)
775{ 774{
776 return rfbi_configure(dssdev->phy.rfbi.channel, pixel_size, data_lines); 775 return rfbi_configure(dssdev->phy.rfbi.channel, rfbi.pixel_size,
776 rfbi.data_lines);
777} 777}
778EXPORT_SYMBOL(omap_rfbi_configure); 778EXPORT_SYMBOL(omap_rfbi_configure);
779 779
780int omap_rfbi_prepare_update(struct omap_dss_device *dssdev, 780int omap_rfbi_update(struct omap_dss_device *dssdev, void (*callback)(void *),
781 u16 *x, u16 *y, u16 *w, u16 *h) 781 void *data)
782{ 782{
783 u16 dw, dh; 783 return rfbi_transfer_area(dssdev, callback, data);
784 struct omap_video_timings timings = { 784}
785 .hsw = 1, 785EXPORT_SYMBOL(omap_rfbi_update);
786 .hfp = 1,
787 .hbp = 1,
788 .vsw = 1,
789 .vfp = 0,
790 .vbp = 0,
791 .x_res = *w,
792 .y_res = *h,
793 };
794
795 dssdev->driver->get_resolution(dssdev, &dw, &dh);
796
797 if (*x > dw || *y > dh)
798 return -EINVAL;
799
800 if (*x + *w > dw)
801 return -EINVAL;
802
803 if (*y + *h > dh)
804 return -EINVAL;
805
806 if (*w == 1)
807 return -EINVAL;
808
809 if (*w == 0 || *h == 0)
810 return -EINVAL;
811
812 dss_mgr_set_timings(dssdev->manager, &timings);
813 786
814 return 0; 787void omapdss_rfbi_set_size(struct omap_dss_device *dssdev, u16 w, u16 h)
788{
789 rfbi.timings.x_res = w;
790 rfbi.timings.y_res = h;
815} 791}
816EXPORT_SYMBOL(omap_rfbi_prepare_update); 792EXPORT_SYMBOL(omapdss_rfbi_set_size);
817 793
818int omap_rfbi_update(struct omap_dss_device *dssdev, 794void omapdss_rfbi_set_pixel_size(struct omap_dss_device *dssdev, int pixel_size)
819 u16 x, u16 y, u16 w, u16 h,
820 void (*callback)(void *), void *data)
821{ 795{
822 int r; 796 rfbi.pixel_size = pixel_size;
797}
798EXPORT_SYMBOL(omapdss_rfbi_set_pixel_size);
823 799
824 r = rfbi_transfer_area(dssdev, w, h, callback, data); 800void omapdss_rfbi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
801{
802 rfbi.data_lines = data_lines;
803}
804EXPORT_SYMBOL(omapdss_rfbi_set_data_lines);
825 805
826 return r; 806void omapdss_rfbi_set_interface_timings(struct omap_dss_device *dssdev,
807 struct rfbi_timings *timings)
808{
809 rfbi.intf_timings = *timings;
827} 810}
828EXPORT_SYMBOL(omap_rfbi_update); 811EXPORT_SYMBOL(omapdss_rfbi_set_interface_timings);
829 812
830static void rfbi_dump_regs(struct seq_file *s) 813static void rfbi_dump_regs(struct seq_file *s)
831{ 814{
@@ -869,6 +852,7 @@ static void rfbi_dump_regs(struct seq_file *s)
869 852
870static void rfbi_config_lcd_manager(struct omap_dss_device *dssdev) 853static void rfbi_config_lcd_manager(struct omap_dss_device *dssdev)
871{ 854{
855 struct omap_overlay_manager *mgr = dssdev->output->manager;
872 struct dss_lcd_mgr_config mgr_config; 856 struct dss_lcd_mgr_config mgr_config;
873 857
874 mgr_config.io_pad_mode = DSS_IO_PAD_MODE_RFBI; 858 mgr_config.io_pad_mode = DSS_IO_PAD_MODE_RFBI;
@@ -877,18 +861,40 @@ static void rfbi_config_lcd_manager(struct omap_dss_device *dssdev)
877 /* Do we need fifohandcheck for RFBI? */ 861 /* Do we need fifohandcheck for RFBI? */
878 mgr_config.fifohandcheck = false; 862 mgr_config.fifohandcheck = false;
879 863
880 mgr_config.video_port_width = dssdev->ctrl.pixel_size; 864 mgr_config.video_port_width = rfbi.pixel_size;
881 mgr_config.lcden_sig_polarity = 0; 865 mgr_config.lcden_sig_polarity = 0;
882 866
883 dss_mgr_set_lcd_config(dssdev->manager, &mgr_config); 867 dss_mgr_set_lcd_config(mgr, &mgr_config);
868
869 /*
870 * Set rfbi.timings with default values, the x_res and y_res fields
871 * are expected to be already configured by the panel driver via
872 * omapdss_rfbi_set_size()
873 */
874 rfbi.timings.hsw = 1;
875 rfbi.timings.hfp = 1;
876 rfbi.timings.hbp = 1;
877 rfbi.timings.vsw = 1;
878 rfbi.timings.vfp = 0;
879 rfbi.timings.vbp = 0;
880
881 rfbi.timings.interlace = false;
882 rfbi.timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
883 rfbi.timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
884 rfbi.timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
885 rfbi.timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
886 rfbi.timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
887
888 dss_mgr_set_timings(mgr, &rfbi.timings);
884} 889}
885 890
886int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev) 891int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
887{ 892{
893 struct omap_dss_output *out = dssdev->output;
888 int r; 894 int r;
889 895
890 if (dssdev->manager == NULL) { 896 if (out == NULL || out->manager == NULL) {
891 DSSERR("failed to enable display: no manager\n"); 897 DSSERR("failed to enable display: no output/manager\n");
892 return -ENODEV; 898 return -ENODEV;
893 } 899 }
894 900
@@ -911,13 +917,10 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
911 917
912 rfbi_config_lcd_manager(dssdev); 918 rfbi_config_lcd_manager(dssdev);
913 919
914 rfbi_configure(dssdev->phy.rfbi.channel, 920 rfbi_configure(dssdev->phy.rfbi.channel, rfbi.pixel_size,
915 dssdev->ctrl.pixel_size, 921 rfbi.data_lines);
916 dssdev->phy.rfbi.data_lines);
917
918 rfbi_set_timings(dssdev->phy.rfbi.channel,
919 &dssdev->ctrl.rfbi_timings);
920 922
923 rfbi_set_timings(dssdev->phy.rfbi.channel, &rfbi.intf_timings);
921 924
922 return 0; 925 return 0;
923err1: 926err1:
@@ -941,14 +944,17 @@ EXPORT_SYMBOL(omapdss_rfbi_display_disable);
941static int __init rfbi_init_display(struct omap_dss_device *dssdev) 944static int __init rfbi_init_display(struct omap_dss_device *dssdev)
942{ 945{
943 rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev; 946 rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev;
944 dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
945 return 0; 947 return 0;
946} 948}
947 949
948static void __init rfbi_probe_pdata(struct platform_device *pdev) 950static struct omap_dss_device * __init rfbi_find_dssdev(struct platform_device *pdev)
949{ 951{
950 struct omap_dss_board_info *pdata = pdev->dev.platform_data; 952 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
951 int i, r; 953 const char *def_disp_name = dss_get_default_display_name();
954 struct omap_dss_device *def_dssdev;
955 int i;
956
957 def_dssdev = NULL;
952 958
953 for (i = 0; i < pdata->num_devices; ++i) { 959 for (i = 0; i < pdata->num_devices; ++i) {
954 struct omap_dss_device *dssdev = pdata->devices[i]; 960 struct omap_dss_device *dssdev = pdata->devices[i];
@@ -956,17 +962,67 @@ static void __init rfbi_probe_pdata(struct platform_device *pdev)
956 if (dssdev->type != OMAP_DISPLAY_TYPE_DBI) 962 if (dssdev->type != OMAP_DISPLAY_TYPE_DBI)
957 continue; 963 continue;
958 964
959 r = rfbi_init_display(dssdev); 965 if (def_dssdev == NULL)
960 if (r) { 966 def_dssdev = dssdev;
961 DSSERR("device %s init failed: %d\n", dssdev->name, r); 967
962 continue; 968 if (def_disp_name != NULL &&
969 strcmp(dssdev->name, def_disp_name) == 0) {
970 def_dssdev = dssdev;
971 break;
963 } 972 }
973 }
974
975 return def_dssdev;
976}
977
978static void __init rfbi_probe_pdata(struct platform_device *rfbidev)
979{
980 struct omap_dss_device *plat_dssdev;
981 struct omap_dss_device *dssdev;
982 int r;
983
984 plat_dssdev = rfbi_find_dssdev(rfbidev);
985
986 if (!plat_dssdev)
987 return;
988
989 dssdev = dss_alloc_and_init_device(&rfbidev->dev);
990 if (!dssdev)
991 return;
992
993 dss_copy_device_pdata(dssdev, plat_dssdev);
964 994
965 r = omap_dss_register_device(dssdev, &pdev->dev, i); 995 r = rfbi_init_display(dssdev);
966 if (r) 996 if (r) {
967 DSSERR("device %s register failed: %d\n", 997 DSSERR("device %s init failed: %d\n", dssdev->name, r);
968 dssdev->name, r); 998 dss_put_device(dssdev);
999 return;
969 } 1000 }
1001
1002 r = dss_add_device(dssdev);
1003 if (r) {
1004 DSSERR("device %s register failed: %d\n", dssdev->name, r);
1005 dss_put_device(dssdev);
1006 return;
1007 }
1008}
1009
1010static void __init rfbi_init_output(struct platform_device *pdev)
1011{
1012 struct omap_dss_output *out = &rfbi.output;
1013
1014 out->pdev = pdev;
1015 out->id = OMAP_DSS_OUTPUT_DBI;
1016 out->type = OMAP_DISPLAY_TYPE_DBI;
1017
1018 dss_register_output(out);
1019}
1020
1021static void __exit rfbi_uninit_output(struct platform_device *pdev)
1022{
1023 struct omap_dss_output *out = &rfbi.output;
1024
1025 dss_unregister_output(out);
970} 1026}
971 1027
972/* RFBI HW IP initialisation */ 1028/* RFBI HW IP initialisation */
@@ -1020,6 +1076,8 @@ static int __init omap_rfbihw_probe(struct platform_device *pdev)
1020 1076
1021 dss_debugfs_create_file("rfbi", rfbi_dump_regs); 1077 dss_debugfs_create_file("rfbi", rfbi_dump_regs);
1022 1078
1079 rfbi_init_output(pdev);
1080
1023 rfbi_probe_pdata(pdev); 1081 rfbi_probe_pdata(pdev);
1024 1082
1025 return 0; 1083 return 0;
@@ -1031,8 +1089,12 @@ err_runtime_get:
1031 1089
1032static int __exit omap_rfbihw_remove(struct platform_device *pdev) 1090static int __exit omap_rfbihw_remove(struct platform_device *pdev)
1033{ 1091{
1034 omap_dss_unregister_child_devices(&pdev->dev); 1092 dss_unregister_child_devices(&pdev->dev);
1093
1094 rfbi_uninit_output(pdev);
1095
1035 pm_runtime_disable(&pdev->dev); 1096 pm_runtime_disable(&pdev->dev);
1097
1036 return 0; 1098 return 0;
1037} 1099}
1038 1100
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index f43bfe17b3b6..7760851f6e5d 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -25,6 +25,7 @@
25#include <linux/regulator/consumer.h> 25#include <linux/regulator/consumer.h>
26#include <linux/export.h> 26#include <linux/export.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/string.h>
28 29
29#include <video/omapdss.h> 30#include <video/omapdss.h>
30#include "dss.h" 31#include "dss.h"
@@ -34,10 +35,16 @@ static struct {
34 struct regulator *vdds_sdi_reg; 35 struct regulator *vdds_sdi_reg;
35 36
36 struct dss_lcd_mgr_config mgr_config; 37 struct dss_lcd_mgr_config mgr_config;
38 struct omap_video_timings timings;
39 int datapairs;
40
41 struct omap_dss_output output;
37} sdi; 42} sdi;
38 43
39static void sdi_config_lcd_manager(struct omap_dss_device *dssdev) 44static void sdi_config_lcd_manager(struct omap_dss_device *dssdev)
40{ 45{
46 struct omap_overlay_manager *mgr = dssdev->output->manager;
47
41 sdi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS; 48 sdi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
42 49
43 sdi.mgr_config.stallmode = false; 50 sdi.mgr_config.stallmode = false;
@@ -46,19 +53,20 @@ static void sdi_config_lcd_manager(struct omap_dss_device *dssdev)
46 sdi.mgr_config.video_port_width = 24; 53 sdi.mgr_config.video_port_width = 24;
47 sdi.mgr_config.lcden_sig_polarity = 1; 54 sdi.mgr_config.lcden_sig_polarity = 1;
48 55
49 dss_mgr_set_lcd_config(dssdev->manager, &sdi.mgr_config); 56 dss_mgr_set_lcd_config(mgr, &sdi.mgr_config);
50} 57}
51 58
52int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) 59int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
53{ 60{
54 struct omap_video_timings *t = &dssdev->panel.timings; 61 struct omap_dss_output *out = dssdev->output;
62 struct omap_video_timings *t = &sdi.timings;
55 struct dss_clock_info dss_cinfo; 63 struct dss_clock_info dss_cinfo;
56 struct dispc_clock_info dispc_cinfo; 64 struct dispc_clock_info dispc_cinfo;
57 unsigned long pck; 65 unsigned long pck;
58 int r; 66 int r;
59 67
60 if (dssdev->manager == NULL) { 68 if (out == NULL || out->manager == NULL) {
61 DSSERR("failed to enable display: no manager\n"); 69 DSSERR("failed to enable display: no output/manager\n");
62 return -ENODEV; 70 return -ENODEV;
63 } 71 }
64 72
@@ -77,8 +85,8 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
77 goto err_get_dispc; 85 goto err_get_dispc;
78 86
79 /* 15.5.9.1.2 */ 87 /* 15.5.9.1.2 */
80 dssdev->panel.timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; 88 t->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
81 dssdev->panel.timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; 89 t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
82 90
83 r = dss_calc_clock_div(t->pixel_clock * 1000, &dss_cinfo, &dispc_cinfo); 91 r = dss_calc_clock_div(t->pixel_clock * 1000, &dss_cinfo, &dispc_cinfo);
84 if (r) 92 if (r)
@@ -97,7 +105,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
97 } 105 }
98 106
99 107
100 dss_mgr_set_timings(dssdev->manager, t); 108 dss_mgr_set_timings(out->manager, t);
101 109
102 r = dss_set_clock_div(&dss_cinfo); 110 r = dss_set_clock_div(&dss_cinfo);
103 if (r) 111 if (r)
@@ -116,16 +124,15 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
116 * need to care about the shadow register mechanism for pck-free. The 124 * need to care about the shadow register mechanism for pck-free. The
117 * exact reason for this is unknown. 125 * exact reason for this is unknown.
118 */ 126 */
119 dispc_mgr_set_clock_div(dssdev->manager->id, 127 dispc_mgr_set_clock_div(out->manager->id, &sdi.mgr_config.clock_info);
120 &sdi.mgr_config.clock_info);
121 128
122 dss_sdi_init(dssdev->phy.sdi.datapairs); 129 dss_sdi_init(sdi.datapairs);
123 r = dss_sdi_enable(); 130 r = dss_sdi_enable();
124 if (r) 131 if (r)
125 goto err_sdi_enable; 132 goto err_sdi_enable;
126 mdelay(2); 133 mdelay(2);
127 134
128 r = dss_mgr_enable(dssdev->manager); 135 r = dss_mgr_enable(out->manager);
129 if (r) 136 if (r)
130 goto err_mgr_enable; 137 goto err_mgr_enable;
131 138
@@ -148,7 +155,9 @@ EXPORT_SYMBOL(omapdss_sdi_display_enable);
148 155
149void omapdss_sdi_display_disable(struct omap_dss_device *dssdev) 156void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
150{ 157{
151 dss_mgr_disable(dssdev->manager); 158 struct omap_overlay_manager *mgr = dssdev->output->manager;
159
160 dss_mgr_disable(mgr);
152 161
153 dss_sdi_disable(); 162 dss_sdi_disable();
154 163
@@ -160,6 +169,19 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
160} 169}
161EXPORT_SYMBOL(omapdss_sdi_display_disable); 170EXPORT_SYMBOL(omapdss_sdi_display_disable);
162 171
172void omapdss_sdi_set_timings(struct omap_dss_device *dssdev,
173 struct omap_video_timings *timings)
174{
175 sdi.timings = *timings;
176}
177EXPORT_SYMBOL(omapdss_sdi_set_timings);
178
179void omapdss_sdi_set_datapairs(struct omap_dss_device *dssdev, int datapairs)
180{
181 sdi.datapairs = datapairs;
182}
183EXPORT_SYMBOL(omapdss_sdi_set_datapairs);
184
163static int __init sdi_init_display(struct omap_dss_device *dssdev) 185static int __init sdi_init_display(struct omap_dss_device *dssdev)
164{ 186{
165 DSSDBG("SDI init\n"); 187 DSSDBG("SDI init\n");
@@ -180,10 +202,14 @@ static int __init sdi_init_display(struct omap_dss_device *dssdev)
180 return 0; 202 return 0;
181} 203}
182 204
183static void __init sdi_probe_pdata(struct platform_device *pdev) 205static struct omap_dss_device * __init sdi_find_dssdev(struct platform_device *pdev)
184{ 206{
185 struct omap_dss_board_info *pdata = pdev->dev.platform_data; 207 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
186 int i, r; 208 const char *def_disp_name = dss_get_default_display_name();
209 struct omap_dss_device *def_dssdev;
210 int i;
211
212 def_dssdev = NULL;
187 213
188 for (i = 0; i < pdata->num_devices; ++i) { 214 for (i = 0; i < pdata->num_devices; ++i) {
189 struct omap_dss_device *dssdev = pdata->devices[i]; 215 struct omap_dss_device *dssdev = pdata->devices[i];
@@ -191,21 +217,73 @@ static void __init sdi_probe_pdata(struct platform_device *pdev)
191 if (dssdev->type != OMAP_DISPLAY_TYPE_SDI) 217 if (dssdev->type != OMAP_DISPLAY_TYPE_SDI)
192 continue; 218 continue;
193 219
194 r = sdi_init_display(dssdev); 220 if (def_dssdev == NULL)
195 if (r) { 221 def_dssdev = dssdev;
196 DSSERR("device %s init failed: %d\n", dssdev->name, r); 222
197 continue; 223 if (def_disp_name != NULL &&
224 strcmp(dssdev->name, def_disp_name) == 0) {
225 def_dssdev = dssdev;
226 break;
198 } 227 }
228 }
229
230 return def_dssdev;
231}
232
233static void __init sdi_probe_pdata(struct platform_device *sdidev)
234{
235 struct omap_dss_device *plat_dssdev;
236 struct omap_dss_device *dssdev;
237 int r;
238
239 plat_dssdev = sdi_find_dssdev(sdidev);
199 240
200 r = omap_dss_register_device(dssdev, &pdev->dev, i); 241 if (!plat_dssdev)
201 if (r) 242 return;
202 DSSERR("device %s register failed: %d\n", 243
203 dssdev->name, r); 244 dssdev = dss_alloc_and_init_device(&sdidev->dev);
245 if (!dssdev)
246 return;
247
248 dss_copy_device_pdata(dssdev, plat_dssdev);
249
250 r = sdi_init_display(dssdev);
251 if (r) {
252 DSSERR("device %s init failed: %d\n", dssdev->name, r);
253 dss_put_device(dssdev);
254 return;
204 } 255 }
256
257 r = dss_add_device(dssdev);
258 if (r) {
259 DSSERR("device %s register failed: %d\n", dssdev->name, r);
260 dss_put_device(dssdev);
261 return;
262 }
263}
264
265static void __init sdi_init_output(struct platform_device *pdev)
266{
267 struct omap_dss_output *out = &sdi.output;
268
269 out->pdev = pdev;
270 out->id = OMAP_DSS_OUTPUT_SDI;
271 out->type = OMAP_DISPLAY_TYPE_SDI;
272
273 dss_register_output(out);
274}
275
276static void __exit sdi_uninit_output(struct platform_device *pdev)
277{
278 struct omap_dss_output *out = &sdi.output;
279
280 dss_unregister_output(out);
205} 281}
206 282
207static int __init omap_sdi_probe(struct platform_device *pdev) 283static int __init omap_sdi_probe(struct platform_device *pdev)
208{ 284{
285 sdi_init_output(pdev);
286
209 sdi_probe_pdata(pdev); 287 sdi_probe_pdata(pdev);
210 288
211 return 0; 289 return 0;
@@ -213,7 +291,9 @@ static int __init omap_sdi_probe(struct platform_device *pdev)
213 291
214static int __exit omap_sdi_remove(struct platform_device *pdev) 292static int __exit omap_sdi_remove(struct platform_device *pdev)
215{ 293{
216 omap_dss_unregister_child_devices(&pdev->dev); 294 dss_unregister_child_devices(&pdev->dev);
295
296 sdi_uninit_output(pdev);
217 297
218 return 0; 298 return 0;
219} 299}
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 3a220877461a..56efa3bb465d 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -36,7 +36,6 @@
36#include <linux/pm_runtime.h> 36#include <linux/pm_runtime.h>
37 37
38#include <video/omapdss.h> 38#include <video/omapdss.h>
39#include <plat/cpu.h>
40 39
41#include "dss.h" 40#include "dss.h"
42#include "dss_features.h" 41#include "dss_features.h"
@@ -300,6 +299,12 @@ static struct {
300 struct regulator *vdda_dac_reg; 299 struct regulator *vdda_dac_reg;
301 300
302 struct clk *tv_dac_clk; 301 struct clk *tv_dac_clk;
302
303 struct omap_video_timings timings;
304 enum omap_dss_venc_type type;
305 bool invert_polarity;
306
307 struct omap_dss_output output;
303} venc; 308} venc;
304 309
305static inline void venc_write_reg(int idx, u32 val) 310static inline void venc_write_reg(int idx, u32 val)
@@ -424,65 +429,67 @@ static const struct venc_config *venc_timings_to_config(
424 429
425static int venc_power_on(struct omap_dss_device *dssdev) 430static int venc_power_on(struct omap_dss_device *dssdev)
426{ 431{
432 struct omap_overlay_manager *mgr = dssdev->output->manager;
427 u32 l; 433 u32 l;
428 int r; 434 int r;
429 435
436 r = venc_runtime_get();
437 if (r)
438 goto err0;
439
430 venc_reset(); 440 venc_reset();
431 venc_write_config(venc_timings_to_config(&dssdev->panel.timings)); 441 venc_write_config(venc_timings_to_config(&venc.timings));
432 442
433 dss_set_venc_output(dssdev->phy.venc.type); 443 dss_set_venc_output(venc.type);
434 dss_set_dac_pwrdn_bgz(1); 444 dss_set_dac_pwrdn_bgz(1);
435 445
436 l = 0; 446 l = 0;
437 447
438 if (dssdev->phy.venc.type == OMAP_DSS_VENC_TYPE_COMPOSITE) 448 if (venc.type == OMAP_DSS_VENC_TYPE_COMPOSITE)
439 l |= 1 << 1; 449 l |= 1 << 1;
440 else /* S-Video */ 450 else /* S-Video */
441 l |= (1 << 0) | (1 << 2); 451 l |= (1 << 0) | (1 << 2);
442 452
443 if (dssdev->phy.venc.invert_polarity == false) 453 if (venc.invert_polarity == false)
444 l |= 1 << 3; 454 l |= 1 << 3;
445 455
446 venc_write_reg(VENC_OUTPUT_CONTROL, l); 456 venc_write_reg(VENC_OUTPUT_CONTROL, l);
447 457
448 dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings); 458 dss_mgr_set_timings(mgr, &venc.timings);
449 459
450 r = regulator_enable(venc.vdda_dac_reg); 460 r = regulator_enable(venc.vdda_dac_reg);
451 if (r) 461 if (r)
452 goto err; 462 goto err1;
453
454 if (dssdev->platform_enable)
455 dssdev->platform_enable(dssdev);
456 463
457 r = dss_mgr_enable(dssdev->manager); 464 r = dss_mgr_enable(mgr);
458 if (r) 465 if (r)
459 goto err; 466 goto err2;
460 467
461 return 0; 468 return 0;
462 469
463err: 470err2:
471 regulator_disable(venc.vdda_dac_reg);
472err1:
464 venc_write_reg(VENC_OUTPUT_CONTROL, 0); 473 venc_write_reg(VENC_OUTPUT_CONTROL, 0);
465 dss_set_dac_pwrdn_bgz(0); 474 dss_set_dac_pwrdn_bgz(0);
466 475
467 if (dssdev->platform_disable) 476 venc_runtime_put();
468 dssdev->platform_disable(dssdev); 477err0:
469
470 regulator_disable(venc.vdda_dac_reg);
471
472 return r; 478 return r;
473} 479}
474 480
475static void venc_power_off(struct omap_dss_device *dssdev) 481static void venc_power_off(struct omap_dss_device *dssdev)
476{ 482{
483 struct omap_overlay_manager *mgr = dssdev->output->manager;
484
477 venc_write_reg(VENC_OUTPUT_CONTROL, 0); 485 venc_write_reg(VENC_OUTPUT_CONTROL, 0);
478 dss_set_dac_pwrdn_bgz(0); 486 dss_set_dac_pwrdn_bgz(0);
479 487
480 dss_mgr_disable(dssdev->manager); 488 dss_mgr_disable(mgr);
481
482 if (dssdev->platform_disable)
483 dssdev->platform_disable(dssdev);
484 489
485 regulator_disable(venc.vdda_dac_reg); 490 regulator_disable(venc.vdda_dac_reg);
491
492 venc_runtime_put();
486} 493}
487 494
488unsigned long venc_get_pixel_clock(void) 495unsigned long venc_get_pixel_clock(void)
@@ -491,171 +498,83 @@ unsigned long venc_get_pixel_clock(void)
491 return 13500000; 498 return 13500000;
492} 499}
493 500
494static ssize_t display_output_type_show(struct device *dev, 501int omapdss_venc_display_enable(struct omap_dss_device *dssdev)
495 struct device_attribute *attr, char *buf)
496{ 502{
497 struct omap_dss_device *dssdev = to_dss_device(dev); 503 struct omap_dss_output *out = dssdev->output;
498 const char *ret; 504 int r;
499
500 switch (dssdev->phy.venc.type) {
501 case OMAP_DSS_VENC_TYPE_COMPOSITE:
502 ret = "composite";
503 break;
504 case OMAP_DSS_VENC_TYPE_SVIDEO:
505 ret = "svideo";
506 break;
507 default:
508 return -EINVAL;
509 }
510
511 return snprintf(buf, PAGE_SIZE, "%s\n", ret);
512}
513 505
514static ssize_t display_output_type_store(struct device *dev, 506 DSSDBG("venc_display_enable\n");
515 struct device_attribute *attr, const char *buf, size_t size)
516{
517 struct omap_dss_device *dssdev = to_dss_device(dev);
518 enum omap_dss_venc_type new_type;
519
520 if (sysfs_streq("composite", buf))
521 new_type = OMAP_DSS_VENC_TYPE_COMPOSITE;
522 else if (sysfs_streq("svideo", buf))
523 new_type = OMAP_DSS_VENC_TYPE_SVIDEO;
524 else
525 return -EINVAL;
526 507
527 mutex_lock(&venc.venc_lock); 508 mutex_lock(&venc.venc_lock);
528 509
529 if (dssdev->phy.venc.type != new_type) { 510 if (out == NULL || out->manager == NULL) {
530 dssdev->phy.venc.type = new_type; 511 DSSERR("Failed to enable display: no output/manager\n");
531 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { 512 r = -ENODEV;
532 venc_power_off(dssdev); 513 goto err0;
533 venc_power_on(dssdev);
534 }
535 } 514 }
536 515
537 mutex_unlock(&venc.venc_lock);
538
539 return size;
540}
541
542static DEVICE_ATTR(output_type, S_IRUGO | S_IWUSR,
543 display_output_type_show, display_output_type_store);
544
545/* driver */
546static int venc_panel_probe(struct omap_dss_device *dssdev)
547{
548 dssdev->panel.timings = omap_dss_pal_timings;
549
550 return device_create_file(&dssdev->dev, &dev_attr_output_type);
551}
552
553static void venc_panel_remove(struct omap_dss_device *dssdev)
554{
555 device_remove_file(&dssdev->dev, &dev_attr_output_type);
556}
557
558static int venc_panel_enable(struct omap_dss_device *dssdev)
559{
560 int r = 0;
561
562 DSSDBG("venc_enable_display\n");
563
564 mutex_lock(&venc.venc_lock);
565
566 r = omap_dss_start_device(dssdev); 516 r = omap_dss_start_device(dssdev);
567 if (r) { 517 if (r) {
568 DSSERR("failed to start device\n"); 518 DSSERR("failed to start device\n");
569 goto err0; 519 goto err0;
570 } 520 }
571 521
572 if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) { 522 if (dssdev->platform_enable)
573 r = -EINVAL; 523 dssdev->platform_enable(dssdev);
574 goto err1;
575 }
576 524
577 r = venc_runtime_get();
578 if (r)
579 goto err1;
580 525
581 r = venc_power_on(dssdev); 526 r = venc_power_on(dssdev);
582 if (r) 527 if (r)
583 goto err2; 528 goto err1;
584 529
585 venc.wss_data = 0; 530 venc.wss_data = 0;
586 531
587 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
588
589 mutex_unlock(&venc.venc_lock); 532 mutex_unlock(&venc.venc_lock);
533
590 return 0; 534 return 0;
591err2:
592 venc_runtime_put();
593err1: 535err1:
536 if (dssdev->platform_disable)
537 dssdev->platform_disable(dssdev);
594 omap_dss_stop_device(dssdev); 538 omap_dss_stop_device(dssdev);
595err0: 539err0:
596 mutex_unlock(&venc.venc_lock); 540 mutex_unlock(&venc.venc_lock);
597
598 return r; 541 return r;
599} 542}
600 543
601static void venc_panel_disable(struct omap_dss_device *dssdev) 544void omapdss_venc_display_disable(struct omap_dss_device *dssdev)
602{ 545{
603 DSSDBG("venc_disable_display\n"); 546 DSSDBG("venc_display_disable\n");
604 547
605 mutex_lock(&venc.venc_lock); 548 mutex_lock(&venc.venc_lock);
606 549
607 if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED)
608 goto end;
609
610 if (dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED) {
611 /* suspended is the same as disabled with venc */
612 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
613 goto end;
614 }
615
616 venc_power_off(dssdev); 550 venc_power_off(dssdev);
617 551
618 venc_runtime_put();
619
620 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
621
622 omap_dss_stop_device(dssdev); 552 omap_dss_stop_device(dssdev);
623end:
624 mutex_unlock(&venc.venc_lock);
625}
626 553
627static int venc_panel_suspend(struct omap_dss_device *dssdev) 554 if (dssdev->platform_disable)
628{ 555 dssdev->platform_disable(dssdev);
629 venc_panel_disable(dssdev);
630 return 0;
631}
632 556
633static int venc_panel_resume(struct omap_dss_device *dssdev) 557 mutex_unlock(&venc.venc_lock);
634{
635 return venc_panel_enable(dssdev);
636} 558}
637 559
638static void venc_set_timings(struct omap_dss_device *dssdev, 560void omapdss_venc_set_timings(struct omap_dss_device *dssdev,
639 struct omap_video_timings *timings) 561 struct omap_video_timings *timings)
640{ 562{
641 DSSDBG("venc_set_timings\n"); 563 DSSDBG("venc_set_timings\n");
642 564
565 mutex_lock(&venc.venc_lock);
566
643 /* Reset WSS data when the TV standard changes. */ 567 /* Reset WSS data when the TV standard changes. */
644 if (memcmp(&dssdev->panel.timings, timings, sizeof(*timings))) 568 if (memcmp(&venc.timings, timings, sizeof(*timings)))
645 venc.wss_data = 0; 569 venc.wss_data = 0;
646 570
647 dssdev->panel.timings = *timings; 571 venc.timings = *timings;
648 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { 572
649 /* turn the venc off and on to get new timings to use */ 573 mutex_unlock(&venc.venc_lock);
650 venc_panel_disable(dssdev);
651 venc_panel_enable(dssdev);
652 } else {
653 dss_mgr_set_timings(dssdev->manager, timings);
654 }
655} 574}
656 575
657static int venc_check_timings(struct omap_dss_device *dssdev, 576int omapdss_venc_check_timings(struct omap_dss_device *dssdev,
658 struct omap_video_timings *timings) 577 struct omap_video_timings *timings)
659{ 578{
660 DSSDBG("venc_check_timings\n"); 579 DSSDBG("venc_check_timings\n");
661 580
@@ -668,13 +587,13 @@ static int venc_check_timings(struct omap_dss_device *dssdev,
668 return -EINVAL; 587 return -EINVAL;
669} 588}
670 589
671static u32 venc_get_wss(struct omap_dss_device *dssdev) 590u32 omapdss_venc_get_wss(struct omap_dss_device *dssdev)
672{ 591{
673 /* Invert due to VENC_L21_WC_CTL:INV=1 */ 592 /* Invert due to VENC_L21_WC_CTL:INV=1 */
674 return (venc.wss_data >> 8) ^ 0xfffff; 593 return (venc.wss_data >> 8) ^ 0xfffff;
675} 594}
676 595
677static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss) 596int omapdss_venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
678{ 597{
679 const struct venc_config *config; 598 const struct venc_config *config;
680 int r; 599 int r;
@@ -683,7 +602,7 @@ static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
683 602
684 mutex_lock(&venc.venc_lock); 603 mutex_lock(&venc.venc_lock);
685 604
686 config = venc_timings_to_config(&dssdev->panel.timings); 605 config = venc_timings_to_config(&venc.timings);
687 606
688 /* Invert due to VENC_L21_WC_CTL:INV=1 */ 607 /* Invert due to VENC_L21_WC_CTL:INV=1 */
689 venc.wss_data = (wss ^ 0xfffff) << 8; 608 venc.wss_data = (wss ^ 0xfffff) << 8;
@@ -703,30 +622,25 @@ err:
703 return r; 622 return r;
704} 623}
705 624
706static struct omap_dss_driver venc_driver = { 625void omapdss_venc_set_type(struct omap_dss_device *dssdev,
707 .probe = venc_panel_probe, 626 enum omap_dss_venc_type type)
708 .remove = venc_panel_remove, 627{
628 mutex_lock(&venc.venc_lock);
709 629
710 .enable = venc_panel_enable, 630 venc.type = type;
711 .disable = venc_panel_disable,
712 .suspend = venc_panel_suspend,
713 .resume = venc_panel_resume,
714 631
715 .get_resolution = omapdss_default_get_resolution, 632 mutex_unlock(&venc.venc_lock);
716 .get_recommended_bpp = omapdss_default_get_recommended_bpp, 633}
717 634
718 .set_timings = venc_set_timings, 635void omapdss_venc_invert_vid_out_polarity(struct omap_dss_device *dssdev,
719 .check_timings = venc_check_timings, 636 bool invert_polarity)
637{
638 mutex_lock(&venc.venc_lock);
720 639
721 .get_wss = venc_get_wss, 640 venc.invert_polarity = invert_polarity;
722 .set_wss = venc_set_wss,
723 641
724 .driver = { 642 mutex_unlock(&venc.venc_lock);
725 .name = "venc", 643}
726 .owner = THIS_MODULE,
727 },
728};
729/* driver end */
730 644
731static int __init venc_init_display(struct omap_dss_device *dssdev) 645static int __init venc_init_display(struct omap_dss_device *dssdev)
732{ 646{
@@ -752,11 +666,6 @@ static void venc_dump_regs(struct seq_file *s)
752{ 666{
753#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r)) 667#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r))
754 668
755 if (cpu_is_omap44xx()) {
756 seq_printf(s, "VENC currently disabled on OMAP44xx\n");
757 return;
758 }
759
760 if (venc_runtime_get()) 669 if (venc_runtime_get())
761 return; 670 return;
762 671
@@ -832,10 +741,14 @@ static void venc_put_clocks(void)
832 clk_put(venc.tv_dac_clk); 741 clk_put(venc.tv_dac_clk);
833} 742}
834 743
835static void __init venc_probe_pdata(struct platform_device *pdev) 744static struct omap_dss_device * __init venc_find_dssdev(struct platform_device *pdev)
836{ 745{
837 struct omap_dss_board_info *pdata = pdev->dev.platform_data; 746 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
838 int r, i; 747 const char *def_disp_name = dss_get_default_display_name();
748 struct omap_dss_device *def_dssdev;
749 int i;
750
751 def_dssdev = NULL;
839 752
840 for (i = 0; i < pdata->num_devices; ++i) { 753 for (i = 0; i < pdata->num_devices; ++i) {
841 struct omap_dss_device *dssdev = pdata->devices[i]; 754 struct omap_dss_device *dssdev = pdata->devices[i];
@@ -843,17 +756,69 @@ static void __init venc_probe_pdata(struct platform_device *pdev)
843 if (dssdev->type != OMAP_DISPLAY_TYPE_VENC) 756 if (dssdev->type != OMAP_DISPLAY_TYPE_VENC)
844 continue; 757 continue;
845 758
846 r = venc_init_display(dssdev); 759 if (def_dssdev == NULL)
847 if (r) { 760 def_dssdev = dssdev;
848 DSSERR("device %s init failed: %d\n", dssdev->name, r); 761
849 continue; 762 if (def_disp_name != NULL &&
763 strcmp(dssdev->name, def_disp_name) == 0) {
764 def_dssdev = dssdev;
765 break;
850 } 766 }
767 }
768
769 return def_dssdev;
770}
771
772static void __init venc_probe_pdata(struct platform_device *vencdev)
773{
774 struct omap_dss_device *plat_dssdev;
775 struct omap_dss_device *dssdev;
776 int r;
777
778 plat_dssdev = venc_find_dssdev(vencdev);
851 779
852 r = omap_dss_register_device(dssdev, &pdev->dev, i); 780 if (!plat_dssdev)
853 if (r) 781 return;
854 DSSERR("device %s register failed: %d\n", 782
855 dssdev->name, r); 783 dssdev = dss_alloc_and_init_device(&vencdev->dev);
784 if (!dssdev)
785 return;
786
787 dss_copy_device_pdata(dssdev, plat_dssdev);
788
789 dssdev->channel = OMAP_DSS_CHANNEL_DIGIT;
790
791 r = venc_init_display(dssdev);
792 if (r) {
793 DSSERR("device %s init failed: %d\n", dssdev->name, r);
794 dss_put_device(dssdev);
795 return;
856 } 796 }
797
798 r = dss_add_device(dssdev);
799 if (r) {
800 DSSERR("device %s register failed: %d\n", dssdev->name, r);
801 dss_put_device(dssdev);
802 return;
803 }
804}
805
806static void __init venc_init_output(struct platform_device *pdev)
807{
808 struct omap_dss_output *out = &venc.output;
809
810 out->pdev = pdev;
811 out->id = OMAP_DSS_OUTPUT_VENC;
812 out->type = OMAP_DISPLAY_TYPE_VENC;
813
814 dss_register_output(out);
815}
816
817static void __exit venc_uninit_output(struct platform_device *pdev)
818{
819 struct omap_dss_output *out = &venc.output;
820
821 dss_unregister_output(out);
857} 822}
858 823
859/* VENC HW IP initialisation */ 824/* VENC HW IP initialisation */
@@ -897,17 +862,19 @@ static int __init omap_venchw_probe(struct platform_device *pdev)
897 862
898 venc_runtime_put(); 863 venc_runtime_put();
899 864
900 r = omap_dss_register_driver(&venc_driver); 865 r = venc_panel_init();
901 if (r) 866 if (r)
902 goto err_reg_panel_driver; 867 goto err_panel_init;
903 868
904 dss_debugfs_create_file("venc", venc_dump_regs); 869 dss_debugfs_create_file("venc", venc_dump_regs);
905 870
871 venc_init_output(pdev);
872
906 venc_probe_pdata(pdev); 873 venc_probe_pdata(pdev);
907 874
908 return 0; 875 return 0;
909 876
910err_reg_panel_driver: 877err_panel_init:
911err_runtime_get: 878err_runtime_get:
912 pm_runtime_disable(&pdev->dev); 879 pm_runtime_disable(&pdev->dev);
913 venc_put_clocks(); 880 venc_put_clocks();
@@ -916,14 +883,16 @@ err_runtime_get:
916 883
917static int __exit omap_venchw_remove(struct platform_device *pdev) 884static int __exit omap_venchw_remove(struct platform_device *pdev)
918{ 885{
919 omap_dss_unregister_child_devices(&pdev->dev); 886 dss_unregister_child_devices(&pdev->dev);
920 887
921 if (venc.vdda_dac_reg != NULL) { 888 if (venc.vdda_dac_reg != NULL) {
922 regulator_put(venc.vdda_dac_reg); 889 regulator_put(venc.vdda_dac_reg);
923 venc.vdda_dac_reg = NULL; 890 venc.vdda_dac_reg = NULL;
924 } 891 }
925 892
926 omap_dss_unregister_driver(&venc_driver); 893 venc_panel_exit();
894
895 venc_uninit_output(pdev);
927 896
928 pm_runtime_disable(&pdev->dev); 897 pm_runtime_disable(&pdev->dev);
929 venc_put_clocks(); 898 venc_put_clocks();
@@ -971,16 +940,10 @@ static struct platform_driver omap_venchw_driver = {
971 940
972int __init venc_init_platform_driver(void) 941int __init venc_init_platform_driver(void)
973{ 942{
974 if (cpu_is_omap44xx())
975 return 0;
976
977 return platform_driver_probe(&omap_venchw_driver, omap_venchw_probe); 943 return platform_driver_probe(&omap_venchw_driver, omap_venchw_probe);
978} 944}
979 945
980void __exit venc_uninit_platform_driver(void) 946void __exit venc_uninit_platform_driver(void)
981{ 947{
982 if (cpu_is_omap44xx())
983 return;
984
985 platform_driver_unregister(&omap_venchw_driver); 948 platform_driver_unregister(&omap_venchw_driver);
986} 949}
diff --git a/drivers/video/omap2/dss/venc_panel.c b/drivers/video/omap2/dss/venc_panel.c
new file mode 100644
index 000000000000..d55b8784ecfd
--- /dev/null
+++ b/drivers/video/omap2/dss/venc_panel.c
@@ -0,0 +1,251 @@
1/*
2 * Copyright (C) 2009 Nokia Corporation
3 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
4 *
5 * VENC panel driver
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/kernel.h>
21#include <linux/err.h>
22#include <linux/io.h>
23#include <linux/mutex.h>
24#include <linux/module.h>
25
26#include <video/omapdss.h>
27
28#include "dss.h"
29
30static struct {
31 struct mutex lock;
32} venc_panel;
33
34static ssize_t display_output_type_show(struct device *dev,
35 struct device_attribute *attr, char *buf)
36{
37 struct omap_dss_device *dssdev = to_dss_device(dev);
38 const char *ret;
39
40 switch (dssdev->phy.venc.type) {
41 case OMAP_DSS_VENC_TYPE_COMPOSITE:
42 ret = "composite";
43 break;
44 case OMAP_DSS_VENC_TYPE_SVIDEO:
45 ret = "svideo";
46 break;
47 default:
48 return -EINVAL;
49 }
50
51 return snprintf(buf, PAGE_SIZE, "%s\n", ret);
52}
53
54static ssize_t display_output_type_store(struct device *dev,
55 struct device_attribute *attr, const char *buf, size_t size)
56{
57 struct omap_dss_device *dssdev = to_dss_device(dev);
58 enum omap_dss_venc_type new_type;
59
60 if (sysfs_streq("composite", buf))
61 new_type = OMAP_DSS_VENC_TYPE_COMPOSITE;
62 else if (sysfs_streq("svideo", buf))
63 new_type = OMAP_DSS_VENC_TYPE_SVIDEO;
64 else
65 return -EINVAL;
66
67 mutex_lock(&venc_panel.lock);
68
69 if (dssdev->phy.venc.type != new_type) {
70 dssdev->phy.venc.type = new_type;
71 omapdss_venc_set_type(dssdev, new_type);
72 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
73 omapdss_venc_display_disable(dssdev);
74 omapdss_venc_display_enable(dssdev);
75 }
76 }
77
78 mutex_unlock(&venc_panel.lock);
79
80 return size;
81}
82
83static DEVICE_ATTR(output_type, S_IRUGO | S_IWUSR,
84 display_output_type_show, display_output_type_store);
85
86static int venc_panel_probe(struct omap_dss_device *dssdev)
87{
88 /* set default timings to PAL */
89 const struct omap_video_timings default_timings = {
90 .x_res = 720,
91 .y_res = 574,
92 .pixel_clock = 13500,
93 .hsw = 64,
94 .hfp = 12,
95 .hbp = 68,
96 .vsw = 5,
97 .vfp = 5,
98 .vbp = 41,
99
100 .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
101 .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
102
103 .interlace = true,
104 };
105
106 mutex_init(&venc_panel.lock);
107
108 dssdev->panel.timings = default_timings;
109
110 return device_create_file(&dssdev->dev, &dev_attr_output_type);
111}
112
113static void venc_panel_remove(struct omap_dss_device *dssdev)
114{
115 device_remove_file(&dssdev->dev, &dev_attr_output_type);
116}
117
118static int venc_panel_enable(struct omap_dss_device *dssdev)
119{
120 int r;
121
122 dev_dbg(&dssdev->dev, "venc_panel_enable\n");
123
124 mutex_lock(&venc_panel.lock);
125
126 if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
127 r = -EINVAL;
128 goto err;
129 }
130
131 omapdss_venc_set_timings(dssdev, &dssdev->panel.timings);
132 omapdss_venc_set_type(dssdev, dssdev->phy.venc.type);
133 omapdss_venc_invert_vid_out_polarity(dssdev,
134 dssdev->phy.venc.invert_polarity);
135
136 r = omapdss_venc_display_enable(dssdev);
137 if (r)
138 goto err;
139
140 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
141
142 mutex_unlock(&venc_panel.lock);
143
144 return 0;
145err:
146 mutex_unlock(&venc_panel.lock);
147
148 return r;
149}
150
151static void venc_panel_disable(struct omap_dss_device *dssdev)
152{
153 dev_dbg(&dssdev->dev, "venc_panel_disable\n");
154
155 mutex_lock(&venc_panel.lock);
156
157 if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED)
158 goto end;
159
160 if (dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED) {
161 /* suspended is the same as disabled with venc */
162 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
163 goto end;
164 }
165
166 omapdss_venc_display_disable(dssdev);
167
168 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
169end:
170 mutex_unlock(&venc_panel.lock);
171}
172
173static int venc_panel_suspend(struct omap_dss_device *dssdev)
174{
175 venc_panel_disable(dssdev);
176 return 0;
177}
178
179static int venc_panel_resume(struct omap_dss_device *dssdev)
180{
181 return venc_panel_enable(dssdev);
182}
183
184static void venc_panel_set_timings(struct omap_dss_device *dssdev,
185 struct omap_video_timings *timings)
186{
187 dev_dbg(&dssdev->dev, "venc_panel_set_timings\n");
188
189 mutex_lock(&venc_panel.lock);
190
191 omapdss_venc_set_timings(dssdev, timings);
192 dssdev->panel.timings = *timings;
193
194 mutex_unlock(&venc_panel.lock);
195}
196
197static int venc_panel_check_timings(struct omap_dss_device *dssdev,
198 struct omap_video_timings *timings)
199{
200 dev_dbg(&dssdev->dev, "venc_panel_check_timings\n");
201
202 return omapdss_venc_check_timings(dssdev, timings);
203}
204
205static u32 venc_panel_get_wss(struct omap_dss_device *dssdev)
206{
207 dev_dbg(&dssdev->dev, "venc_panel_get_wss\n");
208
209 return omapdss_venc_get_wss(dssdev);
210}
211
212static int venc_panel_set_wss(struct omap_dss_device *dssdev, u32 wss)
213{
214 dev_dbg(&dssdev->dev, "venc_panel_set_wss\n");
215
216 return omapdss_venc_set_wss(dssdev, wss);
217}
218
219static struct omap_dss_driver venc_driver = {
220 .probe = venc_panel_probe,
221 .remove = venc_panel_remove,
222
223 .enable = venc_panel_enable,
224 .disable = venc_panel_disable,
225 .suspend = venc_panel_suspend,
226 .resume = venc_panel_resume,
227
228 .get_resolution = omapdss_default_get_resolution,
229 .get_recommended_bpp = omapdss_default_get_recommended_bpp,
230
231 .set_timings = venc_panel_set_timings,
232 .check_timings = venc_panel_check_timings,
233
234 .get_wss = venc_panel_get_wss,
235 .set_wss = venc_panel_set_wss,
236
237 .driver = {
238 .name = "venc",
239 .owner = THIS_MODULE,
240 },
241};
242
243int venc_panel_init(void)
244{
245 return omap_dss_register_driver(&venc_driver);
246}
247
248void venc_panel_exit(void)
249{
250 omap_dss_unregister_driver(&venc_driver);
251}
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index c6cf372d22c5..606b89f12351 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -599,6 +599,7 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
599 struct omapfb_info *ofbi = FB2OFB(fbi); 599 struct omapfb_info *ofbi = FB2OFB(fbi);
600 struct omapfb2_device *fbdev = ofbi->fbdev; 600 struct omapfb2_device *fbdev = ofbi->fbdev;
601 struct omap_dss_device *display = fb2display(fbi); 601 struct omap_dss_device *display = fb2display(fbi);
602 struct omap_overlay_manager *mgr;
602 603
603 union { 604 union {
604 struct omapfb_update_window_old uwnd_o; 605 struct omapfb_update_window_old uwnd_o;
@@ -786,12 +787,14 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
786 787
787 case OMAPFB_WAITFORVSYNC: 788 case OMAPFB_WAITFORVSYNC:
788 DBG("ioctl WAITFORVSYNC\n"); 789 DBG("ioctl WAITFORVSYNC\n");
789 if (!display) { 790 if (!display && !display->output && !display->output->manager) {
790 r = -EINVAL; 791 r = -EINVAL;
791 break; 792 break;
792 } 793 }
793 794
794 r = display->manager->wait_for_vsync(display->manager); 795 mgr = display->output->manager;
796
797 r = mgr->wait_for_vsync(mgr);
795 break; 798 break;
796 799
797 case OMAPFB_WAITFORGO: 800 case OMAPFB_WAITFORGO:
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 15373f4aee19..16db1589bd91 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -1593,6 +1593,20 @@ static int omapfb_allocate_all_fbs(struct omapfb2_device *fbdev)
1593 return 0; 1593 return 0;
1594} 1594}
1595 1595
1596static void omapfb_clear_fb(struct fb_info *fbi)
1597{
1598 const struct fb_fillrect rect = {
1599 .dx = 0,
1600 .dy = 0,
1601 .width = fbi->var.xres_virtual,
1602 .height = fbi->var.yres_virtual,
1603 .color = 0,
1604 .rop = ROP_COPY,
1605 };
1606
1607 cfb_fillrect(fbi, &rect);
1608}
1609
1596int omapfb_realloc_fbmem(struct fb_info *fbi, unsigned long size, int type) 1610int omapfb_realloc_fbmem(struct fb_info *fbi, unsigned long size, int type)
1597{ 1611{
1598 struct omapfb_info *ofbi = FB2OFB(fbi); 1612 struct omapfb_info *ofbi = FB2OFB(fbi);
@@ -1662,6 +1676,8 @@ int omapfb_realloc_fbmem(struct fb_info *fbi, unsigned long size, int type)
1662 goto err; 1676 goto err;
1663 } 1677 }
1664 1678
1679 omapfb_clear_fb(fbi);
1680
1665 return 0; 1681 return 0;
1666err: 1682err:
1667 omapfb_free_fbmem(fbi); 1683 omapfb_free_fbmem(fbi);
@@ -1946,6 +1962,16 @@ static int omapfb_create_framebuffers(struct omapfb2_device *fbdev)
1946 } 1962 }
1947 } 1963 }
1948 1964
1965 for (i = 0; i < fbdev->num_fbs; i++) {
1966 struct fb_info *fbi = fbdev->fbs[i];
1967 struct omapfb_info *ofbi = FB2OFB(fbi);
1968
1969 if (ofbi->region->size == 0)
1970 continue;
1971
1972 omapfb_clear_fb(fbi);
1973 }
1974
1949 DBG("fb_infos initialized\n"); 1975 DBG("fb_infos initialized\n");
1950 1976
1951 for (i = 0; i < fbdev->num_fbs; i++) { 1977 for (i = 0; i < fbdev->num_fbs; i++) {
@@ -2354,6 +2380,7 @@ static int __init omapfb_probe(struct platform_device *pdev)
2354 struct omap_overlay *ovl; 2380 struct omap_overlay *ovl;
2355 struct omap_dss_device *def_display; 2381 struct omap_dss_device *def_display;
2356 struct omap_dss_device *dssdev; 2382 struct omap_dss_device *dssdev;
2383 struct omap_dss_device *ovl_device;
2357 2384
2358 DBG("omapfb_probe\n"); 2385 DBG("omapfb_probe\n");
2359 2386
@@ -2427,8 +2454,9 @@ static int __init omapfb_probe(struct platform_device *pdev)
2427 /* gfx overlay should be the default one. find a display 2454 /* gfx overlay should be the default one. find a display
2428 * connected to that, and use it as default display */ 2455 * connected to that, and use it as default display */
2429 ovl = omap_dss_get_overlay(0); 2456 ovl = omap_dss_get_overlay(0);
2430 if (ovl->manager && ovl->manager->device) { 2457 ovl_device = ovl->get_device(ovl);
2431 def_display = ovl->manager->device; 2458 if (ovl_device) {
2459 def_display = ovl_device;
2432 } else { 2460 } else {
2433 dev_warn(&pdev->dev, "cannot find default display\n"); 2461 dev_warn(&pdev->dev, "cannot find default display\n");
2434 def_display = NULL; 2462 def_display = NULL;
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
index 30361a09aecd..5ced9b334d35 100644
--- a/drivers/video/omap2/omapfb/omapfb.h
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -148,8 +148,9 @@ static inline struct omap_dss_device *fb2display(struct fb_info *fbi)
148 148
149 /* XXX: returns the display connected to first attached overlay */ 149 /* XXX: returns the display connected to first attached overlay */
150 for (i = 0; i < ofbi->num_overlays; i++) { 150 for (i = 0; i < ofbi->num_overlays; i++) {
151 if (ofbi->overlays[i]->manager) 151 struct omap_overlay *ovl = ofbi->overlays[i];
152 return ofbi->overlays[i]->manager->device; 152
153 return ovl->get_device(ovl);
153 } 154 }
154 155
155 return NULL; 156 return NULL;
diff --git a/drivers/video/omap2/vram.c b/drivers/video/omap2/vram.c
index 87e421e25afe..f2b15c4a75bc 100644
--- a/drivers/video/omap2/vram.c
+++ b/drivers/video/omap2/vram.c
@@ -34,7 +34,6 @@
34#include <asm/setup.h> 34#include <asm/setup.h>
35 35
36#include <plat/vram.h> 36#include <plat/vram.h>
37#include <plat/dma.h>
38 37
39#ifdef DEBUG 38#ifdef DEBUG
40#define DBG(format, ...) pr_debug("VRAM: " format, ## __VA_ARGS__) 39#define DBG(format, ...) pr_debug("VRAM: " format, ## __VA_ARGS__)
@@ -250,59 +249,6 @@ int omap_vram_reserve(unsigned long paddr, size_t size)
250} 249}
251EXPORT_SYMBOL(omap_vram_reserve); 250EXPORT_SYMBOL(omap_vram_reserve);
252 251
253static void _omap_vram_dma_cb(int lch, u16 ch_status, void *data)
254{
255 struct completion *compl = data;
256 complete(compl);
257}
258
259static int _omap_vram_clear(u32 paddr, unsigned pages)
260{
261 struct completion compl;
262 unsigned elem_count;
263 unsigned frame_count;
264 int r;
265 int lch;
266
267 init_completion(&compl);
268
269 r = omap_request_dma(OMAP_DMA_NO_DEVICE, "VRAM DMA",
270 _omap_vram_dma_cb,
271 &compl, &lch);
272 if (r) {
273 pr_err("VRAM: request_dma failed for memory clear\n");
274 return -EBUSY;
275 }
276
277 elem_count = pages * PAGE_SIZE / 4;
278 frame_count = 1;
279
280 omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S32,
281 elem_count, frame_count,
282 OMAP_DMA_SYNC_ELEMENT,
283 0, 0);
284
285 omap_set_dma_dest_params(lch, 0, OMAP_DMA_AMODE_POST_INC,
286 paddr, 0, 0);
287
288 omap_set_dma_color_mode(lch, OMAP_DMA_CONSTANT_FILL, 0x000000);
289
290 omap_start_dma(lch);
291
292 if (wait_for_completion_timeout(&compl, msecs_to_jiffies(1000)) == 0) {
293 omap_stop_dma(lch);
294 pr_err("VRAM: dma timeout while clearing memory\n");
295 r = -EIO;
296 goto err;
297 }
298
299 r = 0;
300err:
301 omap_free_dma(lch);
302
303 return r;
304}
305
306static int _omap_vram_alloc(unsigned pages, unsigned long *paddr) 252static int _omap_vram_alloc(unsigned pages, unsigned long *paddr)
307{ 253{
308 struct vram_region *rm; 254 struct vram_region *rm;
@@ -337,8 +283,6 @@ found:
337 283
338 *paddr = start; 284 *paddr = start;
339 285
340 _omap_vram_clear(start, pages);
341
342 return 0; 286 return 0;
343 } 287 }
344 288
diff --git a/drivers/video/pnx4008/Makefile b/drivers/video/pnx4008/Makefile
deleted file mode 100644
index 636aaccf01fd..000000000000
--- a/drivers/video/pnx4008/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
1#
2# Makefile for the new PNX4008 framebuffer device driver
3#
4
5obj-$(CONFIG_FB_PNX4008_DUM) += sdum.o
6obj-$(CONFIG_FB_PNX4008_DUM_RGB) += pnxrgbfb.o
7
diff --git a/drivers/video/pnx4008/dum.h b/drivers/video/pnx4008/dum.h
deleted file mode 100644
index 1234d4375d92..000000000000
--- a/drivers/video/pnx4008/dum.h
+++ /dev/null
@@ -1,211 +0,0 @@
1/*
2 * linux/drivers/video/pnx4008/dum.h
3 *
4 * Internal header for SDUM
5 *
6 * 2005 (c) Koninklijke Philips N.V. This file is licensed under
7 * the terms of the GNU General Public License version 2. This program
8 * is licensed "as is" without any warranty of any kind, whether express
9 * or implied.
10 */
11
12#ifndef __PNX008_DUM_H__
13#define __PNX008_DUM_H__
14
15#include <mach/platform.h>
16
17#define PNX4008_DUMCONF_VA_BASE IO_ADDRESS(PNX4008_DUMCONF_BASE)
18#define PNX4008_DUM_MAIN_VA_BASE IO_ADDRESS(PNX4008_DUM_MAINCFG_BASE)
19
20/* DUM CFG ADDRESSES */
21#define DUM_CH_BASE_ADR (PNX4008_DUMCONF_VA_BASE + 0x00)
22#define DUM_CH_MIN_ADR (PNX4008_DUMCONF_VA_BASE + 0x00)
23#define DUM_CH_MAX_ADR (PNX4008_DUMCONF_VA_BASE + 0x04)
24#define DUM_CH_CONF_ADR (PNX4008_DUMCONF_VA_BASE + 0x08)
25#define DUM_CH_STAT_ADR (PNX4008_DUMCONF_VA_BASE + 0x0C)
26#define DUM_CH_CTRL_ADR (PNX4008_DUMCONF_VA_BASE + 0x10)
27
28#define CH_MARG (0x100 / sizeof(u32))
29#define DUM_CH_MIN(i) (*((volatile u32 *)DUM_CH_MIN_ADR + (i) * CH_MARG))
30#define DUM_CH_MAX(i) (*((volatile u32 *)DUM_CH_MAX_ADR + (i) * CH_MARG))
31#define DUM_CH_CONF(i) (*((volatile u32 *)DUM_CH_CONF_ADR + (i) * CH_MARG))
32#define DUM_CH_STAT(i) (*((volatile u32 *)DUM_CH_STAT_ADR + (i) * CH_MARG))
33#define DUM_CH_CTRL(i) (*((volatile u32 *)DUM_CH_CTRL_ADR + (i) * CH_MARG))
34
35#define DUM_CONF_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x00)
36#define DUM_CTRL_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x04)
37#define DUM_STAT_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x08)
38#define DUM_DECODE_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x0C)
39#define DUM_COM_BASE_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x10)
40#define DUM_SYNC_C_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x14)
41#define DUM_CLK_DIV_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x18)
42#define DUM_DIRTY_LOW_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x20)
43#define DUM_DIRTY_HIGH_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x24)
44#define DUM_FORMAT_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x28)
45#define DUM_WTCFG1_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x30)
46#define DUM_RTCFG1_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x34)
47#define DUM_WTCFG2_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x38)
48#define DUM_RTCFG2_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x3C)
49#define DUM_TCFG_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x40)
50#define DUM_OUTP_FORMAT1_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x44)
51#define DUM_OUTP_FORMAT2_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x48)
52#define DUM_SYNC_MODE_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x4C)
53#define DUM_SYNC_OUT_C_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x50)
54
55#define DUM_CONF (*(volatile u32 *)(DUM_CONF_ADR))
56#define DUM_CTRL (*(volatile u32 *)(DUM_CTRL_ADR))
57#define DUM_STAT (*(volatile u32 *)(DUM_STAT_ADR))
58#define DUM_DECODE (*(volatile u32 *)(DUM_DECODE_ADR))
59#define DUM_COM_BASE (*(volatile u32 *)(DUM_COM_BASE_ADR))
60#define DUM_SYNC_C (*(volatile u32 *)(DUM_SYNC_C_ADR))
61#define DUM_CLK_DIV (*(volatile u32 *)(DUM_CLK_DIV_ADR))
62#define DUM_DIRTY_LOW (*(volatile u32 *)(DUM_DIRTY_LOW_ADR))
63#define DUM_DIRTY_HIGH (*(volatile u32 *)(DUM_DIRTY_HIGH_ADR))
64#define DUM_FORMAT (*(volatile u32 *)(DUM_FORMAT_ADR))
65#define DUM_WTCFG1 (*(volatile u32 *)(DUM_WTCFG1_ADR))
66#define DUM_RTCFG1 (*(volatile u32 *)(DUM_RTCFG1_ADR))
67#define DUM_WTCFG2 (*(volatile u32 *)(DUM_WTCFG2_ADR))
68#define DUM_RTCFG2 (*(volatile u32 *)(DUM_RTCFG2_ADR))
69#define DUM_TCFG (*(volatile u32 *)(DUM_TCFG_ADR))
70#define DUM_OUTP_FORMAT1 (*(volatile u32 *)(DUM_OUTP_FORMAT1_ADR))
71#define DUM_OUTP_FORMAT2 (*(volatile u32 *)(DUM_OUTP_FORMAT2_ADR))
72#define DUM_SYNC_MODE (*(volatile u32 *)(DUM_SYNC_MODE_ADR))
73#define DUM_SYNC_OUT_C (*(volatile u32 *)(DUM_SYNC_OUT_C_ADR))
74
75/* DUM SLAVE ADDRESSES */
76#define DUM_SLAVE_WRITE_ADR (PNX4008_DUM_MAINCFG_BASE + 0x0000000)
77#define DUM_SLAVE_READ1_I_ADR (PNX4008_DUM_MAINCFG_BASE + 0x1000000)
78#define DUM_SLAVE_READ1_R_ADR (PNX4008_DUM_MAINCFG_BASE + 0x1000004)
79#define DUM_SLAVE_READ2_I_ADR (PNX4008_DUM_MAINCFG_BASE + 0x1000008)
80#define DUM_SLAVE_READ2_R_ADR (PNX4008_DUM_MAINCFG_BASE + 0x100000C)
81
82#define DUM_SLAVE_WRITE_W ((volatile u32 *)(DUM_SLAVE_WRITE_ADR))
83#define DUM_SLAVE_WRITE_HW ((volatile u16 *)(DUM_SLAVE_WRITE_ADR))
84#define DUM_SLAVE_READ1_I ((volatile u8 *)(DUM_SLAVE_READ1_I_ADR))
85#define DUM_SLAVE_READ1_R ((volatile u16 *)(DUM_SLAVE_READ1_R_ADR))
86#define DUM_SLAVE_READ2_I ((volatile u8 *)(DUM_SLAVE_READ2_I_ADR))
87#define DUM_SLAVE_READ2_R ((volatile u16 *)(DUM_SLAVE_READ2_R_ADR))
88
89/* Sony display register addresses */
90#define DISP_0_REG (0x00)
91#define DISP_1_REG (0x01)
92#define DISP_CAL_REG (0x20)
93#define DISP_ID_REG (0x2A)
94#define DISP_XMIN_L_REG (0x30)
95#define DISP_XMIN_H_REG (0x31)
96#define DISP_YMIN_REG (0x32)
97#define DISP_XMAX_L_REG (0x34)
98#define DISP_XMAX_H_REG (0x35)
99#define DISP_YMAX_REG (0x36)
100#define DISP_SYNC_EN_REG (0x38)
101#define DISP_SYNC_RISE_L_REG (0x3C)
102#define DISP_SYNC_RISE_H_REG (0x3D)
103#define DISP_SYNC_FALL_L_REG (0x3E)
104#define DISP_SYNC_FALL_H_REG (0x3F)
105#define DISP_PIXEL_REG (0x0B)
106#define DISP_DUMMY1_REG (0x28)
107#define DISP_DUMMY2_REG (0x29)
108#define DISP_TIMING_REG (0x98)
109#define DISP_DUMP_REG (0x99)
110
111/* Sony display constants */
112#define SONY_ID1 (0x22)
113#define SONY_ID2 (0x23)
114
115/* Philips display register addresses */
116#define PH_DISP_ORIENT_REG (0x003)
117#define PH_DISP_YPOINT_REG (0x200)
118#define PH_DISP_XPOINT_REG (0x201)
119#define PH_DISP_PIXEL_REG (0x202)
120#define PH_DISP_YMIN_REG (0x406)
121#define PH_DISP_YMAX_REG (0x407)
122#define PH_DISP_XMIN_REG (0x408)
123#define PH_DISP_XMAX_REG (0x409)
124
125/* Misc constants */
126#define NO_VALID_DISPLAY_FOUND (0)
127#define DISPLAY2_IS_NOT_CONNECTED (0)
128
129/* register values */
130#define V_BAC_ENABLE (BIT(0))
131#define V_BAC_DISABLE_IDLE (BIT(1))
132#define V_BAC_DISABLE_TRIG (BIT(2))
133#define V_DUM_RESET (BIT(3))
134#define V_MUX_RESET (BIT(4))
135#define BAC_ENABLED (BIT(0))
136#define BAC_DISABLED 0
137
138/* Sony LCD commands */
139#define V_LCD_STANDBY_OFF ((BIT(25)) | (0 << 16) | DISP_0_REG)
140#define V_LCD_USE_9BIT_BUS ((BIT(25)) | (2 << 16) | DISP_1_REG)
141#define V_LCD_SYNC_RISE_L ((BIT(25)) | (0 << 16) | DISP_SYNC_RISE_L_REG)
142#define V_LCD_SYNC_RISE_H ((BIT(25)) | (0 << 16) | DISP_SYNC_RISE_H_REG)
143#define V_LCD_SYNC_FALL_L ((BIT(25)) | (160 << 16) | DISP_SYNC_FALL_L_REG)
144#define V_LCD_SYNC_FALL_H ((BIT(25)) | (0 << 16) | DISP_SYNC_FALL_H_REG)
145#define V_LCD_SYNC_ENABLE ((BIT(25)) | (128 << 16) | DISP_SYNC_EN_REG)
146#define V_LCD_DISPLAY_ON ((BIT(25)) | (64 << 16) | DISP_0_REG)
147
148enum {
149 PAD_NONE,
150 PAD_512,
151 PAD_1024
152};
153
154enum {
155 RGB888,
156 RGB666,
157 RGB565,
158 BGR565,
159 ARGB1555,
160 ABGR1555,
161 ARGB4444,
162 ABGR4444
163};
164
165struct dum_setup {
166 int sync_neg_edge;
167 int round_robin;
168 int mux_int;
169 int synced_dirty_flag_int;
170 int dirty_flag_int;
171 int error_int;
172 int pf_empty_int;
173 int sf_empty_int;
174 int bac_dis_int;
175 u32 dirty_base_adr;
176 u32 command_base_adr;
177 u32 sync_clk_div;
178 int sync_output;
179 u32 sync_restart_val;
180 u32 set_sync_high;
181 u32 set_sync_low;
182};
183
184struct dum_ch_setup {
185 int disp_no;
186 u32 xmin;
187 u32 ymin;
188 u32 xmax;
189 u32 ymax;
190 int xmirror;
191 int ymirror;
192 int rotate;
193 u32 minadr;
194 u32 maxadr;
195 u32 dirtybuffer;
196 int pad;
197 int format;
198 int hwdirty;
199 int slave_trans;
200};
201
202struct disp_window {
203 u32 xmin_l;
204 u32 xmin_h;
205 u32 ymin;
206 u32 xmax_l;
207 u32 xmax_h;
208 u32 ymax;
209};
210
211#endif /* #ifndef __PNX008_DUM_H__ */
diff --git a/drivers/video/pnx4008/fbcommon.h b/drivers/video/pnx4008/fbcommon.h
deleted file mode 100644
index 4ebc87dafafb..000000000000
--- a/drivers/video/pnx4008/fbcommon.h
+++ /dev/null
@@ -1,43 +0,0 @@
1/*
2 * Copyright (C) 2005 Philips Semiconductors
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2, or (at your option)
7 * any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; see the file COPYING. If not, write to
16 * the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
17 * Boston, MA 02111-1307, USA, or http://www.gnu.org/licenses/gpl.html
18*/
19
20#define QCIF_W (176)
21#define QCIF_H (144)
22
23#define CIF_W (352)
24#define CIF_H (288)
25
26#define LCD_X_RES 208
27#define LCD_Y_RES 320
28#define LCD_X_PAD 256
29#define LCD_BBP 4 /* Bytes Per Pixel */
30
31#define DISP_MAX_X_SIZE (320)
32#define DISP_MAX_Y_SIZE (208)
33
34#define RETURNVAL_BASE (0x400)
35
36enum fb_ioctl_returntype {
37 ENORESOURCESLEFT = RETURNVAL_BASE,
38 ERESOURCESNOTFREED,
39 EPROCNOTOWNER,
40 EFBNOTOWNER,
41 ECOPYFAILED,
42 EIOREMAPFAILED,
43};
diff --git a/drivers/video/pnx4008/pnxrgbfb.c b/drivers/video/pnx4008/pnxrgbfb.c
deleted file mode 100644
index 6d30428e9cf9..000000000000
--- a/drivers/video/pnx4008/pnxrgbfb.c
+++ /dev/null
@@ -1,198 +0,0 @@
1/*
2 * drivers/video/pnx4008/pnxrgbfb.c
3 *
4 * PNX4008's framebuffer support
5 *
6 * Author: Grigory Tolstolytkin <gtolstolytkin@ru.mvista.com>
7 * Based on Philips Semiconductors's code
8 *
9 * Copyrght (c) 2005 MontaVista Software, Inc.
10 * Copyright (c) 2005 Philips Semiconductors
11 * This file is licensed under the terms of the GNU General Public License
12 * version 2. This program is licensed "as is" without any warranty of any
13 * kind, whether express or implied.
14 */
15
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/errno.h>
19#include <linux/string.h>
20#include <linux/mm.h>
21#include <linux/vmalloc.h>
22#include <linux/delay.h>
23#include <linux/interrupt.h>
24#include <linux/fb.h>
25#include <linux/init.h>
26#include <linux/platform_device.h>
27
28#include "sdum.h"
29#include "fbcommon.h"
30
31static u32 colreg[16];
32
33static struct fb_var_screeninfo rgbfb_var __initdata = {
34 .xres = LCD_X_RES,
35 .yres = LCD_Y_RES,
36 .xres_virtual = LCD_X_RES,
37 .yres_virtual = LCD_Y_RES,
38 .bits_per_pixel = 32,
39 .red.offset = 16,
40 .red.length = 8,
41 .green.offset = 8,
42 .green.length = 8,
43 .blue.offset = 0,
44 .blue.length = 8,
45 .left_margin = 0,
46 .right_margin = 0,
47 .upper_margin = 0,
48 .lower_margin = 0,
49 .vmode = FB_VMODE_NONINTERLACED,
50};
51static struct fb_fix_screeninfo rgbfb_fix __initdata = {
52 .id = "RGBFB",
53 .line_length = LCD_X_RES * LCD_BBP,
54 .type = FB_TYPE_PACKED_PIXELS,
55 .visual = FB_VISUAL_TRUECOLOR,
56 .xpanstep = 0,
57 .ypanstep = 0,
58 .ywrapstep = 0,
59 .accel = FB_ACCEL_NONE,
60};
61
62static int channel_owned;
63
64static int no_cursor(struct fb_info *info, struct fb_cursor *cursor)
65{
66 return 0;
67}
68
69static int rgbfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
70 u_int transp, struct fb_info *info)
71{
72 if (regno > 15)
73 return 1;
74
75 colreg[regno] = ((red & 0xff00) << 8) | (green & 0xff00) |
76 ((blue & 0xff00) >> 8);
77 return 0;
78}
79
80static int rgbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
81{
82 return pnx4008_sdum_mmap(info, vma, NULL);
83}
84
85static struct fb_ops rgbfb_ops = {
86 .fb_mmap = rgbfb_mmap,
87 .fb_setcolreg = rgbfb_setcolreg,
88 .fb_fillrect = cfb_fillrect,
89 .fb_copyarea = cfb_copyarea,
90 .fb_imageblit = cfb_imageblit,
91};
92
93static int rgbfb_remove(struct platform_device *pdev)
94{
95 struct fb_info *info = platform_get_drvdata(pdev);
96
97 if (info) {
98 unregister_framebuffer(info);
99 fb_dealloc_cmap(&info->cmap);
100 framebuffer_release(info);
101 platform_set_drvdata(pdev, NULL);
102 }
103
104 pnx4008_free_dum_channel(channel_owned, pdev->id);
105 pnx4008_set_dum_exit_notification(pdev->id);
106
107 return 0;
108}
109
110static int __devinit rgbfb_probe(struct platform_device *pdev)
111{
112 struct fb_info *info;
113 struct dumchannel_uf chan_uf;
114 int ret;
115 char *option;
116
117 info = framebuffer_alloc(sizeof(u32) * 16, &pdev->dev);
118 if (!info) {
119 ret = -ENOMEM;
120 goto err;
121 }
122
123 pnx4008_get_fb_addresses(FB_TYPE_RGB, (void **)&info->screen_base,
124 (dma_addr_t *) &rgbfb_fix.smem_start,
125 &rgbfb_fix.smem_len);
126
127 if ((ret = pnx4008_alloc_dum_channel(pdev->id)) < 0)
128 goto err0;
129 else {
130 channel_owned = ret;
131 chan_uf.channelnr = channel_owned;
132 chan_uf.dirty = (u32 *) NULL;
133 chan_uf.source = (u32 *) rgbfb_fix.smem_start;
134 chan_uf.x_offset = 0;
135 chan_uf.y_offset = 0;
136 chan_uf.width = LCD_X_RES;
137 chan_uf.height = LCD_Y_RES;
138
139 if ((ret = pnx4008_put_dum_channel_uf(chan_uf, pdev->id))< 0)
140 goto err1;
141
142 if ((ret =
143 pnx4008_set_dum_channel_sync(channel_owned, CONF_SYNC_ON,
144 pdev->id)) < 0)
145 goto err1;
146
147 if ((ret =
148 pnx4008_set_dum_channel_dirty_detect(channel_owned,
149 CONF_DIRTYDETECTION_ON,
150 pdev->id)) < 0)
151 goto err1;
152 }
153
154 if (!fb_get_options("pnxrgbfb", &option) && option &&
155 !strcmp(option, "nocursor"))
156 rgbfb_ops.fb_cursor = no_cursor;
157
158 info->node = -1;
159 info->flags = FBINFO_FLAG_DEFAULT;
160 info->fbops = &rgbfb_ops;
161 info->fix = rgbfb_fix;
162 info->var = rgbfb_var;
163 info->screen_size = rgbfb_fix.smem_len;
164 info->pseudo_palette = info->par;
165 info->par = NULL;
166
167 ret = fb_alloc_cmap(&info->cmap, 256, 0);
168 if (ret < 0)
169 goto err1;
170
171 ret = register_framebuffer(info);
172 if (ret < 0)
173 goto err2;
174 platform_set_drvdata(pdev, info);
175
176 return 0;
177
178err2:
179 fb_dealloc_cmap(&info->cmap);
180err1:
181 pnx4008_free_dum_channel(channel_owned, pdev->id);
182err0:
183 framebuffer_release(info);
184err:
185 return ret;
186}
187
188static struct platform_driver rgbfb_driver = {
189 .driver = {
190 .name = "pnx4008-rgbfb",
191 },
192 .probe = rgbfb_probe,
193 .remove = rgbfb_remove,
194};
195
196module_platform_driver(rgbfb_driver);
197
198MODULE_LICENSE("GPL");
diff --git a/drivers/video/pnx4008/sdum.c b/drivers/video/pnx4008/sdum.c
deleted file mode 100644
index c5c741452cac..000000000000
--- a/drivers/video/pnx4008/sdum.c
+++ /dev/null
@@ -1,861 +0,0 @@
1/*
2 * drivers/video/pnx4008/sdum.c
3 *
4 * Display Update Master support
5 *
6 * Authors: Grigory Tolstolytkin <gtolstolytkin@ru.mvista.com>
7 * Vitaly Wool <vitalywool@gmail.com>
8 * Based on Philips Semiconductors's code
9 *
 * Copyright (c) 2005-2006 MontaVista Software, Inc.
11 * Copyright (c) 2005 Philips Semiconductors
12 * This file is licensed under the terms of the GNU General Public License
13 * version 2. This program is licensed "as is" without any warranty of any
14 * kind, whether express or implied.
15 */
16
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/errno.h>
20#include <linux/string.h>
21#include <linux/mm.h>
22#include <linux/tty.h>
23#include <linux/vmalloc.h>
24#include <linux/delay.h>
25#include <linux/interrupt.h>
26#include <linux/platform_device.h>
27#include <linux/fb.h>
28#include <linux/init.h>
29#include <linux/dma-mapping.h>
30#include <linux/clk.h>
31#include <linux/gfp.h>
32#include <asm/uaccess.h>
33#include <asm/gpio.h>
34
35#include "sdum.h"
36#include "fbcommon.h"
37#include "dum.h"
38
/* Framebuffers we have */

static struct pnx4008_fb_addr {
	int fb_type;		/* FB_TYPE_YUV or FB_TYPE_RGB */
	long addr_offset;	/* offset from the start of LCD video memory */
	long fb_length;		/* size of this framebuffer, in bytes */
} fb_addr[] = {
	[0] = {
	       FB_TYPE_YUV, 0, 0xB0000
	},
	[1] = {
	       FB_TYPE_RGB, 0xB0000, 0x50000
	},
};
53
/* Driver-global DUM state: framebuffer mappings and per-channel bookkeeping. */
static struct dum_data {
	u32 lcd_phys_start;	/* physical base of the DMA framebuffer */
	u32 lcd_virt_start;	/* virtual base of the DMA framebuffer */
	u32 slave_phys_base;	/* physical base of the DUM slave interface */
	u32 *slave_virt_base;	/* ioremapped slave interface window */
	/* owning dev_id per channel, or -1 when the channel is free */
	int fb_owning_channel[MAX_DUM_CHANNELS];
	/* last update-frame setup accepted for each channel */
	struct dumchannel_uf chan_uf_store[MAX_DUM_CHANNELS];
} dum_data;
62
63/* Different local helper functions */
64
65static u32 nof_pixels_dx(struct dum_ch_setup *ch_setup)
66{
67 return (ch_setup->xmax - ch_setup->xmin + 1);
68}
69
70static u32 nof_pixels_dy(struct dum_ch_setup *ch_setup)
71{
72 return (ch_setup->ymax - ch_setup->ymin + 1);
73}
74
75static u32 nof_pixels_dxy(struct dum_ch_setup *ch_setup)
76{
77 return (nof_pixels_dx(ch_setup) * nof_pixels_dy(ch_setup));
78}
79
80static u32 nof_bytes(struct dum_ch_setup *ch_setup)
81{
82 u32 r = nof_pixels_dxy(ch_setup);
83 switch (ch_setup->format) {
84 case RGB888:
85 case RGB666:
86 r *= 4;
87 break;
88
89 default:
90 r *= 2;
91 break;
92 }
93 return r;
94}
95
96static u32 build_command(int disp_no, u32 reg, u32 val)
97{
98 return ((disp_no << 26) | BIT(25) | (val << 16) | (disp_no << 10) |
99 (reg << 0));
100}
101
102static u32 build_double_index(int disp_no, u32 val)
103{
104 return ((disp_no << 26) | (val << 16) | (disp_no << 10) | (val << 0));
105}
106
107static void build_disp_window(struct dum_ch_setup * ch_setup, struct disp_window * dw)
108{
109 dw->ymin = ch_setup->ymin;
110 dw->ymax = ch_setup->ymax;
111 dw->xmin_l = ch_setup->xmin & 0xFF;
112 dw->xmin_h = (ch_setup->xmin & BIT(8)) >> 8;
113 dw->xmax_l = ch_setup->xmax & 0xFF;
114 dw->xmax_h = (ch_setup->xmax & BIT(8)) >> 8;
115}
116
117static int put_channel(struct dumchannel chan)
118{
119 int i = chan.channelnr;
120
121 if (i < 0 || i > MAX_DUM_CHANNELS)
122 return -EINVAL;
123 else {
124 DUM_CH_MIN(i) = chan.dum_ch_min;
125 DUM_CH_MAX(i) = chan.dum_ch_max;
126 DUM_CH_CONF(i) = chan.dum_ch_conf;
127 DUM_CH_CTRL(i) = chan.dum_ch_ctrl;
128 }
129
130 return 0;
131}
132
133static void clear_channel(int channr)
134{
135 struct dumchannel chan;
136
137 chan.channelnr = channr;
138 chan.dum_ch_min = 0;
139 chan.dum_ch_max = 0;
140 chan.dum_ch_conf = 0;
141 chan.dum_ch_ctrl = 0;
142
143 put_channel(chan);
144}
145
146static int put_cmd_string(struct cmdstring cmds)
147{
148 u16 *cmd_str_virtaddr;
149 u32 *cmd_ptr0_virtaddr;
150 u32 cmd_str_physaddr;
151
152 int i = cmds.channelnr;
153
154 if (i < 0 || i > MAX_DUM_CHANNELS)
155 return -EINVAL;
156 else if ((cmd_ptr0_virtaddr =
157 (int *)ioremap_nocache(DUM_COM_BASE,
158 sizeof(int) * MAX_DUM_CHANNELS)) ==
159 NULL)
160 return -EIOREMAPFAILED;
161 else {
162 cmd_str_physaddr = ioread32(&cmd_ptr0_virtaddr[cmds.channelnr]);
163 if ((cmd_str_virtaddr =
164 (u16 *) ioremap_nocache(cmd_str_physaddr,
165 sizeof(cmds))) == NULL) {
166 iounmap(cmd_ptr0_virtaddr);
167 return -EIOREMAPFAILED;
168 } else {
169 int t;
170 for (t = 0; t < 8; t++)
171 iowrite16(*((u16 *)&cmds.prestringlen + t),
172 cmd_str_virtaddr + t);
173
174 for (t = 0; t < cmds.prestringlen / 2; t++)
175 iowrite16(*((u16 *)&cmds.precmd + t),
176 cmd_str_virtaddr + t + 8);
177
178 for (t = 0; t < cmds.poststringlen / 2; t++)
179 iowrite16(*((u16 *)&cmds.postcmd + t),
180 cmd_str_virtaddr + t + 8 +
181 cmds.prestringlen / 2);
182
183 iounmap(cmd_ptr0_virtaddr);
184 iounmap(cmd_str_virtaddr);
185 }
186 }
187
188 return 0;
189}
190
/*
 * Build and install the command string and channel registers for DUM
 * channel @ch_no according to @ch_setup.
 *
 * A "standard" (unmirrored, unrotated) window uses a 32-byte pre-string
 * and no post-string; mirrored/rotated windows use a 48-byte pre-string
 * that also programs the orientation, plus a 16-byte post-string.
 *
 * Returns 0 on success or the error from put_cmd_string().
 */
static u32 dum_ch_setup(int ch_no, struct dum_ch_setup * ch_setup)
{
	struct cmdstring cmds_c;
	struct cmdstring *cmds = &cmds_c;
	struct disp_window dw;
	int standard;
	u32 orientation = 0;
	struct dumchannel chan = { 0 };
	int ret;

	if ((ch_setup->xmirror) || (ch_setup->ymirror) || (ch_setup->rotate)) {
		standard = 0;

		orientation = BIT(1);	/* always set 9-bit-bus */
		if (ch_setup->xmirror)
			orientation |= BIT(4);
		if (ch_setup->ymirror)
			orientation |= BIT(3);
		if (ch_setup->rotate)
			orientation |= BIT(0);
	} else
		standard = 1;

	cmds->channelnr = ch_no;

	/* build command string header */
	if (standard) {
		cmds->prestringlen = 32;
		cmds->poststringlen = 0;
	} else {
		cmds->prestringlen = 48;
		cmds->poststringlen = 16;
	}

	/* display number, a fixed marker bit, and the pixel format */
	cmds->format =
	    (u16) ((ch_setup->disp_no << 4) | (BIT(3)) | (ch_setup->format));
	cmds->reserved = 0x0;
	cmds->startaddr_low = (ch_setup->minadr & 0xFFFF);
	cmds->startaddr_high = (ch_setup->minadr >> 16);

	/* an all-zero address window means "no pixel data" */
	if ((ch_setup->minadr == 0) && (ch_setup->maxadr == 0)
	    && (ch_setup->xmin == 0)
	    && (ch_setup->ymin == 0) && (ch_setup->xmax == 0)
	    && (ch_setup->ymax == 0)) {
		cmds->pixdatlen_low = 0;
		cmds->pixdatlen_high = 0;
	} else {
		u32 nbytes = nof_bytes(ch_setup);
		cmds->pixdatlen_low = (nbytes & 0xFFFF);
		cmds->pixdatlen_high = (nbytes >> 16);
	}

	if (ch_setup->slave_trans)
		cmds->pixdatlen_high |= BIT(15);

	/* build pre-string */
	build_disp_window(ch_setup, &dw);

	if (standard) {
		/* program the window coordinates, then switch the display
		 * controller to pixel-data mode */
		cmds->precmd[0] =
		    build_command(ch_setup->disp_no, DISP_XMIN_L_REG, 0x99);
		cmds->precmd[1] =
		    build_command(ch_setup->disp_no, DISP_XMIN_L_REG,
				  dw.xmin_l);
		cmds->precmd[2] =
		    build_command(ch_setup->disp_no, DISP_XMIN_H_REG,
				  dw.xmin_h);
		cmds->precmd[3] =
		    build_command(ch_setup->disp_no, DISP_YMIN_REG, dw.ymin);
		cmds->precmd[4] =
		    build_command(ch_setup->disp_no, DISP_XMAX_L_REG,
				  dw.xmax_l);
		cmds->precmd[5] =
		    build_command(ch_setup->disp_no, DISP_XMAX_H_REG,
				  dw.xmax_h);
		cmds->precmd[6] =
		    build_command(ch_setup->disp_no, DISP_YMAX_REG, dw.ymax);
		cmds->precmd[7] =
		    build_double_index(ch_setup->disp_no, DISP_PIXEL_REG);
	} else {
		/* non-standard: also program orientation, pad the pre-string
		 * with pixel-register double-indexes, and restore the
		 * orientation register in the post-string */
		if (dw.xmin_l == ch_no)
			cmds->precmd[0] =
			    build_command(ch_setup->disp_no, DISP_XMIN_L_REG,
					  0x99);
		else
			cmds->precmd[0] =
			    build_command(ch_setup->disp_no, DISP_XMIN_L_REG,
					  ch_no);

		cmds->precmd[1] =
		    build_command(ch_setup->disp_no, DISP_XMIN_L_REG,
				  dw.xmin_l);
		cmds->precmd[2] =
		    build_command(ch_setup->disp_no, DISP_XMIN_H_REG,
				  dw.xmin_h);
		cmds->precmd[3] =
		    build_command(ch_setup->disp_no, DISP_YMIN_REG, dw.ymin);
		cmds->precmd[4] =
		    build_command(ch_setup->disp_no, DISP_XMAX_L_REG,
				  dw.xmax_l);
		cmds->precmd[5] =
		    build_command(ch_setup->disp_no, DISP_XMAX_H_REG,
				  dw.xmax_h);
		cmds->precmd[6] =
		    build_command(ch_setup->disp_no, DISP_YMAX_REG, dw.ymax);
		cmds->precmd[7] =
		    build_command(ch_setup->disp_no, DISP_1_REG, orientation);
		cmds->precmd[8] =
		    build_double_index(ch_setup->disp_no, DISP_PIXEL_REG);
		cmds->precmd[9] =
		    build_double_index(ch_setup->disp_no, DISP_PIXEL_REG);
		cmds->precmd[0xA] =
		    build_double_index(ch_setup->disp_no, DISP_PIXEL_REG);
		cmds->precmd[0xB] =
		    build_double_index(ch_setup->disp_no, DISP_PIXEL_REG);
		cmds->postcmd[0] =
		    build_command(ch_setup->disp_no, DISP_1_REG, BIT(1));
		cmds->postcmd[1] =
		    build_command(ch_setup->disp_no, DISP_DUMMY1_REG, 1);
		cmds->postcmd[2] =
		    build_command(ch_setup->disp_no, DISP_DUMMY1_REG, 2);
		cmds->postcmd[3] =
		    build_command(ch_setup->disp_no, DISP_DUMMY1_REG, 3);
	}

	if ((ret = put_cmd_string(cmds_c)) != 0) {
		return ret;
	}

	/* point the channel at its (dirty-buffer-relative) address window */
	chan.channelnr = cmds->channelnr;
	chan.dum_ch_min = ch_setup->dirtybuffer + ch_setup->minadr;
	chan.dum_ch_max = ch_setup->dirtybuffer + ch_setup->maxadr;
	chan.dum_ch_conf = 0x002;
	chan.dum_ch_ctrl = 0x04;

	put_channel(chan);

	return 0;
}
330
331static u32 display_open(int ch_no, int auto_update, u32 * dirty_buffer,
332 u32 * frame_buffer, u32 xpos, u32 ypos, u32 w, u32 h)
333{
334
335 struct dum_ch_setup k;
336 int ret;
337
338 /* keep width & height within display area */
339 if ((xpos + w) > DISP_MAX_X_SIZE)
340 w = DISP_MAX_X_SIZE - xpos;
341
342 if ((ypos + h) > DISP_MAX_Y_SIZE)
343 h = DISP_MAX_Y_SIZE - ypos;
344
345 /* assume 1 display only */
346 k.disp_no = 0;
347 k.xmin = xpos;
348 k.ymin = ypos;
349 k.xmax = xpos + (w - 1);
350 k.ymax = ypos + (h - 1);
351
352 /* adjust min and max values if necessary */
353 if (k.xmin > DISP_MAX_X_SIZE - 1)
354 k.xmin = DISP_MAX_X_SIZE - 1;
355 if (k.ymin > DISP_MAX_Y_SIZE - 1)
356 k.ymin = DISP_MAX_Y_SIZE - 1;
357
358 if (k.xmax > DISP_MAX_X_SIZE - 1)
359 k.xmax = DISP_MAX_X_SIZE - 1;
360 if (k.ymax > DISP_MAX_Y_SIZE - 1)
361 k.ymax = DISP_MAX_Y_SIZE - 1;
362
363 k.xmirror = 0;
364 k.ymirror = 0;
365 k.rotate = 0;
366 k.minadr = (u32) frame_buffer;
367 k.maxadr = (u32) frame_buffer + (((w - 1) << 10) | ((h << 2) - 2));
368 k.pad = PAD_1024;
369 k.dirtybuffer = (u32) dirty_buffer;
370 k.format = RGB888;
371 k.hwdirty = 0;
372 k.slave_trans = 0;
373
374 ret = dum_ch_setup(ch_no, &k);
375
376 return ret;
377}
378
/*
 * Pulse PIO bit 19 to reset the LCD panel.  Writes go to two adjacent
 * PIO registers (presumably the output set/clear pair — confirm against
 * the PNX4008 PIO register map), with short settle delays between them.
 */
static void lcd_reset(void)
{
	u32 *dum_pio_base = (u32 *)IO_ADDRESS(PNX4008_PIO_BASE);

	udelay(1);
	iowrite32(BIT(19), &dum_pio_base[2]);
	udelay(1);
	iowrite32(BIT(19), &dum_pio_base[1]);
	udelay(1);
}
389
390static int dum_init(struct platform_device *pdev)
391{
392 struct clk *clk;
393
394 /* enable DUM clock */
395 clk = clk_get(&pdev->dev, "dum_ck");
396 if (IS_ERR(clk)) {
397 printk(KERN_ERR "pnx4008_dum: Unable to access DUM clock\n");
398 return PTR_ERR(clk);
399 }
400
401 clk_set_rate(clk, 1);
402 clk_put(clk);
403
404 DUM_CTRL = V_DUM_RESET;
405
406 /* set priority to "round-robin". All other params to "false" */
407 DUM_CONF = BIT(9);
408
409 /* Display 1 */
410 DUM_WTCFG1 = PNX4008_DUM_WT_CFG;
411 DUM_RTCFG1 = PNX4008_DUM_RT_CFG;
412 DUM_TCFG = PNX4008_DUM_T_CFG;
413
414 return 0;
415}
416
/*
 * Lay out the command-string area in IRAM: write one pointer per
 * command string into the table at DUM_COM_BASE, clear every channel's
 * registers, and zero the command-string storage itself.
 */
static void dum_chan_init(void)
{
	int i = 0, ch = 0;
	u32 *cmdptrs;
	u32 *cmdstrings;

	/* the pointer table sits directly after the command strings */
	DUM_COM_BASE =
		CMDSTRING_BASEADDR + BYTES_PER_CMDSTRING * NR_OF_CMDSTRINGS;

	/* NOTE(review): mapping failures here are silently ignored and the
	 * command strings are left unprogrammed — no error is reported. */
	if ((cmdptrs =
	     (u32 *) ioremap_nocache(DUM_COM_BASE,
				     sizeof(u32) * NR_OF_CMDSTRINGS)) == NULL)
		return;

	/* one pointer per command string, fixed stride */
	for (ch = 0; ch < NR_OF_CMDSTRINGS; ch++)
		iowrite32(CMDSTRING_BASEADDR + BYTES_PER_CMDSTRING * ch,
			  cmdptrs + ch);

	for (ch = 0; ch < MAX_DUM_CHANNELS; ch++)
		clear_channel(ch);

	/* Clear the cmdstrings */
	cmdstrings =
	    (u32 *)ioremap_nocache(*cmdptrs,
				   BYTES_PER_CMDSTRING * NR_OF_CMDSTRINGS);

	if (!cmdstrings)
		goto out;

	for (i = 0; i < NR_OF_CMDSTRINGS * BYTES_PER_CMDSTRING / sizeof(u32);
	     i++)
		iowrite32(0, cmdstrings + i);

	iounmap((u32 *)cmdstrings);

out:
	iounmap((u32 *)cmdptrs);
}
455
/*
 * Bring the LCD panel up: hardware reset, then a fixed sequence of
 * configuration words written to the DUM slave interface with settle
 * delays in between.  The order of these writes is significant.
 */
static void lcd_init(void)
{
	lcd_reset();

	DUM_OUTP_FORMAT1 = 0; /* RGB666 */

	udelay(1);
	iowrite32(V_LCD_STANDBY_OFF, dum_data.slave_virt_base);
	udelay(1);
	iowrite32(V_LCD_USE_9BIT_BUS, dum_data.slave_virt_base);
	udelay(1);
	iowrite32(V_LCD_SYNC_RISE_L, dum_data.slave_virt_base);
	udelay(1);
	iowrite32(V_LCD_SYNC_RISE_H, dum_data.slave_virt_base);
	udelay(1);
	iowrite32(V_LCD_SYNC_FALL_L, dum_data.slave_virt_base);
	udelay(1);
	iowrite32(V_LCD_SYNC_FALL_H, dum_data.slave_virt_base);
	udelay(1);
	iowrite32(V_LCD_SYNC_ENABLE, dum_data.slave_virt_base);
	udelay(1);
	iowrite32(V_LCD_DISPLAY_ON, dum_data.slave_virt_base);
	udelay(1);
}
480
481/* Interface exported to framebuffer drivers */
482
483int pnx4008_get_fb_addresses(int fb_type, void **virt_addr,
484 dma_addr_t *phys_addr, int *fb_length)
485{
486 int i;
487 int ret = -1;
488 for (i = 0; i < ARRAY_SIZE(fb_addr); i++)
489 if (fb_addr[i].fb_type == fb_type) {
490 *virt_addr = (void *)(dum_data.lcd_virt_start +
491 fb_addr[i].addr_offset);
492 *phys_addr =
493 dum_data.lcd_phys_start + fb_addr[i].addr_offset;
494 *fb_length = fb_addr[i].fb_length;
495 ret = 0;
496 break;
497 }
498
499 return ret;
500}
501
502EXPORT_SYMBOL(pnx4008_get_fb_addresses);
503
504int pnx4008_alloc_dum_channel(int dev_id)
505{
506 int i = 0;
507
508 while ((i < MAX_DUM_CHANNELS) && (dum_data.fb_owning_channel[i] != -1))
509 i++;
510
511 if (i == MAX_DUM_CHANNELS)
512 return -ENORESOURCESLEFT;
513 else {
514 dum_data.fb_owning_channel[i] = dev_id;
515 return i;
516 }
517}
518
519EXPORT_SYMBOL(pnx4008_alloc_dum_channel);
520
521int pnx4008_free_dum_channel(int channr, int dev_id)
522{
523 if (channr < 0 || channr > MAX_DUM_CHANNELS)
524 return -EINVAL;
525 else if (dum_data.fb_owning_channel[channr] != dev_id)
526 return -EFBNOTOWNER;
527 else {
528 clear_channel(channr);
529 dum_data.fb_owning_channel[channr] = -1;
530 }
531
532 return 0;
533}
534
535EXPORT_SYMBOL(pnx4008_free_dum_channel);
536
537int pnx4008_put_dum_channel_uf(struct dumchannel_uf chan_uf, int dev_id)
538{
539 int i = chan_uf.channelnr;
540 int ret;
541
542 if (i < 0 || i > MAX_DUM_CHANNELS)
543 return -EINVAL;
544 else if (dum_data.fb_owning_channel[i] != dev_id)
545 return -EFBNOTOWNER;
546 else if ((ret =
547 display_open(chan_uf.channelnr, 0, chan_uf.dirty,
548 chan_uf.source, chan_uf.y_offset,
549 chan_uf.x_offset, chan_uf.height,
550 chan_uf.width)) != 0)
551 return ret;
552 else {
553 dum_data.chan_uf_store[i].dirty = chan_uf.dirty;
554 dum_data.chan_uf_store[i].source = chan_uf.source;
555 dum_data.chan_uf_store[i].x_offset = chan_uf.x_offset;
556 dum_data.chan_uf_store[i].y_offset = chan_uf.y_offset;
557 dum_data.chan_uf_store[i].width = chan_uf.width;
558 dum_data.chan_uf_store[i].height = chan_uf.height;
559 }
560
561 return 0;
562}
563
564EXPORT_SYMBOL(pnx4008_put_dum_channel_uf);
565
566int pnx4008_set_dum_channel_sync(int channr, int val, int dev_id)
567{
568 if (channr < 0 || channr > MAX_DUM_CHANNELS)
569 return -EINVAL;
570 else if (dum_data.fb_owning_channel[channr] != dev_id)
571 return -EFBNOTOWNER;
572 else {
573 if (val == CONF_SYNC_ON) {
574 DUM_CH_CONF(channr) |= CONF_SYNCENABLE;
575 DUM_CH_CONF(channr) |= DUM_CHANNEL_CFG_SYNC_MASK |
576 DUM_CHANNEL_CFG_SYNC_MASK_SET;
577 } else if (val == CONF_SYNC_OFF)
578 DUM_CH_CONF(channr) &= ~CONF_SYNCENABLE;
579 else
580 return -EINVAL;
581 }
582
583 return 0;
584}
585
586EXPORT_SYMBOL(pnx4008_set_dum_channel_sync);
587
588int pnx4008_set_dum_channel_dirty_detect(int channr, int val, int dev_id)
589{
590 if (channr < 0 || channr > MAX_DUM_CHANNELS)
591 return -EINVAL;
592 else if (dum_data.fb_owning_channel[channr] != dev_id)
593 return -EFBNOTOWNER;
594 else {
595 if (val == CONF_DIRTYDETECTION_ON)
596 DUM_CH_CONF(channr) |= CONF_DIRTYENABLE;
597 else if (val == CONF_DIRTYDETECTION_OFF)
598 DUM_CH_CONF(channr) &= ~CONF_DIRTYENABLE;
599 else
600 return -EINVAL;
601 }
602
603 return 0;
604}
605
606EXPORT_SYMBOL(pnx4008_set_dum_channel_dirty_detect);
607
608#if 0 /* Functions not used currently, but likely to be used in future */
609
610static int get_channel(struct dumchannel *p_chan)
611{
612 int i = p_chan->channelnr;
613
614 if (i < 0 || i > MAX_DUM_CHANNELS)
615 return -EINVAL;
616 else {
617 p_chan->dum_ch_min = DUM_CH_MIN(i);
618 p_chan->dum_ch_max = DUM_CH_MAX(i);
619 p_chan->dum_ch_conf = DUM_CH_CONF(i);
620 p_chan->dum_ch_stat = DUM_CH_STAT(i);
621 p_chan->dum_ch_ctrl = 0; /* WriteOnly control register */
622 }
623
624 return 0;
625}
626
627int pnx4008_get_dum_channel_uf(struct dumchannel_uf *p_chan_uf, int dev_id)
628{
629 int i = p_chan_uf->channelnr;
630
631 if (i < 0 || i > MAX_DUM_CHANNELS)
632 return -EINVAL;
633 else if (dum_data.fb_owning_channel[i] != dev_id)
634 return -EFBNOTOWNER;
635 else {
636 p_chan_uf->dirty = dum_data.chan_uf_store[i].dirty;
637 p_chan_uf->source = dum_data.chan_uf_store[i].source;
638 p_chan_uf->x_offset = dum_data.chan_uf_store[i].x_offset;
639 p_chan_uf->y_offset = dum_data.chan_uf_store[i].y_offset;
640 p_chan_uf->width = dum_data.chan_uf_store[i].width;
641 p_chan_uf->height = dum_data.chan_uf_store[i].height;
642 }
643
644 return 0;
645}
646
647EXPORT_SYMBOL(pnx4008_get_dum_channel_uf);
648
649int pnx4008_get_dum_channel_config(int channr, int dev_id)
650{
651 int ret;
652 struct dumchannel chan;
653
654 if (channr < 0 || channr > MAX_DUM_CHANNELS)
655 return -EINVAL;
656 else if (dum_data.fb_owning_channel[channr] != dev_id)
657 return -EFBNOTOWNER;
658 else {
659 chan.channelnr = channr;
660 if ((ret = get_channel(&chan)) != 0)
661 return ret;
662 }
663
664 return (chan.dum_ch_conf & DUM_CHANNEL_CFG_MASK);
665}
666
667EXPORT_SYMBOL(pnx4008_get_dum_channel_config);
668
669int pnx4008_force_update_dum_channel(int channr, int dev_id)
670{
671 if (channr < 0 || channr > MAX_DUM_CHANNELS)
672 return -EINVAL;
673
674 else if (dum_data.fb_owning_channel[channr] != dev_id)
675 return -EFBNOTOWNER;
676 else
677 DUM_CH_CTRL(channr) = CTRL_SETDIRTY;
678
679 return 0;
680}
681
682EXPORT_SYMBOL(pnx4008_force_update_dum_channel);
683
684#endif
685
/*
 * mmap() helper for the framebuffer drivers built on the DUM: maps the
 * write-combined LCD DMA area into the caller's address space.
 *
 * Returns -EINVAL when the requested offset lies beyond the
 * framebuffer, otherwise the result of dma_mmap_writecombine().
 */
int pnx4008_sdum_mmap(struct fb_info *info, struct vm_area_struct *vma,
		      struct device *dev)
{
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;

	if (off < info->fix.smem_len) {
		/*
		 * NOTE(review): this pgoff bump appears to skip the first
		 * page of the DMA area — confirm against the FB_DMA_SIZE
		 * layout (SZ_1M plus one extra page) before relying on it.
		 */
		vma->vm_pgoff += 1;
		return dma_mmap_writecombine(dev, vma,
					     (void *)dum_data.lcd_virt_start,
					     dum_data.lcd_phys_start,
					     FB_DMA_SIZE);
	}
	return -EINVAL;
}

EXPORT_SYMBOL(pnx4008_sdum_mmap);
702
703int pnx4008_set_dum_exit_notification(int dev_id)
704{
705 int i;
706
707 for (i = 0; i < MAX_DUM_CHANNELS; i++)
708 if (dum_data.fb_owning_channel[i] == dev_id)
709 return -ERESOURCESNOTFREED;
710
711 return 0;
712}
713
714EXPORT_SYMBOL(pnx4008_set_dum_exit_notification);
715
716/* Platform device driver for DUM */
717
718static int sdum_suspend(struct platform_device *pdev, pm_message_t state)
719{
720 int retval = 0;
721 struct clk *clk;
722
723 clk = clk_get(0, "dum_ck");
724 if (!IS_ERR(clk)) {
725 clk_set_rate(clk, 0);
726 clk_put(clk);
727 } else
728 retval = PTR_ERR(clk);
729
730 /* disable BAC */
731 DUM_CTRL = V_BAC_DISABLE_IDLE;
732
733 /* LCD standby & turn off display */
734 lcd_reset();
735
736 return retval;
737}
738
739static int sdum_resume(struct platform_device *pdev)
740{
741 int retval = 0;
742 struct clk *clk;
743
744 clk = clk_get(0, "dum_ck");
745 if (!IS_ERR(clk)) {
746 clk_set_rate(clk, 1);
747 clk_put(clk);
748 } else
749 retval = PTR_ERR(clk);
750
751 /* wait for BAC disable */
752 DUM_CTRL = V_BAC_DISABLE_TRIG;
753
754 while (DUM_CTRL & BAC_ENABLED)
755 udelay(10);
756
757 /* re-init LCD */
758 lcd_init();
759
760 /* enable BAC and reset MUX */
761 DUM_CTRL = V_BAC_ENABLE;
762 udelay(1);
763 DUM_CTRL = V_MUX_RESET;
764 return 0;
765}
766
/*
 * Probe the DUM: allocate the write-combined framebuffer DMA area, map
 * the slave register window, initialise the DUM block, its channels and
 * the LCD, then unmask the display-sync wakeup interrupt.
 *
 * Cleanup on failure runs the usual goto ladder in reverse order of
 * acquisition.
 */
static int __devinit sdum_probe(struct platform_device *pdev)
{
	int ret = 0, i = 0;

	/* map frame buffer */
	dum_data.lcd_virt_start = (u32) dma_alloc_writecombine(&pdev->dev,
							       FB_DMA_SIZE,
						&dum_data.lcd_phys_start,
							       GFP_KERNEL);

	if (!dum_data.lcd_virt_start) {
		ret = -ENOMEM;
		goto out_3;
	}

	/* map slave registers */
	dum_data.slave_phys_base = PNX4008_DUM_SLAVE_BASE;
	dum_data.slave_virt_base =
	    (u32 *) ioremap_nocache(dum_data.slave_phys_base, sizeof(u32));

	if (dum_data.slave_virt_base == NULL) {
		ret = -ENOMEM;
		goto out_2;
	}

	/* initialize DUM and LCD display */
	ret = dum_init(pdev);
	if (ret)
		goto out_1;

	dum_chan_init();
	lcd_init();

	/* enable bus-access control, then reset the mux */
	DUM_CTRL = V_BAC_ENABLE;
	udelay(1);
	DUM_CTRL = V_MUX_RESET;

	/* set decode address and sync clock divider */
	DUM_DECODE = dum_data.lcd_phys_start & DUM_DECODE_MASK;
	DUM_CLK_DIV = PNX4008_DUM_CLK_DIV;

	/* all channels start out unowned */
	for (i = 0; i < MAX_DUM_CHANNELS; i++)
		dum_data.fb_owning_channel[i] = -1;

	/*setup wakeup interrupt */
	start_int_set_rising_edge(SE_DISP_SYNC_INT);
	start_int_ack(SE_DISP_SYNC_INT);
	start_int_umask(SE_DISP_SYNC_INT);

	return 0;

out_1:
	iounmap((void *)dum_data.slave_virt_base);
out_2:
	dma_free_writecombine(&pdev->dev, FB_DMA_SIZE,
			      (void *)dum_data.lcd_virt_start,
			      dum_data.lcd_phys_start);
out_3:
	return ret;
}
827
/*
 * Tear the DUM down: mask the display-sync interrupt, stop the DUM
 * clock and release the register mapping and the framebuffer DMA area.
 */
static int sdum_remove(struct platform_device *pdev)
{
	struct clk *clk;

	start_int_mask(SE_DISP_SYNC_INT);

	clk = clk_get(0, "dum_ck");
	if (!IS_ERR(clk)) {
		clk_set_rate(clk, 0);
		clk_put(clk);
	}

	iounmap((void *)dum_data.slave_virt_base);

	dma_free_writecombine(&pdev->dev, FB_DMA_SIZE,
			      (void *)dum_data.lcd_virt_start,
			      dum_data.lcd_phys_start);

	return 0;
}
848
/* Platform driver for the PNX4008 Display Update Master itself. */
static struct platform_driver sdum_driver = {
	.driver = {
		   .name = "pnx4008-sdum",
		   },
	.probe = sdum_probe,
	.remove = sdum_remove,
	.suspend = sdum_suspend,
	.resume = sdum_resume,
};

module_platform_driver(sdum_driver);

MODULE_LICENSE("GPL");
diff --git a/drivers/video/pnx4008/sdum.h b/drivers/video/pnx4008/sdum.h
deleted file mode 100644
index 189c3d641383..000000000000
--- a/drivers/video/pnx4008/sdum.h
+++ /dev/null
@@ -1,136 +0,0 @@
1/*
2 * Copyright (C) 2005 Philips Semiconductors
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2, or (at your option)
7 * any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; see the file COPYING. If not, write to
16 * the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
17 * Boston, MA 02111-1307, USA, or http://www.gnu.org/licenses/gpl.html
18*/
19
/* Number of DUM channels; valid channel numbers are 0..63. */
#define MAX_DUM_CHANNELS	64

#define RGB_MEM_WINDOW(x) (0x10000000 + (x)*0x00100000)

#define QCIF_OFFSET(x) (((x) == 0) ? 0x00000: ((x) == 1) ? 0x30000: -1)
#define CIF_OFFSET(x)  (((x) == 0) ? 0x00000: ((x) == 1) ? 0x60000: -1)

/* Per-channel control/configuration bits. */
#define CTRL_SETDIRTY		(0x00000001)
#define CONF_DIRTYENABLE	(0x00000020)
#define CONF_SYNCENABLE		(0x00000004)

#define DIRTY_ENABLED(conf)	((conf) & 0x0020)
#define SYNC_ENABLED(conf)	((conf) & 0x0004)

/* Display 1 & 2 Write Timing Configuration */
#define PNX4008_DUM_WT_CFG		0x00372000

/* Display 1 & 2 Read Timing Configuration */
#define PNX4008_DUM_RT_CFG		0x00003A47

/* DUM Transit State Timing Configuration */
#define PNX4008_DUM_T_CFG		0x1D /* 29 HCLK cycles */

/* DUM Sync count clock divider */
#define PNX4008_DUM_CLK_DIV		0x02DD

/* Memory size for framebuffer, allocated through dma_alloc_writecombine().
 * Must be PAGE aligned
 */
#define FB_DMA_SIZE (PAGE_ALIGN(SZ_1M + PAGE_SIZE))

/* Byte offsets of the two framebuffers inside the DMA area. */
#define OFFSET_RGBBUFFER (0xB0000)
#define OFFSET_YUVBUFFER (0x00000)

#define YUVBUFFER (lcd_video_start + OFFSET_YUVBUFFER)
#define RGBBUFFER (lcd_video_start + OFFSET_RGBBUFFER)

/* Command-string storage layout in IRAM. */
#define CMDSTRING_BASEADDR	(0x00C000) /* iram */
#define BYTES_PER_CMDSTRING	(0x80)
#define NR_OF_CMDSTRINGS	(64)

#define MAX_NR_PRESTRINGS (0x40)
#define MAX_NR_POSTSTRINGS (0x40)

/* various mask definitions */
#define DUM_CLK_ENABLE 0x01
#define DUM_CLK_DISABLE 0
#define DUM_DECODE_MASK 0x1FFFFFFF
#define DUM_CHANNEL_CFG_MASK 0x01FF
#define DUM_CHANNEL_CFG_SYNC_MASK 0xFFFE00FF
#define DUM_CHANNEL_CFG_SYNC_MASK_SET 0x0CA00

#define SDUM_RETURNVAL_BASE (0x500)

/* Values accepted by pnx4008_set_dum_channel_sync(). */
#define CONF_SYNC_OFF		(0x602)
#define CONF_SYNC_ON		(0x603)

/* Values accepted by pnx4008_set_dum_channel_dirty_detect(). */
#define CONF_DIRTYDETECTION_OFF	(0x600)
#define CONF_DIRTYDETECTION_ON	(0x601)

/* Per-channel update-frame setup: source/dirty buffers and window. */
struct dumchannel_uf {
	int channelnr;		/* DUM channel, 0 .. MAX_DUM_CHANNELS - 1 */
	u32 *dirty;		/* dirty buffer, or NULL */
	u32 *source;		/* source framebuffer */
	u32 x_offset;
	u32 y_offset;
	u32 width;
	u32 height;
};

/* Framebuffer types understood by pnx4008_get_fb_addresses(). */
enum {
	FB_TYPE_YUV,
	FB_TYPE_RGB
};

/* In-memory image of one DUM command string (header + pre/post words). */
struct cmdstring {
	int channelnr;
	uint16_t prestringlen;	/* length of precmd[] data, in bytes */
	uint16_t poststringlen;	/* length of postcmd[] data, in bytes */
	uint16_t format;
	uint16_t reserved;
	uint16_t startaddr_low;
	uint16_t startaddr_high;
	uint16_t pixdatlen_low;
	uint16_t pixdatlen_high;
	u32 precmd[MAX_NR_PRESTRINGS];
	u32 postcmd[MAX_NR_POSTSTRINGS];

};

/* Shadow of one channel's register set. */
struct dumchannel {
	int channelnr;
	int dum_ch_min;
	int dum_ch_max;
	int dum_ch_conf;
	int dum_ch_stat;
	int dum_ch_ctrl;	/* write-only in hardware */
};
118
119int pnx4008_alloc_dum_channel(int dev_id);
120int pnx4008_free_dum_channel(int channr, int dev_id);
121
122int pnx4008_get_dum_channel_uf(struct dumchannel_uf *pChan_uf, int dev_id);
123int pnx4008_put_dum_channel_uf(struct dumchannel_uf chan_uf, int dev_id);
124
125int pnx4008_set_dum_channel_sync(int channr, int val, int dev_id);
126int pnx4008_set_dum_channel_dirty_detect(int channr, int val, int dev_id);
127
128int pnx4008_force_dum_update_channel(int channr, int dev_id);
129
130int pnx4008_get_dum_channel_config(int channr, int dev_id);
131
132int pnx4008_sdum_mmap(struct fb_info *info, struct vm_area_struct *vma, struct device *dev);
133int pnx4008_set_dum_exit_notification(int dev_id);
134
135int pnx4008_get_fb_addresses(int fb_type, void **virt_addr,
136 dma_addr_t * phys_addr, int *fb_length);
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index 4e292f29bf5d..0b340d6ff8a4 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -1034,6 +1034,7 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
1034 if (status) { 1034 if (status) {
1035 dev_err(&dev->core, "%s: lv1_gpu_memory_allocate failed: %d\n", 1035 dev_err(&dev->core, "%s: lv1_gpu_memory_allocate failed: %d\n",
1036 __func__, status); 1036 __func__, status);
1037 retval = -ENOMEM;
1037 goto err_close_device; 1038 goto err_close_device;
1038 } 1039 }
1039 dev_dbg(&dev->core, "ddr:lpar:0x%llx\n", ddr_lpar); 1040 dev_dbg(&dev->core, "ddr:lpar:0x%llx\n", ddr_lpar);
@@ -1046,6 +1047,7 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
1046 dev_err(&dev->core, 1047 dev_err(&dev->core,
1047 "%s: lv1_gpu_context_allocate failed: %d\n", __func__, 1048 "%s: lv1_gpu_context_allocate failed: %d\n", __func__,
1048 status); 1049 status);
1050 retval = -ENOMEM;
1049 goto err_gpu_memory_free; 1051 goto err_gpu_memory_free;
1050 } 1052 }
1051 1053
@@ -1053,6 +1055,7 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
1053 dinfo = (void __force *)ioremap(lpar_driver_info, 128 * 1024); 1055 dinfo = (void __force *)ioremap(lpar_driver_info, 128 * 1024);
1054 if (!dinfo) { 1056 if (!dinfo) {
1055 dev_err(&dev->core, "%s: ioremap failed\n", __func__); 1057 dev_err(&dev->core, "%s: ioremap failed\n", __func__);
1058 retval = -ENOMEM;
1056 goto err_gpu_context_free; 1059 goto err_gpu_context_free;
1057 } 1060 }
1058 1061
@@ -1121,8 +1124,10 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
1121 } 1124 }
1122 1125
1123 info = framebuffer_alloc(sizeof(struct ps3fb_par), &dev->core); 1126 info = framebuffer_alloc(sizeof(struct ps3fb_par), &dev->core);
1124 if (!info) 1127 if (!info) {
1128 retval = -ENOMEM;
1125 goto err_context_fb_close; 1129 goto err_context_fb_close;
1130 }
1126 1131
1127 par = info->par; 1132 par = info->par;
1128 par->mode_id = ~ps3fb_mode; /* != ps3fb_mode, to trigger change */ 1133 par->mode_id = ~ps3fb_mode; /* != ps3fb_mode, to trigger change */
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 69bf9d07c237..2ed7b633bbd9 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -25,8 +25,8 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/pm_runtime.h> 26#include <linux/pm_runtime.h>
27 27
28#include <video/samsung_fimd.h>
28#include <mach/map.h> 29#include <mach/map.h>
29#include <plat/regs-fb-v4.h>
30#include <plat/fb.h> 30#include <plat/fb.h>
31 31
32/* This driver will export a number of framebuffer interfaces depending 32/* This driver will export a number of framebuffer interfaces depending
@@ -1398,35 +1398,28 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
1398 1398
1399 spin_lock_init(&sfb->slock); 1399 spin_lock_init(&sfb->slock);
1400 1400
1401 sfb->bus_clk = clk_get(dev, "lcd"); 1401 sfb->bus_clk = devm_clk_get(dev, "lcd");
1402 if (IS_ERR(sfb->bus_clk)) { 1402 if (IS_ERR(sfb->bus_clk)) {
1403 dev_err(dev, "failed to get bus clock\n"); 1403 dev_err(dev, "failed to get bus clock\n");
1404 ret = PTR_ERR(sfb->bus_clk); 1404 return PTR_ERR(sfb->bus_clk);
1405 goto err_sfb;
1406 } 1405 }
1407 1406
1408 clk_enable(sfb->bus_clk); 1407 clk_prepare_enable(sfb->bus_clk);
1409 1408
1410 if (!sfb->variant.has_clksel) { 1409 if (!sfb->variant.has_clksel) {
1411 sfb->lcd_clk = clk_get(dev, "sclk_fimd"); 1410 sfb->lcd_clk = devm_clk_get(dev, "sclk_fimd");
1412 if (IS_ERR(sfb->lcd_clk)) { 1411 if (IS_ERR(sfb->lcd_clk)) {
1413 dev_err(dev, "failed to get lcd clock\n"); 1412 dev_err(dev, "failed to get lcd clock\n");
1414 ret = PTR_ERR(sfb->lcd_clk); 1413 ret = PTR_ERR(sfb->lcd_clk);
1415 goto err_bus_clk; 1414 goto err_bus_clk;
1416 } 1415 }
1417 1416
1418 clk_enable(sfb->lcd_clk); 1417 clk_prepare_enable(sfb->lcd_clk);
1419 } 1418 }
1420 1419
1421 pm_runtime_enable(sfb->dev); 1420 pm_runtime_enable(sfb->dev);
1422 1421
1423 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1422 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1424 if (!res) {
1425 dev_err(dev, "failed to find registers\n");
1426 ret = -ENOENT;
1427 goto err_lcd_clk;
1428 }
1429
1430 sfb->regs = devm_request_and_ioremap(dev, res); 1423 sfb->regs = devm_request_and_ioremap(dev, res);
1431 if (!sfb->regs) { 1424 if (!sfb->regs) {
1432 dev_err(dev, "failed to map registers\n"); 1425 dev_err(dev, "failed to map registers\n");
@@ -1510,16 +1503,12 @@ err_pm_runtime:
1510err_lcd_clk: 1503err_lcd_clk:
1511 pm_runtime_disable(sfb->dev); 1504 pm_runtime_disable(sfb->dev);
1512 1505
1513 if (!sfb->variant.has_clksel) { 1506 if (!sfb->variant.has_clksel)
1514 clk_disable(sfb->lcd_clk); 1507 clk_disable_unprepare(sfb->lcd_clk);
1515 clk_put(sfb->lcd_clk);
1516 }
1517 1508
1518err_bus_clk: 1509err_bus_clk:
1519 clk_disable(sfb->bus_clk); 1510 clk_disable_unprepare(sfb->bus_clk);
1520 clk_put(sfb->bus_clk);
1521 1511
1522err_sfb:
1523 return ret; 1512 return ret;
1524} 1513}
1525 1514
@@ -1541,13 +1530,10 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev)
1541 if (sfb->windows[win]) 1530 if (sfb->windows[win])
1542 s3c_fb_release_win(sfb, sfb->windows[win]); 1531 s3c_fb_release_win(sfb, sfb->windows[win]);
1543 1532
1544 if (!sfb->variant.has_clksel) { 1533 if (!sfb->variant.has_clksel)
1545 clk_disable(sfb->lcd_clk); 1534 clk_disable_unprepare(sfb->lcd_clk);
1546 clk_put(sfb->lcd_clk);
1547 }
1548 1535
1549 clk_disable(sfb->bus_clk); 1536 clk_disable_unprepare(sfb->bus_clk);
1550 clk_put(sfb->bus_clk);
1551 1537
1552 pm_runtime_put_sync(sfb->dev); 1538 pm_runtime_put_sync(sfb->dev);
1553 pm_runtime_disable(sfb->dev); 1539 pm_runtime_disable(sfb->dev);
@@ -1575,9 +1561,9 @@ static int s3c_fb_suspend(struct device *dev)
1575 } 1561 }
1576 1562
1577 if (!sfb->variant.has_clksel) 1563 if (!sfb->variant.has_clksel)
1578 clk_disable(sfb->lcd_clk); 1564 clk_disable_unprepare(sfb->lcd_clk);
1579 1565
1580 clk_disable(sfb->bus_clk); 1566 clk_disable_unprepare(sfb->bus_clk);
1581 1567
1582 pm_runtime_put_sync(sfb->dev); 1568 pm_runtime_put_sync(sfb->dev);
1583 1569
@@ -1595,10 +1581,10 @@ static int s3c_fb_resume(struct device *dev)
1595 1581
1596 pm_runtime_get_sync(sfb->dev); 1582 pm_runtime_get_sync(sfb->dev);
1597 1583
1598 clk_enable(sfb->bus_clk); 1584 clk_prepare_enable(sfb->bus_clk);
1599 1585
1600 if (!sfb->variant.has_clksel) 1586 if (!sfb->variant.has_clksel)
1601 clk_enable(sfb->lcd_clk); 1587 clk_prepare_enable(sfb->lcd_clk);
1602 1588
1603 /* setup gpio and output polarity controls */ 1589 /* setup gpio and output polarity controls */
1604 pd->setup_gpio(); 1590 pd->setup_gpio();
@@ -1654,9 +1640,9 @@ static int s3c_fb_runtime_suspend(struct device *dev)
1654 struct s3c_fb *sfb = platform_get_drvdata(pdev); 1640 struct s3c_fb *sfb = platform_get_drvdata(pdev);
1655 1641
1656 if (!sfb->variant.has_clksel) 1642 if (!sfb->variant.has_clksel)
1657 clk_disable(sfb->lcd_clk); 1643 clk_disable_unprepare(sfb->lcd_clk);
1658 1644
1659 clk_disable(sfb->bus_clk); 1645 clk_disable_unprepare(sfb->bus_clk);
1660 1646
1661 return 0; 1647 return 0;
1662} 1648}
@@ -1667,10 +1653,10 @@ static int s3c_fb_runtime_resume(struct device *dev)
1667 struct s3c_fb *sfb = platform_get_drvdata(pdev); 1653 struct s3c_fb *sfb = platform_get_drvdata(pdev);
1668 struct s3c_fb_platdata *pd = sfb->pdata; 1654 struct s3c_fb_platdata *pd = sfb->pdata;
1669 1655
1670 clk_enable(sfb->bus_clk); 1656 clk_prepare_enable(sfb->bus_clk);
1671 1657
1672 if (!sfb->variant.has_clksel) 1658 if (!sfb->variant.has_clksel)
1673 clk_enable(sfb->lcd_clk); 1659 clk_prepare_enable(sfb->lcd_clk);
1674 1660
1675 /* setup gpio and output polarity controls */ 1661 /* setup gpio and output polarity controls */
1676 pd->setup_gpio(); 1662 pd->setup_gpio();
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index 77f34c614c86..1083bb9469ee 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -11,6 +11,8 @@
11 * Driver based on skeletonfb.c, sa1100fb.c and others. 11 * Driver based on skeletonfb.c, sa1100fb.c and others.
12*/ 12*/
13 13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
14#include <linux/module.h> 16#include <linux/module.h>
15#include <linux/kernel.h> 17#include <linux/kernel.h>
16#include <linux/err.h> 18#include <linux/err.h>
@@ -48,7 +50,11 @@ static int debug = 1;
48static int debug; 50static int debug;
49#endif 51#endif
50 52
51#define dprintk(msg...) if (debug) printk(KERN_DEBUG "s3c2410fb: " msg); 53#define dprintk(msg...) \
54do { \
55 if (debug) \
56 pr_debug(msg); \
57} while (0)
52 58
53/* useful functions */ 59/* useful functions */
54 60
@@ -598,11 +604,11 @@ static int s3c2410fb_debug_store(struct device *dev,
598 if (strnicmp(buf, "on", 2) == 0 || 604 if (strnicmp(buf, "on", 2) == 0 ||
599 strnicmp(buf, "1", 1) == 0) { 605 strnicmp(buf, "1", 1) == 0) {
600 debug = 1; 606 debug = 1;
601 printk(KERN_DEBUG "s3c2410fb: Debug On"); 607 dev_dbg(dev, "s3c2410fb: Debug On");
602 } else if (strnicmp(buf, "off", 3) == 0 || 608 } else if (strnicmp(buf, "off", 3) == 0 ||
603 strnicmp(buf, "0", 1) == 0) { 609 strnicmp(buf, "0", 1) == 0) {
604 debug = 0; 610 debug = 0;
605 printk(KERN_DEBUG "s3c2410fb: Debug Off"); 611 dev_dbg(dev, "s3c2410fb: Debug Off");
606 } else { 612 } else {
607 return -EINVAL; 613 return -EINVAL;
608 } 614 }
@@ -921,7 +927,7 @@ static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
921 927
922 info->clk = clk_get(NULL, "lcd"); 928 info->clk = clk_get(NULL, "lcd");
923 if (IS_ERR(info->clk)) { 929 if (IS_ERR(info->clk)) {
924 printk(KERN_ERR "failed to get lcd clock source\n"); 930 dev_err(&pdev->dev, "failed to get lcd clock source\n");
925 ret = PTR_ERR(info->clk); 931 ret = PTR_ERR(info->clk);
926 goto release_irq; 932 goto release_irq;
927 } 933 }
@@ -929,7 +935,7 @@ static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
929 clk_enable(info->clk); 935 clk_enable(info->clk);
930 dprintk("got and enabled clock\n"); 936 dprintk("got and enabled clock\n");
931 937
932 usleep_range(1000, 1000); 938 usleep_range(1000, 1100);
933 939
934 info->clk_rate = clk_get_rate(info->clk); 940 info->clk_rate = clk_get_rate(info->clk);
935 941
@@ -947,7 +953,7 @@ static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
947 /* Initialize video memory */ 953 /* Initialize video memory */
948 ret = s3c2410fb_map_video_memory(fbinfo); 954 ret = s3c2410fb_map_video_memory(fbinfo);
949 if (ret) { 955 if (ret) {
950 printk(KERN_ERR "Failed to allocate video RAM: %d\n", ret); 956 dev_err(&pdev->dev, "Failed to allocate video RAM: %d\n", ret);
951 ret = -ENOMEM; 957 ret = -ENOMEM;
952 goto release_clock; 958 goto release_clock;
953 } 959 }
@@ -970,7 +976,7 @@ static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
970 976
971 ret = register_framebuffer(fbinfo); 977 ret = register_framebuffer(fbinfo);
972 if (ret < 0) { 978 if (ret < 0) {
973 printk(KERN_ERR "Failed to register framebuffer device: %d\n", 979 dev_err(&pdev->dev, "Failed to register framebuffer device: %d\n",
974 ret); 980 ret);
975 goto free_cpufreq; 981 goto free_cpufreq;
976 } 982 }
@@ -978,9 +984,9 @@ static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
978 /* create device files */ 984 /* create device files */
979 ret = device_create_file(&pdev->dev, &dev_attr_debug); 985 ret = device_create_file(&pdev->dev, &dev_attr_debug);
980 if (ret) 986 if (ret)
981 printk(KERN_ERR "failed to add debug attribute\n"); 987 dev_err(&pdev->dev, "failed to add debug attribute\n");
982 988
983 printk(KERN_INFO "fb%d: %s frame buffer device\n", 989 dev_info(&pdev->dev, "fb%d: %s frame buffer device\n",
984 fbinfo->node, fbinfo->fix.id); 990 fbinfo->node, fbinfo->fix.id);
985 991
986 return 0; 992 return 0;
@@ -1028,7 +1034,7 @@ static int __devexit s3c2410fb_remove(struct platform_device *pdev)
1028 s3c2410fb_cpufreq_deregister(info); 1034 s3c2410fb_cpufreq_deregister(info);
1029 1035
1030 s3c2410fb_lcd_enable(info, 0); 1036 s3c2410fb_lcd_enable(info, 0);
1031 usleep_range(1000, 1000); 1037 usleep_range(1000, 1100);
1032 1038
1033 s3c2410fb_unmap_video_memory(fbinfo); 1039 s3c2410fb_unmap_video_memory(fbinfo);
1034 1040
@@ -1065,7 +1071,7 @@ static int s3c2410fb_suspend(struct platform_device *dev, pm_message_t state)
1065 * the LCD DMA engine is not going to get back on the bus 1071 * the LCD DMA engine is not going to get back on the bus
1066 * before the clock goes off again (bjd) */ 1072 * before the clock goes off again (bjd) */
1067 1073
1068 usleep_range(1000, 1000); 1074 usleep_range(1000, 1100);
1069 clk_disable(info->clk); 1075 clk_disable(info->clk);
1070 1076
1071 return 0; 1077 return 0;
@@ -1077,7 +1083,7 @@ static int s3c2410fb_resume(struct platform_device *dev)
1077 struct s3c2410fb_info *info = fbinfo->par; 1083 struct s3c2410fb_info *info = fbinfo->par;
1078 1084
1079 clk_enable(info->clk); 1085 clk_enable(info->clk);
1080 usleep_range(1000, 1000); 1086 usleep_range(1000, 1100);
1081 1087
1082 s3c2410fb_init_registers(fbinfo); 1088 s3c2410fb_init_registers(fbinfo);
1083 1089
@@ -1134,8 +1140,8 @@ static void __exit s3c2410fb_cleanup(void)
1134module_init(s3c2410fb_init); 1140module_init(s3c2410fb_init);
1135module_exit(s3c2410fb_cleanup); 1141module_exit(s3c2410fb_cleanup);
1136 1142
1137MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>, " 1143MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
1138 "Ben Dooks <ben-linux@fluff.org>"); 1144MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>");
1139MODULE_DESCRIPTION("Framebuffer driver for the s3c2410"); 1145MODULE_DESCRIPTION("Framebuffer driver for the s3c2410");
1140MODULE_LICENSE("GPL"); 1146MODULE_LICENSE("GPL");
1141MODULE_ALIAS("platform:s3c2410-lcd"); 1147MODULE_ALIAS("platform:s3c2410-lcd");
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index 0d0f52c18fd8..f4f53b082d05 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -2266,8 +2266,10 @@ static int __devinit savagefb_probe(struct pci_dev* dev,
2266 lpitch = info->var.xres_virtual*((info->var.bits_per_pixel + 7) >> 3); 2266 lpitch = info->var.xres_virtual*((info->var.bits_per_pixel + 7) >> 3);
2267 info->var.yres_virtual = info->fix.smem_len/lpitch; 2267 info->var.yres_virtual = info->fix.smem_len/lpitch;
2268 2268
2269 if (info->var.yres_virtual < info->var.yres) 2269 if (info->var.yres_virtual < info->var.yres) {
2270 err = -ENOMEM;
2270 goto failed; 2271 goto failed;
2272 }
2271 2273
2272#if defined(CONFIG_FB_SAVAGE_ACCEL) 2274#if defined(CONFIG_FB_SAVAGE_ACCEL)
2273 /* 2275 /*
diff --git a/drivers/video/sis/initextlfb.c b/drivers/video/sis/initextlfb.c
index 9dec64da4015..3ab18f5a3759 100644
--- a/drivers/video/sis/initextlfb.c
+++ b/drivers/video/sis/initextlfb.c
@@ -65,7 +65,7 @@ sisfb_mode_rate_to_dclock(struct SiS_Private *SiS_Pr, unsigned char modeno,
65 } 65 }
66#endif 66#endif
67 67
68 if(!(SiS_SearchModeID(SiS_Pr, &ModeNo, &ModeIdIndex))) {; 68 if(!(SiS_SearchModeID(SiS_Pr, &ModeNo, &ModeIdIndex))) {
69 printk(KERN_ERR "Could not find mode %x\n", ModeNo); 69 printk(KERN_ERR "Could not find mode %x\n", ModeNo);
70 return 65000; 70 return 65000;
71 } 71 }
diff --git a/drivers/video/sunxvr1000.c b/drivers/video/sunxvr1000.c
index b7f27acaf817..729a50722bdf 100644
--- a/drivers/video/sunxvr1000.c
+++ b/drivers/video/sunxvr1000.c
@@ -141,8 +141,10 @@ static int __devinit gfb_probe(struct platform_device *op)
141 141
142 gp->fb_base = of_ioremap(&op->resource[6], 0, 142 gp->fb_base = of_ioremap(&op->resource[6], 0,
143 gp->fb_size, "gfb fb"); 143 gp->fb_size, "gfb fb");
144 if (!gp->fb_base) 144 if (!gp->fb_base) {
145 err = -ENOMEM;
145 goto err_release_fb; 146 goto err_release_fb;
147 }
146 148
147 err = gfb_set_fbinfo(gp); 149 err = gfb_set_fbinfo(gp);
148 if (err) 150 if (err)
diff --git a/drivers/video/sunxvr2500.c b/drivers/video/sunxvr2500.c
index 5848436c19da..7fbcba86d1a2 100644
--- a/drivers/video/sunxvr2500.c
+++ b/drivers/video/sunxvr2500.c
@@ -181,8 +181,10 @@ static int __devinit s3d_pci_register(struct pci_dev *pdev,
181 sp->fb_size = info->fix.line_length * sp->height; 181 sp->fb_size = info->fix.line_length * sp->height;
182 182
183 sp->fb_base = ioremap(sp->fb_base_phys, sp->fb_size); 183 sp->fb_base = ioremap(sp->fb_base_phys, sp->fb_size);
184 if (!sp->fb_base) 184 if (!sp->fb_base) {
185 err = -ENOMEM;
185 goto err_release_pci; 186 goto err_release_pci;
187 }
186 188
187 err = s3d_set_fbinfo(sp); 189 err = s3d_set_fbinfo(sp);
188 if (err) 190 if (err)
diff --git a/drivers/video/sunxvr500.c b/drivers/video/sunxvr500.c
index eb931b8626fa..6c71b1b44477 100644
--- a/drivers/video/sunxvr500.c
+++ b/drivers/video/sunxvr500.c
@@ -298,8 +298,10 @@ static int __devinit e3d_pci_register(struct pci_dev *pdev,
298 goto err_release_fb; 298 goto err_release_fb;
299 } 299 }
300 ep->ramdac = ioremap(ep->regs_base_phys + 0x8000, 0x1000); 300 ep->ramdac = ioremap(ep->regs_base_phys + 0x8000, 0x1000);
301 if (!ep->ramdac) 301 if (!ep->ramdac) {
302 err = -ENOMEM;
302 goto err_release_pci1; 303 goto err_release_pci1;
304 }
303 305
304 ep->fb8_0_off = readl(ep->ramdac + RAMDAC_VID_8FB_0); 306 ep->fb8_0_off = readl(ep->ramdac + RAMDAC_VID_8FB_0);
305 ep->fb8_0_off -= ep->fb_base_reg; 307 ep->fb8_0_off -= ep->fb_base_reg;
@@ -343,8 +345,10 @@ static int __devinit e3d_pci_register(struct pci_dev *pdev,
343 ep->fb_size = info->fix.line_length * ep->height; 345 ep->fb_size = info->fix.line_length * ep->height;
344 346
345 ep->fb_base = ioremap(ep->fb_base_phys, ep->fb_size); 347 ep->fb_base = ioremap(ep->fb_base_phys, ep->fb_size);
346 if (!ep->fb_base) 348 if (!ep->fb_base) {
349 err = -ENOMEM;
347 goto err_release_pci0; 350 goto err_release_pci0;
351 }
348 352
349 err = e3d_set_fbinfo(ep); 353 err = e3d_set_fbinfo(ep);
350 if (err) 354 if (err)
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index f45eba3d6150..86d449ea3169 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -646,7 +646,7 @@ static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf,
646 result = fb_sys_write(info, buf, count, ppos); 646 result = fb_sys_write(info, buf, count, ppos);
647 647
648 if (result > 0) { 648 if (result > 0) {
649 int start = max((int)(offset / info->fix.line_length) - 1, 0); 649 int start = max((int)(offset / info->fix.line_length), 0);
650 int lines = min((u32)((result / info->fix.line_length) + 1), 650 int lines = min((u32)((result / info->fix.line_length) + 1),
651 (u32)info->var.yres); 651 (u32)info->var.yres);
652 652
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index b0e2a4261afe..2f8f82d874a1 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -659,6 +659,8 @@ static int __devinit uvesafb_vbe_getedid(struct uvesafb_ktask *task,
659 task->t.flags = TF_BUF_RET | TF_BUF_ESDI; 659 task->t.flags = TF_BUF_RET | TF_BUF_ESDI;
660 task->t.buf_len = EDID_LENGTH; 660 task->t.buf_len = EDID_LENGTH;
661 task->buf = kzalloc(EDID_LENGTH, GFP_KERNEL); 661 task->buf = kzalloc(EDID_LENGTH, GFP_KERNEL);
662 if (!task->buf)
663 return -ENOMEM;
662 664
663 err = uvesafb_exec(task); 665 err = uvesafb_exec(task);
664 666
diff --git a/drivers/video/vermilion/vermilion.c b/drivers/video/vermilion/vermilion.c
index 89aef343e295..4709edc3cb7f 100644
--- a/drivers/video/vermilion/vermilion.c
+++ b/drivers/video/vermilion/vermilion.c
@@ -1167,8 +1167,7 @@ void vmlfb_unregister_subsys(struct vml_sys *sys)
1167 list_for_each_entry_safe(entry, next, &global_has_mode, head) { 1167 list_for_each_entry_safe(entry, next, &global_has_mode, head) {
1168 printk(KERN_DEBUG MODULE_NAME ": subsys disable pipe\n"); 1168 printk(KERN_DEBUG MODULE_NAME ": subsys disable pipe\n");
1169 vmlfb_disable_pipe(entry); 1169 vmlfb_disable_pipe(entry);
1170 list_del(&entry->head); 1170 list_move_tail(&entry->head, &global_no_mode);
1171 list_add_tail(&entry->head, &global_no_mode);
1172 } 1171 }
1173 mutex_unlock(&vml_mutex); 1172 mutex_unlock(&vml_mutex);
1174} 1173}
diff --git a/drivers/video/via/via_clock.c b/drivers/video/via/via_clock.c
index af8f26b643c1..db1e39277e32 100644
--- a/drivers/video/via/via_clock.c
+++ b/drivers/video/via/via_clock.c
@@ -25,6 +25,7 @@
25 25
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/via-core.h> 27#include <linux/via-core.h>
28#include <asm/olpc.h>
28#include "via_clock.h" 29#include "via_clock.h"
29#include "global.h" 30#include "global.h"
30#include "debug.h" 31#include "debug.h"
@@ -289,6 +290,10 @@ static void dummy_set_pll(struct via_pll_config config)
289 printk(KERN_INFO "Using undocumented set PLL.\n%s", via_slap); 290 printk(KERN_INFO "Using undocumented set PLL.\n%s", via_slap);
290} 291}
291 292
293static void noop_set_clock_state(u8 state)
294{
295}
296
292void via_clock_init(struct via_clock *clock, int gfx_chip) 297void via_clock_init(struct via_clock *clock, int gfx_chip)
293{ 298{
294 switch (gfx_chip) { 299 switch (gfx_chip) {
@@ -346,4 +351,18 @@ void via_clock_init(struct via_clock *clock, int gfx_chip)
346 break; 351 break;
347 352
348 } 353 }
354
355 if (machine_is_olpc()) {
356 /* The OLPC XO-1.5 cannot suspend/resume reliably if the
357 * IGA1/IGA2 clocks are set as on or off (memory rot
358 * occasionally happens during suspend under such
359 * configurations).
360 *
361 * The only known stable scenario is to leave this bits as-is,
362 * which in their default states are documented to enable the
363 * clock only when it is needed.
364 */
365 clock->set_primary_clock_state = noop_set_clock_state;
366 clock->set_secondary_clock_state = noop_set_clock_state;
367 }
349} 368}
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index b7f5173ff9e9..917bb5681684 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -641,7 +641,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
641 case XenbusStateReconfiguring: 641 case XenbusStateReconfiguring:
642 case XenbusStateReconfigured: 642 case XenbusStateReconfigured:
643 case XenbusStateUnknown: 643 case XenbusStateUnknown:
644 case XenbusStateClosed:
645 break; 644 break;
646 645
647 case XenbusStateInitWait: 646 case XenbusStateInitWait:
@@ -670,6 +669,10 @@ InitWait:
670 info->feature_resize = val; 669 info->feature_resize = val;
671 break; 670 break;
672 671
672 case XenbusStateClosed:
673 if (dev->state == XenbusStateClosed)
674 break;
675 /* Missed the backend's CLOSING state -- fallthrough */
673 case XenbusStateClosing: 676 case XenbusStateClosing:
674 xenbus_frontend_closed(dev); 677 xenbus_frontend_closed(dev);
675 break; 678 break;
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index d4dffcd52873..126d8ce591ce 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -3,6 +3,7 @@ menu "Xen driver support"
3 3
4config XEN_BALLOON 4config XEN_BALLOON
5 bool "Xen memory balloon driver" 5 bool "Xen memory balloon driver"
6 depends on !ARM
6 default y 7 default y
7 help 8 help
8 The balloon driver allows the Xen domain to request more memory from 9 The balloon driver allows the Xen domain to request more memory from
@@ -145,6 +146,7 @@ config SWIOTLB_XEN
145 146
146config XEN_TMEM 147config XEN_TMEM
147 bool 148 bool
149 depends on !ARM
148 default y if (CLEANCACHE || FRONTSWAP) 150 default y if (CLEANCACHE || FRONTSWAP)
149 help 151 help
150 Shim to interface in-kernel Transcendent Memory hooks 152 Shim to interface in-kernel Transcendent Memory hooks
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 31ab82fda38a..d6886d90ccfd 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -55,7 +55,6 @@
55#include <asm/pgalloc.h> 55#include <asm/pgalloc.h>
56#include <asm/pgtable.h> 56#include <asm/pgtable.h>
57#include <asm/tlb.h> 57#include <asm/tlb.h>
58#include <asm/e820.h>
59 58
60#include <asm/xen/hypervisor.h> 59#include <asm/xen/hypervisor.h>
61#include <asm/xen/hypercall.h> 60#include <asm/xen/hypercall.h>
@@ -88,7 +87,7 @@ struct balloon_stats balloon_stats;
88EXPORT_SYMBOL_GPL(balloon_stats); 87EXPORT_SYMBOL_GPL(balloon_stats);
89 88
90/* We increase/decrease in batches which fit in a page */ 89/* We increase/decrease in batches which fit in a page */
91static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; 90static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
92 91
93#ifdef CONFIG_HIGHMEM 92#ifdef CONFIG_HIGHMEM
94#define inc_totalhigh_pages() (totalhigh_pages++) 93#define inc_totalhigh_pages() (totalhigh_pages++)
diff --git a/drivers/xen/dbgp.c b/drivers/xen/dbgp.c
index 42569c77ccc8..f3ccc80a455f 100644
--- a/drivers/xen/dbgp.c
+++ b/drivers/xen/dbgp.c
@@ -8,7 +8,9 @@
8 8
9static int xen_dbgp_op(struct usb_hcd *hcd, int op) 9static int xen_dbgp_op(struct usb_hcd *hcd, int op)
10{ 10{
11#ifdef CONFIG_PCI
11 const struct device *ctrlr = hcd_to_bus(hcd)->controller; 12 const struct device *ctrlr = hcd_to_bus(hcd)->controller;
13#endif
12 struct physdev_dbgp_op dbgp; 14 struct physdev_dbgp_op dbgp;
13 15
14 if (!xen_initial_domain()) 16 if (!xen_initial_domain())
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 59e10a1286d5..912ac81b6dbf 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -115,7 +115,9 @@ struct irq_info {
115#define PIRQ_SHAREABLE (1 << 1) 115#define PIRQ_SHAREABLE (1 << 1)
116 116
117static int *evtchn_to_irq; 117static int *evtchn_to_irq;
118#ifdef CONFIG_X86
118static unsigned long *pirq_eoi_map; 119static unsigned long *pirq_eoi_map;
120#endif
119static bool (*pirq_needs_eoi)(unsigned irq); 121static bool (*pirq_needs_eoi)(unsigned irq);
120 122
121static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG], 123static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
@@ -277,10 +279,12 @@ static unsigned int cpu_from_evtchn(unsigned int evtchn)
277 return ret; 279 return ret;
278} 280}
279 281
282#ifdef CONFIG_X86
280static bool pirq_check_eoi_map(unsigned irq) 283static bool pirq_check_eoi_map(unsigned irq)
281{ 284{
282 return test_bit(pirq_from_irq(irq), pirq_eoi_map); 285 return test_bit(pirq_from_irq(irq), pirq_eoi_map);
283} 286}
287#endif
284 288
285static bool pirq_needs_eoi_flag(unsigned irq) 289static bool pirq_needs_eoi_flag(unsigned irq)
286{ 290{
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 610bfc6be177..2e22df2f7a3f 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -105,6 +105,21 @@ static void gntdev_print_maps(struct gntdev_priv *priv,
105#endif 105#endif
106} 106}
107 107
108static void gntdev_free_map(struct grant_map *map)
109{
110 if (map == NULL)
111 return;
112
113 if (map->pages)
114 free_xenballooned_pages(map->count, map->pages);
115 kfree(map->pages);
116 kfree(map->grants);
117 kfree(map->map_ops);
118 kfree(map->unmap_ops);
119 kfree(map->kmap_ops);
120 kfree(map);
121}
122
108static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count) 123static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
109{ 124{
110 struct grant_map *add; 125 struct grant_map *add;
@@ -142,12 +157,7 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
142 return add; 157 return add;
143 158
144err: 159err:
145 kfree(add->pages); 160 gntdev_free_map(add);
146 kfree(add->grants);
147 kfree(add->map_ops);
148 kfree(add->unmap_ops);
149 kfree(add->kmap_ops);
150 kfree(add);
151 return NULL; 161 return NULL;
152} 162}
153 163
@@ -198,17 +208,9 @@ static void gntdev_put_map(struct grant_map *map)
198 evtchn_put(map->notify.event); 208 evtchn_put(map->notify.event);
199 } 209 }
200 210
201 if (map->pages) { 211 if (map->pages && !use_ptemod)
202 if (!use_ptemod) 212 unmap_grant_pages(map, 0, map->count);
203 unmap_grant_pages(map, 0, map->count); 213 gntdev_free_map(map);
204
205 free_xenballooned_pages(map->count, map->pages);
206 }
207 kfree(map->pages);
208 kfree(map->grants);
209 kfree(map->map_ops);
210 kfree(map->unmap_ops);
211 kfree(map);
212} 214}
213 215
214/* ------------------------------------------------------------------ */ 216/* ------------------------------------------------------------------ */
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index b2b0a375b348..b91f14e83164 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -84,7 +84,7 @@ struct gnttab_ops {
84 * nr_gframes is the number of frames to map grant table. Returning 84 * nr_gframes is the number of frames to map grant table. Returning
85 * GNTST_okay means success and negative value means failure. 85 * GNTST_okay means success and negative value means failure.
86 */ 86 */
87 int (*map_frames)(unsigned long *frames, unsigned int nr_gframes); 87 int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
88 /* 88 /*
89 * Release a list of frames which are mapped in map_frames for grant 89 * Release a list of frames which are mapped in map_frames for grant
90 * entry status. 90 * entry status.
@@ -960,7 +960,7 @@ static unsigned nr_status_frames(unsigned nr_grant_frames)
960 return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP; 960 return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP;
961} 961}
962 962
963static int gnttab_map_frames_v1(unsigned long *frames, unsigned int nr_gframes) 963static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
964{ 964{
965 int rc; 965 int rc;
966 966
@@ -977,7 +977,7 @@ static void gnttab_unmap_frames_v1(void)
977 arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames); 977 arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
978} 978}
979 979
980static int gnttab_map_frames_v2(unsigned long *frames, unsigned int nr_gframes) 980static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
981{ 981{
982 uint64_t *sframes; 982 uint64_t *sframes;
983 unsigned int nr_sframes; 983 unsigned int nr_sframes;
@@ -1029,7 +1029,7 @@ static void gnttab_unmap_frames_v2(void)
1029static int gnttab_map(unsigned int start_idx, unsigned int end_idx) 1029static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
1030{ 1030{
1031 struct gnttab_setup_table setup; 1031 struct gnttab_setup_table setup;
1032 unsigned long *frames; 1032 xen_pfn_t *frames;
1033 unsigned int nr_gframes = end_idx + 1; 1033 unsigned int nr_gframes = end_idx + 1;
1034 int rc; 1034 int rc;
1035 1035
diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
index 5e5ad7e28858..96453f8a85c5 100644
--- a/drivers/xen/sys-hypervisor.c
+++ b/drivers/xen/sys-hypervisor.c
@@ -11,6 +11,7 @@
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/kobject.h> 13#include <linux/kobject.h>
14#include <linux/err.h>
14 15
15#include <asm/xen/hypervisor.h> 16#include <asm/xen/hypervisor.h>
16#include <asm/xen/hypercall.h> 17#include <asm/xen/hypercall.h>
@@ -284,7 +285,8 @@ static ssize_t virtual_start_show(struct hyp_sysfs_attr *attr, char *buffer)
284 ret = HYPERVISOR_xen_version(XENVER_platform_parameters, 285 ret = HYPERVISOR_xen_version(XENVER_platform_parameters,
285 parms); 286 parms);
286 if (!ret) 287 if (!ret)
287 ret = sprintf(buffer, "%lx\n", parms->virt_start); 288 ret = sprintf(buffer, "%"PRI_xen_ulong"\n",
289 parms->virt_start);
288 kfree(parms); 290 kfree(parms);
289 } 291 }
290 292
diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
index 46d140baebd8..0f478ac483cd 100644
--- a/drivers/xen/xen-pciback/vpci.c
+++ b/drivers/xen/xen-pciback/vpci.c
@@ -89,9 +89,15 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
89 89
90 mutex_lock(&vpci_dev->lock); 90 mutex_lock(&vpci_dev->lock);
91 91
92 /* Keep multi-function devices together on the virtual PCI bus */ 92 /*
93 for (slot = 0; slot < PCI_SLOT_MAX; slot++) { 93 * Keep multi-function devices together on the virtual PCI bus, except
94 if (!list_empty(&vpci_dev->dev_list[slot])) { 94 * virtual functions.
95 */
96 if (!dev->is_virtfn) {
97 for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
98 if (list_empty(&vpci_dev->dev_list[slot]))
99 continue;
100
95 t = list_entry(list_first(&vpci_dev->dev_list[slot]), 101 t = list_entry(list_first(&vpci_dev->dev_list[slot]),
96 struct pci_dev_entry, list); 102 struct pci_dev_entry, list);
97 103
@@ -116,7 +122,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
116 pci_name(dev), slot); 122 pci_name(dev), slot);
117 list_add_tail(&dev_entry->list, 123 list_add_tail(&dev_entry->list,
118 &vpci_dev->dev_list[slot]); 124 &vpci_dev->dev_list[slot]);
119 func = PCI_FUNC(dev->devfn); 125 func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
120 goto unlock; 126 goto unlock;
121 } 127 }
122 } 128 }
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 89f76252a16f..ac727028e658 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -458,7 +458,7 @@ static ssize_t xenbus_file_write(struct file *filp,
458 goto out; 458 goto out;
459 459
460 /* Can't write a xenbus message larger we can buffer */ 460 /* Can't write a xenbus message larger we can buffer */
461 if ((len + u->len) > sizeof(u->u.buffer)) { 461 if (len > sizeof(u->u.buffer) - u->len) {
462 /* On error, dump existing buffer */ 462 /* On error, dump existing buffer */
463 u->len = 0; 463 u->len = 0;
464 rc = -EINVAL; 464 rc = -EINVAL;
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 131dec04794e..acedeabe589c 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -48,6 +48,7 @@
48#include <xen/xenbus.h> 48#include <xen/xenbus.h>
49#include <xen/xen.h> 49#include <xen/xen.h>
50#include "xenbus_comms.h" 50#include "xenbus_comms.h"
51#include <asm/xen/hypervisor.h>
51 52
52struct xs_stored_msg { 53struct xs_stored_msg {
53 struct list_head list; 54 struct list_head list;
@@ -618,7 +619,26 @@ static struct xenbus_watch *find_watch(const char *token)
618 619
619 return NULL; 620 return NULL;
620} 621}
622/*
623 * Certain older XenBus toolstack cannot handle reading values that are
624 * not populated. Some Xen 3.4 installation are incapable of doing this
625 * so if we are running on anything older than 4 do not attempt to read
626 * control/platform-feature-xs_reset_watches.
627 */
628static bool xen_strict_xenbus_quirk(void)
629{
630#ifdef CONFIG_X86
631 uint32_t eax, ebx, ecx, edx, base;
632
633 base = xen_cpuid_base();
634 cpuid(base + 1, &eax, &ebx, &ecx, &edx);
621 635
636 if ((eax >> 16) < 4)
637 return true;
638#endif
639 return false;
640
641}
622static void xs_reset_watches(void) 642static void xs_reset_watches(void)
623{ 643{
624 int err, supported = 0; 644 int err, supported = 0;
@@ -626,6 +646,9 @@ static void xs_reset_watches(void)
626 if (!xen_hvm_domain() || xen_initial_domain()) 646 if (!xen_hvm_domain() || xen_initial_domain())
627 return; 647 return;
628 648
649 if (xen_strict_xenbus_quirk())
650 return;
651
629 err = xenbus_scanf(XBT_NIL, "control", 652 err = xenbus_scanf(XBT_NIL, "control",
630 "platform-feature-xs_reset_watches", "%d", &supported); 653 "platform-feature-xs_reset_watches", "%d", &supported);
631 if (err != 1 || !supported) 654 if (err != 1 || !supported)